From 8124967dc87701ecd962674f4f392e7f5d5cbf5f Mon Sep 17 00:00:00 2001 From: Emil Ernerfeldt Date: Mon, 15 Jul 2024 13:09:27 +0200 Subject: [PATCH] [4/4] Remove `TensorBuffer::JPEG`, `DecodedTensor`, `TensorDecodeCache` (#6884) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Part of https://github.com/rerun-io/rerun/issues/6844 * Closes https://github.com/rerun-io/rerun/issues/3803 ⚠️ This breaks any existing `JPEG`-encoded RRDs ### Rust API * Removed `TensorBuffer::JPEG` * Removed `TensorData::from_jpeg_bytes` * Deprecated `Image::from_file_path` and `from_file_contents` For all of these, use `ImageEncoded` instead. ### PR train * Prev: https://github.com/rerun-io/rerun/pull/6882 * Prev: https://github.com/rerun-io/rerun/pull/6874 * Prev: https://github.com/rerun-io/rerun/pull/6883 ### Checklist * [x] I have read and agree to [Contributor Guide](https://github.com/rerun-io/rerun/blob/main/CONTRIBUTING.md) and the [Code of Conduct](https://github.com/rerun-io/rerun/blob/main/CODE_OF_CONDUCT.md) * [x] I've included a screenshot or gif (if applicable) * [x] I have tested the web demo (if applicable): * Using examples from latest `main` build: [rerun.io/viewer](https://rerun.io/viewer/pr/6884?manifest_url=https://app.rerun.io/version/main/examples_manifest.json) * Using full set of examples from `nightly` build: [rerun.io/viewer](https://rerun.io/viewer/pr/6884?manifest_url=https://app.rerun.io/version/nightly/examples_manifest.json) * [x] The PR title and labels are set such as to maximize their usefulness for the next release's CHANGELOG * [x] If applicable, add a new check to the [release checklist](https://github.com/rerun-io/rerun/blob/main/tests/python/release_checklist)! 
* [x] If have noted any breaking changes to the log API in `CHANGELOG.md` and the migration guide - [PR Build Summary](https://build.rerun.io/pr/6884) - [Recent benchmark results](https://build.rerun.io/graphs/crates.html) - [Wasm size tracking](https://build.rerun.io/graphs/sizes.html) To run all checks from `main`, comment on the PR with `@rerun-bot full-check`. --- .../re_data_loader/src/loader_archetype.rs | 11 +- crates/store/re_types/Cargo.toml | 2 +- .../rerun/components/tensor_data.fbs | 6 +- .../rerun/datatypes/tensor_buffer.fbs | 8 - .../rerun/datatypes/tensor_data.fbs | 6 +- .../re_types/src/archetypes/asset3d_ext.rs | 8 +- .../src/archetypes/image_encoded_ext.rs | 8 +- .../re_types/src/archetypes/image_ext.rs | 28 +-- .../re_types/src/archetypes/tensor_ext.rs | 44 ++++ .../re_types/src/components/tensor_data.rs | 6 +- .../re_types/src/datatypes/tensor_buffer.rs | 160 +------------- .../src/datatypes/tensor_buffer_ext.rs | 5 +- .../re_types/src/datatypes/tensor_data.rs | 6 +- .../re_types/src/datatypes/tensor_data_ext.rs | 155 +++++++------ crates/store/re_types/src/tensor_data.rs | 208 +----------------- crates/viewer/re_data_ui/src/image.rs | 142 ++++-------- .../src/space_view_class.rs | 7 - .../src/pickable_image.rs | 4 +- crates/viewer/re_space_view_spatial/src/ui.rs | 50 ++--- .../src/visualizers/depth_images.rs | 29 +-- .../src/visualizers/image_encoded.rs | 2 +- .../src/visualizers/images.rs | 14 +- .../src/visualizers/segmentation_images.rs | 14 +- .../visualizers/utilities/textured_rect.rs | 7 +- .../src/space_view_class.rs | 12 +- .../src/tensor_slice_to_gpu.rs | 12 +- .../src/visualizer_system.rs | 33 +-- crates/viewer/re_viewer/src/reflection/mod.rs | 2 +- .../src/gpu_bridge/tensor_to_gpu.rs | 33 +-- crates/viewer/re_viewer_context/src/lib.rs | 2 +- .../src/tensor/image_decode_cache.rs | 10 +- .../re_viewer_context/src/tensor/mod.rs | 2 - .../src/tensor/tensor_decode_cache.rs | 115 ---------- .../reference/migration/migration-0-18.md | 12 + 
.../reference/types/components/tensor_data.md | 6 +- .../types/datatypes/tensor_buffer.md | 1 - .../reference/types/datatypes/tensor_data.md | 6 +- .../src/rerun/components/tensor_data.hpp | 6 +- .../src/rerun/datatypes/tensor_buffer.cpp | 12 - .../src/rerun/datatypes/tensor_buffer.hpp | 31 +-- .../src/rerun/datatypes/tensor_buffer_ext.cpp | 7 +- rerun_cpp/src/rerun/datatypes/tensor_data.hpp | 6 +- .../rerun_sdk/rerun/archetypes/image_ext.py | 11 - .../rerun_sdk/rerun/components/tensor_data.py | 6 +- .../rerun/datatypes/tensor_buffer.py | 16 +- .../rerun_sdk/rerun/datatypes/tensor_data.py | 12 +- .../rerun/datatypes/tensor_data_ext.py | 7 +- 47 files changed, 312 insertions(+), 978 deletions(-) delete mode 100644 crates/viewer/re_viewer_context/src/tensor/tensor_decode_cache.rs diff --git a/crates/store/re_data_loader/src/loader_archetype.rs b/crates/store/re_data_loader/src/loader_archetype.rs index 80ed003eb200..0e59512b3df3 100644 --- a/crates/store/re_data_loader/src/loader_archetype.rs +++ b/crates/store/re_data_loader/src/loader_archetype.rs @@ -1,5 +1,6 @@ use re_chunk::{Chunk, RowId}; use re_log_types::{EntityPath, TimeInt, TimePoint}; +use re_types::components::MediaType; use crate::{DataLoader, DataLoaderError, LoadedData}; @@ -138,10 +139,12 @@ fn load_image( let rows = [ { - let arch = re_types::archetypes::Image::from_file_contents( - contents, - image::ImageFormat::from_path(filepath).ok(), - )?; + let mut arch = re_types::archetypes::ImageEncoded::from_file_contents(contents); + + if let Ok(format) = image::ImageFormat::from_path(filepath) { + arch.media_type = Some(MediaType::from(format.to_mime_type())); + } + Chunk::builder(entity_path) .with_archetype(RowId::new(), timepoint, &arch) .build()? 
diff --git a/crates/store/re_types/Cargo.toml b/crates/store/re_types/Cargo.toml index 494869f0b54f..3ba68c8b578b 100644 --- a/crates/store/re_types/Cargo.toml +++ b/crates/store/re_types/Cargo.toml @@ -55,7 +55,7 @@ re_tracing.workspace = true re_types_core.workspace = true # External -anyhow.workspace = true +anyhow.workspace = true # TODO(#1845): Use thiserror instead array-init.workspace = true arrow2 = { workspace = true, features = [ "io_ipc", diff --git a/crates/store/re_types/definitions/rerun/components/tensor_data.fbs b/crates/store/re_types/definitions/rerun/components/tensor_data.fbs index f6d85ce9997d..fa139f7f875d 100644 --- a/crates/store/re_types/definitions/rerun/components/tensor_data.fbs +++ b/crates/store/re_types/definitions/rerun/components/tensor_data.fbs @@ -18,10 +18,8 @@ namespace rerun.components; /// These dimensions are combined with an index to look up values from the `buffer` field, /// which stores a contiguous array of typed values. /// -/// Note that the buffer may be encoded in a compressed format such as `jpeg` or -/// in a format with downsampled chroma, such as NV12 or YUY2. -/// For file formats, the shape is used as a hint, for chroma downsampled format -/// the shape has to be the shape of the decoded image. +/// Note that the buffer may in a format with downsampled chroma, such as NV12 or YUY2. +/// For chroma downsampled formats the shape has to be the shape of the decoded image. table TensorData ( "attr.arrow.transparent", "attr.rust.derive": "Default, PartialEq", diff --git a/crates/store/re_types/definitions/rerun/datatypes/tensor_buffer.fbs b/crates/store/re_types/definitions/rerun/datatypes/tensor_buffer.fbs index 551d6f9d1fcf..2af18fe3fb14 100644 --- a/crates/store/re_types/definitions/rerun/datatypes/tensor_buffer.fbs +++ b/crates/store/re_types/definitions/rerun/datatypes/tensor_buffer.fbs @@ -49,11 +49,6 @@ table F64Buffer(order: 100, transparent) { data: [double] (order: 100); } -/// Raw bytes of a JPEG file. 
-table JPEGBuffer(order: 100, transparent) { - data: [ubyte] (order: 100); -} - table NV12Buffer(order: 100, transparent) { data: [ubyte] (order: 100); } @@ -102,9 +97,6 @@ union TensorBuffer ( /// 64bit IEEE-754 floating point, also known as `double`. F64: F64Buffer (transparent), - /// Raw bytes of a JPEG file. - JPEG: JPEGBuffer (transparent), - /// NV12 is a YUV 4:2:0 chroma downsamples format with 8 bits per channel. /// /// First comes entire image in Y, followed by interleaved lines ordered as U0, V0, U1, V1, etc. diff --git a/crates/store/re_types/definitions/rerun/datatypes/tensor_data.fbs b/crates/store/re_types/definitions/rerun/datatypes/tensor_data.fbs index f6bb305cef94..2f80053c895a 100644 --- a/crates/store/re_types/definitions/rerun/datatypes/tensor_data.fbs +++ b/crates/store/re_types/definitions/rerun/datatypes/tensor_data.fbs @@ -17,10 +17,8 @@ namespace rerun.datatypes; /// These dimensions are combined with an index to look up values from the `buffer` field, /// which stores a contiguous array of typed values. /// -/// Note that the buffer may be encoded in a compressed format such as `jpeg` or -/// in a format with downsampled chroma, such as NV12 or YUY2. -/// For file formats, the shape is used as a hint, for chroma downsampled format -/// the shape has to be the shape of the decoded image. +/// Note that the buffer may in a format with downsampled chroma, such as NV12 or YUY2. +/// For chroma downsampled formats the shape has to be the shape of the decoded image. 
table TensorData ( "attr.python.aliases": "npt.ArrayLike", "attr.python.array_aliases": "npt.ArrayLike", diff --git a/crates/store/re_types/src/archetypes/asset3d_ext.rs b/crates/store/re_types/src/archetypes/asset3d_ext.rs index 98c55c058ec7..ad11edb9ba5b 100644 --- a/crates/store/re_types/src/archetypes/asset3d_ext.rs +++ b/crates/store/re_types/src/archetypes/asset3d_ext.rs @@ -10,13 +10,13 @@ impl Asset3D { /// /// If no [`MediaType`] can be guessed at the moment, the Rerun Viewer will try to guess one /// from the data at render-time. If it can't, rendering will fail with an error. + /// + /// Returns an error if the file cannot be read. #[cfg(not(target_arch = "wasm32"))] #[inline] - pub fn from_file(filepath: impl AsRef) -> anyhow::Result { - use anyhow::Context as _; + pub fn from_file(filepath: impl AsRef) -> std::io::Result { let filepath = filepath.as_ref(); - let contents = std::fs::read(filepath) - .with_context(|| format!("could not read file contents: {filepath:?}"))?; + let contents = std::fs::read(filepath)?; Ok(Self::from_file_contents( contents, MediaType::guess_from_path(filepath), diff --git a/crates/store/re_types/src/archetypes/image_encoded_ext.rs b/crates/store/re_types/src/archetypes/image_encoded_ext.rs index 81e64abf3071..bde2eb3a5e5a 100644 --- a/crates/store/re_types/src/archetypes/image_encoded_ext.rs +++ b/crates/store/re_types/src/archetypes/image_encoded_ext.rs @@ -6,13 +6,13 @@ impl ImageEncoded { /// Creates a new image from the file contents at `path`. /// /// The [`MediaType`][crate::components::MediaType] will first be guessed from the file contents. + /// + /// Returns an error if the file cannot be read. 
#[cfg(not(target_arch = "wasm32"))] #[inline] - pub fn from_file(filepath: impl AsRef) -> anyhow::Result { - use anyhow::Context as _; + pub fn from_file(filepath: impl AsRef) -> std::io::Result { let filepath = filepath.as_ref(); - let contents = std::fs::read(filepath) - .with_context(|| format!("could not read file contents: {filepath:?}"))?; + let contents = std::fs::read(filepath)?; Ok(Self::from_file_contents(contents)) } diff --git a/crates/store/re_types/src/archetypes/image_ext.rs b/crates/store/re_types/src/archetypes/image_ext.rs index fd505ce36e5c..919891e0fb06 100644 --- a/crates/store/re_types/src/archetypes/image_ext.rs +++ b/crates/store/re_types/src/archetypes/image_ext.rs @@ -3,6 +3,8 @@ use crate::{ image::{find_non_empty_dim_indices, ImageConstructionError}, }; +use super::ImageEncoded; + use super::Image; impl Image { @@ -48,36 +50,24 @@ impl Image { /// Creates a new [`Image`] from a file. /// /// The image format will be inferred from the path (extension), or the contents if that fails. - #[cfg(feature = "image")] + #[deprecated = "Use ImageEncoded::from_file instead"] #[cfg(not(target_arch = "wasm32"))] #[inline] - pub fn from_file_path(filepath: impl AsRef) -> anyhow::Result { - let filepath = filepath.as_ref(); - Ok(Self::new(crate::datatypes::TensorData::from_image_file( - filepath, - )?)) + pub fn from_file_path(filepath: impl AsRef) -> std::io::Result { + ImageEncoded::from_file(filepath) } /// Creates a new [`Image`] from the contents of a file. /// /// If unspecified, the image format will be inferred from the contents. + #[deprecated = "Use ImageEncoded::from_file_contents instead"] #[cfg(feature = "image")] #[inline] pub fn from_file_contents( contents: Vec, - format: Option, - ) -> anyhow::Result { - let format = if let Some(format) = format { - format - } else { - image::guess_format(&contents)? 
- }; - - let tensor = crate::components::TensorData(crate::datatypes::TensorData::from_image_bytes( - contents, format, - )?); - - Ok(Self::new(tensor)) + _format: Option, + ) -> ImageEncoded { + ImageEncoded::from_file_contents(contents) } } diff --git a/crates/store/re_types/src/archetypes/tensor_ext.rs b/crates/store/re_types/src/archetypes/tensor_ext.rs index 5a9e0a1e8496..9be45f97550b 100644 --- a/crates/store/re_types/src/archetypes/tensor_ext.rs +++ b/crates/store/re_types/src/archetypes/tensor_ext.rs @@ -54,6 +54,50 @@ impl Tensor { } } +#[cfg(feature = "image")] +impl Tensor { + /// Construct a tensor from something that can be turned into a [`image::DynamicImage`]. + /// + /// Requires the `image` feature. + pub fn from_image( + image: impl Into, + ) -> Result { + TensorData::from_image(image).map(|data| Self { data: data.into() }) + } + + /// Construct a tensor from [`image::DynamicImage`]. + /// + /// Requires the `image` feature. + pub fn from_dynamic_image( + image: image::DynamicImage, + ) -> Result { + TensorData::from_dynamic_image(image).map(|data| Self { data: data.into() }) + } +} + +impl AsRef for Tensor { + #[inline(always)] + fn as_ref(&self) -> &TensorData { + &self.data + } +} + +impl std::ops::Deref for Tensor { + type Target = TensorData; + + #[inline(always)] + fn deref(&self) -> &TensorData { + &self.data + } +} + +impl std::borrow::Borrow for Tensor { + #[inline(always)] + fn borrow(&self) -> &TensorData { + &self.data + } +} + // ---------------------------------------------------------------------------- // Make it possible to create an ArrayView directly from a Tensor. 
diff --git a/crates/store/re_types/src/components/tensor_data.rs b/crates/store/re_types/src/components/tensor_data.rs index 9d3d2dff741b..3076398d2bf4 100644 --- a/crates/store/re_types/src/components/tensor_data.rs +++ b/crates/store/re_types/src/components/tensor_data.rs @@ -27,10 +27,8 @@ use ::re_types_core::{DeserializationError, DeserializationResult}; /// These dimensions are combined with an index to look up values from the `buffer` field, /// which stores a contiguous array of typed values. /// -/// Note that the buffer may be encoded in a compressed format such as `jpeg` or -/// in a format with downsampled chroma, such as NV12 or YUY2. -/// For file formats, the shape is used as a hint, for chroma downsampled format -/// the shape has to be the shape of the decoded image. +/// Note that the buffer may in a format with downsampled chroma, such as NV12 or YUY2. +/// For chroma downsampled formats the shape has to be the shape of the decoded image. #[derive(Clone, Debug, Default, PartialEq)] #[repr(transparent)] pub struct TensorData(pub crate::datatypes::TensorData); diff --git a/crates/store/re_types/src/datatypes/tensor_buffer.rs b/crates/store/re_types/src/datatypes/tensor_buffer.rs index 9c21017aca33..4ab1943a7c25 100644 --- a/crates/store/re_types/src/datatypes/tensor_buffer.rs +++ b/crates/store/re_types/src/datatypes/tensor_buffer.rs @@ -56,9 +56,6 @@ pub enum TensorBuffer { /// 64bit IEEE-754 floating point, also known as `double`. F64(::re_types_core::ArrowBuffer), - /// Raw bytes of a JPEG file. - Jpeg(::re_types_core::ArrowBuffer), - /// NV12 is a YUV 4:2:0 chroma downsamples format with 8 bits per channel. /// /// First comes entire image in Y, followed by interleaved lines ordered as U0, V0, U1, V1, etc. 
@@ -86,7 +83,6 @@ impl ::re_types_core::SizeBytes for TensorBuffer { Self::F16(v) => v.heap_size_bytes(), Self::F32(v) => v.heap_size_bytes(), Self::F64(v) => v.heap_size_bytes(), - Self::Jpeg(v) => v.heap_size_bytes(), Self::Nv12(v) => v.heap_size_bytes(), Self::Yuy2(v) => v.heap_size_bytes(), } @@ -107,7 +103,6 @@ impl ::re_types_core::SizeBytes for TensorBuffer { && <::re_types_core::ArrowBuffer>::is_pod() && <::re_types_core::ArrowBuffer>::is_pod() && <::re_types_core::ArrowBuffer>::is_pod() - && <::re_types_core::ArrowBuffer>::is_pod() } } @@ -227,15 +222,6 @@ impl ::re_types_core::Loggable for TensorBuffer { ))), false, ), - Field::new( - "JPEG", - DataType::List(std::sync::Arc::new(Field::new( - "item", - DataType::UInt8, - false, - ))), - false, - ), Field::new( "NV12", DataType::List(std::sync::Arc::new(Field::new( @@ -257,7 +243,7 @@ impl ::re_types_core::Loggable for TensorBuffer { ]), Some(std::sync::Arc::new(vec![ 0i32, 1i32, 2i32, 3i32, 4i32, 5i32, 6i32, 7i32, 8i32, 9i32, 10i32, 11i32, 12i32, - 13i32, 14i32, + 13i32, ])), UnionMode::Dense, ) @@ -296,9 +282,8 @@ impl ::re_types_core::Loggable for TensorBuffer { Some(Self::F16(_)) => 9i8, Some(Self::F32(_)) => 10i8, Some(Self::F64(_)) => 11i8, - Some(Self::Jpeg(_)) => 12i8, - Some(Self::Nv12(_)) => 13i8, - Some(Self::Yuy2(_)) => 14i8, + Some(Self::Nv12(_)) => 12i8, + Some(Self::Yuy2(_)) => 13i8, }) .collect(); let fields = vec![ @@ -711,46 +696,6 @@ impl ::re_types_core::Loggable for TensorBuffer { .boxed() } }, - { - let jpeg: Vec<_> = data - .iter() - .filter_map(|datum| match datum.as_deref() { - Some(Self::Jpeg(v)) => Some(v.clone()), - _ => None, - }) - .collect(); - let jpeg_bitmap: Option = None; - { - use arrow2::{buffer::Buffer, offset::OffsetsBuffer}; - let offsets = arrow2::offset::Offsets::::try_from_lengths( - jpeg.iter().map(|datum| datum.num_instances()), - )? 
- .into(); - let jpeg_inner_data: Buffer<_> = jpeg - .iter() - .map(|b| b.as_slice()) - .collect::>() - .concat() - .into(); - let jpeg_inner_bitmap: Option = None; - ListArray::try_new( - DataType::List(std::sync::Arc::new(Field::new( - "item", - DataType::UInt8, - false, - ))), - offsets, - PrimitiveArray::new( - DataType::UInt8, - jpeg_inner_data, - jpeg_inner_bitmap, - ) - .boxed(), - jpeg_bitmap, - )? - .boxed() - } - }, { let nv12: Vec<_> = data .iter() @@ -844,7 +789,6 @@ impl ::re_types_core::Loggable for TensorBuffer { let mut f16_offset = 0; let mut f32_offset = 0; let mut f64_offset = 0; - let mut jpeg_offset = 0; let mut nv12_offset = 0; let mut yuy2_offset = 0; let mut nulls_offset = 0; @@ -910,11 +854,6 @@ impl ::re_types_core::Loggable for TensorBuffer { f64_offset += 1; offset } - Some(Self::Jpeg(_)) => { - let offset = jpeg_offset; - jpeg_offset += 1; - offset - } Some(Self::Nv12(_)) => { let offset = nv12_offset; nv12_offset += 1; @@ -1719,79 +1658,11 @@ impl ::re_types_core::Loggable for TensorBuffer { } .collect::>() }; - let jpeg = { + let nv12 = { if 12usize >= arrow_data_arrays.len() { return Ok(Vec::new()); } let arrow_data = &*arrow_data_arrays[12usize]; - { - let arrow_data = arrow_data - .as_any() - .downcast_ref::>() - .ok_or_else(|| { - let expected = DataType::List(std::sync::Arc::new(Field::new( - "item", - DataType::UInt8, - false, - ))); - let actual = arrow_data.data_type().clone(); - DeserializationError::datatype_mismatch(expected, actual) - }) - .with_context("rerun.datatypes.TensorBuffer#JPEG")?; - if arrow_data.is_empty() { - Vec::new() - } else { - let arrow_data_inner = { - let arrow_data_inner = &**arrow_data.values(); - arrow_data_inner - .as_any() - .downcast_ref::() - .ok_or_else(|| { - let expected = DataType::UInt8; - let actual = arrow_data_inner.data_type().clone(); - DeserializationError::datatype_mismatch(expected, actual) - }) - .with_context("rerun.datatypes.TensorBuffer#JPEG")? 
- .values() - }; - let offsets = arrow_data.offsets(); - arrow2::bitmap::utils::ZipValidity::new_with_validity( - offsets.iter().zip(offsets.lengths()), - arrow_data.validity(), - ) - .map(|elem| { - elem.map(|(start, len)| { - let start = *start as usize; - let end = start + len; - if end > arrow_data_inner.len() { - return Err(DeserializationError::offset_slice_oob( - (start, end), - arrow_data_inner.len(), - )); - } - - #[allow(unsafe_code, clippy::undocumented_unsafe_blocks)] - let data = unsafe { - arrow_data_inner - .clone() - .sliced_unchecked(start, end - start) - }; - let data = ::re_types_core::ArrowBuffer::from(data); - Ok(data) - }) - .transpose() - }) - .collect::>>>()? - } - .into_iter() - } - .collect::>() - }; - let nv12 = { - if 13usize >= arrow_data_arrays.len() { - return Ok(Vec::new()); - } - let arrow_data = &*arrow_data_arrays[13usize]; { let arrow_data = arrow_data .as_any() @@ -1856,10 +1727,10 @@ impl ::re_types_core::Loggable for TensorBuffer { .collect::>() }; let yuy2 = { - if 14usize >= arrow_data_arrays.len() { + if 13usize >= arrow_data_arrays.len() { return Ok(Vec::new()); } - let arrow_data = &*arrow_data_arrays[14usize]; + let arrow_data = &*arrow_data_arrays[13usize]; { let arrow_data = arrow_data .as_any() @@ -2097,22 +1968,7 @@ impl ::re_types_core::Loggable for TensorBuffer { .ok_or_else(DeserializationError::missing_data) .with_context("rerun.datatypes.TensorBuffer#F64")? }), - 12i8 => Self::Jpeg({ - if offset as usize >= jpeg.len() { - return Err(DeserializationError::offset_oob( - offset as _, - jpeg.len(), - )) - .with_context("rerun.datatypes.TensorBuffer#JPEG"); - } - - #[allow(unsafe_code, clippy::undocumented_unsafe_blocks)] - unsafe { jpeg.get_unchecked(offset as usize) } - .clone() - .ok_or_else(DeserializationError::missing_data) - .with_context("rerun.datatypes.TensorBuffer#JPEG")? 
- }), - 13i8 => Self::Nv12({ + 12i8 => Self::Nv12({ if offset as usize >= nv12.len() { return Err(DeserializationError::offset_oob( offset as _, @@ -2127,7 +1983,7 @@ impl ::re_types_core::Loggable for TensorBuffer { .ok_or_else(DeserializationError::missing_data) .with_context("rerun.datatypes.TensorBuffer#NV12")? }), - 14i8 => Self::Yuy2({ + 13i8 => Self::Yuy2({ if offset as usize >= yuy2.len() { return Err(DeserializationError::offset_oob( offset as _, diff --git a/crates/store/re_types/src/datatypes/tensor_buffer_ext.rs b/crates/store/re_types/src/datatypes/tensor_buffer_ext.rs index e58293e9309d..2dd92f4daea4 100644 --- a/crates/store/re_types/src/datatypes/tensor_buffer_ext.rs +++ b/crates/store/re_types/src/datatypes/tensor_buffer_ext.rs @@ -18,7 +18,6 @@ impl TensorBuffer { Self::F16(_) => TensorDataType::F16, Self::F32(_) => TensorDataType::F32, Self::F64(_) => TensorDataType::F64, - Self::Jpeg(_) => TensorDataType::U8, Self::Nv12(_) => TensorDataType::U8, Self::Yuy2(_) => TensorDataType::U8, } @@ -39,7 +38,6 @@ impl TensorBuffer { Self::F16(buf) => buf.size_in_bytes(), Self::F32(buf) => buf.size_in_bytes(), Self::F64(buf) => buf.size_in_bytes(), - Self::Jpeg(buf) => buf.size_in_bytes(), Self::Nv12(buf) => buf.size_in_bytes(), Self::Yuy2(buf) => buf.size_in_bytes(), } @@ -66,7 +64,7 @@ impl TensorBuffer { | Self::F32(_) | Self::F64(_) => false, - Self::Jpeg(_) | Self::Nv12(_) | Self::Yuy2(_) => true, + Self::Nv12(_) | Self::Yuy2(_) => true, } } } @@ -85,7 +83,6 @@ impl std::fmt::Debug for TensorBuffer { Self::F16(_) => write!(f, "F16({} bytes)", self.size_in_bytes()), Self::F32(_) => write!(f, "F32({} bytes)", self.size_in_bytes()), Self::F64(_) => write!(f, "F64({} bytes)", self.size_in_bytes()), - Self::Jpeg(_) => write!(f, "JPEG({} bytes)", self.size_in_bytes()), Self::Nv12(_) => write!(f, "NV12({} bytes)", self.size_in_bytes()), Self::Yuy2(_) => write!(f, "YUY2({} bytes)", self.size_in_bytes()), } diff --git 
a/crates/store/re_types/src/datatypes/tensor_data.rs b/crates/store/re_types/src/datatypes/tensor_data.rs index 653ebda3beae..6a135819fcf7 100644 --- a/crates/store/re_types/src/datatypes/tensor_data.rs +++ b/crates/store/re_types/src/datatypes/tensor_data.rs @@ -27,10 +27,8 @@ use ::re_types_core::{DeserializationError, DeserializationResult}; /// These dimensions are combined with an index to look up values from the `buffer` field, /// which stores a contiguous array of typed values. /// -/// Note that the buffer may be encoded in a compressed format such as `jpeg` or -/// in a format with downsampled chroma, such as NV12 or YUY2. -/// For file formats, the shape is used as a hint, for chroma downsampled format -/// the shape has to be the shape of the decoded image. +/// Note that the buffer may in a format with downsampled chroma, such as NV12 or YUY2. +/// For chroma downsampled formats the shape has to be the shape of the decoded image. #[derive(Clone, Debug, PartialEq)] pub struct TensorData { /// The shape of the tensor, including optional names for each dimension. diff --git a/crates/store/re_types/src/datatypes/tensor_data_ext.rs b/crates/store/re_types/src/datatypes/tensor_data_ext.rs index 3502e1d555a6..bd764eff5a7c 100644 --- a/crates/store/re_types/src/datatypes/tensor_data_ext.rs +++ b/crates/store/re_types/src/datatypes/tensor_data_ext.rs @@ -1,7 +1,10 @@ use crate::tensor_data::{TensorCastError, TensorDataType, TensorElement}; #[cfg(feature = "image")] -use crate::tensor_data::{DecodedTensor, TensorImageLoadError, TensorImageSaveError}; +use crate::tensor_data::{TensorImageLoadError, TensorImageSaveError}; + +#[allow(unused_imports)] // Used for docstring links +use crate::archetypes::ImageEncoded; use super::{TensorBuffer, TensorData, TensorDimension}; @@ -80,6 +83,7 @@ impl TensorData { _ => None, } } + // In the case of YUY2, return the shape of the RGB image, not the tensor size. 
TensorBuffer::Yuy2(_) => { // YUY2 encodes a color image in 2 "channels" -> 1 luma (per pixel) + (1U + 1V) (per 2 pixels). @@ -88,8 +92,8 @@ impl TensorData { _ => None, } } - TensorBuffer::Jpeg(_) - | TensorBuffer::U8(_) + + TensorBuffer::U8(_) | TensorBuffer::U16(_) | TensorBuffer::U32(_) | TensorBuffer::U64(_) @@ -187,7 +191,7 @@ impl TensorData { /// Get the value of the element at the given index. /// - /// Return `None` if out-of-bounds, or if the tensor is encoded (e.g. [`TensorBuffer::Jpeg`]). + /// Return `None` if out-of-bounds. pub fn get(&self, index: &[u64]) -> Option { let mut stride: usize = 1; let mut offset: usize = 0; @@ -211,7 +215,6 @@ impl TensorData { TensorBuffer::F16(buf) => Some(TensorElement::F16(buf[offset])), TensorBuffer::F32(buf) => Some(TensorElement::F32(buf[offset])), TensorBuffer::F64(buf) => Some(TensorElement::F64(buf[offset])), - TensorBuffer::Jpeg(_) => None, // Too expensive to unpack here. TensorBuffer::Nv12(_) => { { // Returns the U32 packed RGBA value of the pixel at index [y, x] if it is valid. @@ -581,8 +584,9 @@ impl TryFrom<::ndarray::Array> for Tensor impl TensorData { /// Construct a tensor from the contents of an image file on disk. /// - /// JPEGs will be kept encoded, left to the viewer to decode on-the-fly. - /// Other images types will be decoded directly. + /// This will spend CPU cycles reading the file and decoding the image. + /// To save CPU time and storage, we recommend you instead use + /// [`ImageEncoded::from_file`]. /// /// Requires the `image` feature. #[cfg(not(target_arch = "wasm32"))] @@ -604,89 +608,29 @@ impl TensorData { image::guess_format(&img_bytes)? }; - Self::from_image_bytes(img_bytes, img_format) - } - - /// Construct a tensor from the contents of a JPEG file on disk. - /// - /// Requires the `image` feature. 
- #[cfg(not(target_arch = "wasm32"))] - #[inline] - pub fn from_jpeg_file(path: &std::path::Path) -> Result { - re_tracing::profile_function!(path.to_string_lossy()); - let jpeg_bytes = { - re_tracing::profile_scope!("fs::read"); - std::fs::read(path)? - }; - Self::from_jpeg_bytes(jpeg_bytes) - } - - /// Construct a new tensor from the contents of a `.jpeg` file at the given path. - #[deprecated = "Renamed 'from_jpeg_file'"] - #[cfg(not(target_arch = "wasm32"))] - #[inline] - pub fn tensor_from_jpeg_file( - image_path: impl AsRef, - ) -> Result { - Self::from_jpeg_file(image_path.as_ref()) + Self::from_image_bytes(&img_bytes, img_format) } /// Construct a tensor from the contents of an image file. /// - /// JPEGs will be kept encoded, left to the viewer to decode on-the-fly. - /// Other images types will be decoded directly. + /// This will spend CPU cycles reading the file and decoding the image. + /// To save CPU time and storage, we recommend you instead use + /// [`ImageEncoded::from_file_contents`]. /// /// Requires the `image` feature. #[inline] pub fn from_image_bytes( - bytes: Vec, + bytes: &[u8], format: image::ImageFormat, ) -> Result { re_tracing::profile_function!(format!("{format:?}")); - if format == image::ImageFormat::Jpeg { - Self::from_jpeg_bytes(bytes) - } else { - let image = image::load_from_memory_with_format(&bytes, format)?; - Self::from_image(image) - } - } - - /// Construct a tensor from the contents of a JPEG file, without decoding it now. - /// - /// Requires the `image` feature. 
- pub fn from_jpeg_bytes(jpeg_bytes: Vec) -> Result { - re_tracing::profile_function!(); - - // Parse JPEG header: - use image::ImageDecoder as _; - let jpeg = image::codecs::jpeg::JpegDecoder::new(std::io::Cursor::new(&jpeg_bytes))?; - let (w, h) = jpeg.dimensions(); - let depth = jpeg.color_type().channel_count(); - - Ok(Self { - shape: vec![ - TensorDimension::height(h as _), - TensorDimension::width(w as _), - TensorDimension::depth(depth as _), - ], - buffer: TensorBuffer::Jpeg(jpeg_bytes.into()), - }) - } - - /// Construct a new tensor from the contents of a `.jpeg` file. - #[deprecated = "Renamed 'from_jpeg_bytes'"] - #[cfg(not(target_arch = "wasm32"))] - #[inline] - pub fn tensor_from_jpeg_bytes(jpeg_bytes: Vec) -> Result { - Self::from_jpeg_bytes(jpeg_bytes) + let image = image::load_from_memory_with_format(bytes, format)?; + Self::from_image(image) } /// Construct a tensor from something that can be turned into a [`image::DynamicImage`]. /// /// Requires the `image` feature. - /// - /// This is a convenience function that calls [`DecodedTensor::from_image`]. - #[inline] pub fn from_image(image: impl Into) -> Result { Self::from_dynamic_image(image.into()) } @@ -694,11 +638,66 @@ impl TensorData { /// Construct a tensor from [`image::DynamicImage`]. /// /// Requires the `image` feature. - /// - /// This is a convenience function that calls [`DecodedTensor::from_dynamic_image`]. 
- #[inline] pub fn from_dynamic_image(image: image::DynamicImage) -> Result { - DecodedTensor::from_dynamic_image(image).map(DecodedTensor::into_inner) + re_tracing::profile_function!(); + + let (w, h) = (image.width(), image.height()); + + let (depth, buffer) = match image { + image::DynamicImage::ImageLuma8(image) => { + (1, TensorBuffer::U8(image.into_raw().into())) + } + image::DynamicImage::ImageRgb8(image) => (3, TensorBuffer::U8(image.into_raw().into())), + image::DynamicImage::ImageRgba8(image) => { + (4, TensorBuffer::U8(image.into_raw().into())) + } + image::DynamicImage::ImageLuma16(image) => { + (1, TensorBuffer::U16(image.into_raw().into())) + } + image::DynamicImage::ImageRgb16(image) => { + (3, TensorBuffer::U16(image.into_raw().into())) + } + image::DynamicImage::ImageRgba16(image) => { + (4, TensorBuffer::U16(image.into_raw().into())) + } + image::DynamicImage::ImageRgb32F(image) => { + (3, TensorBuffer::F32(image.into_raw().into())) + } + image::DynamicImage::ImageRgba32F(image) => { + (4, TensorBuffer::F32(image.into_raw().into())) + } + image::DynamicImage::ImageLumaA8(image) => { + re_log::warn!( + "Rerun doesn't have native support for 8-bit Luma + Alpha. The image will be convert to RGBA." + ); + return Self::from_image(image::DynamicImage::ImageLumaA8(image).to_rgba8()); + } + image::DynamicImage::ImageLumaA16(image) => { + re_log::warn!( + "Rerun doesn't have native support for 16-bit Luma + Alpha. The image will be convert to RGBA." 
+ ); + return Self::from_image(image::DynamicImage::ImageLumaA16(image).to_rgba16()); + } + _ => { + // It is very annoying that DynamicImage is #[non_exhaustive] + return Err(TensorImageLoadError::UnsupportedImageColorType( + image.color(), + )); + } + }; + let shape = if depth == 1 { + vec![ + TensorDimension::height(h as _), + TensorDimension::width(w as _), + ] + } else { + vec![ + TensorDimension::height(h as _), + TensorDimension::width(w as _), + TensorDimension::depth(depth), + ] + }; + Ok(Self { shape, buffer }) } /// Predicts if [`Self::to_dynamic_image`] is likely to succeed, without doing anything expensive diff --git a/crates/store/re_types/src/tensor_data.rs b/crates/store/re_types/src/tensor_data.rs index 7bedef2bcb14..adced256520a 100644 --- a/crates/store/re_types/src/tensor_data.rs +++ b/crates/store/re_types/src/tensor_data.rs @@ -3,11 +3,12 @@ use half::f16; -use crate::datatypes::{TensorBuffer, TensorData}; - #[cfg(feature = "image")] use crate::datatypes::TensorDimension; +#[allow(unused_imports)] // Used for docstring links +use crate::datatypes::TensorData; + // Much of the following duplicates code from: `crates/re_components/src/tensor.rs`, which // will eventually go away as the Tensor migration is completed. @@ -36,9 +37,6 @@ pub enum TensorImageLoadError { #[error(transparent)] Image(std::sync::Arc), - #[error("Expected a HxW, HxWx1 or HxWx3 tensor, but got {0:?}")] - UnexpectedJpegShape(Vec), - #[error("Unsupported color type: {0:?}. We support 8-bit, 16-bit, and f32 images, and RGB, RGBA, Luminance, and Luminance-Alpha.")] UnsupportedImageColorType(image::ColorType), @@ -395,24 +393,6 @@ impl std::fmt::Display for TensorElement { // ---------------------------------------------------------------------------- -/// A thin wrapper around a [`TensorData`] that is guaranteed to not be compressed (never a jpeg). -/// -/// All clones are shallow, like for [`TensorData`]. 
-#[derive(Clone)] -pub struct DecodedTensor(TensorData); - -impl DecodedTensor { - #[inline(always)] - pub fn inner(&self) -> &TensorData { - &self.0 - } - - #[inline(always)] - pub fn into_inner(self) -> TensorData { - self.0 - } -} - // Backwards comparabillity shim // TODO(jleibs): fully express this in terms of indicator components #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] @@ -427,185 +407,3 @@ pub enum TensorDataMeaning { /// Image data interpreted as depth map. Depth, } - -impl TryFrom for DecodedTensor { - type Error = TensorData; - - fn try_from(tensor: TensorData) -> Result { - match &tensor.buffer { - TensorBuffer::U8(_) - | TensorBuffer::U16(_) - | TensorBuffer::U32(_) - | TensorBuffer::U64(_) - | TensorBuffer::I8(_) - | TensorBuffer::I16(_) - | TensorBuffer::I32(_) - | TensorBuffer::I64(_) - | TensorBuffer::F16(_) - | TensorBuffer::F32(_) - | TensorBuffer::F64(_) => Ok(Self(tensor)), - TensorBuffer::Jpeg(_) | TensorBuffer::Nv12(_) | TensorBuffer::Yuy2(_) => Err(tensor), - } - } -} - -#[cfg(feature = "image")] -impl DecodedTensor { - /// Construct a tensor from something that can be turned into a [`image::DynamicImage`]. - /// - /// Requires the `image` feature. - pub fn from_image(image: impl Into) -> Result { - Self::from_dynamic_image(image.into()) - } - - /// Construct a tensor from [`image::DynamicImage`]. - /// - /// Requires the `image` feature. 
- pub fn from_dynamic_image(image: image::DynamicImage) -> Result { - re_tracing::profile_function!(); - - let (w, h) = (image.width(), image.height()); - - let (depth, buffer) = match image { - image::DynamicImage::ImageLuma8(image) => { - (1, TensorBuffer::U8(image.into_raw().into())) - } - image::DynamicImage::ImageRgb8(image) => (3, TensorBuffer::U8(image.into_raw().into())), - image::DynamicImage::ImageRgba8(image) => { - (4, TensorBuffer::U8(image.into_raw().into())) - } - image::DynamicImage::ImageLuma16(image) => { - (1, TensorBuffer::U16(image.into_raw().into())) - } - image::DynamicImage::ImageRgb16(image) => { - (3, TensorBuffer::U16(image.into_raw().into())) - } - image::DynamicImage::ImageRgba16(image) => { - (4, TensorBuffer::U16(image.into_raw().into())) - } - image::DynamicImage::ImageRgb32F(image) => { - (3, TensorBuffer::F32(image.into_raw().into())) - } - image::DynamicImage::ImageRgba32F(image) => { - (4, TensorBuffer::F32(image.into_raw().into())) - } - image::DynamicImage::ImageLumaA8(image) => { - re_log::warn!( - "Rerun doesn't have native support for 8-bit Luma + Alpha. The image will be convert to RGBA." - ); - return Self::from_image(image::DynamicImage::ImageLumaA8(image).to_rgba8()); - } - image::DynamicImage::ImageLumaA16(image) => { - re_log::warn!( - "Rerun doesn't have native support for 16-bit Luma + Alpha. The image will be convert to RGBA." 
- ); - return Self::from_image(image::DynamicImage::ImageLumaA16(image).to_rgba16()); - } - _ => { - // It is very annoying that DynamicImage is #[non_exhaustive] - return Err(TensorImageLoadError::UnsupportedImageColorType( - image.color(), - )); - } - }; - let shape = if depth == 1 { - vec![ - TensorDimension::height(h as _), - TensorDimension::width(w as _), - ] - } else { - vec![ - TensorDimension::height(h as _), - TensorDimension::width(w as _), - TensorDimension::depth(depth), - ] - }; - let tensor = TensorData { shape, buffer }; - Ok(Self(tensor)) - } - - /// Try to decode this tensor, if it was encoded as a JPEG, - /// otherwise just return the tensor. - pub fn try_decode(maybe_encoded_tensor: TensorData) -> Result { - match &maybe_encoded_tensor.buffer { - TensorBuffer::U8(_) - | TensorBuffer::U16(_) - | TensorBuffer::U32(_) - | TensorBuffer::U64(_) - | TensorBuffer::I8(_) - | TensorBuffer::I16(_) - | TensorBuffer::I32(_) - | TensorBuffer::I64(_) - | TensorBuffer::F16(_) - | TensorBuffer::F32(_) - | TensorBuffer::F64(_) - | TensorBuffer::Nv12(_) - | TensorBuffer::Yuy2(_) => Ok(Self(maybe_encoded_tensor)), // Decoding happens on the GPU - - TensorBuffer::Jpeg(jpeg_bytes) => { - let [h, w, c] = maybe_encoded_tensor - .image_height_width_channels() - .ok_or_else(|| { - TensorImageLoadError::UnexpectedJpegShape( - maybe_encoded_tensor.shape().to_vec(), - ) - })?; - - Self::decode_jpeg_bytes(jpeg_bytes.as_slice(), [h, w, c]) - } - } - } - - /// Decode the contents of a JPEG file, with the given expected size. - /// - /// Returns an error if the size does not match. 
- pub fn decode_jpeg_bytes( - jpeg_bytes: &[u8], - [expected_height, expected_width, expected_channels]: [u64; 3], - ) -> Result { - re_tracing::profile_function!(format!("{expected_width}x{expected_height}")); - - use image::io::Reader as ImageReader; - let mut reader = ImageReader::new(std::io::Cursor::new(jpeg_bytes)); - reader.set_format(image::ImageFormat::Jpeg); - let img = { - re_tracing::profile_scope!("decode_jpeg"); - reader.decode()? - }; - - let (w, h) = (img.width() as u64, img.height() as u64); - let channels = img.color().channel_count() as u64; - - if (w, h, channels) != (expected_width, expected_height, expected_channels) { - return Err(TensorImageLoadError::InvalidMetaData { - expected: [expected_height, expected_width, expected_channels].into(), - found: [h, w, channels].into(), - }); - } - - Self::from_image(img) - } -} - -impl AsRef for DecodedTensor { - #[inline(always)] - fn as_ref(&self) -> &TensorData { - &self.0 - } -} - -impl std::ops::Deref for DecodedTensor { - type Target = TensorData; - - #[inline(always)] - fn deref(&self) -> &TensorData { - &self.0 - } -} - -impl std::borrow::Borrow for DecodedTensor { - #[inline(always)] - fn borrow(&self) -> &TensorData { - &self.0 - } -} diff --git a/crates/viewer/re_data_ui/src/image.rs b/crates/viewer/re_data_ui/src/image.rs index ec86548f7609..de66f6070bfe 100644 --- a/crates/viewer/re_data_ui/src/image.rs +++ b/crates/viewer/re_data_ui/src/image.rs @@ -6,11 +6,10 @@ use re_log_types::EntityPath; use re_renderer::renderer::ColormappedTexture; use re_types::components::{ClassId, Colormap, DepthMeter}; use re_types::datatypes::{TensorBuffer, TensorData, TensorDimension}; -use re_types::tensor_data::{DecodedTensor, TensorDataMeaning, TensorElement}; -use re_ui::{ContextExt as _, UiExt as _}; +use re_types::tensor_data::{TensorDataMeaning, TensorElement}; +use re_ui::UiExt as _; use re_viewer_context::{ - gpu_bridge, Annotations, TensorDecodeCache, TensorStats, TensorStatsCache, UiLayout, - 
ViewerContext, + gpu_bridge, Annotations, TensorStats, TensorStatsCache, UiLayout, ViewerContext, }; use crate::image_meaning::image_meaning_for_entity; @@ -66,29 +65,18 @@ impl EntityDataUi for re_types::components::TensorData { .latest_at_component::(entity_path, query) .map_or(RowId::ZERO, |tensor| tensor.index.1); - let decoded = ctx - .cache - .entry(|c: &mut TensorDecodeCache| c.entry(tensor_data_row_id, self.0.clone())); - match decoded { - Ok(decoded) => { - let annotations = crate::annotations(ctx, query, entity_path); - tensor_ui( - ctx, - query, - db, - ui, - ui_layout, - entity_path, - &annotations, - tensor_data_row_id, - &self.0, - &decoded, - ); - } - Err(err) => { - ui_layout.label(ui, ui.ctx().error_text(err.to_string())); - } - } + let annotations = crate::annotations(ctx, query, entity_path); + tensor_ui( + ctx, + query, + db, + ui, + ui_layout, + entity_path, + &annotations, + tensor_data_row_id, + &self.0, + ); } } @@ -102,8 +90,7 @@ pub fn tensor_ui( entity_path: &re_entity_db::EntityPath, annotations: &Annotations, tensor_data_row_id: RowId, - original_tensor: &TensorData, - tensor: &DecodedTensor, + tensor: &TensorData, ) { // See if we can convert the tensor to a GPU texture. // Even if not, we will show info about the tensor. 
@@ -192,11 +179,11 @@ pub fn tensor_ui( }; let text = format!( "{}, {}", - original_tensor.dtype(), + tensor.dtype(), format_tensor_shape_single_line(&shape) ); ui_layout.label(ui, text).on_hover_ui(|ui| { - tensor_summary_ui(ui, original_tensor, tensor, meaning, meter, &tensor_stats); + tensor_summary_ui(ui, tensor, meaning, meter, &tensor_stats); }); }); } @@ -204,7 +191,7 @@ pub fn tensor_ui( UiLayout::SelectionPanelFull | UiLayout::SelectionPanelLimitHeight | UiLayout::Tooltip => { ui.vertical(|ui| { ui.set_min_width(100.0); - tensor_summary_ui(ui, original_tensor, tensor, meaning, meter, &tensor_stats); + tensor_summary_ui(ui, tensor, meaning, meter, &tensor_stats); if let Some(texture) = &texture_result { let preview_size = ui @@ -243,10 +230,8 @@ pub fn tensor_ui( // TODO(emilk): support copying and saving images on web #[cfg(not(target_arch = "wasm32"))] - if original_tensor.buffer.is_compressed_image() - || tensor.could_be_dynamic_image() - { - copy_and_save_image_ui(ui, tensor, original_tensor); + if tensor.buffer.is_compressed_image() || tensor.could_be_dynamic_image() { + copy_and_save_image_ui(ui, tensor, tensor); } if let Some([_h, _w, channels]) = tensor.image_height_width_channels() { @@ -335,13 +320,12 @@ fn largest_size_that_fits_in(aspect_ratio: f32, max_size: Vec2) -> Vec2 { pub fn tensor_summary_ui_grid_contents( ui: &mut egui::Ui, - original_tensor: &TensorData, - tensor: &DecodedTensor, + tensor: &TensorData, meaning: TensorDataMeaning, meter: Option, tensor_stats: &TensorStats, ) { - let TensorData { shape, buffer: _ } = tensor.inner(); + let TensorData { shape, buffer: _ } = tensor; ui.grid_left_hand_label("Data type") .on_hover_text("Data type used for all individual elements within the tensor"); @@ -381,7 +365,7 @@ pub fn tensor_summary_ui_grid_contents( ui.end_row(); } - match &original_tensor.buffer { + match &tensor.buffer { TensorBuffer::U8(_) | TensorBuffer::U16(_) | TensorBuffer::U32(_) @@ -393,19 +377,13 @@ pub fn 
tensor_summary_ui_grid_contents( | TensorBuffer::F16(_) | TensorBuffer::F32(_) | TensorBuffer::F64(_) => {} - TensorBuffer::Jpeg(jpeg_bytes) => { - ui.grid_left_hand_label("Encoding"); - ui.label(format!( - "{} JPEG", - re_format::format_bytes(jpeg_bytes.size_in_bytes() as _), - )); - ui.end_row(); - } + TensorBuffer::Nv12(_) => { ui.grid_left_hand_label("Encoding"); ui.label("NV12"); ui.end_row(); } + TensorBuffer::Yuy2(_) => { ui.grid_left_hand_label("Encoding"); ui.label("YUY2"); @@ -444,8 +422,7 @@ pub fn tensor_summary_ui_grid_contents( pub fn tensor_summary_ui( ui: &mut egui::Ui, - original_tensor: &TensorData, - tensor: &DecodedTensor, + tensor: &TensorData, meaning: TensorDataMeaning, meter: Option, tensor_stats: &TensorStats, @@ -453,14 +430,7 @@ pub fn tensor_summary_ui( egui::Grid::new("tensor_summary_ui") .num_columns(2) .show(ui, |ui| { - tensor_summary_ui_grid_contents( - ui, - original_tensor, - tensor, - meaning, - meter, - tensor_stats, - ); + tensor_summary_ui_grid_contents(ui, tensor, meaning, meter, tensor_stats); }); } @@ -470,7 +440,7 @@ fn show_zoomed_image_region_tooltip( parent_ui: &egui::Ui, response: egui::Response, tensor_data_row_id: RowId, - tensor: &DecodedTensor, + tensor: &TensorData, tensor_stats: &TensorStats, annotations: &Annotations, meaning: TensorDataMeaning, @@ -563,7 +533,7 @@ pub fn show_zoomed_image_region( render_ctx: &re_renderer::RenderContext, ui: &mut egui::Ui, tensor_data_row_id: RowId, - tensor: &DecodedTensor, + tensor: &TensorData, tensor_stats: &TensorStats, annotations: &Annotations, meaning: TensorDataMeaning, @@ -595,7 +565,7 @@ fn try_show_zoomed_image_region( render_ctx: &re_renderer::RenderContext, ui: &mut egui::Ui, tensor_data_row_id: RowId, - tensor: &DecodedTensor, + tensor: &TensorData, tensor_stats: &TensorStats, annotations: &Annotations, meaning: TensorDataMeaning, @@ -894,7 +864,7 @@ fn copy_and_save_image_ui(ui: &mut egui::Ui, tensor: &TensorData, _encoded_tenso if ui.button("Save 
image…").clicked() { match tensor.to_dynamic_image() { Ok(dynamic_image) => { - save_image(_encoded_tensor, &dynamic_image); + save_image(&dynamic_image); } Err(err) => { re_log::error!("Failed to convert tensor to image: {err}"); @@ -905,46 +875,18 @@ fn copy_and_save_image_ui(ui: &mut egui::Ui, tensor: &TensorData, _encoded_tenso } #[cfg(not(target_arch = "wasm32"))] -fn save_image(tensor: &TensorData, dynamic_image: &image::DynamicImage) { - match &tensor.buffer { - TensorBuffer::Jpeg(bytes) => { - if let Some(path) = rfd::FileDialog::new() - .set_file_name("image.jpg") - .save_file() - { - match write_binary(&path, bytes.as_slice()) { - Ok(()) => { - re_log::info!("Image saved to {path:?}"); - } - Err(err) => { - re_log::error!( - "Failed saving image to {path:?}: {}", - re_error::format(&err) - ); - } - } +fn save_image(dynamic_image: &image::DynamicImage) { + if let Some(path) = rfd::FileDialog::new() + .set_file_name("image.png") + .save_file() + { + match dynamic_image.save(&path) { + Ok(()) => { + re_log::info!("Image saved to {path:?}"); } - } - _ => { - if let Some(path) = rfd::FileDialog::new() - .set_file_name("image.png") - .save_file() - { - match dynamic_image.save(&path) { - Ok(()) => { - re_log::info!("Image saved to {path:?}"); - } - Err(err) => { - re_log::error!("Failed saving image to {path:?}: {err}"); - } - } + Err(err) => { + re_log::error!("Failed saving image to {path:?}: {err}"); } } } } - -#[cfg(not(target_arch = "wasm32"))] -fn write_binary(path: &std::path::PathBuf, data: &[u8]) -> anyhow::Result<()> { - use std::io::Write as _; - Ok(std::fs::File::create(path)?.write_all(data)?) 
-} diff --git a/crates/viewer/re_space_view_bar_chart/src/space_view_class.rs b/crates/viewer/re_space_view_bar_chart/src/space_view_class.rs index 6428b4432031..b94afc93f8a7 100644 --- a/crates/viewer/re_space_view_bar_chart/src/space_view_class.rs +++ b/crates/viewer/re_space_view_bar_chart/src/space_view_class.rs @@ -228,13 +228,6 @@ Display a 1D tensor as a bar chart. TensorBuffer::F64(data) => { create_bar_chart(ent_path, data.iter().copied(), color) } - TensorBuffer::Jpeg(_) => { - re_log::warn_once!( - "trying to display JPEG data as a bar chart ({:?})", - ent_path - ); - continue; - } TensorBuffer::Nv12(_) => { re_log::warn_once!( "trying to display NV12 data as a bar chart ({:?})", diff --git a/crates/viewer/re_space_view_spatial/src/pickable_image.rs b/crates/viewer/re_space_view_spatial/src/pickable_image.rs index 19eb66bbcc19..b31cee9badfd 100644 --- a/crates/viewer/re_space_view_spatial/src/pickable_image.rs +++ b/crates/viewer/re_space_view_spatial/src/pickable_image.rs @@ -1,7 +1,7 @@ use re_chunk_store::RowId; use re_log_types::EntityPath; use re_renderer::renderer::TexturedRect; -use re_types::tensor_data::DecodedTensor; +use re_types::datatypes::TensorData; /// Image rectangle that can be picked in the view. pub struct PickableImageRect { @@ -14,5 +14,5 @@ pub struct PickableImageRect { /// Textured rectangle used by the renderer. 
pub textured_rect: TexturedRect, - pub tensor: DecodedTensor, + pub tensor: TensorData, } diff --git a/crates/viewer/re_space_view_spatial/src/ui.rs b/crates/viewer/re_space_view_spatial/src/ui.rs index ce6171930449..2b0f4634ccf7 100644 --- a/crates/viewer/re_space_view_spatial/src/ui.rs +++ b/crates/viewer/re_space_view_spatial/src/ui.rs @@ -23,8 +23,8 @@ use re_ui::{ }; use re_viewer_context::{ HoverHighlight, Item, ItemSpaceContext, SelectionHighlight, SpaceViewHighlights, - SpaceViewState, SpaceViewSystemExecutionError, TensorDecodeCache, TensorStatsCache, UiLayout, - ViewContext, ViewContextCollection, ViewQuery, ViewerContext, VisualizerCollection, + SpaceViewState, SpaceViewSystemExecutionError, TensorStatsCache, UiLayout, ViewContext, + ViewContextCollection, ViewQuery, ViewerContext, VisualizerCollection, }; use re_viewport_blueprint::SpaceViewBlueprint; @@ -459,7 +459,7 @@ pub fn picking( PickedImageInfo { row_id: picked.row_id, - tensor: picked.tensor.inner().clone(), + tensor: picked.tensor.clone(), meaning: TensorDataMeaning::Unknown, // "Unknown" means color coordinates, colormap: Default::default(), @@ -695,34 +695,24 @@ fn image_hover_ui( let tensor_name = instance_path.to_string(); - let decoded_tensor = ctx + let annotations = annotations.0.find(&instance_path.entity_path); + let tensor_stats = ctx .cache - .entry(|c: &mut TensorDecodeCache| c.entry(row_id, tensor)); - match decoded_tensor { - Ok(decoded_tensor) => { - let annotations = annotations.0.find(&instance_path.entity_path); - let tensor_stats = ctx - .cache - .entry(|c: &mut TensorStatsCache| c.entry(row_id, &decoded_tensor)); - if let Some(render_ctx) = ctx.render_ctx { - show_zoomed_image_region( - render_ctx, - ui, - row_id, - &decoded_tensor, - &tensor_stats, - &annotations, - meaning, - depth_meter, - &tensor_name, - [coordinates[0] as _, coordinates[1] as _], - Some(colormap), - ); - } - } - Err(err) => re_log::warn_once!( - "Encountered problem decoding tensor at path 
{tensor_name}: {err}" - ), + .entry(|c: &mut TensorStatsCache| c.entry(row_id, &tensor)); + if let Some(render_ctx) = ctx.render_ctx { + show_zoomed_image_region( + render_ctx, + ui, + row_id, + &tensor, + &tensor_stats, + &annotations, + meaning, + depth_meter, + &tensor_name, + [coordinates[0] as _, coordinates[1] as _], + Some(colormap), + ); } }); } diff --git a/crates/viewer/re_space_view_spatial/src/visualizers/depth_images.rs b/crates/viewer/re_space_view_spatial/src/visualizers/depth_images.rs index 423548ce451f..09699a31d52a 100644 --- a/crates/viewer/re_space_view_spatial/src/visualizers/depth_images.rs +++ b/crates/viewer/re_space_view_spatial/src/visualizers/depth_images.rs @@ -9,12 +9,13 @@ use re_renderer::renderer::{DepthCloud, DepthClouds}; use re_space_view::diff_component_filter; use re_types::{ archetypes::DepthImage, - components::{Colormap, DepthMeter, DrawOrder, FillRatio, TensorData, ViewCoordinates}, - tensor_data::{DecodedTensor, TensorDataMeaning}, + components::{self, Colormap, DepthMeter, DrawOrder, FillRatio, ViewCoordinates}, + datatypes, + tensor_data::TensorDataMeaning, }; use re_viewer_context::{ gpu_bridge::colormap_to_re_renderer, ApplicableEntities, IdentifiedViewSystem, QueryContext, - SpaceViewClass, SpaceViewSystemExecutionError, TensorDecodeCache, TensorStatsCache, + SpaceViewClass, SpaceViewSystemExecutionError, TensorStatsCache, TypedComponentFallbackProvider, ViewContext, ViewContextCollection, ViewQuery, VisualizableEntities, VisualizableFilterContext, VisualizerAdditionalApplicabilityFilter, VisualizerQueryInfo, VisualizerSystem, @@ -49,7 +50,7 @@ impl Default for DepthImageVisualizer { struct DepthImageComponentData<'a> { index: (TimeInt, RowId), - tensor: &'a TensorData, + tensor: &'a datatypes::TensorData, colormap: Option<&'a Colormap>, depth_meter: Option<&'a DepthMeter>, fill_ratio: Option<&'a FillRatio>, @@ -76,17 +77,7 @@ impl DepthImageVisualizer { } let tensor_data_row_id = data.index.1; - let tensor = match 
ctx.viewer_ctx.cache.entry(|c: &mut TensorDecodeCache| { - c.entry(tensor_data_row_id, data.tensor.0.clone()) - }) { - Ok(tensor) => tensor, - Err(err) => { - re_log::warn_once!( - "Encountered problem decoding tensor at path {entity_path}: {err}" - ); - continue; - } - }; + let tensor = data.tensor.clone(); let colormap = data .colormap @@ -171,7 +162,7 @@ impl DepthImageVisualizer { transforms: &TransformContext, ent_context: &SpatialSceneEntityContext<'_>, tensor_data_row_id: RowId, - tensor: &DecodedTensor, + tensor: &datatypes::TensorData, ent_path: &EntityPath, parent_pinhole_path: &EntityPath, colormap: Colormap, @@ -308,7 +299,9 @@ impl VisualizerSystem for DepthImageVisualizer { let resolver = ctx.recording().resolver(); - let tensors = match results.get_required_component_dense::(resolver) { + let tensors = match results + .get_required_component_dense::(resolver) + { Some(tensors) => tensors?, _ => return Ok(()), }; @@ -406,7 +399,7 @@ impl TypedComponentFallbackProvider for DepthImageVisualizer { fn fallback_for(&self, ctx: &re_viewer_context::QueryContext<'_>) -> DepthMeter { let is_integer_tensor = ctx .recording() - .latest_at_component::(ctx.target_entity_path, ctx.query) + .latest_at_component::(ctx.target_entity_path, ctx.query) .map_or(false, |tensor| tensor.dtype().is_integer()); if is_integer_tensor { 1000.0 } else { 1.0 }.into() diff --git a/crates/viewer/re_space_view_spatial/src/visualizers/image_encoded.rs b/crates/viewer/re_space_view_spatial/src/visualizers/image_encoded.rs index 535c0cb9d7f2..bdecd42f049a 100644 --- a/crates/viewer/re_space_view_spatial/src/visualizers/image_encoded.rs +++ b/crates/viewer/re_space_view_spatial/src/visualizers/image_encoded.rs @@ -205,7 +205,7 @@ impl ImageEncodedVisualizer { ent_path: entity_path.clone(), row_id: tensor_data_row_id, textured_rect, - tensor, + tensor: tensor.data.0, }); } } diff --git a/crates/viewer/re_space_view_spatial/src/visualizers/images.rs 
b/crates/viewer/re_space_view_spatial/src/visualizers/images.rs index 8df370b8d209..275ac89faddd 100644 --- a/crates/viewer/re_space_view_spatial/src/visualizers/images.rs +++ b/crates/viewer/re_space_view_spatial/src/visualizers/images.rs @@ -11,7 +11,7 @@ use re_types::{ }; use re_viewer_context::{ ApplicableEntities, IdentifiedViewSystem, QueryContext, SpaceViewClass, - SpaceViewSystemExecutionError, TensorDecodeCache, TypedComponentFallbackProvider, ViewContext, + SpaceViewSystemExecutionError, TypedComponentFallbackProvider, ViewContext, ViewContextCollection, ViewQuery, VisualizableEntities, VisualizableFilterContext, VisualizerAdditionalApplicabilityFilter, VisualizerQueryInfo, VisualizerSystem, }; @@ -184,17 +184,7 @@ impl ImageVisualizer { } let tensor_data_row_id = data.index.1; - let tensor = match ctx.viewer_ctx.cache.entry(|c: &mut TensorDecodeCache| { - c.entry(tensor_data_row_id, data.tensor.0.clone()) - }) { - Ok(tensor) => tensor, - Err(err) => { - re_log::warn_once!( - "Encountered problem decoding tensor at path {entity_path}: {err}" - ); - continue; - } - }; + let tensor = data.tensor.0.clone(); // TODO(andreas): We only support colormap for depth image at this point. 
let colormap = None; diff --git a/crates/viewer/re_space_view_spatial/src/visualizers/segmentation_images.rs b/crates/viewer/re_space_view_spatial/src/visualizers/segmentation_images.rs index 2dc0031a202e..0b0710b4fc4f 100644 --- a/crates/viewer/re_space_view_spatial/src/visualizers/segmentation_images.rs +++ b/crates/viewer/re_space_view_spatial/src/visualizers/segmentation_images.rs @@ -11,7 +11,7 @@ use re_types::{ }; use re_viewer_context::{ ApplicableEntities, IdentifiedViewSystem, QueryContext, SpaceViewClass, - SpaceViewSystemExecutionError, TensorDecodeCache, TypedComponentFallbackProvider, ViewContext, + SpaceViewSystemExecutionError, TypedComponentFallbackProvider, ViewContext, ViewContextCollection, ViewQuery, VisualizableEntities, VisualizableFilterContext, VisualizerAdditionalApplicabilityFilter, VisualizerQueryInfo, VisualizerSystem, }; @@ -124,17 +124,7 @@ impl VisualizerSystem for SegmentationImageVisualizer { } let tensor_data_row_id = data.index.1; - let tensor = match ctx.viewer_ctx.cache.entry(|c: &mut TensorDecodeCache| { - c.entry(tensor_data_row_id, data.tensor.0.clone()) - }) { - Ok(tensor) => tensor, - Err(err) => { - re_log::warn_once!( - "Encountered problem decoding tensor at path {entity_path}: {err}" - ); - continue; - } - }; + let tensor = data.tensor.0.clone(); // TODO(andreas): colormap is only available for depth images right now. 
let colormap = None; diff --git a/crates/viewer/re_space_view_spatial/src/visualizers/utilities/textured_rect.rs b/crates/viewer/re_space_view_spatial/src/visualizers/utilities/textured_rect.rs index e46e70eb097a..2e8989d8bfe6 100644 --- a/crates/viewer/re_space_view_spatial/src/visualizers/utilities/textured_rect.rs +++ b/crates/viewer/re_space_view_spatial/src/visualizers/utilities/textured_rect.rs @@ -1,10 +1,7 @@ use re_chunk_store::RowId; use re_log_types::EntityPath; use re_renderer::renderer; -use re_types::{ - components::Colormap, - tensor_data::{DecodedTensor, TensorDataMeaning}, -}; +use re_types::{components::Colormap, datatypes::TensorData, tensor_data::TensorDataMeaning}; use re_viewer_context::{gpu_bridge, TensorStatsCache, ViewerContext}; use crate::contexts::SpatialSceneEntityContext; @@ -15,7 +12,7 @@ pub fn tensor_to_textured_rect( ent_path: &EntityPath, ent_context: &SpatialSceneEntityContext<'_>, tensor_data_row_id: RowId, - tensor: &DecodedTensor, + tensor: &TensorData, meaning: TensorDataMeaning, multiplicative_tint: egui::Rgba, colormap: Option, diff --git a/crates/viewer/re_space_view_tensor/src/space_view_class.rs b/crates/viewer/re_space_view_tensor/src/space_view_class.rs index 5a224c21f1de..814296ca911d 100644 --- a/crates/viewer/re_space_view_tensor/src/space_view_class.rs +++ b/crates/viewer/re_space_view_tensor/src/space_view_class.rs @@ -11,8 +11,8 @@ use re_types::{ components::ViewFit, }, components::{Colormap, GammaCorrection, MagnificationFilter, TensorDimensionIndexSelection}, - datatypes::TensorDimension, - tensor_data::{DecodedTensor, TensorDataMeaning}, + datatypes::{TensorData, TensorDimension}, + tensor_data::TensorDataMeaning, SpaceViewClassIdentifier, View, }; use re_ui::{list_item, ContextExt as _, UiExt as _}; @@ -38,7 +38,7 @@ type ViewType = re_types::blueprint::views::TensorView; pub struct ViewTensorState { /// Last viewed tensor, copied each frame. /// Used for the selection view. 
- tensor: Option<(RowId, DecodedTensor)>, + tensor: Option<(RowId, TensorData)>, } impl SpaceViewState for ViewTensorState { @@ -134,7 +134,7 @@ Note: select the space view to configure which dimensions are shown." // We are in a bare Tensor view -- meaning / meter is unknown. let meaning = TensorDataMeaning::Unknown; let meter = None; - tensor_summary_ui_grid_contents(ui, tensor, tensor, meaning, meter, &tensor_stats); + tensor_summary_ui_grid_contents(ui, tensor, meaning, meter, &tensor_stats); } }); @@ -218,7 +218,7 @@ Note: select the space view to configure which dimensions are shown." )); }); } else if let Some((tensor_data_row_id, tensor)) = tensors.first() { - state.tensor = Some((*tensor_data_row_id, tensor.clone())); + state.tensor = Some((*tensor_data_row_id, tensor.0.clone())); self.view_tensor(ctx, ui, state, query.space_view_id, tensor)?; } else { ui.centered_and_justified(|ui| ui.label("(empty)")); @@ -235,7 +235,7 @@ impl TensorSpaceView { ui: &mut egui::Ui, state: &ViewTensorState, view_id: SpaceViewId, - tensor: &DecodedTensor, + tensor: &TensorData, ) -> Result<(), SpaceViewSystemExecutionError> { re_tracing::profile_function!(); diff --git a/crates/viewer/re_space_view_tensor/src/tensor_slice_to_gpu.rs b/crates/viewer/re_space_view_tensor/src/tensor_slice_to_gpu.rs index b8be398f9773..a9a6373f6349 100644 --- a/crates/viewer/re_space_view_tensor/src/tensor_slice_to_gpu.rs +++ b/crates/viewer/re_space_view_tensor/src/tensor_slice_to_gpu.rs @@ -6,8 +6,8 @@ use re_renderer::{ use re_types::{ blueprint::archetypes::TensorSliceSelection, components::{Colormap, GammaCorrection}, - datatypes::TensorBuffer, - tensor_data::{DecodedTensor, TensorCastError, TensorDataType}, + datatypes::{TensorBuffer, TensorData}, + tensor_data::{TensorCastError, TensorDataType}, }; use re_viewer_context::{ gpu_bridge::{self, colormap_to_re_renderer, tensor_data_range_heuristic, RangeError}, @@ -31,7 +31,7 @@ pub enum TensorUploadError { pub fn colormapped_texture( 
render_ctx: &re_renderer::RenderContext, tensor_data_row_id: RowId, - tensor: &DecodedTensor, + tensor: &TensorData, tensor_stats: &TensorStats, slice_selection: &TensorSliceSelection, colormap: Colormap, @@ -64,7 +64,7 @@ pub fn colormapped_texture( fn upload_texture_slice_to_gpu( render_ctx: &re_renderer::RenderContext, tensor_data_row_id: RowId, - tensor: &DecodedTensor, + tensor: &TensorData, slice_selection: &TensorSliceSelection, ) -> Result> { let id = egui::util::hash((tensor_data_row_id, slice_selection)); @@ -75,14 +75,12 @@ fn upload_texture_slice_to_gpu( } fn texture_desc_from_tensor( - tensor: &DecodedTensor, + tensor: &TensorData, slice_selection: &TensorSliceSelection, ) -> Result, TensorUploadError> { use wgpu::TextureFormat; re_tracing::profile_function!(); - let tensor = tensor.inner(); - match tensor.dtype() { TensorDataType::U8 => { let tensor = ndarray::ArrayViewD::::try_from(tensor)?; diff --git a/crates/viewer/re_space_view_tensor/src/visualizer_system.rs b/crates/viewer/re_space_view_tensor/src/visualizer_system.rs index be4d5acd156a..a7112d13cd1d 100644 --- a/crates/viewer/re_space_view_tensor/src/visualizer_system.rs +++ b/crates/viewer/re_space_view_tensor/src/visualizer_system.rs @@ -1,14 +1,13 @@ use re_chunk_store::{LatestAtQuery, RowId}; -use re_entity_db::{external::re_query::LatestAtMonoResult, EntityPath}; -use re_types::{archetypes::Tensor, components::TensorData, tensor_data::DecodedTensor}; +use re_types::{archetypes::Tensor, components::TensorData}; use re_viewer_context::{ - IdentifiedViewSystem, SpaceViewSystemExecutionError, TensorDecodeCache, ViewContext, - ViewContextCollection, ViewQuery, ViewerContext, VisualizerQueryInfo, VisualizerSystem, + IdentifiedViewSystem, SpaceViewSystemExecutionError, ViewContext, ViewContextCollection, + ViewQuery, VisualizerQueryInfo, VisualizerSystem, }; #[derive(Default)] pub struct TensorSystem { - pub tensors: Vec<(RowId, DecodedTensor)>, + pub tensors: Vec<(RowId, TensorData)>, } impl 
IdentifiedViewSystem for TensorSystem { @@ -38,7 +37,7 @@ impl VisualizerSystem for TensorSystem { .recording() .latest_at_component::(&data_result.entity_path, &timeline_query) { - self.load_tensor_entity(ctx.viewer_ctx, &data_result.entity_path, tensor); + self.tensors.push((tensor.row_id(), tensor.value)); } } @@ -55,25 +54,3 @@ impl VisualizerSystem for TensorSystem { } re_viewer_context::impl_component_fallback_provider!(TensorSystem => []); - -impl TensorSystem { - fn load_tensor_entity( - &mut self, - ctx: &ViewerContext<'_>, - ent_path: &EntityPath, - tensor: LatestAtMonoResult, - ) { - let row_id = tensor.row_id(); - match ctx - .cache - .entry(|c: &mut TensorDecodeCache| c.entry(row_id, tensor.value.0)) - { - Ok(decoded_tensor) => { - self.tensors.push((row_id, decoded_tensor)); - } - Err(err) => { - re_log::warn_once!("Failed to decode decoding tensor at path {ent_path}: {err}"); - } - } - } -} diff --git a/crates/viewer/re_viewer/src/reflection/mod.rs b/crates/viewer/re_viewer/src/reflection/mod.rs index 234bc7889215..c5c287d71e93 100644 --- a/crates/viewer/re_viewer/src/reflection/mod.rs +++ b/crates/viewer/re_viewer/src/reflection/mod.rs @@ -471,7 +471,7 @@ fn generate_component_reflection() -> Result::name(), ComponentReflection { - docstring_md: "An N-dimensional array of numbers.\n\nThe number of dimensions and their respective lengths is specified by the `shape` field.\nThe dimensions are ordered from outermost to innermost. 
For example, in the common case of\na 2D RGB Image, the shape would be `[height, width, channel]`.\n\nThese dimensions are combined with an index to look up values from the `buffer` field,\nwhich stores a contiguous array of typed values.\n\nNote that the buffer may be encoded in a compressed format such as `jpeg` or\nin a format with downsampled chroma, such as NV12 or YUY2.\nFor file formats, the shape is used as a hint, for chroma downsampled format\nthe shape has to be the shape of the decoded image.", + docstring_md: "An N-dimensional array of numbers.\n\nThe number of dimensions and their respective lengths is specified by the `shape` field.\nThe dimensions are ordered from outermost to innermost. For example, in the common case of\na 2D RGB Image, the shape would be `[height, width, channel]`.\n\nThese dimensions are combined with an index to look up values from the `buffer` field,\nwhich stores a contiguous array of typed values.\n\nNote that the buffer may in a format with downsampled chroma, such as NV12 or YUY2.\nFor chroma downsampled formats the shape has to be the shape of the decoded image.", placeholder: Some(TensorData::default().to_arrow()?), }, ), diff --git a/crates/viewer/re_viewer_context/src/gpu_bridge/tensor_to_gpu.rs b/crates/viewer/re_viewer_context/src/gpu_bridge/tensor_to_gpu.rs index b47ea5c0910c..16c502e14aea 100644 --- a/crates/viewer/re_viewer_context/src/gpu_bridge/tensor_to_gpu.rs +++ b/crates/viewer/re_viewer_context/src/gpu_bridge/tensor_to_gpu.rs @@ -14,12 +14,12 @@ use re_renderer::{ resource_managers::Texture2DCreationDesc, RenderContext, }; +use re_types::components::Colormap; use re_types::{ components::ClassId, datatypes::{TensorBuffer, TensorData}, tensor_data::TensorDataMeaning, }; -use re_types::{components::Colormap, tensor_data::DecodedTensor}; use crate::{gpu_bridge::colormap::colormap_to_re_renderer, Annotations, TensorStats}; @@ -52,7 +52,7 @@ pub fn tensor_to_gpu( render_ctx: &RenderContext, debug_name: &str, 
tensor_data_row_id: RowId, - tensor: &DecodedTensor, + tensor: &TensorData, meaning: TensorDataMeaning, tensor_stats: &TensorStats, annotations: &Annotations, @@ -98,7 +98,7 @@ fn color_tensor_to_gpu( render_ctx: &RenderContext, debug_name: &str, texture_key: u64, - tensor: &DecodedTensor, + tensor: &TensorData, tensor_stats: &TensorStats, ) -> anyhow::Result { let [height, width, depth] = texture_height_width_channels(tensor)?; @@ -217,7 +217,7 @@ fn class_id_tensor_to_gpu( render_ctx: &RenderContext, debug_name: &str, texture_key: u64, - tensor: &DecodedTensor, + tensor: &TensorData, tensor_stats: &TensorStats, annotations: &Annotations, ) -> anyhow::Result { @@ -294,7 +294,7 @@ fn depth_tensor_to_gpu( render_ctx: &RenderContext, debug_name: &str, texture_key: u64, - tensor: &DecodedTensor, + tensor: &TensorData, tensor_stats: &TensorStats, colormap: Option, ) -> anyhow::Result { @@ -327,17 +327,17 @@ fn depth_tensor_to_gpu( } fn depth_tensor_range( - tensor: &DecodedTensor, + tensor: &TensorData, tensor_stats: &TensorStats, ) -> anyhow::Result<(f64, f64)> { let range = tensor_stats.range.ok_or(anyhow::anyhow!( - "Tensor has no range!? Was this compressed?" + "TensorData has no range!? Was this compressed?" ))?; let (mut min, mut max) = range; anyhow::ensure!( min.is_finite() && max.is_finite(), - "Tensor has non-finite values" + "TensorData has non-finite values" ); min = min.min(0.0); // Depth usually start at zero. @@ -361,7 +361,7 @@ fn depth_tensor_range( /// Uses no `Unorm/Snorm` formats. fn general_texture_creation_desc_from_tensor<'a>( debug_name: &str, - tensor: &'a DecodedTensor, + tensor: &'a TensorData, ) -> anyhow::Result> { let [height, width, depth] = texture_height_width_channels(tensor)?; @@ -382,10 +382,6 @@ fn general_texture_creation_desc_from_tensor<'a>( TensorBuffer::F32(buf) => (cast_slice_to_cow(buf), TextureFormat::R32Float), TensorBuffer::F64(buf) => (narrow_f64_to_f32s(buf), TextureFormat::R32Float), // narrowing to f32! 
- TensorBuffer::Jpeg(_) => { - unreachable!("DecodedTensor cannot contain a JPEG") - } - TensorBuffer::Nv12(_) => { unreachable!("An NV12 tensor can only contain a 3 channel image.") } @@ -411,9 +407,6 @@ fn general_texture_creation_desc_from_tensor<'a>( TensorBuffer::F32(buf) => (cast_slice_to_cow(buf), TextureFormat::Rg32Float), TensorBuffer::F64(buf) => (narrow_f64_to_f32s(buf), TextureFormat::Rg32Float), // narrowing to f32! - TensorBuffer::Jpeg(_) => { - unreachable!("DecodedTensor cannot contain a JPEG") - } TensorBuffer::Nv12(_) => { unreachable!("An NV12 tensor can only contain a 3 channel image.") } @@ -460,9 +453,6 @@ fn general_texture_creation_desc_from_tensor<'a>( TextureFormat::Rgba32Float, ), - TensorBuffer::Jpeg(_) => { - unreachable!("DecodedTensor cannot contain a JPEG") - } TensorBuffer::Nv12(buf) | TensorBuffer::Yuy2(buf) => { (cast_slice_to_cow(buf.as_slice()), TextureFormat::R8Unorm) } @@ -485,9 +475,6 @@ fn general_texture_creation_desc_from_tensor<'a>( TensorBuffer::F32(buf) => (cast_slice_to_cow(buf), TextureFormat::Rgba32Float), TensorBuffer::F64(buf) => (narrow_f64_to_f32s(buf), TextureFormat::Rgba32Float), // narrowing to f32! - TensorBuffer::Jpeg(_) => { - unreachable!("DecodedTensor cannot contain a JPEG") - } TensorBuffer::Nv12(_) => { unreachable!("An NV12 tensor can only contain a 3 channel image.") } @@ -572,7 +559,7 @@ pub fn texture_height_width_channels(tensor: &TensorData) -> anyhow::Result<[u32 use anyhow::Context as _; let Some([mut height, mut width, channel]) = tensor.image_height_width_channels() else { - anyhow::bail!("Tensor with shape {:?} is not an image", tensor.shape); + anyhow::bail!("TensorData with shape {:?} is not an image", tensor.shape); }; height = match tensor.buffer { // Correct the texture height for NV12, tensor.image_height_width_channels returns the RGB size for NV12 images. 
diff --git a/crates/viewer/re_viewer_context/src/lib.rs b/crates/viewer/re_viewer_context/src/lib.rs index 7abf12e4c2f4..4cf286f29db6 100644 --- a/crates/viewer/re_viewer_context/src/lib.rs +++ b/crates/viewer/re_viewer_context/src/lib.rs @@ -73,7 +73,7 @@ pub use space_view::{ }; pub use store_context::StoreContext; pub use store_hub::StoreHub; -pub use tensor::{ImageDecodeCache, TensorDecodeCache, TensorStats, TensorStatsCache}; +pub use tensor::{ImageDecodeCache, TensorStats, TensorStatsCache}; pub use time_control::{Looping, PlayState, TimeControl, TimeView}; pub use typed_entity_collections::{ ApplicableEntities, IndicatedEntities, PerVisualizer, VisualizableEntities, diff --git a/crates/viewer/re_viewer_context/src/tensor/image_decode_cache.rs b/crates/viewer/re_viewer_context/src/tensor/image_decode_cache.rs index 12063c1d9b5c..cb49ef4f7ccc 100644 --- a/crates/viewer/re_viewer_context/src/tensor/image_decode_cache.rs +++ b/crates/viewer/re_viewer_context/src/tensor/image_decode_cache.rs @@ -1,11 +1,11 @@ use re_chunk::RowId; -use re_types::tensor_data::{DecodedTensor, TensorImageLoadError}; +use re_types::{archetypes::Tensor, tensor_data::TensorImageLoadError}; use crate::Cache; struct DecodedImageResult { /// Cached `Result` from decoding the image - tensor_result: Result, + tensor_result: Result, /// Total memory used by this image. 
memory_used: u64, @@ -35,7 +35,7 @@ impl ImageDecodeCache { key: RowId, image_bytes: &[u8], media_type: Option<&str>, - ) -> Result { + ) -> Result { re_tracing::profile_function!(); let lookup = self.cache.entry(key).or_insert_with(|| { @@ -61,7 +61,7 @@ impl ImageDecodeCache { fn decode_image( image_bytes: &[u8], media_type: Option<&str>, -) -> Result { +) -> Result { re_tracing::profile_function!(); let mut reader = image::io::Reader::new(std::io::Cursor::new(image_bytes)); @@ -83,7 +83,7 @@ fn decode_image( let img = reader.decode()?; - DecodedTensor::from_image(img) + Tensor::from_image(img) } impl Cache for ImageDecodeCache { diff --git a/crates/viewer/re_viewer_context/src/tensor/mod.rs b/crates/viewer/re_viewer_context/src/tensor/mod.rs index cc4bb474acfe..8c2ed6d00f62 100644 --- a/crates/viewer/re_viewer_context/src/tensor/mod.rs +++ b/crates/viewer/re_viewer_context/src/tensor/mod.rs @@ -1,11 +1,9 @@ // TODO(andreas): Move tensor utilities to a tensor specific crate? mod image_decode_cache; -mod tensor_decode_cache; mod tensor_stats; mod tensor_stats_cache; pub use image_decode_cache::ImageDecodeCache; -pub use tensor_decode_cache::TensorDecodeCache; pub use tensor_stats::TensorStats; pub use tensor_stats_cache::TensorStatsCache; diff --git a/crates/viewer/re_viewer_context/src/tensor/tensor_decode_cache.rs b/crates/viewer/re_viewer_context/src/tensor/tensor_decode_cache.rs deleted file mode 100644 index 9e70743bdb45..000000000000 --- a/crates/viewer/re_viewer_context/src/tensor/tensor_decode_cache.rs +++ /dev/null @@ -1,115 +0,0 @@ -use re_chunk::RowId; -use re_types::{ - datatypes::TensorData, - tensor_data::{DecodedTensor, TensorImageLoadError}, -}; - -use crate::Cache; - -struct DecodedTensorResult { - /// Cached `Result` from decoding the `Tensor` - tensor_result: Result, - - /// Total memory used by this `Tensor`.\ - memory_used: u64, - - /// Which [`TensorDecodeCache::generation`] was this `Tensor` last used? 
- last_use_generation: u64, -} - -/// Caches decoded tensors using a [`RowId`], i.e. a specific instance of -/// a `TensorData` component. -#[derive(Default)] -pub struct TensorDecodeCache { - cache: ahash::HashMap, - memory_used: u64, - generation: u64, -} - -#[allow(clippy::map_err_ignore)] -impl TensorDecodeCache { - /// Decode some [`TensorData`] if necessary and cache the result. - /// - /// The key should be the `RowId` of the `TensorData`. - /// NOTE: `TensorData` is never batched (they are mono-components), - /// so we don't need the instance id here. - /// - /// This is a no-op for tensors that are not compressed. - /// - /// Currently supports JPEG encoded tensors. - pub fn entry( - &mut self, - key: RowId, - maybe_encoded_tensor: TensorData, - ) -> Result { - re_tracing::profile_function!(); - - match DecodedTensor::try_from(maybe_encoded_tensor) { - Ok(decoded_tensor) => Ok(decoded_tensor), - - Err(encoded_tensor) => { - let lookup = self.cache.entry(key).or_insert_with(|| { - let tensor_result = DecodedTensor::try_decode(encoded_tensor); - let memory_used = match &tensor_result { - Ok(tensor) => tensor.size_in_bytes() as u64, - Err(_) => 0, - }; - self.memory_used += memory_used; - let last_use_generation = 0; - DecodedTensorResult { - tensor_result, - memory_used, - last_use_generation, - } - }); - lookup.last_use_generation = self.generation; - lookup.tensor_result.clone() - } - } - } -} - -impl Cache for TensorDecodeCache { - fn begin_frame(&mut self) { - #[cfg(not(target_arch = "wasm32"))] - let max_decode_cache_use = 4_000_000_000; - - #[cfg(target_arch = "wasm32")] - let max_decode_cache_use = 1_000_000_000; - - // TODO(jleibs): a more incremental purging mechanism, maybe switching to an LRU Cache - // would likely improve the behavior. 
- - if self.memory_used > max_decode_cache_use { - self.purge_memory(); - } - - self.generation += 1; - } - - fn purge_memory(&mut self) { - re_tracing::profile_function!(); - - // Very aggressively flush everything not used in this frame - - let before = self.memory_used; - - self.cache.retain(|_, ci| { - let retain = ci.last_use_generation == self.generation; - if !retain { - self.memory_used -= ci.memory_used; - } - retain - }); - - re_log::trace!( - "Flushed tensor decode cache. Before: {:.2} GB. After: {:.2} GB", - before as f64 / 1e9, - self.memory_used as f64 / 1e9, - ); - } - - fn as_any_mut(&mut self) -> &mut dyn std::any::Any { - self - } -} diff --git a/docs/content/reference/migration/migration-0-18.md b/docs/content/reference/migration/migration-0-18.md index a2a5d59b6817..68e3ce6cc295 100644 --- a/docs/content/reference/migration/migration-0-18.md +++ b/docs/content/reference/migration/migration-0-18.md @@ -8,10 +8,22 @@ NOTE! Rerun 0.18 has not yet been released ## ⚠️ Breaking changes ### `ImageEncoded` +`ImageEncoded` is our new archetype for logging an image file, e.g. a PNG or JPEG. + +#### Python +In Python we already had a `ImageEncoded` class, but this has now been replaced with the new archetype. + * Python: `NV12/YUY2` are now logged with the new `ImageChromaDownsampled` * `ImageEncoded`:s `format` parameter has been replaced with `media_type` (MIME) * `ImageFormat` is now only for `NV12/YUY2` +### Rust +* Removed `TensorBuffer::JPEG` +* Removed `TensorData::from_jpeg_bytes` +* Deprecated `Image::from_file_path` and `from_file_contents` + +For all of these, use `ImageEncoded` instead. + ### `mesh_material: Material` has been renamed to `albedo_factor: AlbedoFactor` [#6841](https://github.com/rerun-io/rerun/pull/6841) The field `mesh_material` in `Mesh3D` is now named `albedo_factor` and wraps a `datatypes.Rgba32`. 
diff --git a/docs/content/reference/types/components/tensor_data.md b/docs/content/reference/types/components/tensor_data.md index 5723832a16e7..ee12a9cbcd56 100644 --- a/docs/content/reference/types/components/tensor_data.md +++ b/docs/content/reference/types/components/tensor_data.md @@ -12,10 +12,8 @@ a 2D RGB Image, the shape would be `[height, width, channel]`. These dimensions are combined with an index to look up values from the `buffer` field, which stores a contiguous array of typed values. -Note that the buffer may be encoded in a compressed format such as `jpeg` or -in a format with downsampled chroma, such as NV12 or YUY2. -For file formats, the shape is used as a hint, for chroma downsampled format -the shape has to be the shape of the decoded image. +Note that the buffer may be in a format with downsampled chroma, such as NV12 or YUY2. +For chroma downsampled formats the shape has to be the shape of the decoded image. ## Fields diff --git a/docs/content/reference/types/datatypes/tensor_buffer.md b/docs/content/reference/types/datatypes/tensor_buffer.md index 7f15e5a59a4f..2212ff3f6019 100644 --- a/docs/content/reference/types/datatypes/tensor_buffer.md +++ b/docs/content/reference/types/datatypes/tensor_buffer.md @@ -20,7 +20,6 @@ Tensor elements are stored in a contiguous buffer of a single type. * F16: list of `f16` * F32: list of `f32` * F64: list of `f64` -* JPEG: list of `u8` * NV12: list of `u8` * YUY2: list of `u8` diff --git a/docs/content/reference/types/datatypes/tensor_data.md b/docs/content/reference/types/datatypes/tensor_data.md index 85137e59a913..494be5a80ff4 100644 --- a/docs/content/reference/types/datatypes/tensor_data.md +++ b/docs/content/reference/types/datatypes/tensor_data.md @@ -12,10 +12,8 @@ a 2D RGB Image, the shape would be `[height, width, channel]`. These dimensions are combined with an index to look up values from the `buffer` field, which stores a contiguous array of typed values. 
-Note that the buffer may be encoded in a compressed format such as `jpeg` or -in a format with downsampled chroma, such as NV12 or YUY2. -For file formats, the shape is used as a hint, for chroma downsampled format -the shape has to be the shape of the decoded image. +Note that the buffer may be in a format with downsampled chroma, such as NV12 or YUY2. +For chroma downsampled formats the shape has to be the shape of the decoded image. ## Fields diff --git a/rerun_cpp/src/rerun/components/tensor_data.hpp b/rerun_cpp/src/rerun/components/tensor_data.hpp index 220bde809c66..4116ecb68a47 100644 --- a/rerun_cpp/src/rerun/components/tensor_data.hpp +++ b/rerun_cpp/src/rerun/components/tensor_data.hpp @@ -20,10 +20,8 @@ namespace rerun::components { /// These dimensions are combined with an index to look up values from the `buffer` field, /// which stores a contiguous array of typed values. /// - /// Note that the buffer may be encoded in a compressed format such as `jpeg` or - /// in a format with downsampled chroma, such as NV12 or YUY2. - /// For file formats, the shape is used as a hint, for chroma downsampled format - /// the shape has to be the shape of the decoded image. + /// Note that the buffer may be in a format with downsampled chroma, such as NV12 or YUY2. + /// For chroma downsampled formats the shape has to be the shape of the decoded image. 
struct TensorData { rerun::datatypes::TensorData data; diff --git a/rerun_cpp/src/rerun/datatypes/tensor_buffer.cpp b/rerun_cpp/src/rerun/datatypes/tensor_buffer.cpp index 3289ce02b4f0..845cbd65338c 100644 --- a/rerun_cpp/src/rerun/datatypes/tensor_buffer.cpp +++ b/rerun_cpp/src/rerun/datatypes/tensor_buffer.cpp @@ -23,7 +23,6 @@ namespace rerun { arrow::field("F16", arrow::list(arrow::field("item", arrow::float16(), false)), false), arrow::field("F32", arrow::list(arrow::field("item", arrow::float32(), false)), false), arrow::field("F64", arrow::list(arrow::field("item", arrow::float64(), false)), false), - arrow::field("JPEG", arrow::list(arrow::field("item", arrow::uint8(), false)), false), arrow::field("NV12", arrow::list(arrow::field("item", arrow::uint8(), false)), false), arrow::field("YUY2", arrow::list(arrow::field("item", arrow::uint8(), false)), false), }); @@ -200,17 +199,6 @@ namespace rerun { static_cast(union_instance.get_union_data().f64.size()) )); } break; - case TagType::JPEG: { - auto variant_builder = - static_cast(variant_builder_untyped); - ARROW_RETURN_NOT_OK(variant_builder->Append()); - auto value_builder = - static_cast(variant_builder->value_builder()); - ARROW_RETURN_NOT_OK(value_builder->AppendValues( - union_instance.get_union_data().jpeg.data(), - static_cast(union_instance.get_union_data().jpeg.size()) - )); - } break; case TagType::NV12: { auto variant_builder = static_cast(variant_builder_untyped); diff --git a/rerun_cpp/src/rerun/datatypes/tensor_buffer.hpp b/rerun_cpp/src/rerun/datatypes/tensor_buffer.hpp index 140c805863ac..3a846f581697 100644 --- a/rerun_cpp/src/rerun/datatypes/tensor_buffer.hpp +++ b/rerun_cpp/src/rerun/datatypes/tensor_buffer.hpp @@ -37,7 +37,6 @@ namespace rerun::datatypes { F16, F32, F64, - JPEG, NV12, YUY2, }; @@ -77,9 +76,6 @@ namespace rerun::datatypes { /// 64bit IEEE-754 floating point, also known as `double`. rerun::Collection f64; - /// Raw bytes of a JPEG file. 
- rerun::Collection jpeg; - /// NV12 is a YUV 4:2:0 chroma downsamples format with 8 bits per channel. /// /// First comes entire image in Y, followed by interleaved lines ordered as U0, V0, U1, V1, etc. @@ -161,10 +157,6 @@ namespace rerun::datatypes { using TypeAlias = rerun::Collection; new (&_data.f64) TypeAlias(other._data.f64); } break; - case detail::TensorBufferTag::JPEG: { - using TypeAlias = rerun::Collection; - new (&_data.jpeg) TypeAlias(other._data.jpeg); - } break; case detail::TensorBufferTag::NV12: { using TypeAlias = rerun::Collection; new (&_data.nv12) TypeAlias(other._data.nv12); @@ -242,10 +234,6 @@ namespace rerun::datatypes { using TypeAlias = rerun::Collection; _data.f64.~TypeAlias(); } break; - case detail::TensorBufferTag::JPEG: { - using TypeAlias = rerun::Collection; - _data.jpeg.~TypeAlias(); - } break; case detail::TensorBufferTag::NV12: { using TypeAlias = rerun::Collection; _data.nv12.~TypeAlias(); @@ -306,7 +294,7 @@ namespace rerun::datatypes { /// Number of elements in the buffer. /// - /// You may NOT call this for JPEG buffers. + /// You may NOT call this for NV12 or YUY2. size_t num_elems() const; void swap(TensorBuffer& other) noexcept { @@ -402,14 +390,6 @@ namespace rerun::datatypes { return self; } - /// Raw bytes of a JPEG file. - static TensorBuffer jpeg(rerun::Collection jpeg) { - TensorBuffer self; - self._tag = detail::TensorBufferTag::JPEG; - new (&self._data.jpeg) rerun::Collection(std::move(jpeg)); - return self; - } - /// NV12 is a YUV 4:2:0 chroma downsamples format with 8 bits per channel. /// /// First comes entire image in Y, followed by interleaved lines ordered as U0, V0, U1, V1, etc. @@ -529,15 +509,6 @@ namespace rerun::datatypes { } } - /// Return a pointer to jpeg if the union is in that state, otherwise `nullptr`. 
- const rerun::Collection* get_jpeg() const { - if (_tag == detail::TensorBufferTag::JPEG) { - return &_data.jpeg; - } else { - return nullptr; - } - } - /// Return a pointer to nv12 if the union is in that state, otherwise `nullptr`. const rerun::Collection* get_nv12() const { if (_tag == detail::TensorBufferTag::NV12) { diff --git a/rerun_cpp/src/rerun/datatypes/tensor_buffer_ext.cpp b/rerun_cpp/src/rerun/datatypes/tensor_buffer_ext.cpp index fa6876be1e9b..bf477f76c9e1 100644 --- a/rerun_cpp/src/rerun/datatypes/tensor_buffer_ext.cpp +++ b/rerun_cpp/src/rerun/datatypes/tensor_buffer_ext.cpp @@ -67,13 +67,12 @@ namespace rerun::datatypes { /// Number of elements in the buffer. /// - /// You may NOT call this for JPEG buffers. + /// You may NOT call this for NV12 or YUY2. size_t num_elems() const; // #endif - /// Number of elements in the buffer. size_t TensorBuffer::num_elems() const { switch (this->_tag) { case detail::TensorBufferTag::None: { @@ -120,10 +119,6 @@ namespace rerun::datatypes { assert(false && "Can't ask for the number of elements in an YUY2 encoded image"); break; } - case detail::TensorBufferTag::JPEG: { - assert(false && "Can't ask for the number of elements in a JPEG"); - break; - } } assert(false && "Unknown TensorBuffer tag"); return 0; diff --git a/rerun_cpp/src/rerun/datatypes/tensor_data.hpp b/rerun_cpp/src/rerun/datatypes/tensor_data.hpp index 73689619f855..c1c01b3b8b71 100644 --- a/rerun_cpp/src/rerun/datatypes/tensor_data.hpp +++ b/rerun_cpp/src/rerun/datatypes/tensor_data.hpp @@ -27,10 +27,8 @@ namespace rerun::datatypes { /// These dimensions are combined with an index to look up values from the `buffer` field, /// which stores a contiguous array of typed values. /// - /// Note that the buffer may be encoded in a compressed format such as `jpeg` or - /// in a format with downsampled chroma, such as NV12 or YUY2. 
- /// For file formats, the shape is used as a hint, for chroma downsampled format - /// the shape has to be the shape of the decoded image. + /// Note that the buffer may be in a format with downsampled chroma, such as NV12 or YUY2. + /// For chroma downsampled formats the shape has to be the shape of the decoded image. struct TensorData { /// The shape of the tensor, including optional names for each dimension. rerun::Collection shape; diff --git a/rerun_py/rerun_sdk/rerun/archetypes/image_ext.py b/rerun_py/rerun_sdk/rerun/archetypes/image_ext.py index 42ed8444133c..892e2b6be003 100644 --- a/rerun_py/rerun_sdk/rerun/archetypes/image_ext.py +++ b/rerun_py/rerun_sdk/rerun/archetypes/image_ext.py @@ -7,7 +7,6 @@ import pyarrow as pa from .._validators import find_non_empty_dim_indices -from ..datatypes import TensorBufferType from ..error_utils import _send_warning_or_raise, catch_and_log_exceptions if TYPE_CHECKING: @@ -20,8 +19,6 @@ class ImageExt: """Extension for [Image][rerun.archetypes.Image].""" - JPEG_TYPE_ID = list(f.name for f in TensorBufferType().storage_type).index("JPEG") - def compress(self, *, jpeg_quality: int = 95) -> ImageEncoded | Image: """ Converts an `Image` to an [`rerun.ImageEncoded`][] using JPEG compression. @@ -48,14 +45,6 @@ def compress(self, *, jpeg_quality: int = 95) -> ImageEncoded | Image: with catch_and_log_exceptions(context="Image compression"): tensor_data_arrow = self.data.as_arrow_array() - if tensor_data_arrow[0].value["buffer"].type_code == self.JPEG_TYPE_ID: - _send_warning_or_raise( - "Image is already compressed as JPEG. 
Ignoring compression request.", - 1, - recording=None, - ) - return self - shape_dims = tensor_data_arrow[0].value["shape"].values.field(0).to_numpy() non_empty_dims = find_non_empty_dim_indices(shape_dims) filtered_shape = shape_dims[non_empty_dims] diff --git a/rerun_py/rerun_sdk/rerun/components/tensor_data.py b/rerun_py/rerun_sdk/rerun/components/tensor_data.py index 9aba5a438ee4..29698f43a9ca 100644 --- a/rerun_py/rerun_sdk/rerun/components/tensor_data.py +++ b/rerun_py/rerun_sdk/rerun/components/tensor_data.py @@ -25,10 +25,8 @@ class TensorData(datatypes.TensorData, ComponentMixin): These dimensions are combined with an index to look up values from the `buffer` field, which stores a contiguous array of typed values. - Note that the buffer may be encoded in a compressed format such as `jpeg` or - in a format with downsampled chroma, such as NV12 or YUY2. - For file formats, the shape is used as a hint, for chroma downsampled format - the shape has to be the shape of the decoded image. + Note that the buffer may be in a format with downsampled chroma, such as NV12 or YUY2. + For chroma downsampled formats the shape has to be the shape of the decoded image. """ _BATCH_TYPE = None diff --git a/rerun_py/rerun_sdk/rerun/datatypes/tensor_buffer.py b/rerun_py/rerun_sdk/rerun/datatypes/tensor_buffer.py index 6a31a1aeb765..d871dfb0252b 100644 --- a/rerun_py/rerun_sdk/rerun/datatypes/tensor_buffer.py +++ b/rerun_py/rerun_sdk/rerun/datatypes/tensor_buffer.py @@ -82,9 +82,6 @@ class TensorBuffer(TensorBufferExt): * F64 (npt.NDArray[np.float64]): 64bit IEEE-754 floating point, also known as `double`. - * JPEG (npt.NDArray[np.uint8]): - Raw bytes of a JPEG file. - * NV12 (npt.NDArray[np.uint8]): NV12 is a YUV 4:2:0 chroma downsamples format with 8 bits per channel. @@ -96,8 +93,8 @@ class TensorBuffer(TensorBufferExt): The order of the channels is Y0, U0, Y1, V0. 
""" - kind: Literal["u8", "u16", "u32", "u64", "i8", "i16", "i32", "i64", "f16", "f32", "f64", "jpeg", "nv12", "yuy2"] = ( - field(default="u8") + kind: Literal["u8", "u16", "u32", "u64", "i8", "i16", "i32", "i64", "f16", "f32", "f64", "nv12", "yuy2"] = field( + default="u8" ) """ Possible values: @@ -135,9 +132,6 @@ class TensorBuffer(TensorBufferExt): * "f64": 64bit IEEE-754 floating point, also known as `double`. - * "jpeg": - Raw bytes of a JPEG file. - * "nv12": NV12 is a YUV 4:2:0 chroma downsamples format with 8 bits per channel. @@ -259,12 +253,6 @@ def __init__(self) -> None: nullable=False, metadata={}, ), - pa.field( - "JPEG", - pa.list_(pa.field("item", pa.uint8(), nullable=False, metadata={})), - nullable=False, - metadata={}, - ), pa.field( "NV12", pa.list_(pa.field("item", pa.uint8(), nullable=False, metadata={})), diff --git a/rerun_py/rerun_sdk/rerun/datatypes/tensor_data.py b/rerun_py/rerun_sdk/rerun/datatypes/tensor_data.py index bad99e3538b2..c83b22bb7375 100644 --- a/rerun_py/rerun_sdk/rerun/datatypes/tensor_data.py +++ b/rerun_py/rerun_sdk/rerun/datatypes/tensor_data.py @@ -40,10 +40,8 @@ class TensorData(TensorDataExt): These dimensions are combined with an index to look up values from the `buffer` field, which stores a contiguous array of typed values. - Note that the buffer may be encoded in a compressed format such as `jpeg` or - in a format with downsampled chroma, such as NV12 or YUY2. - For file formats, the shape is used as a hint, for chroma downsampled format - the shape has to be the shape of the decoded image. + Note that the buffer may be in a format with downsampled chroma, such as NV12 or YUY2. + For chroma downsampled formats the shape has to be the shape of the decoded image. 
""" # __init__ can be found in tensor_data_ext.py @@ -160,12 +158,6 @@ def __init__(self) -> None: nullable=False, metadata={}, ), - pa.field( - "JPEG", - pa.list_(pa.field("item", pa.uint8(), nullable=False, metadata={})), - nullable=False, - metadata={}, - ), pa.field( "NV12", pa.list_(pa.field("item", pa.uint8(), nullable=False, metadata={})), diff --git a/rerun_py/rerun_sdk/rerun/datatypes/tensor_data_ext.py b/rerun_py/rerun_sdk/rerun/datatypes/tensor_data_ext.py index ace5947e0637..1cf2275f5857 100644 --- a/rerun_py/rerun_sdk/rerun/datatypes/tensor_data_ext.py +++ b/rerun_py/rerun_sdk/rerun/datatypes/tensor_data_ext.py @@ -137,7 +137,7 @@ def __init__( elif array is not None: self.buffer = TensorBuffer(array.flatten()) - if self.buffer.kind != "jpeg" and self.buffer.kind != "nv12" and self.buffer.kind != "yuy2": + if self.buffer.kind != "nv12" and self.buffer.kind != "yuy2": expected_buffer_size = prod(d.size for d in self.shape) if len(self.buffer.inner) != expected_buffer_size: @@ -233,9 +233,8 @@ def _build_buffer_array(buffer: TensorBufferLike) -> pa.Array: buffer = buffer.flatten() data_inner = pa.ListArray.from_arrays(pa.array([0, len(buffer)]), buffer) - if kind == "jpeg": - discriminant = "JPEG" - elif kind == "nv12": + + if kind == "nv12": discriminant = "NV12" elif kind == "yuy2": discriminant = "YUY2"