From ec9d0a03216858027de5ae706bd0ee95018908b3 Mon Sep 17 00:00:00 2001 From: Emil Ernerfeldt Date: Wed, 28 Aug 2024 18:53:56 +0200 Subject: [PATCH] Revert "BGR(A) image format support (#7238)" This reverts commit 1cd91ffb5e213f57db564573e898e427aa851e63. --- .../rerun/datatypes/color_model.fbs | 6 - .../re_types/src/archetypes/image_ext.rs | 4 +- .../re_types/src/datatypes/color_model.rs | 16 +-- .../re_types/src/datatypes/color_model_ext.rs | 8 +- crates/viewer/re_data_ui/src/image.rs | 46 -------- .../viewer/re_renderer/shader/rectangle.wgsl | 3 - .../re_renderer/shader/rectangle_fs.wgsl | 5 - .../re_renderer/src/renderer/rectangles.rs | 11 +- .../re_space_view_spatial/src/mesh_loader.rs | 66 +++-------- .../src/gpu_bridge/image_to_gpu.rs | 106 +++++------------- .../re_viewer_context/src/gpu_bridge/mod.rs | 3 +- .../re_viewer_context/src/image_info.rs | 45 +------- .../reference/types/datatypes/color_model.md | 2 - .../snippets/all/archetypes/image_advanced.py | 5 +- .../arkit_scenes/arkit_scenes/__main__.py | 11 +- .../python/face_tracking/face_tracking.py | 8 +- .../gesture_detection/gesture_detection.py | 12 +- .../human_pose_tracking.py | 7 +- .../live_camera_edge_detection.py | 3 +- examples/python/ocr/ocr.py | 2 +- examples/python/rgbd/rgbd.py | 12 +- .../segment_anything_model.py | 1 - .../structure_from_motion/__main__.py | 3 +- rerun_cpp/src/rerun/datatypes/color_model.hpp | 6 - rerun_cpp/src/rerun/image_utils.hpp | 2 - .../rerun_sdk/rerun/archetypes/image_ext.py | 18 ++- .../rerun_sdk/rerun/datatypes/color_model.py | 8 +- tests/python/release_checklist/check_bgr.py | 88 --------------- 28 files changed, 107 insertions(+), 400 deletions(-) delete mode 100644 tests/python/release_checklist/check_bgr.py diff --git a/crates/store/re_types/definitions/rerun/datatypes/color_model.fbs b/crates/store/re_types/definitions/rerun/datatypes/color_model.fbs index 04e895ce59e7..c186626b8eec 100644 --- a/crates/store/re_types/definitions/rerun/datatypes/color_model.fbs +++ b/crates/store/re_types/definitions/rerun/datatypes/color_model.fbs @@ -15,10 +15,4 @@ enum ColorModel: ubyte{ /// Red, Green, Blue, Alpha RGBA = 3, - - /// Blue, Green, Red - BGR, - - /// Blue, Green, Red, Alpha - BGRA, } diff --git a/crates/store/re_types/src/archetypes/image_ext.rs b/crates/store/re_types/src/archetypes/image_ext.rs index 1beff5061887..1f2d0223be14 100644 --- a/crates/store/re_types/src/archetypes/image_ext.rs +++ b/crates/store/re_types/src/archetypes/image_ext.rs @@ -39,10 +39,10 @@ impl Image { let is_shape_correct = match color_model { ColorModel::L => non_empty_dim_inds.len() == 2, - ColorModel::RGB | ColorModel::BGR => { + ColorModel::RGB => { non_empty_dim_inds.len() == 3 && shape[non_empty_dim_inds[2]].size == 3 } - ColorModel::RGBA | ColorModel::BGRA => { + ColorModel::RGBA => { non_empty_dim_inds.len() == 3 && shape[non_empty_dim_inds[2]].size == 4 } }; diff --git a/crates/store/re_types/src/datatypes/color_model.rs b/crates/store/re_types/src/datatypes/color_model.rs index 395993f7b3bf..8280776b963f 100644 --- a/crates/store/re_types/src/datatypes/color_model.rs +++ b/crates/store/re_types/src/datatypes/color_model.rs @@ -35,20 +35,12 @@ pub enum ColorModel { /// Red, Green, Blue, Alpha #[allow(clippy::upper_case_acronyms)] RGBA = 3, - - /// Blue, Green, Red - #[allow(clippy::upper_case_acronyms)] - BGR = 4, - - /// Blue, Green, Red, Alpha - #[allow(clippy::upper_case_acronyms)] - BGRA = 5, } impl ::re_types_core::reflection::Enum for ColorModel { #[inline] fn variants() -> &'static [Self] { 
- &[Self::L, Self::RGB, Self::RGBA, Self::BGR, Self::BGRA] + &[Self::L, Self::RGB, Self::RGBA] } #[inline] @@ -57,8 +49,6 @@ impl ::re_types_core::reflection::Enum for ColorModel { Self::L => "Grayscale luminance intensity/brightness/value, sometimes called `Y`", Self::RGB => "Red, Green, Blue", Self::RGBA => "Red, Green, Blue, Alpha", - Self::BGR => "Blue, Green, Red", - Self::BGRA => "Blue, Green, Red, Alpha", } } } @@ -81,8 +71,6 @@ impl std::fmt::Display for ColorModel { Self::L => write!(f, "L"), Self::RGB => write!(f, "RGB"), Self::RGBA => write!(f, "RGBA"), - Self::BGR => write!(f, "BGR"), - Self::BGRA => write!(f, "BGRA"), } } } @@ -159,8 +147,6 @@ impl ::re_types_core::Loggable for ColorModel { Some(1) => Ok(Some(Self::L)), Some(2) => Ok(Some(Self::RGB)), Some(3) => Ok(Some(Self::RGBA)), - Some(4) => Ok(Some(Self::BGR)), - Some(5) => Ok(Some(Self::BGRA)), None => Ok(None), Some(invalid) => Err(DeserializationError::missing_union_arm( Self::arrow_datatype(), diff --git a/crates/store/re_types/src/datatypes/color_model_ext.rs b/crates/store/re_types/src/datatypes/color_model_ext.rs index 2881d52d8cfe..fafb3e347166 100644 --- a/crates/store/re_types/src/datatypes/color_model_ext.rs +++ b/crates/store/re_types/src/datatypes/color_model_ext.rs @@ -8,8 +8,8 @@ impl ColorModel { pub fn num_channels(self) -> usize { match self { Self::L => 1, - Self::RGB | Self::BGR => 3, - Self::RGBA | Self::BGRA => 4, + Self::RGB => 3, + Self::RGBA => 4, } } @@ -17,8 +17,8 @@ impl ColorModel { #[inline] pub fn has_alpha(&self) -> bool { match self { - Self::L | Self::RGB | Self::BGR => false, - Self::RGBA | Self::BGRA => true, + Self::L | Self::RGB => false, + Self::RGBA => true, } } } diff --git a/crates/viewer/re_data_ui/src/image.rs b/crates/viewer/re_data_ui/src/image.rs index a0345453fc93..bedb471255c5 100644 --- a/crates/viewer/re_data_ui/src/image.rs +++ b/crates/viewer/re_data_ui/src/image.rs @@ -421,52 +421,6 @@ fn image_pixel_value_ui( None } } - - ColorModel::BGR => { - if let Some([b, g, r]) = { - if let [Some(b), Some(g), Some(r)] = [ - image.get_xyc(x, y, 0), - image.get_xyc(x, y, 1), - image.get_xyc(x, y, 2), - ] { - Some([r, g, b]) - } else { - None - } - } { - match (b, g, r) { - (TensorElement::U8(b), TensorElement::U8(g), TensorElement::U8(r)) => { - Some(format!("B: {b}, G: {g}, R: {r}, #{b:02X}{g:02X}{r:02X}")) - } - _ => Some(format!("B: {b}, G: {g}, R: {r}")), - } - } else { - None - } - } - - ColorModel::BGRA => { - if let (Some(b), Some(g), Some(r), Some(a)) = ( - image.get_xyc(x, y, 0), - image.get_xyc(x, y, 1), - image.get_xyc(x, y, 2), - image.get_xyc(x, y, 3), - ) { - match (b, g, r, a) { - ( - TensorElement::U8(b), - TensorElement::U8(g), - TensorElement::U8(r), - TensorElement::U8(a), - ) => Some(format!( - "B: {b}, G: {g}, R: {r}, A: {a}, #{r:02X}{g:02X}{b:02X}{a:02X}" - )), - _ => Some(format!("B: {b}, G: {g}, R: {r}, A: {a}")), - } - } else { - None - } - } }, }; diff --git a/crates/viewer/re_renderer/shader/rectangle.wgsl b/crates/viewer/re_renderer/shader/rectangle.wgsl index e6467ed6ce52..ac47f4bc1e18 100644 --- a/crates/viewer/re_renderer/shader/rectangle.wgsl +++ b/crates/viewer/re_renderer/shader/rectangle.wgsl @@ -59,9 +59,6 @@ struct UniformBuffer { /// Boolean: multiply RGB with alpha before filtering multiply_rgb_with_alpha: u32, - - /// Boolean: swizzle RGBA to BGRA - bgra_to_rgba: u32, }; @group(1) @binding(0) diff --git a/crates/viewer/re_renderer/shader/rectangle_fs.wgsl b/crates/viewer/re_renderer/shader/rectangle_fs.wgsl index a670524199a3..4c500311a34e
100644 --- a/crates/viewer/re_renderer/shader/rectangle_fs.wgsl +++ b/crates/viewer/re_renderer/shader/rectangle_fs.wgsl @@ -28,11 +28,6 @@ fn decode_color(sampled_value: vec4f) -> vec4f { // Normalize the value first, otherwise premultiplying alpha and linear space conversion won't make sense. var rgba = normalize_range(sampled_value); - // BGR(A) -> RGB(A) - if rect_info.bgra_to_rgba != 0u { - rgba = rgba.bgra; - } - // Convert to linear space if rect_info.decode_srgb != 0u { if all(vec3f(0.0) <= rgba.rgb) && all(rgba.rgb <= vec3f(1.0)) { diff --git a/crates/viewer/re_renderer/src/renderer/rectangles.rs b/crates/viewer/re_renderer/src/renderer/rectangles.rs index 3f9a60e46189..cfecdbe621c0 100644 --- a/crates/viewer/re_renderer/src/renderer/rectangles.rs +++ b/crates/viewer/re_renderer/src/renderer/rectangles.rs @@ -51,10 +51,6 @@ pub enum TextureFilterMin { pub enum ShaderDecoding { Nv12, Yuy2, - - /// BGR(A)->RGB(A) conversion is done in the shader. - /// (as opposed to doing it via ``) - Bgr, } /// Describes a texture and how to map it to a color. @@ -155,7 +151,7 @@ impl ColormappedTexture { let [width, height] = self.texture.width_height(); [width / 2, height] } - Some(ShaderDecoding::Bgr) | None => self.texture.width_height(), + _ => self.texture.width_height(), } } } @@ -279,8 +275,7 @@ mod gpu_data { decode_srgb: u32, multiply_rgb_with_alpha: u32, - bgra_to_rgba: u32, - _row_padding: [u32; 1], + _row_padding: [u32; 2], _end_padding: [wgpu_buffer_types::PaddingRow; 16 - 7], } @@ -367,7 +362,6 @@ mod gpu_data { super::TextureFilterMag::Linear => FILTER_BILINEAR, super::TextureFilterMag::Nearest => FILTER_NEAREST, }; - let bgra_to_rgba = shader_decoding == &Some(super::ShaderDecoding::Bgr); Ok(Self { top_left_corner_position: (*top_left_corner_position).into(), @@ -385,7 +379,6 @@ mod gpu_data { magnification_filter, decode_srgb: *decode_srgb as _, multiply_rgb_with_alpha: *multiply_rgb_with_alpha as _, - bgra_to_rgba: bgra_to_rgba as _, _row_padding: Default::default(), _end_padding: Default::default(), }) diff --git a/crates/viewer/re_space_view_spatial/src/mesh_loader.rs b/crates/viewer/re_space_view_spatial/src/mesh_loader.rs index af394dab3fdc..8ea456b72941 100644 --- a/crates/viewer/re_space_view_spatial/src/mesh_loader.rs +++ b/crates/viewer/re_space_view_spatial/src/mesh_loader.rs @@ -150,19 +150,26 @@ impl LoadedMesh { re_math::BoundingBox::from_points(vertex_positions.iter().copied()) }; - let albedo = try_get_or_create_albedo_texture( - albedo_texture_buffer, - albedo_texture_format, - render_ctx, - texture_key, - &name, - ) - .unwrap_or_else(|| { + let albedo = if let (Some(albedo_texture_buffer), Some(albedo_texture_format)) = + (&albedo_texture_buffer, albedo_texture_format) + { + let image_info = ImageInfo { + buffer_row_id: RowId::ZERO, // unused + buffer: albedo_texture_buffer.0.clone(), + format: albedo_texture_format.0, + kind: re_types::image::ImageKind::Color, + colormap: None, + }; + re_viewer_context::gpu_bridge::get_or_create_texture(render_ctx, texture_key, || { + let debug_name = "mesh albedo texture"; + texture_creation_desc_from_color_image(&image_info, debug_name) + })? 
+ } else { render_ctx .texture_manager_2d .white_texture_unorm_handle() .clone() - }); + }; let mesh = re_renderer::mesh::Mesh { label: name.clone().into(), @@ -204,44 +211,3 @@ impl LoadedMesh { self.bbox } } - -fn try_get_or_create_albedo_texture( - albedo_texture_buffer: &Option, - albedo_texture_format: &Option, - render_ctx: &RenderContext, - texture_key: u64, - name: &str, -) -> Option { - let (Some(albedo_texture_buffer), Some(albedo_texture_format)) = - (&albedo_texture_buffer, albedo_texture_format) - else { - return None; - }; - - let image_info = ImageInfo { - buffer_row_id: RowId::ZERO, // unused - buffer: albedo_texture_buffer.0.clone(), - format: albedo_texture_format.0, - kind: re_types::image::ImageKind::Color, - colormap: None, - }; - - if re_viewer_context::gpu_bridge::required_shader_decode(albedo_texture_format).is_some() { - re_log::warn_once!("Mesh can't yet handle encoded image formats like NV12 & YUY2 or BGR(A) formats without a channel type other than U8. Ignoring the texture at {name:?}."); - return None; - } - - let texture = - re_viewer_context::gpu_bridge::get_or_create_texture(render_ctx, texture_key, || { - let debug_name = "mesh albedo texture"; - texture_creation_desc_from_color_image(&image_info, debug_name) - }); - - match texture { - Ok(texture) => Some(texture), - Err(err) => { - re_log::warn_once!("Failed to create mesh albedo texture for {name:?}: {err}"); - None - } - } -} diff --git a/crates/viewer/re_viewer_context/src/gpu_bridge/image_to_gpu.rs b/crates/viewer/re_viewer_context/src/gpu_bridge/image_to_gpu.rs index 6571a0cf0c5e..a1dab09f67de 100644 --- a/crates/viewer/re_viewer_context/src/gpu_bridge/image_to_gpu.rs +++ b/crates/viewer/re_viewer_context/src/gpu_bridge/image_to_gpu.rs @@ -87,7 +87,11 @@ fn color_image_to_gpu( let texture_format = texture_handle.format(); - let shader_decoding = required_shader_decode(&image_format); + let shader_decoding = match image_format.pixel_format { + Some(PixelFormat::NV12) => Some(ShaderDecoding::Nv12), + Some(PixelFormat::YUY2) => Some(ShaderDecoding::Yuy2), + None => None, + }; // TODO(emilk): let the user specify the color space. let decode_srgb = texture_format == TextureFormat::Rgba8Unorm @@ -96,7 +100,7 @@ fn color_image_to_gpu( // Special casing for normalized textures used above: let range = if matches!( texture_format, - TextureFormat::R8Unorm | TextureFormat::Rgba8Unorm | TextureFormat::Bgra8Unorm + TextureFormat::R8Unorm | TextureFormat::Rgba8Unorm ) { [0.0, 1.0] } else if texture_format == TextureFormat::R8Snorm { @@ -104,8 +108,6 @@ fn color_image_to_gpu( } else if let Some(shader_decoding) = shader_decoding { match shader_decoding { ShaderDecoding::Nv12 | ShaderDecoding::Yuy2 => [0.0, 1.0], - ShaderDecoding::Bgr => image_data_range_heuristic(image_stats, &image_format) - .map(|range| [range.min, range.max])?, } } else { image_data_range_heuristic(image_stats, &image_format) @@ -114,10 +116,7 @@ fn color_image_to_gpu( let color_mapper = if let Some(shader_decoding) = shader_decoding { match shader_decoding { - // We only have 1D color maps, therefore chroma downsampled and BGR formats can't have color maps. 
- ShaderDecoding::Bgr | ShaderDecoding::Nv12 | ShaderDecoding::Yuy2 => { - ColorMapper::OffRGB - } + ShaderDecoding::Nv12 | ShaderDecoding::Yuy2 => ColorMapper::OffRGB, } } else if texture_format.components() == 1 { // TODO(andreas): support colormap property @@ -195,49 +194,27 @@ fn image_decode_srgb_gamma_heuristic( PixelFormat::NV12 | PixelFormat::YUY2 => Ok(true), } } else { - let (min, max) = image_stats.finite_range.ok_or(RangeError::MissingRange)?; - - #[allow(clippy::if_same_then_else)] - if 0.0 <= min && max <= 255.0 { - // If the range is suspiciously reminding us of a "regular image", assume sRGB. - Ok(true) - } else if image_format.datatype().is_float() && 0.0 <= min && max <= 1.0 { - // Floating point images between 0 and 1 are often sRGB as well. - Ok(true) - } else { - Ok(false) - } - } -} - -/// Determines if and how the shader needs to decode the image. -/// -/// Assumes creation as done by [`texture_creation_desc_from_color_image`]. -pub fn required_shader_decode(image_format: &ImageFormat) -> Option { - match image_format.pixel_format { - Some(PixelFormat::NV12) => Some(ShaderDecoding::Nv12), - Some(PixelFormat::YUY2) => Some(ShaderDecoding::Yuy2), - None => { - if image_format.datatype() == ChannelDatatype::U8 { - // U8 can be converted to RGBA without the shader's help since there's a format for it. - None - } else { - let color_model = image_format.color_model(); - (color_model == ColorModel::BGR || color_model == ColorModel::BGRA) - .then_some(ShaderDecoding::Bgr) + let color_model = image_format.color_model(); + let datatype = image_format.datatype(); + match color_model { + ColorModel::L | ColorModel::RGB | ColorModel::RGBA => { + let (min, max) = image_stats.finite_range.ok_or(RangeError::MissingRange)?; + + #[allow(clippy::if_same_then_else)] + if 0.0 <= min && max <= 255.0 { + // If the range is suspiciously reminding us of a "regular image", assume sRGB. + Ok(true) + } else if datatype.is_float() && 0.0 <= min && max <= 1.0 { + // Floating point images between 0 and 1 are often sRGB as well. + Ok(true) + } else { + Ok(false) + } } } } } -/// Creates a [`Texture2DCreationDesc`] for creating a texture from an [`ImageInfo`]. -/// -/// The resulting texture has requirements as describe by [`required_shader_decode`]. -/// -/// TODO(andreas): The consumer needs to be aware of bgr and chroma downsampling conversions. -/// It would be much better if we had a separate `re_renderer`/gpu driven conversion pipeline for this -/// which would allow us to virtually extend over wgpu's texture formats. -/// This would allow us to seamlessly support e.g. NV12 on meshes without the mesh shader having to be updated. pub fn texture_creation_desc_from_color_image<'a>( image: &'a ImageInfo, debug_name: &'a str, @@ -247,7 +224,7 @@ pub fn texture_creation_desc_from_color_image<'a>( if let Some(pixel_format) = image.format.pixel_format { match pixel_format { PixelFormat::NV12 => { - // Decoded in the shader, see [`required_shader_decode`]. + // Decoded in the shader. return Texture2DCreationDesc { label: debug_name.into(), data: cast_slice_to_cow(image.buffer.as_slice()), @@ -258,7 +235,7 @@ pub fn texture_creation_desc_from_color_image<'a>( } PixelFormat::YUY2 => { - // Decoded in the shader, see [`required_shader_decode`]. + // Decoded in the shader. 
return Texture2DCreationDesc { label: debug_name.into(), data: cast_slice_to_cow(image.buffer.as_slice()), @@ -271,40 +248,19 @@ pub fn texture_creation_desc_from_color_image<'a>( } else { let color_model = image.format.color_model(); let datatype = image.format.datatype(); - let (data, format) = match (color_model, datatype) { - // sRGB(A) handling is done by `ColormappedTexture`. - // Why not use `Rgba8UnormSrgb`? Because premul must happen _before_ sRGB decode, so we can't + // Normalize sRGB(A) textures to 0-1 range, and let the GPU premultiply alpha. + // Why? Because premul must happen _before_ sRGB decode, so we can't // use a "Srgb-aware" texture like `Rgba8UnormSrgb` for RGBA. (ColorModel::RGB, ChannelDatatype::U8) => ( pad_rgb_to_rgba(&image.buffer, u8::MAX).into(), TextureFormat::Rgba8Unorm, ), + (ColorModel::RGBA, ChannelDatatype::U8) => { (cast_slice_to_cow(&image.buffer), TextureFormat::Rgba8Unorm) } - // Make use of wgpu's BGR(A)8 formats. - // - // From the pov of our on-the-fly decoding textured rect shader this is just a strange special case - // given that it already has to deal with other BGR(A) formats. - // - // However, we have other places where we don't have the luxury of having a shader that can do the decoding for us. - // In those cases we'd like to support as many formats as possible without decoding. - // - // (in some hopefully not too far future, re_renderer will have an internal conversion pipeline - // that injects on-the-fly texture conversion from source formats before the consumer of a given texture is run - // and caches the result alongside with the source data) - // - // See also [`required_shader_decode`] which lists this case as a format that does not need to be decoded. - (ColorModel::BGR, ChannelDatatype::U8) => ( - pad_rgb_to_rgba(&image.buffer, u8::MAX).into(), - TextureFormat::Bgra8Unorm, - ), - (ColorModel::BGRA, ChannelDatatype::U8) => { - (cast_slice_to_cow(&image.buffer), TextureFormat::Bgra8Unorm) - } - _ => { // Fallback to general case: return general_texture_creation_desc_from_image( @@ -519,8 +475,7 @@ fn general_texture_creation_desc_from_image<'a>( } } - // BGR->RGB conversion is done in the shader. - ColorModel::RGB | ColorModel::BGR => { + ColorModel::RGB => { // There are no 3-channel textures in wgpu, so we need to pad to 4 channels. // What should we pad with? It depends on whether or not the shader interprets these as alpha. // To be safe, we pad with the MAX value of integers, and with 1.0 for floats. @@ -558,8 +513,7 @@ fn general_texture_creation_desc_from_image<'a>( } } - // BGR->RGB conversion is done in the shader. 
- ColorModel::RGBA | ColorModel::BGRA => { + ColorModel::RGBA => { // TODO(emilk): premultiply alpha, or tell the shader to assume unmultiplied alpha match datatype { diff --git a/crates/viewer/re_viewer_context/src/gpu_bridge/mod.rs b/crates/viewer/re_viewer_context/src/gpu_bridge/mod.rs index cca9ca8abc8a..e53ff64a8460 100644 --- a/crates/viewer/re_viewer_context/src/gpu_bridge/mod.rs +++ b/crates/viewer/re_viewer_context/src/gpu_bridge/mod.rs @@ -6,8 +6,7 @@ mod re_renderer_callback; pub use colormap::{colormap_edit_or_view_ui, colormap_to_re_renderer}; pub use image_to_gpu::{ - image_data_range_heuristic, image_to_gpu, required_shader_decode, - texture_creation_desc_from_color_image, + image_data_range_heuristic, image_to_gpu, texture_creation_desc_from_color_image, }; pub use re_renderer_callback::new_renderer_callback; diff --git a/crates/viewer/re_viewer_context/src/image_info.rs b/crates/viewer/re_viewer_context/src/image_info.rs index 8359934a9100..8fd9fb3935b9 100644 --- a/crates/viewer/re_viewer_context/src/image_info.rs +++ b/crates/viewer/re_viewer_context/src/image_info.rs @@ -70,8 +70,7 @@ impl ImageInfo { match pixel_format.color_model() { ColorModel::L => (channel == 0).then_some(TensorElement::U8(luma)), - // Shouldn't hit BGR and BGRA, but we'll handle it like RGB and RGBA here for completeness. - ColorModel::RGB | ColorModel::RGBA | ColorModel::BGR | ColorModel::BGRA => { + ColorModel::RGB | ColorModel::RGBA => { if channel < 3 { let rgb = rgb_from_yuv(luma, u, v); Some(TensorElement::U8(rgb[channel as usize])) @@ -179,50 +178,26 @@ impl ImageInfo { } RgbImage::from_vec(w, h, rgb).map(DynamicImage::ImageRgb8) } else if self.format.datatype() == ChannelDatatype::U8 { - let mut u8 = self.buffer.to_vec(); + let u8 = self.buffer.to_vec(); match self.color_model() { ColorModel::L => GrayImage::from_vec(w, h, u8).map(DynamicImage::ImageLuma8), ColorModel::RGB => RgbImage::from_vec(w, h, u8).map(DynamicImage::ImageRgb8), ColorModel::RGBA => RgbaImage::from_vec(w, h, u8).map(DynamicImage::ImageRgba8), - ColorModel::BGR => { - bgr_to_rgb(&mut u8); - RgbImage::from_vec(w, h, u8).map(DynamicImage::ImageRgb8) - } - ColorModel::BGRA => { - bgra_to_rgba(&mut u8); - RgbaImage::from_vec(w, h, u8).map(DynamicImage::ImageRgba8) - } } } else if self.format.datatype() == ChannelDatatype::U16 { // Lossless conversion of u16, ignoring data_range - let mut u16 = self.to_slice::().to_vec(); + let u16 = self.to_slice::().to_vec(); match self.color_model() { ColorModel::L => Gray16Image::from_vec(w, h, u16).map(DynamicImage::ImageLuma16), ColorModel::RGB => Rgb16Image::from_vec(w, h, u16).map(DynamicImage::ImageRgb16), ColorModel::RGBA => Rgba16Image::from_vec(w, h, u16).map(DynamicImage::ImageRgba16), - ColorModel::BGR => { - bgr_to_rgb(&mut u16); - Rgb16Image::from_vec(w, h, u16).map(DynamicImage::ImageRgb16) - } - ColorModel::BGRA => { - bgra_to_rgba(&mut u16); - Rgba16Image::from_vec(w, h, u16).map(DynamicImage::ImageRgba16) - } } } else { - let mut u16 = self.to_vec_u16(self.format.datatype(), data_range); + let u16 = self.to_vec_u16(self.format.datatype(), data_range); match self.color_model() { ColorModel::L => Gray16Image::from_vec(w, h, u16).map(DynamicImage::ImageLuma16), ColorModel::RGB => Rgb16Image::from_vec(w, h, u16).map(DynamicImage::ImageRgb16), ColorModel::RGBA => Rgba16Image::from_vec(w, h, u16).map(DynamicImage::ImageRgba16), - ColorModel::BGR => { - bgr_to_rgb(&mut u16); - Rgb16Image::from_vec(w, h, u16).map(DynamicImage::ImageRgb16) - } - ColorModel::BGRA => { - 
bgra_to_rgba(&mut u16); - Rgba16Image::from_vec(w, h, u16).map(DynamicImage::ImageRgba16) - } } } } @@ -326,18 +301,6 @@ impl ImageInfo { } } -fn bgr_to_rgb(bgr_elements: &mut [T]) { - for bgr in bgr_elements.chunks_exact_mut(3) { - bgr.swap(0, 2); - } -} - -fn bgra_to_rgba(bgra_elements: &mut [T]) { - for bgra in bgra_elements.chunks_exact_mut(4) { - bgra.swap(0, 2); - } -} - fn get(blob: &[u8], element_offset: usize) -> Option { // NOTE: `blob` is not necessary aligned to `T`, // hence the complexity of this function. diff --git a/docs/content/reference/types/datatypes/color_model.md b/docs/content/reference/types/datatypes/color_model.md index 009c49f4bff6..fdb2b1cb7e10 100644 --- a/docs/content/reference/types/datatypes/color_model.md +++ b/docs/content/reference/types/datatypes/color_model.md @@ -12,8 +12,6 @@ This combined with [`datatypes.ChannelDatatype`](https://rerun.io/docs/reference * L * RGB * RGBA -* BGR -* BGRA ## API reference links * 🌊 [C++ API docs for `ColorModel`](https://ref.rerun.io/docs/cpp/stable/namespacererun_1_1datatypes.html) diff --git a/docs/snippets/all/archetypes/image_advanced.py b/docs/snippets/all/archetypes/image_advanced.py index d9959452c041..8dc0d2d0ff94 100644 --- a/docs/snippets/all/archetypes/image_advanced.py +++ b/docs/snippets/all/archetypes/image_advanced.py @@ -33,5 +33,6 @@ # Read with OpenCV image = cv2.imread(file_path) -# OpenCV uses BGR ordering, we need to make this known to Rerun. -rr.log("from_opencv", rr.Image(image, color_model="BGR")) +# OpenCV uses BGR ordering, so we need to convert to RGB. +image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) +rr.log("from_opencv", rr.Image(image)) diff --git a/examples/python/arkit_scenes/arkit_scenes/__main__.py b/examples/python/arkit_scenes/arkit_scenes/__main__.py index 90f47237737a..e03b2ccf0d57 100755 --- a/examples/python/arkit_scenes/arkit_scenes/__main__.py +++ b/examples/python/arkit_scenes/arkit_scenes/__main__.py @@ -225,6 +225,7 @@ def log_arkit(recording_path: Path, include_highres: bool) -> None: rr.set_time_seconds("time", float(frame_timestamp)) # load the lowres image and depth bgr = cv2.imread(f"{lowres_image_dir}/{video_id}_{frame_timestamp}.png") + rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB) depth = cv2.imread(f"{lowres_depth_dir}/{video_id}_{frame_timestamp}.png", cv2.IMREAD_ANYDEPTH) high_res_exists: bool = (image_dir / f"{video_id}_{frame_timestamp}.png").exists() and include_highres @@ -239,7 +240,7 @@ def log_arkit(recording_path: Path, include_highres: bool) -> None: LOWRES_POSED_ENTITY_PATH, ) - rr.log(f"{LOWRES_POSED_ENTITY_PATH}/bgr", rr.Image(bgr, color_model="BGR").compress(jpeg_quality=95)) + rr.log(f"{LOWRES_POSED_ENTITY_PATH}/rgb", rr.Image(rgb).compress(jpeg_quality=95)) rr.log(f"{LOWRES_POSED_ENTITY_PATH}/depth", rr.DepthImage(depth, meter=1000)) # log the high res camera @@ -259,7 +260,9 @@ def log_arkit(recording_path: Path, include_highres: bool) -> None: highres_bgr = cv2.imread(f"{image_dir}/{video_id}_{frame_timestamp}.png") highres_depth = cv2.imread(f"{depth_dir}/{video_id}_{frame_timestamp}.png", cv2.IMREAD_ANYDEPTH) - rr.log(f"{HIGHRES_ENTITY_PATH}/bgr", rr.Image(highres_bgr, color_model="BGR").compress(jpeg_quality=75)) + highres_rgb = cv2.cvtColor(highres_bgr, cv2.COLOR_BGR2RGB) + + rr.log(f"{HIGHRES_ENTITY_PATH}/rgb", rr.Image(highres_rgb).compress(jpeg_quality=75)) rr.log(f"{HIGHRES_ENTITY_PATH}/depth", rr.DepthImage(highres_depth, meter=1000)) @@ -290,9 +293,9 @@ def main() -> None: # For this to work, the origin of the 2D views has to be a pinhole 
camera, # this way the viewer knows how to project the 3D annotations into the 2D views. rrb.Spatial2DView( - name="BGR", + name="RGB", origin=primary_camera_entity, - contents=["$origin/bgr", "/world/annotations/**"], + contents=["$origin/rgb", "/world/annotations/**"], ), rrb.Spatial2DView( name="Depth", diff --git a/examples/python/face_tracking/face_tracking.py b/examples/python/face_tracking/face_tracking.py index d7ec8bb21402..5d5a09c13a0c 100755 --- a/examples/python/face_tracking/face_tracking.py +++ b/examples/python/face_tracking/face_tracking.py @@ -357,12 +357,15 @@ def run_from_video_capture(vid: int | str, max_dim: int | None, max_frame_count: # On some platforms it always returns zero, so we compute from the frame counter and fps frame_time_nano = int(frame_idx * 1000 / fps * 1e6) + # convert to rgb + frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) + # log data rr.set_time_sequence("frame_nr", frame_idx) rr.set_time_nanos("frame_time", frame_time_nano) detector.detect_and_log(frame, frame_time_nano) landmarker.detect_and_log(frame, frame_time_nano) - rr.log("video/image", rr.Image(frame, color_model="BGR")) + rr.log("video/image", rr.Image(frame)) except KeyboardInterrupt: pass @@ -376,11 +379,12 @@ def run_from_sample_image(path: Path, max_dim: int | None, num_faces: int) -> No """Run the face detector on a single image.""" image = cv2.imread(str(path)) image = resize_image(image, max_dim) + image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) logger = FaceDetectorLogger(video_mode=False) landmarker = FaceLandmarkerLogger(video_mode=False, num_faces=num_faces) logger.detect_and_log(image, 0) landmarker.detect_and_log(image, 0) - rr.log("video/image", rr.Image(image, color_model="BGR")) + rr.log("video/image", rr.Image(image)) def main() -> None: diff --git a/examples/python/gesture_detection/gesture_detection.py b/examples/python/gesture_detection/gesture_detection.py index 68b8025018cd..c0c21e6f2cac 100755 --- a/examples/python/gesture_detection/gesture_detection.py +++ b/examples/python/gesture_detection/gesture_detection.py @@ -192,11 +192,10 @@ def run_from_sample_image(path: Path | str) -> None: """Run the gesture recognition on a single image.""" image = cv2.imread(str(path)) # image = resize_image(image, max_dim) - rr.log("media/image", rr.Image(image, color_model="BGR")) - - detect_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) + show_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) + rr.log("media/image", rr.Image(show_image)) logger = GestureDetectorLogger(video_mode=False) - logger.detect_and_log(detect_image, 0) + logger.detect_and_log(show_image, 0) def run_from_video_capture(vid: int | str, max_frame_count: int | None) -> None: @@ -237,11 +236,14 @@ def run_from_video_capture(vid: int | str, max_frame_count: int | None) -> None: # On some platforms it always returns zero, so we compute from the frame counter and fps frame_time_nano = int(frame_idx * 1000 / fps * 1e6) + # convert to rgb + frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) + # log data rr.set_time_sequence("frame_nr", frame_idx) rr.set_time_nanos("frame_time", frame_time_nano) detector.detect_and_log(frame, frame_time_nano) - rr.log("media/video", rr.Image(frame, color_model="BGR").compress(jpeg_quality=75)) + rr.log("media/video", rr.Image(frame).compress(jpeg_quality=75)) except KeyboardInterrupt: pass diff --git a/examples/python/human_pose_tracking/human_pose_tracking.py b/examples/python/human_pose_tracking/human_pose_tracking.py index 817e6e07ef50..e58809c4eb46 100755 --- 
a/examples/python/human_pose_tracking/human_pose_tracking.py +++ b/examples/python/human_pose_tracking/human_pose_tracking.py @@ -77,14 +77,15 @@ def track_pose(video_path: str, model_path: str, *, segment: bool, max_frame_cou break mp_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=bgr_frame.data) + rgb = cv2.cvtColor(bgr_frame.data, cv2.COLOR_BGR2RGB) rr.set_time_seconds("time", bgr_frame.time) rr.set_time_sequence("frame_idx", bgr_frame.idx) results = pose_landmarker.detect_for_video(mp_image, int(bgr_frame.time * 1000)) - h, w, _ = bgr_frame.data.shape + h, w, _ = rgb.shape landmark_positions_2d = read_landmark_positions_2d(results, w, h) - rr.log("video/bgr", rr.Image(bgr_frame.data, color_model="BGR").compress(jpeg_quality=75)) + rr.log("video/rgb", rr.Image(rgb).compress(jpeg_quality=75)) if landmark_positions_2d is not None: rr.log( "video/pose/points", @@ -236,7 +237,7 @@ def main() -> None: rrb.Spatial3DView(origin="person", name="3D pose"), ), rrb.Vertical( - rrb.Spatial2DView(origin="video/bgr", name="Raw video"), + rrb.Spatial2DView(origin="video/rgb", name="Raw video"), rrb.TextDocumentView(origin="description", name="Description"), row_shares=[2, 3], ), diff --git a/examples/python/live_camera_edge_detection/live_camera_edge_detection.py b/examples/python/live_camera_edge_detection/live_camera_edge_detection.py index 1a521e1df1fd..4d3206d6c21b 100755 --- a/examples/python/live_camera_edge_detection/live_camera_edge_detection.py +++ b/examples/python/live_camera_edge_detection/live_camera_edge_detection.py @@ -42,7 +42,8 @@ def run_canny(num_frames: int | None) -> None: frame_nr += 1 # Log the original image - rr.log("image/rgb", rr.Image(img, color_model="BGR")) + rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + rr.log("image/rgb", rr.Image(rgb)) # Convert to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) diff --git a/examples/python/ocr/ocr.py b/examples/python/ocr/ocr.py index 030f9bbc8198..846e6e864dd2 100755 --- a/examples/python/ocr/ocr.py +++ b/examples/python/ocr/ocr.py @@ -365,7 +365,7 @@ def detect_and_log_layouts(file_path: str) -> None: else: # read image img = cv2.imread(file_path) - image_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # Rerun can handle BGR as well, but `ocr_model_pp` expects RGB + image_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) images.append(image_rgb.astype(np.uint8)) # Extract the layout from each image diff --git a/examples/python/rgbd/rgbd.py b/examples/python/rgbd/rgbd.py index cc260d449e81..51c855fda94d 100755 --- a/examples/python/rgbd/rgbd.py +++ b/examples/python/rgbd/rgbd.py @@ -44,11 +44,13 @@ def parse_timestamp(filename: str) -> datetime: return datetime.fromtimestamp(float(time)) -def read_image_bgr(buf: bytes) -> npt.NDArray[np.uint8]: +def read_image_rgb(buf: bytes) -> npt.NDArray[np.uint8]: """Decode an image provided in `buf`, and interpret it as RGB data.""" np_buf: npt.NDArray[np.uint8] = np.ndarray(shape=(1, len(buf)), dtype=np.uint8, buffer=buf) - img_bgr: npt.NDArray[Any] = cv2.imdecode(np_buf, cv2.IMREAD_COLOR) - return img_bgr + # OpenCV reads images in BGR rather than RGB format + img_bgr = cv2.imdecode(np_buf, cv2.IMREAD_COLOR) + img_rgb: npt.NDArray[Any] = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB) + return img_rgb def read_depth_image(buf: bytes) -> npt.NDArray[Any]: @@ -83,8 +85,8 @@ def log_nyud_data(recording_path: Path, subset_idx: int, frames: int) -> None: if f.filename.endswith(".ppm"): buf = archive.read(f) - img_bgr = read_image_bgr(buf) - rr.log("world/camera/image/rgb", rr.Image(img_bgr,
color_model="BGR").compress(jpeg_quality=95)) + img_rgb = read_image_rgb(buf) + rr.log("world/camera/image/rgb", rr.Image(img_rgb).compress(jpeg_quality=95)) elif f.filename.endswith(".pgm"): buf = archive.read(f) diff --git a/examples/python/segment_anything_model/segment_anything_model.py b/examples/python/segment_anything_model/segment_anything_model.py index 57540b15e405..ea3cc91ae384 100755 --- a/examples/python/segment_anything_model/segment_anything_model.py +++ b/examples/python/segment_anything_model/segment_anything_model.py @@ -138,7 +138,6 @@ def load_image(image_uri: str) -> cv2.typing.MatLike: else: image = cv2.imread(image_uri, cv2.IMREAD_COLOR) - # Rerun can handle BGR as well, but SAM requires RGB. image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) return image diff --git a/examples/python/structure_from_motion/structure_from_motion/__main__.py b/examples/python/structure_from_motion/structure_from_motion/__main__.py index d68f57675704..cba96fa1943e 100755 --- a/examples/python/structure_from_motion/structure_from_motion/__main__.py +++ b/examples/python/structure_from_motion/structure_from_motion/__main__.py @@ -162,7 +162,8 @@ def read_and_log_sparse_reconstruction(dataset_path: Path, filter_output: bool, if resize: bgr = cv2.imread(str(image_file)) bgr = cv2.resize(bgr, resize) - rr.log("camera/image", rr.Image(bgr, color_model="BGR").compress(jpeg_quality=75)) + rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB) + rr.log("camera/image", rr.Image(rgb).compress(jpeg_quality=75)) else: rr.log("camera/image", rr.EncodedImage(path=dataset_path / "images" / image.name)) diff --git a/rerun_cpp/src/rerun/datatypes/color_model.hpp b/rerun_cpp/src/rerun/datatypes/color_model.hpp index deb455fe97e7..97fcd4e6e7c9 100644 --- a/rerun_cpp/src/rerun/datatypes/color_model.hpp +++ b/rerun_cpp/src/rerun/datatypes/color_model.hpp @@ -33,12 +33,6 @@ namespace rerun::datatypes { /// Red, Green, Blue, Alpha RGBA = 3, - - /// Blue, Green, Red - BGR = 4, - - /// Blue, Green, Red, Alpha - BGRA = 5, }; } // namespace rerun::datatypes diff --git a/rerun_cpp/src/rerun/image_utils.hpp b/rerun_cpp/src/rerun/image_utils.hpp index 12be1ece918c..35e05a0a685f 100644 --- a/rerun_cpp/src/rerun/image_utils.hpp +++ b/rerun_cpp/src/rerun/image_utils.hpp @@ -137,10 +137,8 @@ namespace rerun { switch (color_model) { case datatypes::ColorModel::L: return 1; - case datatypes::ColorModel::BGR: case datatypes::ColorModel::RGB: return 3; - case datatypes::ColorModel::BGRA: case datatypes::ColorModel::RGBA: return 4; } diff --git a/rerun_py/rerun_sdk/rerun/archetypes/image_ext.py b/rerun_py/rerun_sdk/rerun/archetypes/image_ext.py index 26004e3a16d7..568030a6fe9a 100644 --- a/rerun_py/rerun_sdk/rerun/archetypes/image_ext.py +++ b/rerun_py/rerun_sdk/rerun/archetypes/image_ext.py @@ -81,7 +81,7 @@ def __init__( `1x480x640x3x1` is treated as a `480x640x3`. You also need to specify the `color_model` of it (e.g. "RGB"). color_model: - L, RGB, RGBA, BGR, BGRA, etc, specifying how to interpret `image`. + L, RGB, RGBA, etc, specifying how to interpret `image`. pixel_format: NV12, YUV420, etc. For chroma-downsampling. Requires `width`, `height`, and `bytes`. 
@@ -207,9 +207,9 @@ def __init__( if channels == 1: color_model = ColorModel.L elif channels == 3: - color_model = ColorModel.RGB + color_model = ColorModel.RGB # TODO(#2340): change default to BGR elif channels == 4: - color_model = ColorModel.RGBA + color_model = ColorModel.RGBA # TODO(#2340): change default to BGRA else: _send_warning_or_raise(f"Expected 1, 3, or 4 channels; got {channels}") else: @@ -278,9 +278,10 @@ def compress(self: Any, jpeg_quality: int = 95) -> EncodedImage | Image: if image_format.pixel_format is not None: raise ValueError(f"Cannot JPEG compress an image with pixel_format {image_format.pixel_format}") - if image_format.color_model not in (ColorModel.L, ColorModel.RGB, ColorModel.BGR): + if image_format.color_model not in (ColorModel.L, ColorModel.RGB): + # TODO(#2340): BGR support! raise ValueError( - f"Cannot JPEG compress an image of type {image_format.color_model}. Only L (monochrome), RGB and BGR are supported." + f"Cannot JPEG compress an image of type {image_format.color_model}. Only L (monochrome) and RGB are supported." ) if image_format.channel_datatype != ChannelDatatype.U8: @@ -307,12 +308,7 @@ def compress(self: Any, jpeg_quality: int = 95) -> EncodedImage | Image: else: image = buf.reshape(image_format.height, image_format.width, 3) - # PIL doesn't understand BGR. - if image_format.color_model == ColorModel.BGR: - mode = "RGB" - image = image[:, :, ::-1] - else: - mode = str(image_format.color_model) + mode = str(image_format.color_model) pil_image = PILImage.fromarray(image, mode=mode) output = BytesIO() diff --git a/rerun_py/rerun_sdk/rerun/datatypes/color_model.py b/rerun_py/rerun_sdk/rerun/datatypes/color_model.py index 1659fc38c3b8..e3dff416fffb 100644 --- a/rerun_py/rerun_sdk/rerun/datatypes/color_model.py +++ b/rerun_py/rerun_sdk/rerun/datatypes/color_model.py @@ -36,12 +36,6 @@ class ColorModel(Enum): RGBA = 3 """Red, Green, Blue, Alpha""" - BGR = 4 - """Blue, Green, Red""" - - BGRA = 5 - """Blue, Green, Red, Alpha""" - @classmethod def auto(cls, val: str | int | ColorModel) -> ColorModel: """Best-effort converter, including a case-insensitive string matcher.""" @@ -63,7 +57,7 @@ def __str__(self) -> str: return self.name -ColorModelLike = Union[ColorModel, Literal["BGR", "BGRA", "L", "RGB", "RGBA", "bgr", "bgra", "l", "rgb", "rgba"], int] +ColorModelLike = Union[ColorModel, Literal["L", "RGB", "RGBA", "l", "rgb", "rgba"], int] ColorModelArrayLike = Union[ColorModelLike, Sequence[ColorModelLike]] diff --git a/tests/python/release_checklist/check_bgr.py b/tests/python/release_checklist/check_bgr.py deleted file mode 100644 index 9522a94810b9..000000000000 --- a/tests/python/release_checklist/check_bgr.py +++ /dev/null @@ -1,88 +0,0 @@ -from __future__ import annotations - -import os -from argparse import Namespace -from io import BytesIO -from uuid import uuid4 - -import numpy as np -import requests -import rerun as rr -import rerun.blueprint as rrb -from PIL import Image - -README = """\ -# BGR Support - -This checks whether BGR images with various datatypes are supported. - -### Action -All images should look the same (and sane). - -""" - -types = [ - # Skipping on i8, since it would look different. 
- ("u8", np.uint8), - ("u16", np.uint16), - ("u32", np.uint32), - ("u64", np.uint64), - ("i16", np.int16), - ("i32", np.int32), - ("i64", np.int64), - ("f16", np.float16), - ("f32", np.float32), - ("f64", np.float64), -] - - -def blueprint() -> rrb.BlueprintLike: - entities = [f"bgr_{type}" for (type, _) in types] + [f"bgra_{type}" for (type, _) in types] + ["rgb_u8"] - return rrb.Grid(contents=[rrb.Spatial2DView(origin=path) for path in entities]) - - -def log_readme() -> None: - rr.log("readme", rr.TextDocument(README, media_type=rr.MediaType.MARKDOWN), timeless=True) - - -def run_bgr_images(sample_image_rgb_u8: np.ndarray) -> None: - # We're being explicit about datatypes & datamodels on all calls to avoid confusion. - - # Show the original image as a reference: - rr.log("rgb_u8", rr.Image(sample_image_rgb_u8, color_model="RGB", datatype="u8")) - - sample_image_bgr_u8 = sample_image_rgb_u8[:, :, ::-1] - sample_image_bgra_u8 = np.insert(sample_image_bgr_u8, 3, 255, axis=2) - - for datatype, dtype in types: - sample_image_bgr = np.asarray(sample_image_bgr_u8, dtype=dtype) - rr.log(f"bgr_{datatype}", rr.Image(sample_image_bgr, color_model="BGR", datatype=datatype)) - sample_image_bgra = np.asarray(sample_image_bgra_u8, dtype=dtype) - rr.log(f"bgra_{datatype}", rr.Image(sample_image_bgra, color_model="BGRA", datatype=datatype)) - - -def download_example_image_as_rgb() -> np.ndarray: - # Download this recreation of the lena image (via https://mortenhannemose.github.io/lena/): - # https://mortenhannemose.github.io/assets/img/Lena_512.png - url = "https://mortenhannemose.github.io/assets/img/Lena_512.png" - response = requests.get(url) - image = Image.open(BytesIO(response.content)) - image = image.convert("RGB") - return np.array(image) - - -def run(args: Namespace) -> None: - rr.script_setup(args, f"{os.path.basename(__file__)}", recording_id=uuid4(), default_blueprint=blueprint()) - - sample_image_rgb_u8 = download_example_image_as_rgb() - log_readme() - run_bgr_images(sample_image_rgb_u8) - - -if __name__ == "__main__": - import argparse - - parser = argparse.ArgumentParser(description="Interactive release checklist") - rr.script_add_args(parser) - args = parser.parse_args() - run(args)
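
With the BGR(A) color models removed, OpenCV callers have to convert to RGB before logging, exactly as the example changes above do. A minimal sketch of that conversion (the file path is a placeholder; assumes the opencv-python, numpy, and rerun packages are installed):

    import cv2
    import numpy as np
    import rerun as rr

    # OpenCV decodes images in BGR channel order; "frame.png" is a hypothetical path.
    bgr = cv2.imread("frame.png")

    # Convert BGR -> RGB (use cv2.COLOR_BGRA2RGBA for 4-channel images).
    rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)

    # Equivalent pure-NumPy channel reversal. The slice produces a strided
    # view, so copy it into contiguous memory before handing it off.
    rgb_alt = np.ascontiguousarray(bgr[:, :, ::-1])

    rr.log("camera/image", rr.Image(rgb))

cv2.cvtColor already returns a contiguous copy, which is why the examples in this patch prefer it over the slice-based reversal.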
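Separately, the upload path in image_to_gpu.rs keeps relying on pad_rgb_to_rgba because wgpu exposes no 3-channel texture formats: per the comments above, integer images are padded with the type's MAX value and float images with 1.0, so the extra channel reads as opaque alpha either way. A rough NumPy illustration of that padding (for intuition only; the viewer's actual implementation is the Rust helper named in the diff):

    import numpy as np

    def pad_rgb_to_rgba(rgb: np.ndarray, alpha_fill) -> np.ndarray:
        # HxWx3 -> HxWx4 with a constant alpha channel, mirroring the
        # padding the viewer performs before uploading to the GPU.
        h, w, _ = rgb.shape
        rgba = np.empty((h, w, 4), dtype=rgb.dtype)
        rgba[:, :, :3] = rgb
        rgba[:, :, 3] = alpha_fill  # integer MAX, or 1.0 for float images
        return rgba

    rgb_u8 = np.zeros((480, 640, 3), dtype=np.uint8)
    rgba_u8 = pad_rgb_to_rgba(rgb_u8, np.iinfo(np.uint8).max)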