diff --git a/src/backend/renderer/gles2/mod.rs b/src/backend/renderer/gles2/mod.rs index 71b4ba8097d1..4d0fc711c61e 100644 --- a/src/backend/renderer/gles2/mod.rs +++ b/src/backend/renderer/gles2/mod.rs @@ -264,7 +264,7 @@ impl Drop for Gles2Target { pub struct Gles2Renderer { buffers: Vec, target: Option, - extensions: Vec, + pub(crate) extensions: Vec, tex_programs: [Gles2TexProgram; shaders::FRAGMENT_COUNT], solid_program: Gles2SolidProgram, #[cfg(feature = "wayland_frontend")] @@ -676,7 +676,7 @@ impl Gles2Renderer { Ok(renderer) } - fn make_current(&self) -> Result<(), MakeCurrentError> { + pub(crate) fn make_current(&self) -> Result<(), MakeCurrentError> { unsafe { if let Some(&Gles2Target::Surface(ref surface)) = self.target.as_ref() { self.egl.make_current_with_surface(&**surface)?; @@ -786,12 +786,13 @@ impl ImportMem for Gles2Renderer { // this is guaranteed a non-public internal type, so we are good. surface .and_then(|surface| surface.data_map.get::>().cloned()) + .filter(|texture| texture.size == (width, height).into()) .unwrap_or_else(|| { let mut tex = 0; unsafe { self.gl.GenTextures(1, &mut tex) }; // new texture, upload in full upload_full = true; - Rc::new(Gles2TextureInternal { + let new = Rc::new(Gles2TextureInternal { texture: tex, texture_kind: shader_idx, is_external: false, @@ -799,7 +800,12 @@ impl ImportMem for Gles2Renderer { size: (width, height).into(), egl_images: None, destruction_callback_sender: self.destruction_callback_sender.clone(), - }) + }); + if let Some(surface) = surface { + let copy = new.clone(); + surface.data_map.insert_if_missing(|| copy); + } + new }), ); @@ -975,7 +981,12 @@ impl ImportEgl for Gles2Renderer { self.egl_reader.as_ref() } - fn import_egl_buffer(&mut self, buffer: &wl_buffer::WlBuffer) -> Result { + fn import_egl_buffer( + &mut self, + buffer: &wl_buffer::WlBuffer, + _surface: Option<&crate::wayland::compositor::SurfaceData>, + _damage: &[Rectangle], + ) -> Result { if !self.extensions.iter().any(|ext| 
ext == "GL_OES_EGL_image") { return Err(Gles2Error::GLExtensionNotSupported(&["GL_OES_EGL_image"])); } @@ -1023,7 +1034,11 @@ impl ImportEgl for Gles2Renderer { #[cfg(feature = "wayland_frontend")] impl ImportDma for Gles2Renderer { - fn import_dmabuf(&mut self, buffer: &Dmabuf) -> Result { + fn import_dmabuf( + &mut self, + buffer: &Dmabuf, + _damage: Option<&[Rectangle]>, + ) -> Result { use crate::backend::allocator::Buffer; if !self.extensions.iter().any(|ext| ext == "GL_OES_EGL_image") { return Err(Gles2Error::GLExtensionNotSupported(&["GL_OES_EGL_image"])); @@ -1049,7 +1064,6 @@ impl ImportDma for Gles2Renderer { egl_images: Some(vec![image]), destruction_callback_sender: self.destruction_callback_sender.clone(), })); - self.egl.unbind()?; self.dmabuf_cache.insert(buffer.weak(), texture.clone()); Ok(texture) }) @@ -1120,6 +1134,8 @@ impl Gles2Renderer { } impl ExportMem for Gles2Renderer { + type TextureMapping = Gles2Mapping; + fn copy_framebuffer( &mut self, region: Rectangle, @@ -1158,7 +1174,6 @@ impl ExportMem for Gles2Renderer { texture: &Self::TextureId, region: Rectangle, ) -> Result { - let size = texture.size(); let mut pbo = 0; let old_target = self.target.take(); self.bind(texture.clone())?; @@ -1644,7 +1659,6 @@ impl Gles2Renderer { impl Renderer for Gles2Renderer { type Error = Gles2Error; type TextureId = Gles2Texture; - type TextureMapping = Gles2Mapping; type Frame = Gles2Frame; fn downscale_filter(&mut self, filter: TextureFilter) -> Result<(), Self::Error> { diff --git a/src/backend/renderer/mod.rs b/src/backend/renderer/mod.rs index 0c94be1f311a..5a976c18b21f 100644 --- a/src/backend/renderer/mod.rs +++ b/src/backend/renderer/mod.rs @@ -32,6 +32,8 @@ use crate::backend::egl::{ Error as EglError, }; +pub mod multigpu; + #[cfg(feature = "wayland_frontend")] pub mod utils; @@ -190,9 +192,8 @@ pub trait Renderer { type Error: Error; /// Texture Handle type used by this renderer. 
type TextureId: Texture; - /// Texture type representing a downloaded pixel buffer. - type TextureMapping: TextureMapping; /// Type representing a currently in-progress frame during the [`Renderer::render`]-call + #[cfg(not(feature = "nightly"))] type Frame: Frame; /// Set the filter method to be used when rendering a texture into a smaller area than its size @@ -355,6 +356,8 @@ pub trait ImportEgl: Renderer { fn import_egl_buffer( &mut self, buffer: &wl_buffer::WlBuffer, + surface: Option<&crate::wayland::compositor::SurfaceData>, + damage: &[Rectangle], ) -> Result<::TextureId, ::Error>; } @@ -380,13 +383,16 @@ pub trait ImportDma: Renderer { fn import_dma_buffer( &mut self, buffer: &wl_buffer::WlBuffer, + surface: Option<&crate::wayland::compositor::SurfaceData>, + damage: &[Rectangle], ) -> Result<::TextureId, ::Error> { + let _ = surface; let dmabuf = buffer .as_ref() .user_data() .get::() .expect("import_dma_buffer without checking buffer type?"); - self.import_dmabuf(dmabuf) + self.import_dmabuf(dmabuf, Some(damage)) } /// Import a given raw dmabuf into the renderer. 
@@ -403,6 +409,7 @@ pub trait ImportDma: Renderer { fn import_dmabuf( &mut self, dmabuf: &Dmabuf, + damage: Option<&[Rectangle]>, ) -> Result<::TextureId, ::Error>; } @@ -453,8 +460,8 @@ impl ImportAll for R { ) -> Option::TextureId, ::Error>> { match buffer_type(buffer) { Some(BufferType::Shm) => Some(self.import_shm_buffer(buffer, surface, damage)), - Some(BufferType::Egl) => Some(self.import_egl_buffer(buffer)), - Some(BufferType::Dma) => Some(self.import_dma_buffer(buffer)), + Some(BufferType::Egl) => Some(self.import_egl_buffer(buffer, surface, damage)), + Some(BufferType::Dma) => Some(self.import_dma_buffer(buffer, surface, damage)), _ => None, } } @@ -473,7 +480,7 @@ impl ImportAll for R { ) -> Option::TextureId, ::Error>> { match buffer_type(buffer) { Some(BufferType::Shm) => Some(self.import_shm_buffer(buffer, surface, damage)), - Some(BufferType::Dma) => Some(self.import_dma_buffer(buffer)), + Some(BufferType::Dma) => Some(self.import_dma_buffer(buffer, surface, damage)), _ => None, } } @@ -481,6 +488,9 @@ impl ImportAll for R { /// Trait for renderers supporting exporting contents of framebuffers or textures into memory. pub trait ExportMem: Renderer { + /// Texture type representing a downloaded pixel buffer. + type TextureMapping: TextureMapping; + /// Copies the contents of the currently bound framebuffer. /// /// This operation is not destructive, the contexts of the framebuffer keep being valid. @@ -492,7 +502,7 @@ pub trait ExportMem: Renderer { fn copy_framebuffer( &mut self, region: Rectangle, - ) -> Result<::TextureMapping, ::Error>; + ) -> Result::Error>; /// Copies the contents of the currently bound framebuffer. /// *Note*: This function may change or invalidate the current bind. /// @@ -505,7 +515,7 @@ pub trait ExportMem: Renderer { &mut self, texture: &Self::TextureId, region: Rectangle, - ) -> Result<::TextureMapping, Self::Error>; + ) -> Result; /// Returns a read-only pointer to a previously created texture mapping. 
/// /// The format of the returned slice is RGBA8. diff --git a/src/backend/renderer/multigpu/egl.rs b/src/backend/renderer/multigpu/egl.rs new file mode 100644 index 000000000000..8b17fc3d5642 --- /dev/null +++ b/src/backend/renderer/multigpu/egl.rs @@ -0,0 +1,218 @@ +//! Implementation of the multi-gpu [`GraphicsApi`] using +//! EGL for device enumeration and OpenGL ES for rendering. + +use crate::backend::{ + drm::{CreateDrmNodeError, DrmNode, NodeType}, + egl::{EGLContext, EGLDevice, EGLDisplay, Error as EGLError}, + renderer::{ + gles2::{Gles2Error, Gles2Renderer}, + multigpu::{ApiDevice, GraphicsApi}, + }, + SwapBuffersError, +}; +#[cfg(all( + feature = "wayland_frontend", + feature = "backend_egl", + feature = "use_system_lib" +))] +use crate::{ + backend::{ + allocator::dmabuf::Dmabuf, + egl::display::EGLBufferReader, + renderer::{ + multigpu::{Error as MultigpuError, MultiRenderer}, + ImportEgl, Offscreen, Renderer, + }, + }, + reexports::wayland_server::protocol::wl_buffer, + utils::{Buffer, Rectangle}, +}; + +/// Errors raised by the [`EglGlesBackend`] +#[derive(Debug, thiserror::Error)] +pub enum Error { + /// EGL api error + #[error(transparent)] + Egl(#[from] EGLError), + /// OpenGL error + #[error(transparent)] + Gl(#[from] Gles2Error), + /// Error creating a drm node + #[error(transparent)] + DrmNode(#[from] CreateDrmNodeError), +} + +impl From for SwapBuffersError { + fn from(err: Error) -> SwapBuffersError { + match err { + x @ Error::DrmNode(_) | x @ Error::Egl(_) => SwapBuffersError::ContextLost(Box::new(x)), + Error::Gl(x) => x.into(), + } + } +} + +/// A [`GraphicsApi`] utilizing EGL for device enumeration and OpenGL ES for rendering. 
+/// +/// If not necessary for other operations, it is recommended to not use a +/// [`Gles2Texture`](crate::backend::renderer::gles2::Gles2Texture), but a +/// [`Gles2Renderbuffer`](crate::backend::renderer::gles2::Gles2Renderbuffer) +/// as a `Target`, when creating [`MultiRenderer`](super::MultiRenderer)s +#[derive(Debug)] +pub struct EglGlesBackend; +impl GraphicsApi for EglGlesBackend { + type Device = EglGlesDevice; + type Error = Error; + + fn enumerate(&self, list: &mut Vec, log: &slog::Logger) -> Result<(), Self::Error> { + let devices = EGLDevice::enumerate() + .map_err(Error::Egl)? + .flat_map(|device| { + let path = device.drm_device_path().ok()?; + Some(( + device, + DrmNode::from_path(path) + .ok()? + .node_with_type(NodeType::Render)? + .ok()?, + )) + }) + .collect::>(); + // remove old stuff + list.retain(|renderer| devices.iter().any(|(_, node)| &renderer.node == node)); + // add new stuff + let new_renderers = devices + .into_iter() + .filter(|(_, node)| !list.iter().any(|renderer| &renderer.node == node)) + .map(|(device, node)| { + slog::info!(log, "Trying to initialize {:?} from {}", device, node); + let display = EGLDisplay::new(&device, None).map_err(Error::Egl)?; + let context = EGLContext::new(&display, None).map_err(Error::Egl)?; + let renderer = unsafe { Gles2Renderer::new(context, None).map_err(Error::Gl)? 
}; + + Ok(EglGlesDevice { + node, + _device: device, + _display: display, + renderer, + }) + }) + .flat_map(|x: Result| match x { + Ok(x) => Some(x), + Err(x) => { + slog::warn!(log, "Skipping EGLDevice: {}", x); + None + } + }) + .collect::>(); + list.extend(new_renderers); + // but don't replace already initialized renderers + + Ok(()) + } +} + +/// [`ApiDevice`] of the [`EglGlesBackend`] +#[derive(Debug)] +pub struct EglGlesDevice { + node: DrmNode, + renderer: Gles2Renderer, + _display: EGLDisplay, + _device: EGLDevice, +} + +impl ApiDevice for EglGlesDevice { + type Renderer = Gles2Renderer; + + fn renderer(&self) -> &Self::Renderer { + &self.renderer + } + fn renderer_mut(&mut self) -> &mut Self::Renderer { + &mut self.renderer + } + fn node(&self) -> &DrmNode { + &self.node + } +} + +#[cfg(all( + feature = "wayland_frontend", + feature = "backend_egl", + feature = "use_system_lib" +))] +impl<'a, 'b, Target> ImportEgl for MultiRenderer<'a, 'b, EglGlesBackend, EglGlesBackend, Target> +where + Gles2Renderer: Offscreen, +{ + fn bind_wl_display(&mut self, display: &wayland_server::Display) -> Result<(), EGLError> { + self.render.renderer_mut().bind_wl_display(display) + } + fn unbind_wl_display(&mut self) { + self.render.renderer_mut().unbind_wl_display() + } + fn egl_reader(&self) -> Option<&EGLBufferReader> { + self.render.renderer().egl_reader() + } + + fn import_egl_buffer( + &mut self, + buffer: &wl_buffer::WlBuffer, + surface: Option<&crate::wayland::compositor::SurfaceData>, + damage: &[Rectangle], + ) -> Result<::TextureId, ::Error> { + if let Some(ref mut renderer) = self.target.as_mut() { + if let Ok(dmabuf) = Self::try_import_egl(renderer.renderer_mut(), buffer) { + let node = *renderer.node(); + return self.import_dmabuf_internal(Some(node), &dmabuf, surface, Some(buffer), Some(damage)); + } + } + for renderer in self.other_renderers.iter_mut() { + if let Ok(dmabuf) = Self::try_import_egl(renderer.renderer_mut(), buffer) { + let node = 
*renderer.node(); + return self.import_dmabuf_internal(Some(node), &dmabuf, surface, Some(buffer), Some(damage)); + } + } + Err(MultigpuError::DeviceMissing) + } +} + +#[cfg(all( + feature = "wayland_frontend", + feature = "backend_egl", + feature = "use_system_lib" +))] +impl<'a, 'b, Target> MultiRenderer<'a, 'b, EglGlesBackend, EglGlesBackend, Target> { + fn try_import_egl( + renderer: &mut Gles2Renderer, + buffer: &wl_buffer::WlBuffer, + ) -> Result> { + if !renderer.extensions.iter().any(|ext| ext == "GL_OES_EGL_image") { + return Err(MultigpuError::Render(Gles2Error::GLExtensionNotSupported(&[ + "GL_OES_EGL_image", + ]))); + } + + if renderer.egl_reader().is_none() { + return Err(MultigpuError::Render(Gles2Error::EGLBufferAccessError( + crate::backend::egl::BufferAccessError::NotManaged(crate::backend::egl::EGLError::BadDisplay), + ))); + } + renderer + .make_current() + .map_err(Gles2Error::from) + .map_err(MultigpuError::Render)?; + + let egl = renderer + .egl_reader() + .as_ref() + .unwrap() + .egl_buffer_contents(buffer) + .map_err(Gles2Error::EGLBufferAccessError) + .map_err(MultigpuError::Render)?; + renderer + .egl_context() + .display + .create_dmabuf_from_image(egl.image(0).unwrap(), egl.size, egl.y_inverted) + .map_err(Gles2Error::BindBufferEGLError) + .map_err(MultigpuError::Render) + } +} diff --git a/src/backend/renderer/multigpu/mod.rs b/src/backend/renderer/multigpu/mod.rs new file mode 100644 index 000000000000..b877031d8079 --- /dev/null +++ b/src/backend/renderer/multigpu/mod.rs @@ -0,0 +1,1629 @@ +//! +//! This module aims to make multi-gpu setups easier to handle for compositors. +//! +//! Its main entry point is the [`GpuManager`]. Initializing this with a +//! [`GraphicsApi`] implementation will allow you to create [`MultiRenderer`]s. +//! +//! smithay provides the following graphics apis: +//! - [`egl::EglGlesBackend`] +//! +//! A [`MultiRenderer`] gets created using two [`DrmNode`]s to identify gpus. +//! 
One gpu will be referred to as the render-gpu, the other as the target-gpu. +//! +//! Note: The render- and target-gpu may be identical to allow the multigpu +//! module to be used on single-gpu systems as well, avoiding supporting multiple code-paths. +//! Doing so will not result in worse performance compared to rendering without the multi-gpu module. +//! +//! A [`MultiRenderer`] will support the [`Renderer`](super::Renderer)-trait as well +//! as the other corresponding traits of the [`renderer`](crate::backend::renderer)-module, +//! if the [`GraphicsApi`] allows it. +//! +//! Any rendering operations will take place on the render-gpu transparently. +//! Output will be redirected to the target gpu and as such any [`Bind`]- and [`Offscreen`]-implementations +//! will be allocated on the target-gpu. +//! +//! Any `Import*`-implementations will also transparently create copies of client buffers, +//! if necessary, always striving for the best possible performance for a given setup. +//! +//! Any `Export*`-implementations will reside on the render-gpu, if applicable. +//! +//! *Note*: This module will not keep you from selecting sub-optimal configurations. +//! Any heuristics for which render-gpu to use for a given set of client buffers +//! and desired target-gpu are left to be implemented by the compositor. The module only +//! reduces the amount of necessary setup operations. +//! + +use super::*; +use std::{ + any::{Any, TypeId}, + cell::{Ref, RefCell}, + collections::HashMap, + fmt, + rc::Rc, + sync::Mutex, +}; + +use crate::{ + backend::{ + allocator::{Buffer, Format}, + drm::DrmNode, + SwapBuffersError, + }, + utils::{Buffer as BufferCoords, Physical, Size}, +}; + +pub mod egl; + +lazy_static::lazy_static! 
{ + static ref CAN_IMPORT: Mutex> = Mutex::new(HashMap::new()); +} + +/// Tracks available gpus from a given [`GraphicsApi`] +#[derive(Debug)] +pub struct GpuManager { + api: A, + devices: Vec, + log: ::slog::Logger, +} + +/// Errors generated by [`GpuManager`] and [`MultiRenderer`]. +#[derive(thiserror::Error)] +pub enum Error +where + R::Error: 'static, + T::Error: 'static, + <::Renderer as Renderer>::Error: 'static, + <::Renderer as Renderer>::Error: 'static, +{ + /// The graphics api has not found any devices + #[error("No devices found")] + NoDevices, + /// The graphics api errored on device enumeration + #[error("The render graphics api failed enumerating devices {0:?}")] + RenderApiError(#[source] R::Error), + /// The graphics api errored on device enumeration + #[error("The target graphics api failed enumerating devices {0:?}")] + TargetApiError(#[source] T::Error), + /// The graphics api has found no node matching the drm node + #[error("The graphics api has found no node matching {0:?}")] + NoDevice(DrmNode), + /// The device requested did not match the expected + #[error("The devices requested {0:?} did not match the expected")] + MismatchedDevice(DrmNode), + /// The device has gone missing + #[error("The device has gone missing")] + DeviceMissing, + /// Error on the rendering device + #[error("Error on the rendering device: {0:}")] + Render(#[source] <::Renderer as Renderer>::Error), + /// Error on the target device + #[error("Error on the target device: {0:}")] + Target(#[source] <::Renderer as Renderer>::Error), + /// Failed to import buffer using the api on any device + #[error("Failed to import buffer")] + ImportFailed, +} + +impl fmt::Debug for Error +where + R::Error: 'static, + T::Error: 'static, + <::Renderer as Renderer>::Error: 'static, + <::Renderer as Renderer>::Error: 'static, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Error::NoDevices => write!(f, "Error::NoDevices"), + Error::RenderApiError(err) => 
write!(f, "Error::RenderApiError({:?})", err), + Error::TargetApiError(err) => write!(f, "Error::TargetApiError({:?})", err), + Error::NoDevice(dev) => write!(f, "Error::NoDevice({:?})", dev), + Error::MismatchedDevice(dev) => write!(f, "Error::MismatchedDevice({:?})", dev), + Error::DeviceMissing => write!(f, "Error::DeviceMissing"), + Error::Render(err) => write!(f, "Error::Render({:?})", err), + Error::Target(err) => write!(f, "Error::Target({:?})", err), + Error::ImportFailed => write!(f, "Error::ImportFailed"), + } + } +} + +impl From> for SwapBuffersError +where + R::Error: Into + Send + Sync, + T::Error: Into + Send + Sync, + <::Renderer as Renderer>::Error: Into + Send + Sync, + <::Renderer as Renderer>::Error: Into + Send + Sync, +{ + fn from(err: Error) -> SwapBuffersError { + match err { + x @ Error::NoDevices | x @ Error::NoDevice(_) | x @ Error::DeviceMissing => { + SwapBuffersError::ContextLost(Box::new(x)) + } + x @ Error::MismatchedDevice(_) | x @ Error::ImportFailed => { + SwapBuffersError::TemporaryFailure(Box::new(x)) + } + Error::RenderApiError(x) => x.into(), + Error::TargetApiError(x) => x.into(), + Error::Render(x) => x.into(), + Error::Target(x) => x.into(), + } + } +} + +impl GpuManager { + /// Create a new [`GpuManager`] for a given [`GraphicsApi`]. + pub fn new(api: A, log: impl Into>) -> Result, Error> { + let log = crate::slog_or_fallback(log); + let mut devices = Vec::new(); + api.enumerate(&mut devices, &log).map_err(Error::RenderApiError)?; + if devices.is_empty() { + return Err(Error::NoDevices); + } + + Ok(GpuManager { api, devices, log }) + } + + /// Create a [`MultiRenderer`]. + /// + /// - `source_device` should be a function to tell the renderer, which gpu a client buffer is allocated on, + /// if applicable (e.g. for dmabuf based buffers). In cases where this would be meaningless (e.g. shm-based buffers) + /// or the origin of a buffer cannot be determined with certainty, this function needs to return `None`. 
+ /// - `render_device` should referr to the gpu node rendering operations will take place upon. + /// - `target_device` should referr to the gpu node the composited buffer will end up upon + /// + /// - the `Target` generic argument referrs to the object used by the `render_device` to render to before + /// transferring the data to the `target_device`. Referr to [`Offscreen`](super::Offscreen)-implementations + /// to find supported options and referr to the documentations of the used `GraphicsApi` for possible + /// (performance) implication of selecting a specific `Target`. + pub fn renderer<'a, F, Target>( + &'a mut self, + source_device: F, + render_device: &DrmNode, + target_device: &DrmNode, + ) -> Result, Error> + where + ::Renderer: Offscreen, + F: FnMut(&wl_buffer::WlBuffer) -> Option + 'static, + { + if !self.devices.iter().any(|device| device.node() == render_device) + || !self.devices.iter().any(|device| device.node() == target_device) + { + self.api + .enumerate(&mut self.devices, &self.log) + .map_err(Error::RenderApiError)?; + } + + if !self.devices.iter().any(|device| device.node() == render_device) { + return Err(Error::NoDevice(*render_device)); + } + if !self.devices.iter().any(|device| device.node() == target_device) { + return Err(Error::NoDevice(*target_device)); + } + + let (mut render, others) = self + .devices + .iter_mut() + .partition::, _>(|device| device.node() == render_device); + if target_device != render_device { + let (mut target, others) = others + .into_iter() + .partition::, _>(|device| device.node() == target_device); + + Ok(MultiRenderer { + source: Box::leak(Box::new(source_device)), + render: RenderDevice::Device(render.remove(0)), + target: Some(target.remove(0)), + other_renderers: others, + proxy_framebuffer: std::marker::PhantomData, + log: self.log.clone(), + }) + } else { + Ok(MultiRenderer { + source: Box::leak(Box::new(source_device)), + render: RenderDevice::Device(render.remove(0)), + target: None, + 
other_renderers: others, + proxy_framebuffer: std::marker::PhantomData, + log: self.log.clone(), + }) + } + } + + /// Create a [`MultiRenderer`] from two different [`GraphicsApi`]s. + /// + /// - `render_api` should be the [`GpuManager`] used for the `render_device`. + /// - `target_api` should be the [`GpuManager`] used for the `target_device`. + /// - `source_device` should be a function to tell the renderer, which gpu a client buffer is allocated on, + /// if applicable (e.g. for dmabuf based buffers). In cases where this would be meaningless (e.g. shm-based buffers) + /// or the origin of a buffer cannot be determined with certainty, this function needs to return `None`. + /// - `render_device` should referr to the gpu node rendering operations will take place upon. + /// - `target_device` should referr to the gpu node the composited buffer will end up upon + /// + /// - the `Target` generic argument referrs to the object used by the `render_device` to render to before + /// transferring the data to the `target_device`. Referr to [`Offscreen`](super::Offscreen)-implementations + /// to find supported options and referr to the documentations of the used `GraphicsApi` for possible + /// (performance) implication of selecting a specific `Target`. 
+ pub fn cross_renderer<'a, 'b, B: GraphicsApi, F, Target>( + render_api: &'a mut Self, + target_api: &'b mut GpuManager, + source_device: F, + render_device: &DrmNode, + target_device: &DrmNode, + ) -> Result, Error> + where + ::Renderer: Offscreen, + F: FnMut(&wl_buffer::WlBuffer) -> Option + 'static, + { + if !render_api + .devices + .iter() + .any(|device| device.node() == render_device) + { + render_api + .api + .enumerate(&mut render_api.devices, &render_api.log) + .map_err(Error::RenderApiError)?; + } + if !target_api + .devices + .iter() + .any(|device| device.node() == target_device) + { + target_api + .api + .enumerate(&mut target_api.devices, &target_api.log) + .map_err(Error::TargetApiError)?; + } + + if !render_api + .devices + .iter() + .any(|device| device.node() == render_device) + { + return Err(Error::NoDevice(*render_device)); + } + if !target_api + .devices + .iter() + .any(|device| device.node() == target_device) + { + return Err(Error::NoDevice(*target_device)); + } + + let (mut render, others) = render_api + .devices + .iter_mut() + .partition::, _>(|device| device.node() == render_device); + if target_device != render_device { + let target = target_api + .devices + .iter_mut() + .find(|device| device.node() == target_device) + .unwrap(); + + Ok(MultiRenderer { + source: Box::leak(Box::new(source_device)), + render: RenderDevice::Device(render.remove(0)), + target: Some(target), + other_renderers: others, + proxy_framebuffer: std::marker::PhantomData, + log: render_api.log.clone(), + }) + } else { + Ok(MultiRenderer { + source: Box::leak(Box::new(source_device)), + render: RenderDevice::Device(render.remove(0)), + target: None, + other_renderers: others, + proxy_framebuffer: std::marker::PhantomData, + log: target_api.log.clone(), + }) + } + } + + /// Function for optimizing buffer imports across multiple gpus. 
+ /// + /// If you are using [`MultiRenderer`]s do rendering of your client buffers, + /// you can call `early_import` on commit to start necessary copy processes early. + /// + /// - `source` may specify on which gpu node the provided buffer is allocated, if applicable. + /// - `target` referrs to the gpu node, that the buffer needs to be accessable on later. + /// *Note*: Usually this will be **render**ing gpu of a [`MultiRenderer`] + /// - `buffer` is the client buffer you wish to import + /// - `surface` is the [`SurfaceData`] of the matching wl_surface, used for caching the pre-computed results + /// - `damage` is the damge of the buffer. + pub fn early_import( + &mut self, + source: Option, + target: DrmNode, + buffer: &wl_buffer::WlBuffer, + surface: &SurfaceData, + damage: &[Rectangle], + ) -> Result<(), Error> + where + A: 'static, + ::Renderer: ImportMem + ImportDma + ExportMem, + <::Renderer as ExportMem>::TextureMapping: 'static, + { + match buffer_type(buffer) { + Some(BufferType::Dma) => { + let (mut target_device, mut others) = self + .devices + .iter_mut() + .partition::, _>(|device| device.node() == &target); + let target_device = target_device.get_mut(0).ok_or(Error::DeviceMissing)?; + let format = buffer.as_ref().user_data().get::().unwrap().format(); + + let source = source.filter(|source| source != &target); + let can_import = source.as_ref().map(|_| CAN_IMPORT.lock().unwrap()); + let might_import = source + .and_then(|source| can_import.as_ref().unwrap().get(&(source, target, format))) + .copied() + .unwrap_or(true); + + if might_import { + match target_device + .renderer_mut() + .import_dma_buffer(buffer, Some(surface), damage) + { + Ok(imported) => { + if let (Some(ref mut can_import), Some(source)) = (can_import, source) { + can_import.insert((source, target, format), true); + } + let mut texture = MultiTexture::from_surface( + Some(surface), + Some(buffer.clone()), + imported.size(), + ); + texture.insert_texture::(target, imported); + 
surface.data_map.insert_if_missing(|| texture.0); + return Ok(()); + } + Err(err) => { + slog::warn!( + self.log, + "Error importing dmabuf (format: {:?}) from {:?} to {}: {}", + format, + source, + target, + err + ); + slog::info!(self.log, "Falling back to cpu-copy."); + if let (Some(ref mut can_import), Some(source)) = (can_import, source) { + can_import.insert((source, target, format), false); + } + } + } + } + + // if we do need to do a memory copy, we start with the export here + for import_renderer in others + .iter_mut() + .filter(|device| source.as_ref().map(|src| src == device.node()).unwrap_or(true)) + { + if let Ok(texture) = + import_renderer + .renderer_mut() + .import_dma_buffer(buffer, Some(surface), damage) + { + let mut gpu_texture = + MultiTexture::from_surface(Some(surface), Some(buffer.clone()), texture.size()); + let mappings = if gpu_texture.get::(&target).is_none() { + // force full copy + let damage = Rectangle::from_loc_and_size((0, 0), texture.size()); + vec![( + damage, + import_renderer + .renderer_mut() + .copy_texture(&texture, damage) + .map_err(Error::Target)?, + )] + } else { + // do a partial copy + damage + .iter() + .copied() + .map(|damage| { + let mapping = import_renderer + .renderer_mut() + .copy_texture(&texture, damage) + .map_err(Error::Target)?; + Ok((damage, mapping)) + }) + .collect::, Error>>()? 
+ }; + gpu_texture.insert_mapping::( + *import_renderer.node(), + target, + texture.size(), + mappings.into_iter(), + ); + surface.data_map.insert_if_missing(|| gpu_texture.0); + return Ok(()); + } + } + + Err(Error::ImportFailed) + } + #[cfg(all( + feature = "wayland_frontend", + feature = "backend_egl", + feature = "use_system_lib" + ))] + Some(BufferType::Egl) => { + // we need specialization for requiring ImportEGL + // or require ImportAll, which will block this function for all + // renderers that cannot import egl buffers, so we just don't + // and sadly go the slow path + Ok(()) + } + Some(BufferType::Shm) => { + // this is async anyway + let shm_texture = self + .devices + .iter_mut() + .find(|dev| dev.node() == &target) + .ok_or(Error::DeviceMissing)? + .renderer_mut() + .import_shm_buffer(buffer, Some(surface), damage) + .map_err(Error::::Target)?; + let mut texture = MultiTexture::from_surface( + Some(surface), + Some(buffer.clone()), + buffer_dimensions(buffer).unwrap(), + ); + texture.insert_texture::(target, shm_texture); + surface.data_map.insert_if_missing(|| texture.0); + Ok(()) + } + None => { + // welp, nothing we can do + Ok(()) + } + } + } +} + +/// A graphics api, that supports enumerating graphics devices +pub trait GraphicsApi { + /// Devices this api produces + type Device: ApiDevice; + /// Errors this api returns + type Error: std::error::Error; + + /// Enumerate available devices by: + /// - removing gone devices from list + /// - adding new devices to list + /// + /// Existing devices are guranteed to be not recreated + fn enumerate(&self, list: &mut Vec, log: &slog::Logger) -> Result<(), Self::Error>; +} + +/// A device produced by a [`GraphicsApi`]. 
+pub trait ApiDevice { + /// The [`Renderer`](super::Renderer) this devices contains + type Renderer: Renderer; + + /// Returns a reference to the underlying renderer + fn renderer(&self) -> &Self::Renderer; + /// Returns a mutable reference to the underlying renderer + fn renderer_mut(&mut self) -> &mut Self::Renderer; + /// Returns a [`DrmNode`] representing the graphics device + fn node(&self) -> &DrmNode; +} + +/// Renderer, that transparently copies rendering results to another gpu, +/// as well as transparently importing client buffers residing on different gpus. +#[derive(Debug)] +pub struct MultiRenderer<'a, 'b, R: GraphicsApi, T: GraphicsApi, Target> { + source: *mut dyn FnMut(&wl_buffer::WlBuffer) -> Option, + render: RenderDevice<'a, R>, + target: Option<&'b mut T::Device>, + other_renderers: Vec<&'a mut R::Device>, + proxy_framebuffer: std::marker::PhantomData, + log: ::slog::Logger, +} + +// Hack for implementing Renderer::render.. +#[derive(Debug)] +enum RenderDevice<'a, A: GraphicsApi> { + Device(&'a mut A::Device), + // Hack to avoid lifetime problems in Renderer::render + Renderer(*mut ::Renderer, DrmNode), +} + +impl<'a, A: GraphicsApi> RenderDevice<'a, A> { + /* + fn unwrap_device(&mut self) -> &mut A::Device { + match self { + RenderDevice::Device(dev) => *dev, + RenderDevice::Renderer(_, _) => panic!("unwrap called on RenderDevice::Renderer"), + } + } + */ + + fn node(&self) -> &DrmNode { + match self { + RenderDevice::Device(dev) => dev.node(), + RenderDevice::Renderer(_, node) => node, + } + } + + fn renderer(&self) -> &::Renderer { + match self { + RenderDevice::Device(dev) => dev.renderer(), + RenderDevice::Renderer(renderer, _) => unsafe { &**renderer }, + } + } + + fn renderer_mut(&mut self) -> &mut ::Renderer { + match self { + RenderDevice::Device(dev) => dev.renderer_mut(), + RenderDevice::Renderer(renderer, _) => unsafe { &mut **renderer }, + } + } +} + +impl<'a, 'b, R: GraphicsApi, T: GraphicsApi, Target> Drop for MultiRenderer<'a, 
'b, R, T, Target> { + fn drop(&mut self) { + let _ = unsafe { Box::from_raw(self.source as *mut _) }; + } +} + +/// [`Frame`](super::Frame) implementation of a [`MultiRenderer`]. +#[derive(Debug)] +pub struct MultiFrame { + node: DrmNode, + frame: *mut <::Renderer as Renderer>::Frame, + damage: Vec>, + // We need this for the associated Error type of the Frame implementation + _target: std::marker::PhantomData, + log: ::slog::Logger, +} + +impl<'a, 'b, R: GraphicsApi, T: GraphicsApi, Target> Unbind for MultiRenderer<'a, 'b, R, T, Target> +where + ::Renderer: Unbind, + ::Renderer: Unbind, + // We need this because the Renderer-impl does and Unbind requires Renderer + R: 'static, + R::Error: 'static, + T::Error: 'static, + ::Renderer: Offscreen + ExportDma + ExportMem + ImportDma + ImportMem, + ::Renderer: ImportDma + ImportMem, + <::Renderer as Renderer>::Error: 'static, + <::Renderer as Renderer>::Error: 'static, +{ + fn unbind(&mut self) -> Result<(), ::Error> { + if let Some(target) = self.target.as_mut() { + target.renderer_mut().unbind().map_err(Error::Target) + } else { + self.render.renderer_mut().unbind().map_err(Error::Render) + } + } +} + +impl<'a, 'b, R: GraphicsApi, T: GraphicsApi, Target, Other> Offscreen + for MultiRenderer<'a, 'b, R, T, Other> +where + ::Renderer: Offscreen, + ::Renderer: Offscreen, + // We need these because the Bind-impl does and Offscreen requires Bind + ::Renderer: Bind, + ::Renderer: Bind, + // We need these because the Unbind-impl does and Offscreen requires Bind, which requires Unbind + ::Renderer: Unbind, + ::Renderer: Unbind, + // We need these because the Renderer-impl does and Offscreen requires Bind, which requires Unbind, which requires Renderer + R: 'static, + R::Error: 'static, + T::Error: 'static, + ::Renderer: Offscreen + ExportDma + ExportMem + ImportDma + ImportMem, + ::Renderer: ImportDma + ImportMem, + <::Renderer as Renderer>::Error: 'static, + <::Renderer as Renderer>::Error: 'static, +{ + fn create_buffer(&mut 
self, size: Size) -> Result::Error> { + if let Some(target) = self.target.as_mut() { + target.renderer_mut().create_buffer(size).map_err(Error::Target) + } else { + self.render + .renderer_mut() + .create_buffer(size) + .map_err(Error::Render) + } + } +} + +impl<'a, 'b, R: GraphicsApi, T: GraphicsApi, Target, Other> Bind + for MultiRenderer<'a, 'b, R, T, Other> +where + ::Renderer: Bind, + ::Renderer: Bind, + // We need these because the Unbind-impl does and Bind requires Unbind + ::Renderer: Unbind, + ::Renderer: Unbind, + // We need this because the Renderer-impl does and Bind requires Unbind, which requires Renderer + R: 'static, + R::Error: 'static, + T::Error: 'static, + ::Renderer: Offscreen + ExportDma + ExportMem + ImportDma + ImportMem, + ::Renderer: ImportDma + ImportMem, + <::Renderer as Renderer>::Error: 'static, + <::Renderer as Renderer>::Error: 'static, +{ + fn bind(&mut self, bind: Target) -> Result<(), ::Error> { + if let Some(target) = self.target.as_mut() { + target.renderer_mut().bind(bind).map_err(Error::Target) + } else { + self.render.renderer_mut().bind(bind).map_err(Error::Render) + } + } + + fn supported_formats(&self) -> Option> { + if let Some(target) = self.target.as_ref() { + target.renderer().supported_formats() + } else { + Bind::::supported_formats(self.render.renderer()) + } + } +} + +static MAX_CPU_COPIES: usize = 3; // TODO, benchmark this + +impl<'a, 'b, R: GraphicsApi, T: GraphicsApi, Target> Renderer for MultiRenderer<'a, 'b, R, T, Target> +where + R: 'static, + R::Error: 'static, + T::Error: 'static, + ::Renderer: Offscreen + ExportDma + ExportMem + ImportDma + ImportMem, + ::Renderer: ImportDma + ImportMem, + <::Renderer as Renderer>::Error: 'static, + <::Renderer as Renderer>::Error: 'static, +{ + type Error = Error; + type TextureId = MultiTexture; + type Frame = MultiFrame; + + fn downscale_filter(&mut self, filter: TextureFilter) -> Result<(), Self::Error> { + self.render + .renderer_mut() + .downscale_filter(filter) + 
.map_err(Error::Render) + } + fn upscale_filter(&mut self, filter: TextureFilter) -> Result<(), Self::Error> { + self.render + .renderer_mut() + .upscale_filter(filter) + .map_err(Error::Render) + } + + fn render( + &mut self, + size: Size, + dst_transform: Transform, + rendering: F, + ) -> Result + where + F: FnOnce(&mut Self, &mut Self::Frame) -> Res, + { + let buffer_size = size.to_logical(1).to_buffer(1, dst_transform); + if self.target.is_some() { + let render_buffer = Offscreen::::create_buffer(self.render.renderer_mut(), buffer_size) + .map_err(Error::Render)?; + self.render + .renderer_mut() + .bind(render_buffer) + .map_err(Error::Render)?; + } + + let node = *self.render.node(); + let source_ref = self.source; + // we need to move some stuff into the closure temporarily + let mut target = self.target.take(); + let mut other_renderers = self.other_renderers.drain(..).collect::>(); + let target_ref = &mut target; + let other_renderers_ref = &mut other_renderers; + let log = self.log.clone(); + let res = self + .render + .renderer_mut() + .render(size, dst_transform, move |render, frame| { + let mut new_renderer = MultiRenderer { + source: source_ref, + render: RenderDevice::Renderer(render, node), + target: target_ref.take(), + other_renderers: other_renderers_ref.drain(..).collect(), + proxy_framebuffer: std::marker::PhantomData, + log: log.clone(), + }; + let mut frame = MultiFrame { + node, + frame, // we cheat here and use a raw-ptr, because otherwise our associated type would gain an unconstrained lifetime parameter + damage: Vec::new(), + _target: std::marker::PhantomData::, + log, + }; + + let res = rendering(&mut new_renderer, &mut frame); + // don't return them, but reset the reference, so we can restore self on error + *target_ref = new_renderer.target.take(); + *other_renderers_ref = new_renderer.other_renderers.drain(..).collect(); + // don't free source + std::mem::forget(new_renderer); + (res, frame.damage) + }) + .map_err(Error::Render); + // 
restore self + self.target = target; + self.other_renderers = other_renderers; + // then possibly return the error + let (res, damage) = res?; + let mut damage = damage + .into_iter() + .map(|rect| { + rect.to_logical(1) + .to_buffer(1, dst_transform, &size.to_logical(1)) + }) + .collect::>(); + + if let Some(target) = self.target.as_mut() { + { + let mut can_import = CAN_IMPORT.lock().unwrap(); + let dmabuf = self + .render + .renderer_mut() + .export_framebuffer(buffer_size) + .map_err(Error::Render)?; + let might_import = *can_import + .get(&(*self.render.node(), *target.node(), dmabuf.format())) + .unwrap_or(&true); + if might_import { + // try gpu copy + match target.renderer_mut().import_dmabuf(&dmabuf, Some(&damage)) { + Ok(texture) => { + // import successful + target + .renderer_mut() + .render(size, dst_transform, |_renderer, frame| { + frame.render_texture_from_to( + &texture, + Rectangle::from_loc_and_size((0, 0), buffer_size), + Rectangle::from_loc_and_size((0, 0), size).to_f64(), + &damage, + dst_transform.invert(), + 1.0, + ) + }) + .and_then(std::convert::identity) + .map_err(Error::Target)?; + + can_import.insert((*self.render.node(), *target.node(), dmabuf.format()), true); + return Ok(res); + } + Err(err) => { + let (source, target, format) = + (*self.render.node(), *target.node(), dmabuf.format()); + slog::warn!( + self.log, + "Error importing dmabuf (format: {:?}) from {} to {}: {}", + format, + source, + target, + err + ); + slog::info!(self.log, "Falling back to cpu-copy."); + can_import.insert((source, target, format), false); + } + } + } + } + + // cpu copy + if damage.len() > MAX_CPU_COPIES { + damage = Vec::from([Rectangle::from_loc_and_size((0, 0), buffer_size)]); + } + damage.dedup(); + damage.retain(|rect| rect.overlaps(Rectangle::from_loc_and_size((0, 0), buffer_size))); + damage.retain(|rect| rect.size.h > 0 && rect.size.w > 0); + // merge overlapping rectangles + damage = damage.into_iter().fold(Vec::new(), |new_damage, mut rect| { 
+ // replace with drain_filter, when that becomes stable to reuse the original Vec's memory + let (overlapping, mut new_damage): (Vec<_>, Vec<_>) = + new_damage.into_iter().partition(|other| other.overlaps(rect)); + + for overlap in overlapping { + rect = rect.merge(overlap); + } + new_damage.push(rect); + new_damage + }); + + let mut mappings = Vec::new(); + for rect in damage { + let mapping = ( + self.render + .renderer_mut() + .copy_framebuffer(rect) + .map_err(Error::Render)?, + rect, + ); + mappings.push(mapping); + } + mappings.sort_by(|map1, map2| { + let size1 = map1.1.size; + let size2 = map2.1.size; + (size1.w * size1.h).cmp(&(size2.w * size2.h)) + }); + + let render = &mut self.render; + target + .renderer_mut() + .render(size, dst_transform, move |target, frame| { + for mapping in mappings { + let slice = render + .renderer_mut() + .map_texture(&mapping.0) + .map_err(Error::Render::)?; + let transform = if mapping.0.flipped() { + Transform::Flipped180 + } else { + Transform::Normal + }; + let texture = target + .import_memory(slice, mapping.1.size, mapping.0.flipped()) + .map_err(Error::Target)?; + frame + .render_texture_from_to( + &texture, + Rectangle::from_loc_and_size((0, 0), mapping.1.size), + mapping + .1 + .to_logical(1, transform + dst_transform.invert(), &buffer_size) + .to_physical(1) + .to_f64(), + &[Rectangle::from_loc_and_size((0, 0), mapping.1.size)], + Transform::Normal, + 1.0, + ) + .map_err(Error::Target)?; + } + Ok(()) + }) + .map_err(Error::Target) + .and_then(std::convert::identity)?; + } + + Ok(res) + } +} + +/// [`Texture`](super::Texture)s produced by a [`MultiRenderer`]. 
+#[derive(Debug, Clone)] +pub struct MultiTexture(Rc>); +#[derive(Debug)] +struct MultiTextureInternal { + textures: HashMap>, + source: Option, + size: Size, +} + +type DamageAnyTextureMappings = Vec<(Rectangle, Box)>; +#[derive(Debug)] +struct GpuSingleTexture { + mapping: Option<(DrmNode, DamageAnyTextureMappings)>, + texture: Option>, +} + +impl MultiTexture { + fn from_surface( + surface: Option<&crate::wayland::compositor::SurfaceData>, + buffer: Option, + size: Size, + ) -> MultiTexture { + let internal = surface + .and_then(|surface| { + surface + .data_map + .get::>>() + .cloned() + }) + .unwrap_or_else(|| { + Rc::new(RefCell::new(MultiTextureInternal { + textures: HashMap::new(), + source: None, + size, + })) + }); + internal.borrow_mut().source = buffer; + MultiTexture(internal) + } + + fn get( + &self, + render: &DrmNode, + ) -> Option::Renderer as Renderer>::TextureId>> + where + <::Renderer as Renderer>::TextureId: 'static, + { + let tex = self.0.borrow(); + // TODO: use Ref::filter_map when stabilized + if tex + .textures + .get(&TypeId::of::()) + .and_then(|textures| textures.get(render)) + .and_then(|texture| texture.texture.as_ref()) + .and_then(|texture| { + ::downcast_ref::<<::Renderer as Renderer>::TextureId>( + &**texture, + ) + }) + .is_some() + { + Some(Ref::map(tex, |tex| { + tex.textures.get(&TypeId::of::()) + .and_then(|textures| textures.get(render)) + .and_then(|texture| texture.texture.as_ref()) + .and_then(|texture| ::downcast_ref::<<::Renderer as Renderer>::TextureId>(&**texture)) + .unwrap() + })) + } else { + None + } + } + + fn insert_texture( + &mut self, + render: DrmNode, + texture: <::Renderer as Renderer>::TextureId, + ) where + <::Renderer as Renderer>::TextureId: 'static, + { + let mut tex = self.0.borrow_mut(); + let textures = tex.textures.entry(TypeId::of::()).or_default(); + textures.insert( + render, + GpuSingleTexture { + mapping: None, + texture: Some(Box::new(texture) as Box<_>), + }, + ); + } + + fn insert_mapping< 
+ A: GraphicsApi + 'static, + I: Iterator< + Item = ( + Rectangle, + <::Renderer as ExportMem>::TextureMapping, + ), + >, + >( + &mut self, + source: DrmNode, + render: DrmNode, + size: Size, + new_mappings: I, + ) where + ::Renderer: ExportMem, + <::Renderer as ExportMem>::TextureMapping: 'static, + { + let mut tex = self.0.borrow_mut(); + let textures = tex.textures.entry(TypeId::of::()).or_default(); + let (old_texture, old_mapping) = textures + .remove(&render) + .map(|single| (single.texture, single.mapping)) + .unwrap_or((None, None)); + let old_texture = old_texture.filter(|tex| { + ::downcast_ref::<<::Renderer as Renderer>::TextureId>(tex) + .map(|tex| tex.size()) + == Some(size) + }); + let mut mappings = old_mapping + .filter(|(old_src, _)| *old_src == source) + .map(|(_, mappings)| mappings) + .unwrap_or_default(); + mappings.extend(new_mappings.map(|(r, m)| (r, Box::new(m) as Box<_>))); + textures.insert( + render, + GpuSingleTexture { + mapping: Some((source, mappings)), + texture: old_texture, + }, + ); + } +} + +impl Texture for MultiTexture { + fn size(&self) -> Size { + self.0.borrow().size + } + fn width(&self) -> u32 { + self.0.borrow().size.w as u32 + } + fn height(&self) -> u32 { + self.0.borrow().size.h as u32 + } +} + +impl Frame for MultiFrame +where + R: 'static, + R::Error: 'static, + T::Error: 'static, + <::Renderer as Renderer>::Error: 'static, + <::Renderer as Renderer>::Error: 'static, +{ + type Error = Error; + type TextureId = MultiTexture; + + fn clear(&mut self, color: [f32; 4], at: &[Rectangle]) -> Result<(), Self::Error> { + self.damage.extend(at); + unsafe { &mut *self.frame } + .clear(color, at) + .map_err(Error::Render) + } + + fn render_texture_from_to( + &mut self, + texture: &Self::TextureId, + src: Rectangle, + dst: Rectangle, + damage: &[Rectangle], + src_transform: Transform, + alpha: f32, + ) -> Result<(), Self::Error> { + if let Some(texture) = texture.get::(&self.node) { + self.damage.extend(damage.iter().map(|rect| { 
+ let src = src.to_f64(); + let rect = rect.to_f64(); + let (x, y, w, h) = (rect.loc.x, rect.loc.y, rect.size.w, rect.size.h); + Rectangle::from_loc_and_size( + ( + ((x - src.loc.x) / src.size.w * dst.size.w) + dst.loc.x, + ((y - src.loc.y) / src.size.h * dst.size.h) + dst.loc.y, + ), + (w / src.size.w * dst.size.w, h / src.size.h * dst.size.h), + ) + .to_i32_round() + })); + unsafe { &mut *self.frame } + .render_texture_from_to(&*texture, src, dst, damage, src_transform, alpha) + .map_err(Error::Render) + } else { + slog::warn!( + self.log, + "Failed to render texture, import for wrong devices? {:?}", + texture + ); + Ok(()) + } + } + + fn transformation(&self) -> Transform { + unsafe { &mut *self.frame }.transformation() + } +} + +impl<'a, 'b, R: GraphicsApi, T: GraphicsApi, Target> ImportMem for MultiRenderer<'a, 'b, R, T, Target> +where + ::Renderer: ImportMem, + // We need this because the Renderer-impl does and ImportMem requires Renderer + R: 'static, + R::Error: 'static, + T::Error: 'static, + ::Renderer: Offscreen + ExportDma + ExportMem + ImportDma + ImportMem, + ::Renderer: ImportDma + ImportMem, + <::Renderer as Renderer>::Error: 'static, + <::Renderer as Renderer>::Error: 'static, +{ + fn import_shm_buffer( + &mut self, + buffer: &wl_buffer::WlBuffer, + surface: Option<&crate::wayland::compositor::SurfaceData>, + damage: &[Rectangle], + ) -> Result<::TextureId, ::Error> { + let shm_texture = self + .render + .renderer_mut() + .import_shm_buffer(buffer, surface, damage) + .map_err(Error::Render)?; + let mut texture = + MultiTexture::from_surface(surface, Some(buffer.clone()), buffer_dimensions(buffer).unwrap()); + texture.insert_texture::(*self.render.node(), shm_texture); + Ok(texture) + } + + fn import_memory( + &mut self, + data: &[u8], + size: Size, + flipped: bool, + ) -> Result<::TextureId, ::Error> { + let mem_texture = self + .render + .renderer_mut() + .import_memory(data, size, flipped) + .map_err(Error::Render)?; + let mut texture = 
MultiTexture::from_surface(None, None, size); + texture.insert_texture::(*self.render.node(), mem_texture); + Ok(texture) + } + + fn update_memory( + &mut self, + texture: &::TextureId, + data: &[u8], + region: Rectangle, + ) -> Result<(), ::Error> { + let texture = MultiTexture::from_surface(None, None, texture.size()); + let mem_texture = texture + .get::(self.render.node()) + .ok_or_else(|| Error::MismatchedDevice(*self.render.node()))?; + self.render + .renderer_mut() + .update_memory(&*mem_texture, data, region) + .map_err(Error::Render) + } + + fn shm_formats(&self) -> &[wl_shm::Format] { + self.render.renderer().shm_formats() + } +} + +impl<'a, 'b, R: GraphicsApi, T: GraphicsApi, Target> ImportDma for MultiRenderer<'a, 'b, R, T, Target> +where + ::Renderer: ImportDma + ImportMem + ExportMem, + ::Renderer: ExportMem, + <::Renderer as ExportMem>::TextureMapping: 'static, + <::Renderer as ExportMem>::TextureMapping: 'static, + T: 'static, + // We need this because the Renderer-impl does and ImportDma requires Renderer + R: 'static, + ::Renderer: Offscreen + ExportDma + ExportMem + ImportDma + ImportMem, + ::Renderer: ImportDma + ImportMem, + <::Renderer as Renderer>::Error: 'static, + <::Renderer as Renderer>::Error: 'static, +{ + fn dmabuf_formats<'c>(&'c self) -> Box + 'c> { + self.render.renderer().dmabuf_formats() + } + + fn import_dma_buffer( + &mut self, + buffer: &wl_buffer::WlBuffer, + surface: Option<&SurfaceData>, + damage: &[Rectangle], + ) -> Result<::TextureId, ::Error> { + let dmabuf = buffer + .as_ref() + .user_data() + .get::() + .expect("import_dma_buffer without checking buffer type?"); + let tranch = unsafe { &mut *self.source }(buffer); + self.import_dmabuf_internal(tranch, dmabuf, surface, Some(buffer), Some(damage)) + } + + fn import_dmabuf( + &mut self, + dmabuf: &Dmabuf, + damage: Option<&[Rectangle]>, + ) -> Result<::TextureId, ::Error> { + self.import_dmabuf_internal(None, dmabuf, None, None, damage) + } +} + +impl<'a, 'b, R: 
GraphicsApi, T: GraphicsApi, Target> MultiRenderer<'a, 'b, R, T, Target> +where + ::Renderer: ImportDma + ImportMem + ExportMem, + ::Renderer: ExportMem, + <::Renderer as ExportMem>::TextureMapping: 'static, + <::Renderer as ExportMem>::TextureMapping: 'static, + T: 'static, + // We need this because the Renderer-impl does and ImportDma requires Renderer + R: 'static, + ::Renderer: Offscreen + ExportDma + ExportMem + ImportDma + ImportMem, + ::Renderer: ImportDma + ImportMem, + <::Renderer as Renderer>::Error: 'static, + <::Renderer as Renderer>::Error: 'static, +{ + fn import_dmabuf_internal( + &mut self, + source: Option, + dmabuf: &Dmabuf, + surface: Option<&SurfaceData>, + buffer: Option<&wl_buffer::WlBuffer>, + damage: Option<&[Rectangle]>, + ) -> Result<::TextureId, ::Error> { + let source = source.filter(|source| source != self.render.node()); + let can_import = source.as_ref().map(|_| CAN_IMPORT.lock().unwrap()); + let might_import = source + .and_then(|source| { + can_import + .as_ref() + .unwrap() + .get(&(source, *self.render.node(), dmabuf.format())) + }) + .copied() + .unwrap_or(true); + + if might_import { + match self.render.renderer_mut().import_dmabuf(dmabuf, damage) { + Ok(imported) => { + if let (Some(ref mut can_import), Some(source)) = (can_import, source) { + can_import.insert((source, *self.render.node(), dmabuf.format()), true); + } + let mut texture = MultiTexture::from_surface(surface, buffer.cloned(), imported.size()); + texture.insert_texture::(*self.render.node(), imported); + if let Some(surface) = surface { + surface.data_map.insert_if_missing(|| texture.0.clone()); + } + return Ok(texture); + } + Err(err) => { + let target = *self.render.node(); + slog::warn!( + self.log, + "Error importing dmabuf (format: {:?}) from {:?} to {}: {}", + dmabuf.format(), + source, + target, + err + ); + slog::info!(self.log, "Falling back to cpu-copy."); + if let (Some(ref mut can_import), Some(source)) = (can_import, source) { + 
can_import.insert((source, target, dmabuf.format()), false); + } + } + } + + // let's check if we don't have a mapping + let mut texture = MultiTexture::from_surface(surface, buffer.cloned(), dmabuf.size()); + let size = texture.0.borrow().size; + let needs_reimport = texture + .0 + .borrow_mut() + .textures + .get_mut(&TypeId::of::()) + .and_then(|nodes_textures| nodes_textures.get_mut(self.render.node())) + .map(|texture| match texture.mapping.as_ref() { + None => true, + // in the few cases where we need to rerender more than was damaged by the client, + // we might have not been continuously rendering this buffer. So we need to assume, + // everything might have been damaged in the meantime. + // In those cases we cannot assume that our existing texture + early-import is sufficiently + // recent and we need to reimport. + Some((_, mappings)) => !damage + .as_ref() + .filter(|_| texture.texture.is_some()) // we need a full import in that case + .map(|x| Vec::from(*x)) + .unwrap_or_else(|| vec![Rectangle::from_loc_and_size((0, 0), size)]) + .into_iter() + .all(|rect| mappings.iter().any(|(region, _)| region.contains_rect(rect))), + }) + .unwrap_or(true); + if needs_reimport { + // no (usable) early-import :( + if let Some(import_renderer) = self.target.as_mut() { + if let Ok(dma_texture) = import_renderer.renderer_mut().import_dmabuf(dmabuf, damage) { + let mappings = damage + .unwrap_or(&[Rectangle::from_loc_and_size((0, 0), dma_texture.size())]) + .iter() + .cloned() + .map(|damage| { + let mapping = import_renderer + .renderer_mut() + .copy_texture(&dma_texture, damage) + .map_err(Error::Target)?; + Ok((damage, mapping)) + }) + .collect::, Error>>()?; + texture.insert_mapping::( + *import_renderer.node(), + *self.render.node(), + dma_texture.size(), + mappings.into_iter(), + ); + } + } + + for import_renderer in self + .other_renderers + .iter_mut() + .filter(|device| source.as_ref().map(|src| src == device.node()).unwrap_or(true)) + { + if let 
Ok(dma_texture) = import_renderer.renderer_mut().import_dmabuf(dmabuf, damage) { + let mappings = damage + .unwrap_or(&[Rectangle::from_loc_and_size((0, 0), texture.size())]) + .iter() + .cloned() + .map(|damage| { + let mapping = import_renderer + .renderer_mut() + .copy_texture(&dma_texture, damage) + .map_err(Error::Render)?; + Ok((damage, mapping)) + }) + .collect::, Error>>()?; + texture.insert_mapping::( + *import_renderer.node(), + *self.render.node(), + texture.size(), + mappings.into_iter(), + ); + } + } + } + // else we have an early import(!) + + let mut texture_ref = texture.0.borrow_mut(); + let tex = texture_ref + .textures + .get_mut(&TypeId::of::()) + .unwrap() + .get_mut(self.render.node()) + .unwrap(); + let (foreign_node, mappings) = tex.mapping.take().unwrap(); + if tex.texture.is_none() { + // full upload + let new_texture = if let Some(source) = self + .target + .as_mut() + .filter(|target| target.node() == &foreign_node) + { + let mapping = ::downcast_ref::< + <::Renderer as ExportMem>::TextureMapping, + >(&*mappings[0].1) + .unwrap(); + let mapped = source + .renderer_mut() + .map_texture(mapping) + .map_err(Error::Target)?; + self.render + .renderer_mut() + .import_memory(mapped, mapping.size(), mapping.flipped()) + .ok() + } else if let Some(source) = self + .other_renderers + .iter_mut() + .find(|device| device.node() == &foreign_node) + { + let mapping = ::downcast_ref::< + <::Renderer as ExportMem>::TextureMapping, + >(&*mappings[0].1) + .unwrap(); + let mapped = source + .renderer_mut() + .map_texture(mapping) + .map_err(Error::Render)?; + self.render + .renderer_mut() + .import_memory(mapped, mapping.size(), mapping.flipped()) + .ok() + } else { + None + }; + tex.texture = Some(Box::new(new_texture) as Box<_>); + } else { + // update + let texture = ::downcast_ref::< + <::Renderer as Renderer>::TextureId, + >(tex.texture.as_ref().unwrap()) + .unwrap(); + if let Some(source) = self + .target + .as_mut() + .filter(|target| target.node() 
== &foreign_node) + { + for (region, mapping) in mappings { + let mapping = ::downcast_ref::< + <::Renderer as ExportMem>::TextureMapping, + >(&*mapping) + .unwrap(); + if let Ok(mapped) = source.renderer_mut().map_texture(mapping) { + let _ = self.render.renderer_mut().update_memory(texture, mapped, region); + } + } + } else if let Some(source) = self + .other_renderers + .iter_mut() + .find(|device| device.node() == &foreign_node) + { + for (region, mapping) in mappings { + let mapping = ::downcast_ref::< + <::Renderer as ExportMem>::TextureMapping, + >(&*mapping) + .unwrap(); + if let Ok(mapped) = source.renderer_mut().map_texture(mapping) { + let _ = self.render.renderer_mut().update_memory(texture, mapped, region); + } + } + }; + } + if let Some(surface) = surface { + surface.data_map.insert_if_missing(|| texture.0.clone()); + } + + std::mem::drop(texture_ref); + Ok(texture) + } +} + +/// [`TextureMapping`](super::TextureMapping)s produced by [`ExportMem`]-implementations of +/// [`MultiRenderer`]s. 
+pub struct MultiTextureMapping(TextureMappingInternal) +where + ::Renderer: ExportMem, + ::Renderer: ExportMem; +enum TextureMappingInternal +where + ::Renderer: ExportMem, + ::Renderer: ExportMem, +{ + Either(<::Renderer as ExportMem>::TextureMapping), + Or(<::Renderer as ExportMem>::TextureMapping), +} +impl fmt::Debug for MultiTextureMapping +where + ::Renderer: ExportMem, + ::Renderer: ExportMem, + <::Renderer as ExportMem>::TextureMapping: fmt::Debug, + <::Renderer as ExportMem>::TextureMapping: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match &self.0 { + TextureMappingInternal::Either(ref mapping) => mapping.fmt(f), + TextureMappingInternal::Or(ref mapping) => mapping.fmt(f), + } + } +} + +impl Texture for MultiTextureMapping +where + ::Renderer: ExportMem, + ::Renderer: ExportMem, +{ + fn size(&self) -> Size { + match self { + MultiTextureMapping::(TextureMappingInternal::Either(x)) => x.size(), + MultiTextureMapping::(TextureMappingInternal::Or(x)) => x.size(), + } + } + + fn width(&self) -> u32 { + match self { + MultiTextureMapping::(TextureMappingInternal::Either(x)) => x.width(), + MultiTextureMapping::(TextureMappingInternal::Or(x)) => x.width(), + } + } + fn height(&self) -> u32 { + match self { + MultiTextureMapping::(TextureMappingInternal::Either(x)) => x.height(), + MultiTextureMapping::(TextureMappingInternal::Or(x)) => x.height(), + } + } +} +impl TextureMapping for MultiTextureMapping +where + ::Renderer: ExportMem, + ::Renderer: ExportMem, +{ + fn flipped(&self) -> bool { + match self { + MultiTextureMapping::(TextureMappingInternal::Either(x)) => x.flipped(), + MultiTextureMapping::(TextureMappingInternal::Or(x)) => x.flipped(), + } + } +} + +impl<'a, 'b, R: GraphicsApi, T: GraphicsApi, Target> ExportMem for MultiRenderer<'a, 'b, R, T, Target> +where + ::Renderer: ExportMem, + ::Renderer: ExportMem, + // We need this because the Renderer-impl does and ExportMem requires Renderer + R: 'static, + R::Error: 
'static, + T::Error: 'static, + ::Renderer: Offscreen + ExportDma + ExportMem + ImportDma + ImportMem, + ::Renderer: ImportDma + ImportMem, + <::Renderer as Renderer>::Error: 'static, + <::Renderer as Renderer>::Error: 'static, +{ + type TextureMapping = MultiTextureMapping; + + fn copy_framebuffer( + &mut self, + region: Rectangle, + ) -> Result::Error> { + if let Some(target) = self.target.as_mut() { + target + .renderer_mut() + .copy_framebuffer(region) + .map(|mapping| MultiTextureMapping(TextureMappingInternal::Either(mapping))) + .map_err(Error::Target) + } else { + self.render + .renderer_mut() + .copy_framebuffer(region) + .map(|mapping| MultiTextureMapping(TextureMappingInternal::Or(mapping))) + .map_err(Error::Render) + } + } + + fn copy_texture( + &mut self, + texture: &Self::TextureId, + region: Rectangle, + ) -> Result { + let tex = texture + .get::(self.render.node()) + .ok_or_else(|| Error::MismatchedDevice(*self.render.node()))?; + self.render + .renderer_mut() + .copy_texture(&*tex, region) + .map(|mapping| MultiTextureMapping(TextureMappingInternal::Or(mapping))) + .map_err(Error::Render) + } + + fn map_texture<'c>( + &mut self, + texture_mapping: &'c Self::TextureMapping, + ) -> Result<&'c [u8], ::Error> { + match texture_mapping { + MultiTextureMapping(TextureMappingInternal::Either(target_mapping)) => self + .target + .as_mut() + .unwrap() + .renderer_mut() + .map_texture(target_mapping) + .map_err(Error::Target), + MultiTextureMapping(TextureMappingInternal::Or(render_mapping)) => self + .render + .renderer_mut() + .map_texture(render_mapping) + .map_err(Error::Render), + } + } +} + +impl<'a, 'b, R: GraphicsApi, T: GraphicsApi, Target> ExportDma for MultiRenderer<'a, 'b, R, T, Target> +where + ::Renderer: ExportDma, + // We need this because the Renderer-impl does and ExportDma requires Renderer + R: 'static, + R::Error: 'static, + T::Error: 'static, + ::Renderer: Offscreen + ExportDma + ExportMem + ImportDma + ImportMem, + ::Renderer: 
ImportDma + ImportMem, + <::Renderer as Renderer>::Error: 'static, + <::Renderer as Renderer>::Error: 'static, +{ + fn export_framebuffer( + &mut self, + size: Size, + ) -> Result::Error> { + if let Some(target) = self.target.as_mut() { + target + .renderer_mut() + .export_framebuffer(size) + .map_err(Error::Target) + } else { + self.render + .renderer_mut() + .export_framebuffer(size) + .map_err(Error::Render) + } + } + fn export_texture( + &mut self, + texture: &::TextureId, + ) -> Result::Error> { + let tex = texture + .get::(self.render.node()) + .ok_or_else(|| Error::MismatchedDevice(*self.render.node()))?; + self.render + .renderer_mut() + .export_texture(&*tex) + .map_err(Error::Render) + } +}