Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Building on #1799 : Add a function for computing a world point from a screen point #4177

Closed
wants to merge 7 commits into the base branch from the author's branch
Closed
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 8 additions & 0 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -165,11 +165,19 @@ path = "examples/2d/text2d.rs"
name = "texture_atlas"
path = "examples/2d/texture_atlas.rs"

[[example]]
name = "mouse_tracking"
path = "examples/2d/mouse_tracking.rs"

# 3D Rendering
[[example]]
name = "3d_scene"
path = "examples/3d/3d_scene.rs"

[[example]]
name = "screen_to_world"
path = "examples/3d/screen_to_world.rs"

[[example]]
name = "lighting"
path = "examples/3d/lighting.rs"
Expand Down
78 changes: 75 additions & 3 deletions crates/bevy_render/src/camera/camera.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,10 @@
use crate::{
camera::CameraProjection, prelude::Image, render_asset::RenderAssets,
render_resource::TextureView, view::ExtractedWindows,
camera::CameraProjection,
prelude::Image,
primitives::{Line, Plane},
render_asset::RenderAssets,
render_resource::TextureView,
view::ExtractedWindows,
};
use bevy_asset::{AssetEvent, Assets, Handle};
use bevy_ecs::{
Expand All @@ -12,7 +16,7 @@ use bevy_ecs::{
reflect::ReflectComponent,
system::{QuerySet, Res},
};
use bevy_math::{Mat4, UVec2, Vec2, Vec3};
use bevy_math::{Mat4, UVec2, Vec2, Vec3, Vec4};
use bevy_reflect::{Reflect, ReflectDeserialize};
use bevy_transform::components::GlobalTransform;
use bevy_utils::HashSet;
Expand Down Expand Up @@ -138,6 +142,74 @@ impl Camera {
None
}
}

/// Given a position in screen space, compute the world-space line that corresponds to it.
pub fn screen_to_world_ray(
&self,
pos_screen: Vec2,
windows: &Windows,
images: &Assets<Image>,
camera_transform: &GlobalTransform,
) -> Line {
let camera_position = camera_transform.compute_matrix();
let window_size = self.target.get_logical_size(windows, images).unwrap();
let projection_matrix = self.projection_matrix;

// Normalized device coordinate cursor position from (-1, -1, -1) to (1, 1, 1)
let cursor_ndc = (pos_screen / window_size) * 2.0 - Vec2::from([1.0, 1.0]);
let cursor_pos_ndc_near: Vec3 = cursor_ndc.extend(-1.0);
let cursor_pos_ndc_far: Vec3 = cursor_ndc.extend(1.0);

// Use near and far ndc points to generate a ray in world space
// This method is more robust than using the location of the camera as the start of
// the ray, because ortho cameras have a focal point at infinity!
let ndc_to_world: Mat4 = camera_position * projection_matrix.inverse();
let cursor_pos_near: Vec3 = ndc_to_world.project_point3(cursor_pos_ndc_near);
let cursor_pos_far: Vec3 = ndc_to_world.project_point3(cursor_pos_ndc_far);
let ray_direction = cursor_pos_far - cursor_pos_near;
Line::from_point_direction(cursor_pos_near, ray_direction)
}

/// Given a position in screen space and a plane in world space, compute what point on the plane the point in screen space corresponds to.
/// In 2D, use `screen_to_point_2d`.
pub fn screen_to_point_on_plane(
&self,
pos_screen: Vec2,
plane: Plane,
windows: &Windows,
images: &Assets<Image>,
camera_transform: &GlobalTransform,
) -> Option<Vec3> {
let world_ray = self.screen_to_world_ray(pos_screen, windows, images, camera_transform);
let d = world_ray.point.dot(plane.normal());
if d == 0. {
None
} else {
let diff = world_ray.point.extend(1.0) - plane.normal_d();
let p = diff.dot(plane.normal_d());
let dist = p / d;
Some(world_ray.point - world_ray.direction * dist)
}
}

/// Computes the world position for a given screen position.
/// The output will always be on the XY plane with Z at zero. It is designed for 2D, but also works with a 3D camera.
/// For more flexibility in 3D, consider `screen_to_point_on_plane`.
pub fn screen_to_point_2d(
&self,
pos_screen: Vec2,
windows: &Windows,
images: &Assets<Image>,
camera_transform: &GlobalTransform,
) -> Option<Vec3> {
self.screen_to_point_on_plane(
pos_screen,
Plane::new(Vec4::new(0., 0., 1., 0.)),
windows,
images,
camera_transform,
)
}
}

#[allow(clippy::type_complexity)]
Expand Down
12 changes: 12 additions & 0 deletions crates/bevy_render/src/primitives/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -192,6 +192,18 @@ impl CubemapFrusta {
}
}

#[derive(Clone, Copy, Debug, Default)]
pub struct Line {
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think we should provide the result as a pair of Vec3's, and not a Line struct.

  1. Based on my previous contributions, cart's preference has been to not wrap in geometric structs
  2. We have a geometric primitive RFC that this may conflict with.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I have very similar code to what's here, and I use a Ray<Vec3>:

pub struct Ray<V> {
    pub origin: V,
    pub direction: V,
}

A ray is useful, because the math is simpler for intersections, and the equation is self.origin + t * self.direction. I use them often. It's also useful to implement Mul<Ray<Vec3>, Output=Ray<Vec3>> for transforms.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The Shapes RFC also has rays.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Agreed a ray is more apt here. My opinion is we defer adding primitives to the RFC impl, especially considering we don't have cart's final blessing.

pub point: Vec3,
pub direction: Vec3,
}

impl Line {
pub fn from_point_direction(point: Vec3, direction: Vec3) -> Self {
Self { point, direction }
}
}

#[cfg(test)]
mod tests {
use super::*;
Expand Down
5 changes: 2 additions & 3 deletions examples/2d/mesh2d_manual.rs
Original file line number Diff line number Diff line change
Expand Up @@ -70,8 +70,8 @@ fn star(
// Set the position attribute
star.insert_attribute(Mesh::ATTRIBUTE_POSITION, v_pos);
// And a RGB color attribute as well
let mut v_color: Vec<u32> = vec![Color::BLACK.as_linear_rgba_u32()];
v_color.extend_from_slice(&[Color::YELLOW.as_linear_rgba_u32(); 10]);
let mut v_color: Vec<u32> = vec![bytemuck::cast([0_u8, 0_u8, 0_u8, 255_u8])];
v_color.extend_from_slice(&[bytemuck::cast([255_u8, 255_u8, 0_u8, 255_u8]); 10]);
star.insert_attribute(Mesh::ATTRIBUTE_COLOR, v_color);

// Now, we specify the indices of the vertex that are going to compose the
Expand Down Expand Up @@ -230,7 +230,6 @@ fn vertex(vertex: Vertex) -> VertexOutput {
var out: VertexOutput;
// Project the world position of the mesh into screen position
out.clip_position = view.view_proj * mesh.model * vec4<f32>(vertex.position, 1.0);
// Unpack the `u32` from the vertex buffer into the `vec4<f32>` used by the fragment shader
out.color = vec4<f32>((vec4<u32>(vertex.color) >> vec4<u32>(0u, 8u, 16u, 24u)) & vec4<u32>(255u)) / 255.0;
return out;
}
Expand Down
43 changes: 43 additions & 0 deletions examples/2d/mouse_tracking.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
use bevy::{prelude::*, render::camera::Camera};

fn main() {
App::new()
.add_plugins(DefaultPlugins)
.add_startup_system(setup)
.add_system(follow)
.run();
}

#[derive(Component)]
struct Follow;

fn setup(mut commands: Commands, asset_server: Res<AssetServer>) {
let texture_handle = asset_server.load("branding/icon.png");
commands.spawn_bundle(OrthographicCameraBundle::new_2d());
commands
.spawn_bundle(SpriteBundle {
texture: texture_handle,
..Default::default()
})
.insert(Follow);
}

fn follow(
mut q: Query<&mut Transform, With<Follow>>,
q_camera: Query<(&Camera, &GlobalTransform)>,
windows: Res<Windows>,
images: Res<Assets<Image>>,
mut evr_cursor: EventReader<CursorMoved>,
) {
let (camera, camera_transform) = q_camera.single();
if let Some(cursor) = evr_cursor.iter().next() {
for mut transform in q.iter_mut() {
let point: Option<Vec3> =
camera.screen_to_point_2d(cursor.position, &windows, &images, camera_transform);
println!("Point {:?}", point);
if let Some(point) = point {
transform.translation = point;
}
}
}
}
71 changes: 71 additions & 0 deletions examples/3d/screen_to_world.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,71 @@
use bevy::{prelude::*, render::camera::Camera, render::primitives::Plane};

fn main() {
App::new()
.insert_resource(Msaa { samples: 4 })
.add_plugins(DefaultPlugins)
.add_startup_system(setup)
.add_system(follow)
.run();
}

#[derive(Component)]
struct Follow;

/// set up a simple 3D scene
fn setup(
mut commands: Commands,
mut meshes: ResMut<Assets<Mesh>>,
mut materials: ResMut<Assets<StandardMaterial>>,
) {
// plane
commands.spawn_bundle(PbrBundle {
mesh: meshes.add(Mesh::from(shape::Plane { size: 5.0 })),
material: materials.add(Color::rgb(0.3, 0.5, 0.3).into()),
..Default::default()
});
// cube
commands
.spawn_bundle(PbrBundle {
mesh: meshes.add(Mesh::from(shape::Cube { size: 1.0 })),
material: materials.add(Color::rgb(0.8, 0.7, 0.6).into()),
transform: Transform::from_xyz(0.0, 0.5, 0.0),
..Default::default()
})
.insert(Follow);
// light
commands.spawn_bundle(PointLightBundle {
transform: Transform::from_xyz(4.0, 8.0, 4.0),
..Default::default()
});
// camera
commands.spawn_bundle(PerspectiveCameraBundle {
transform: Transform::from_xyz(-2.0, 2.5, 5.0).looking_at(Vec3::ZERO, Vec3::Y),
..Default::default()
});
}

fn follow(
mut q: Query<&mut Transform, With<Follow>>,
q_camera: Query<(&Camera, &GlobalTransform)>,
windows: Res<Windows>,
images: Res<Assets<Image>>,
mut evr_cursor: EventReader<CursorMoved>,
) {
// Assumes there is at least one camera
let (camera, camera_transform) = q_camera.iter().next().unwrap();
if let Some(cursor) = evr_cursor.iter().next() {
for mut transform in q.iter_mut() {
let point: Option<Vec3> = camera.screen_to_point_on_plane(
cursor.position,
Plane::new(Vec4::new(0., 1., 0., 1.)),
&windows,
&images,
camera_transform,
);
if let Some(point) = point {
transform.translation = point + Vec3::new(0., 0.5, 0.);
}
}
}
}