Use more Rust features allowed under REPO_MSRV (#6887)
* chore: remove `std::mem::*` imports now unnecessary with `REPO_MSRV`

`std::mem::{size,align}_of{,_val}` were added to the `std` prelude in Rust
1.80; see
[`rust`#123168](rust-lang/rust#123168).
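
As a minimal illustration (not taken from the diff; the `Vertex` struct is a made-up stand-in), this is what the prelude change enables on Rust 1.80+, and why the explicit imports removed below can simply be deleted:

```rust
// Rust 1.80+ exposes `size_of`, `size_of_val`, `align_of`, and `align_of_val`
// through the prelude, so no `use std::mem::{...}` import is required.
#[repr(C)]
struct Vertex {
    position: [f32; 3],
    color: [f32; 4],
}

fn main() {
    let v = Vertex { position: [0.0; 3], color: [1.0; 4] };
    // Before Rust 1.80, each of these calls needed an import from `std::mem`.
    println!("size_of::<Vertex>()  = {}", size_of::<Vertex>());
    println!("size_of_val(&v)      = {}", size_of_val(&v));
    println!("align_of::<Vertex>() = {}", align_of::<Vertex>());
}
```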

* refactor(benches): s/once_cell::Lazy/std::sync::LazyLock

Weaken our dependence on the `once_cell` crate by using the equivalent
functionality that was upstreamed from `once_cell` into `std`, this time
with what's available in Rust 1.80+.

It's not yet possible to eliminate this dependency entirely, but do what
we can with `REPO_MSRV` for now.
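
A rough sketch of the swap, with a hypothetical `BenchState` standing in for the real benchmark state types (`DeviceState`, `ComputepassState`, and so on); `std::sync::LazyLock`, stable since Rust 1.80, mirrors `once_cell::sync::Lazy`'s `new`/`force` API, which is what makes the replacement largely mechanical:

```rust
use std::sync::LazyLock;

// Hypothetical stand-in for the expensive-to-build state the benches cache.
struct BenchState {
    label: &'static str,
}

impl BenchState {
    fn new() -> Self {
        // Imagine costly instance/adapter/device setup here.
        Self { label: "initialized exactly once" }
    }
}

// Before: static STATE: once_cell::sync::Lazy<BenchState> = Lazy::new(BenchState::new);
// After (Rust 1.80+):
static STATE: LazyLock<BenchState> = LazyLock::new(BenchState::new);

fn main() {
    // Mirrors the benches' `Lazy::force(&state)` pattern: initialize eagerly,
    // outside the timed section, then reuse the cached value afterwards.
    let state = LazyLock::force(&STATE);
    println!("{}", state.label);
}
```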

* chore: remove unnecessarily `allow`'d lint rules under `REPO_MSRV`

* chore: migrate easy `allow`s to `expect` under `REPO_MSRV`

Remove, or convert to `expect`, the clear-cut `allow` attributes that were
easy to figure out.

* chore: `warn` on `clippy::allow_attributes` under `REPO_MSRV`
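
Taken together, the lint-related changes follow the pattern sketched below (the struct is trimmed down from `examples/src/main.rs`, the `reason` string is an illustrative addition not present in the diff, and a toolchain new enough for `#[expect]`, stable since Rust 1.81, is assumed): `#![warn(clippy::allow_attributes)]` flags leftover blanket `allow`s, and `#[expect]` reports a stale suppression through `unfulfilled_lint_expectations` once the lint it covers stops firing.

```rust
// Crate root: mirrors the `#![warn(clippy::allow_attributes)]` added to
// `examples/src/lib.rs`, steering future code toward `expect`.
#![warn(clippy::allow_attributes)]

struct ExampleDesc {
    name: &'static str,
    // Before: `#[allow(dead_code)] // isn't used on native`
    // After: scope the expectation to the configurations where it holds; the
    // compiler warns if `dead_code` ever stops firing for this field.
    #[cfg_attr(
        not(target_arch = "wasm32"),
        expect(dead_code, reason = "only read on web targets")
    )]
    webgpu: bool,
}

fn main() {
    let desc = ExampleDesc {
        name: "hello_triangle",
        webgpu: false,
    };
    println!("{}", desc.name);
}
```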
ErichDonGubler authored Jan 10, 2025
1 parent 450ac2d commit d9cc727
Showing 51 changed files with 103 additions and 105 deletions.
1 change: 0 additions & 1 deletion Cargo.lock


1 change: 0 additions & 1 deletion benches/Cargo.toml
@@ -43,7 +43,6 @@ naga = { workspace = true, features = [
"wgsl-out",
] }
nanorand.workspace = true
once_cell.workspace = true
pollster.workspace = true
profiling.workspace = true
rayon.workspace = true
4 changes: 2 additions & 2 deletions benches/benches/bind_groups.rs
@@ -5,7 +5,7 @@ use std::{

use criterion::{criterion_group, Criterion, Throughput};
use nanorand::{Rng, WyRand};
use once_cell::sync::Lazy;
use std::sync::LazyLock;

use crate::DeviceState;

@@ -60,7 +60,7 @@ impl BindGroupState {
}

fn run_bench(ctx: &mut Criterion) {
let state = Lazy::new(BindGroupState::new);
let state = LazyLock::new(BindGroupState::new);

if !state
.device_state
12 changes: 6 additions & 6 deletions benches/benches/computepass.rs
@@ -5,8 +5,8 @@ use std::{

use criterion::{criterion_group, Criterion, Throughput};
use nanorand::{Rng, WyRand};
use once_cell::sync::Lazy;
use rayon::iter::{IntoParallelIterator, ParallelIterator};
use std::sync::LazyLock;

use crate::DeviceState;

@@ -424,7 +424,7 @@ impl ComputepassState {
}

fn run_bench(ctx: &mut Criterion) {
let state = Lazy::new(ComputepassState::new);
let state = LazyLock::new(ComputepassState::new);

let dispatch_count = dispatch_count();
let dispatch_count_bindless = dispatch_count_bindless();
@@ -449,7 +449,7 @@ fn run_bench(ctx: &mut Criterion) {
group.bench_function(
format!("{cpasses} computepasses x {dispatch_per_pass} dispatches ({label})"),
|b| {
Lazy::force(&state);
LazyLock::force(&state);

b.iter_custom(|iters| {
profiling::scope!("benchmark invocation");
@@ -498,7 +498,7 @@ fn run_bench(ctx: &mut Criterion) {
group.bench_function(
format!("{threads} threads x {dispatch_per_pass} dispatch"),
|b| {
Lazy::force(&state);
LazyLock::force(&state);

b.iter_custom(|iters| {
profiling::scope!("benchmark invocation");
@@ -538,7 +538,7 @@ fn run_bench(ctx: &mut Criterion) {
group.throughput(Throughput::Elements(dispatch_count_bindless as _));

group.bench_function(format!("{dispatch_count_bindless} dispatch"), |b| {
Lazy::force(&state);
LazyLock::force(&state);

b.iter_custom(|iters| {
profiling::scope!("benchmark invocation");
@@ -579,7 +579,7 @@ fn run_bench(ctx: &mut Criterion) {
texture_count + storage_texture_count + storage_buffer_count
),
|b| {
Lazy::force(&state);
LazyLock::force(&state);

b.iter(|| state.device_state.queue.submit([]));
},
12 changes: 6 additions & 6 deletions benches/benches/renderpass.rs
@@ -5,8 +5,8 @@ use std::{

use criterion::{criterion_group, Criterion, Throughput};
use nanorand::{Rng, WyRand};
use once_cell::sync::Lazy;
use rayon::iter::{IntoParallelIterator, ParallelIterator};
use std::sync::LazyLock;

use crate::DeviceState;

@@ -427,7 +427,7 @@ impl RenderpassState {
}

fn run_bench(ctx: &mut Criterion) {
let state = Lazy::new(RenderpassState::new);
let state = LazyLock::new(RenderpassState::new);

let draw_count = draw_count();
let vertex_buffer_count = draw_count * VERTEX_BUFFERS_PER_DRAW;
@@ -450,7 +450,7 @@ fn run_bench(ctx: &mut Criterion) {
group.bench_function(
format!("{rpasses} renderpasses x {draws_per_pass} draws ({label})"),
|b| {
Lazy::force(&state);
LazyLock::force(&state);

b.iter_custom(|iters| {
profiling::scope!("benchmark invocation");
@@ -502,7 +502,7 @@ fn run_bench(ctx: &mut Criterion) {
for threads in [2, 4, 8] {
let draws_per_pass = draw_count / threads;
group.bench_function(format!("{threads} threads x {draws_per_pass} draws"), |b| {
Lazy::force(&state);
LazyLock::force(&state);

b.iter_custom(|iters| {
profiling::scope!("benchmark invocation");
@@ -541,7 +541,7 @@ fn run_bench(ctx: &mut Criterion) {
group.throughput(Throughput::Elements(draw_count as _));

group.bench_function(format!("{draw_count} draws"), |b| {
Lazy::force(&state);
LazyLock::force(&state);

b.iter_custom(|iters| {
profiling::scope!("benchmark invocation");
@@ -577,7 +577,7 @@ fn run_bench(ctx: &mut Criterion) {
texture_count + vertex_buffer_count
),
|b| {
Lazy::force(&state);
LazyLock::force(&state);

b.iter(|| state.device_state.queue.submit([]));
},
6 changes: 3 additions & 3 deletions benches/benches/resource_creation.rs
@@ -1,13 +1,13 @@
use std::time::{Duration, Instant};

use criterion::{criterion_group, Criterion, Throughput};
use once_cell::sync::Lazy;
use rayon::iter::{IntoParallelIterator, ParallelIterator};
use std::sync::LazyLock;

use crate::DeviceState;

fn run_bench(ctx: &mut Criterion) {
let state = Lazy::new(DeviceState::new);
let state = LazyLock::new(DeviceState::new);

const RESOURCES_TO_CREATE: usize = 8;

@@ -19,7 +19,7 @@ fn run_bench(ctx: &mut Criterion) {
group.bench_function(
format!("{threads} threads x {resources_per_thread} resource"),
|b| {
Lazy::force(&state);
LazyLock::force(&state);

b.iter_custom(|iters| {
profiling::scope!("benchmark invocation");
1 change: 0 additions & 1 deletion examples/src/boids/mod.rs
@@ -2,7 +2,6 @@
// adapted from https://github.com/austinEng/webgpu-samples/blob/master/src/examples/computeBoids.ts

use nanorand::{Rng, WyRand};
use std::mem::size_of;
use wgpu::util::DeviceExt;

// number of boid particles to simulate
2 changes: 1 addition & 1 deletion examples/src/bunnymark/mod.rs
@@ -1,6 +1,6 @@
use bytemuck::{Pod, Zeroable};
use nanorand::{Rng, WyRand};
use std::{borrow::Cow, mem::size_of};
use std::borrow::Cow;
use wgpu::util::DeviceExt;
use winit::{
event::{ElementState, KeyEvent},
2 changes: 1 addition & 1 deletion examples/src/cube/mod.rs
@@ -1,5 +1,5 @@
use bytemuck::{Pod, Zeroable};
use std::{f32::consts, mem::size_of};
use std::f32::consts;
use wgpu::util::DeviceExt;

#[repr(C)]
3 changes: 1 addition & 2 deletions examples/src/framework.rs
@@ -362,8 +362,7 @@ async fn start<E: Example>(title: &str) {
}

log::info!("Entering event loop...");
// On native this is a result, but on wasm it's a unit type.
#[allow(clippy::let_unit_value)]
#[cfg_attr(target_arch = "wasm32", expect(clippy::let_unit_value))]
let _ = (event_loop_function)(
window_loop.event_loop,
move |event: Event<()>, target: &EventLoopWindowTarget<()>| {
1 change: 0 additions & 1 deletion examples/src/hello/mod.rs
@@ -1,6 +1,5 @@
/// This example shows how to describe the adapter in use.
async fn run() {
#[cfg_attr(target_arch = "wasm32", allow(unused_variables))]
let adapter = {
let instance = wgpu::Instance::default();
#[cfg(not(target_arch = "wasm32"))]
4 changes: 1 addition & 3 deletions examples/src/hello_compute/mod.rs
@@ -1,10 +1,9 @@
use std::{mem::size_of_val, str::FromStr};
use std::str::FromStr;
use wgpu::util::DeviceExt;

// Indicates a u32 overflow in an intermediate Collatz value
const OVERFLOW: u32 = 0xffffffff;

#[cfg_attr(test, allow(dead_code))]
async fn run() {
let numbers = if std::env::args().len() <= 2 {
let default = vec![1, 2, 3, 4];
@@ -32,7 +31,6 @@ async fn run() {
log::info!("Steps: [{}]", disp_steps.join(", "));
}

#[cfg_attr(test, allow(dead_code))]
async fn execute_gpu(numbers: &[u32]) -> Option<Vec<u32>> {
// Instantiates instance of WebGPU
let instance = wgpu::Instance::default();
4 changes: 0 additions & 4 deletions examples/src/hello_synchronization/mod.rs
@@ -1,14 +1,10 @@
use std::mem::size_of_val;

const ARR_SIZE: usize = 128;

struct ExecuteResults {
patient_workgroup_results: Vec<u32>,
#[cfg_attr(test, allow(unused))]
hasty_workgroup_results: Vec<u32>,
}

#[cfg_attr(test, allow(unused))]
async fn run() {
let instance = wgpu::Instance::default();
let adapter = instance
5 changes: 4 additions & 1 deletion examples/src/hello_triangle/mod.rs
@@ -147,7 +147,10 @@ async fn run(event_loop: EventLoop<()>, window: Window) {

pub fn main() {
let event_loop = EventLoop::new().unwrap();
#[allow(unused_mut)]
#[cfg_attr(
not(target_arch = "wasm32"),
expect(unused_mut, reason = "`wasm32` re-assigns to specify canvas")
)]
let mut builder = winit::window::WindowBuilder::new();
#[cfg(target_arch = "wasm32")]
{
2 changes: 0 additions & 2 deletions examples/src/hello_workgroups/mod.rs
@@ -7,8 +7,6 @@
//!
//! Only parts specific to this example will be commented.
use std::mem::size_of_val;

use wgpu::util::DeviceExt;

async fn run() {
1 change: 1 addition & 0 deletions examples/src/lib.rs
@@ -1,4 +1,5 @@
#![allow(clippy::arc_with_non_send_sync)] // False positive on wasm
#![warn(clippy::allow_attributes)]

pub mod framework;
pub mod utils;
4 changes: 2 additions & 2 deletions examples/src/main.rs
@@ -1,9 +1,9 @@
struct ExampleDesc {
name: &'static str,
function: fn(),
#[allow(dead_code)] // isn't used on native
#[cfg_attr(not(target_arch = "wasm32"), expect(dead_code))]
webgl: bool,
#[allow(dead_code)] // isn't used on native
#[cfg_attr(not(target_arch = "wasm32"), expect(dead_code))]
webgpu: bool,
}

2 changes: 1 addition & 1 deletion examples/src/mipmap/mod.rs
@@ -1,5 +1,5 @@
use bytemuck::{Pod, Zeroable};
use std::{f32::consts, mem::size_of};
use std::f32::consts;
use wgpu::util::DeviceExt;

const TEXTURE_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Rgba8UnormSrgb;
4 changes: 2 additions & 2 deletions examples/src/msaa_line/mod.rs
@@ -7,7 +7,7 @@
//! * Set the primitive_topology to PrimitiveTopology::LineList.
//! * Vertices and Indices describe the two points that make up a line.
use std::{iter, mem::size_of};
use std::iter;

use bytemuck::{Pod, Zeroable};
use wgpu::util::DeviceExt;
@@ -214,7 +214,7 @@ impl crate::framework::Example for Example {
}
}

#[allow(clippy::single_match)]
#[expect(clippy::single_match)]
fn update(&mut self, event: winit::event::WindowEvent) {
match event {
WindowEvent::KeyboardInput {
6 changes: 5 additions & 1 deletion examples/src/ray_cube_compute/mod.rs
@@ -118,13 +118,17 @@ impl<F: Future<Output = Option<wgpu::Error>>> Future for ErrorFuture<F> {
}
}

#[allow(dead_code)]
struct Example {
rt_target: wgpu::Texture,
#[expect(dead_code)]
rt_view: wgpu::TextureView,
#[expect(dead_code)]
sampler: wgpu::Sampler,
#[expect(dead_code)]
uniform_buf: wgpu::Buffer,
#[expect(dead_code)]
vertex_buf: wgpu::Buffer,
#[expect(dead_code)]
index_buf: wgpu::Buffer,
tlas_package: wgpu::TlasPackage,
compute_pipeline: wgpu::ComputePipeline,
2 changes: 0 additions & 2 deletions examples/src/repeated_compute/mod.rs
@@ -5,8 +5,6 @@
//! hello-compute example does not such as mapping buffers
//! and why use the async channels.
use std::mem::size_of_val;

const OVERFLOW: u32 = 0xffffffff;

async fn run() {
2 changes: 1 addition & 1 deletion examples/src/shadow/mod.rs
@@ -1,4 +1,4 @@
use std::{f32::consts, iter, mem::size_of, ops::Range, sync::Arc};
use std::{f32::consts, iter, ops::Range, sync::Arc};

use bytemuck::{Pod, Zeroable};
use wgpu::util::{align_to, DeviceExt};
4 changes: 2 additions & 2 deletions examples/src/skybox/mod.rs
@@ -1,5 +1,5 @@
use bytemuck::{Pod, Zeroable};
use std::{f32::consts, mem::size_of};
use std::f32::consts;
use wgpu::{util::DeviceExt, AstcBlock, AstcChannel};

const IMAGE_SIZE: u32 = 256;
@@ -379,7 +379,7 @@ impl crate::framework::Example for Example {
}
}

#[allow(clippy::single_match)]
#[expect(clippy::single_match)]
fn update(&mut self, event: winit::event::WindowEvent) {
match event {
winit::event::WindowEvent::CursorMoved { position, .. } => {
1 change: 0 additions & 1 deletion examples/src/srgb_blend/mod.rs
@@ -1,5 +1,4 @@
use bytemuck::{Pod, Zeroable};
use std::mem::size_of;
use wgpu::util::DeviceExt;

#[repr(C)]
1 change: 0 additions & 1 deletion examples/src/stencil_triangles/mod.rs
@@ -1,5 +1,4 @@
use bytemuck::{Pod, Zeroable};
use std::mem::size_of;
use wgpu::util::DeviceExt;

#[repr(C)]
2 changes: 0 additions & 2 deletions examples/src/storage_texture/mod.rs
@@ -14,8 +14,6 @@
//! A lot of things aren't explained here via comments. See hello-compute and
//! repeated-compute for code that is more thoroughly commented.
use std::mem::size_of_val;

#[cfg(not(target_arch = "wasm32"))]
use crate::utils::output_image_native;
#[cfg(target_arch = "wasm32")]
5 changes: 1 addition & 4 deletions examples/src/texture_arrays/mod.rs
@@ -1,8 +1,5 @@
use bytemuck::{Pod, Zeroable};
use std::{
mem::size_of,
num::{NonZeroU32, NonZeroU64},
};
use std::num::{NonZeroU32, NonZeroU64};
use wgpu::util::DeviceExt;

#[repr(C)]