Skip to content

Commit

Permalink
Satisfy cargo fmt
Browse files Browse the repository at this point in the history
  • Loading branch information
TimerErTim committed Feb 15, 2023
1 parent 33a4329 commit 42dab6d
Show file tree
Hide file tree
Showing 11 changed files with 27 additions and 20 deletions.
1 change: 0 additions & 1 deletion examples/01-tensor.rs
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,6 @@ type Device = dfdx::tensor::Cpu;
#[cfg(feature = "cuda")]
type Device = dfdx::tensor::Cuda;


fn main() {
// a device is required to create & modify tensors.
// we will use the Cpu device here for simplicity
Expand Down
1 change: 0 additions & 1 deletion examples/02-ops.rs
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,6 @@ type Device = Cpu;
#[cfg(feature = "cuda")]
type Device = dfdx::tensor::Cuda;


fn main() {
let dev = Device::default();

Expand Down
1 change: 0 additions & 1 deletion examples/03-nn.rs
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,6 @@ type Device = dfdx::tensor::Cpu;
#[cfg(feature = "cuda")]
type Device = dfdx::tensor::Cuda;


fn main() {
let dev = Device::default();

Expand Down
1 change: 0 additions & 1 deletion examples/05-optim.rs
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,6 @@ type Device = dfdx::tensor::Cpu;
#[cfg(feature = "cuda")]
type Device = dfdx::tensor::Cuda;


// first let's declare our neural network to optimize
type Mlp = (
(Linear<5, 32>, ReLU),
Expand Down
18 changes: 11 additions & 7 deletions examples/07-custom-module.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,8 +3,8 @@
use dfdx::{
gradients::Tape,
nn::{
BuildModule,
Module, modules::{Linear, ReLU},
modules::{Linear, ReLU},
BuildModule, Module,
},
shapes::{Rank1, Rank2},
tensor::{HasErr, SampleTensor, Tensor},
Expand All @@ -16,7 +16,6 @@ type Device = dfdx::tensor::Cpu;
#[cfg(feature = "cuda")]
type Device = dfdx::tensor::Cuda;


/// Custom model struct
/// This case is trivial and should be done with a tuple of linears and relus,
/// but it demonstrates how to build models with custom behavior
Expand All @@ -28,7 +27,7 @@ struct Mlp<const IN: usize, const INNER: usize, const OUT: usize> {

// BuildModule lets you randomize a model's parameters
impl<const IN: usize, const INNER: usize, const OUT: usize> BuildModule<Device, f32>
for Mlp<IN, INNER, OUT>
for Mlp<IN, INNER, OUT>
{
fn try_build(device: &Device) -> Result<Self, <Device as HasErr>::Err> {
Ok(Self {
Expand All @@ -41,7 +40,7 @@ for Mlp<IN, INNER, OUT>

// impl Module for single item
impl<const IN: usize, const INNER: usize, const OUT: usize> Module<Tensor<Rank1<IN>, f32, Device>>
for Mlp<IN, INNER, OUT>
for Mlp<IN, INNER, OUT>
{
type Output = Tensor<Rank1<OUT>, f32, Device>;

Expand All @@ -53,8 +52,13 @@ for Mlp<IN, INNER, OUT>
}

// impl Module for batch of items
impl<const BATCH: usize, const IN: usize, const INNER: usize, const OUT: usize, T: Tape<Device>>
Module<Tensor<Rank2<BATCH, IN>, f32, Device, T>> for Mlp<IN, INNER, OUT>
impl<
const BATCH: usize,
const IN: usize,
const INNER: usize,
const OUT: usize,
T: Tape<Device>,
> Module<Tensor<Rank2<BATCH, IN>, f32, Device, T>> for Mlp<IN, INNER, OUT>
{
type Output = Tensor<Rank2<BATCH, OUT>, f32, Device, T>;

Expand Down
1 change: 0 additions & 1 deletion examples/08-tensor-broadcast-reduce.rs
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,6 @@ type Device = dfdx::tensor::Cpu;
#[cfg(feature = "cuda")]
type Device = dfdx::tensor::Cuda;


fn main() {
let dev = Device::default();
let a = dev.tensor([1.0f32, 2.0, 3.0]);
Expand Down
1 change: 0 additions & 1 deletion examples/09-tensor-permute.rs
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,6 @@ type Device = dfdx::tensor::Cpu;
#[cfg(feature = "cuda")]
type Device = dfdx::tensor::Cuda;


fn main() {
let dev = Device::default();

Expand Down
1 change: 0 additions & 1 deletion examples/10-tensor-index.rs
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,6 @@ type Device = dfdx::tensor::Cpu;
#[cfg(feature = "cuda")]
type Device = dfdx::tensor::Cuda;


fn main() {
let dev = Device::default();

Expand Down
6 changes: 4 additions & 2 deletions examples/nightly-resnet18.rs
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,6 @@ fn main() {
let dev = Device::default();
let m = dev.build_module::<Resnet18<1000>, f32>();


let x: Tensor<Rank3<3, 224, 224>, f32, _> = dev.sample_normal();
let start = Instant::now();
for _ in 0..PROBES {
Expand All @@ -64,7 +63,10 @@ fn main() {
for _ in 0..PROBES {
let _y = m.forward(x.clone());
}
println!("Average batched (16) forward: {:?}", start.elapsed() / PROBES);
println!(
"Average batched (16) forward: {:?}",
start.elapsed() / PROBES
);
}

#[cfg(not(feature = "nightly"))]
Expand Down
8 changes: 6 additions & 2 deletions examples/rl-dqn.rs
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,6 @@ type Device = Cpu;
#[cfg(feature = "cuda")]
type Device = Cuda;


const BATCH: usize = 64;
const STATE: usize = 4;
const ACTION: usize = 2;
Expand Down Expand Up @@ -79,6 +78,11 @@ fn main() {
}
target_q_net.clone_from(&q_net);

println!("Epoch {} in {:?}: q loss={:#.3}", epoch + 1, start.elapsed(), total_loss / 20.0);
println!(
"Epoch {} in {:?}: q loss={:#.3}",
epoch + 1,
start.elapsed(),
total_loss / 20.0
);
}
}
8 changes: 6 additions & 2 deletions examples/rl-ppo.rs
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,6 @@ type Device = Cpu;
#[cfg(feature = "cuda")]
type Device = Cuda;


const BATCH: usize = 64;
const STATE: usize = 4;
const ACTION: usize = 2;
Expand Down Expand Up @@ -82,6 +81,11 @@ fn main() {
}
target_pi_net.clone_from(&pi_net);

println!("Epoch {} in {:?}: loss={:#.3}", epoch + 1, start.elapsed(), total_loss / 20.0);
println!(
"Epoch {} in {:?}: loss={:#.3}",
epoch + 1,
start.elapsed(),
total_loss / 20.0
);
}
}

0 comments on commit 42dab6d

Please sign in to comment.