
[Breaking] Reduce things exported in prelude #209

Merged · 5 commits · Oct 3, 2022
examples/01-tensor.rs (3 changes: 2 additions & 1 deletion)
@@ -2,7 +2,8 @@

use rand::thread_rng;

-use dfdx::tensor::{tensor, HasArrayData, Tensor1D, Tensor2D, Tensor3D, TensorCreator};
+use dfdx::arrays::HasArrayData;
+use dfdx::tensor::{tensor, Tensor1D, Tensor2D, Tensor3D, TensorCreator};

fn main() {
// easily create tensors using the `tensor` function
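
The same migration pattern repeats throughout the examples below: `HasArrayData` moves from `dfdx::tensor` (and the prelude) into `dfdx::arrays`, while the tensor types and constructors stay where they were. A minimal sketch of the updated import style, assuming the dfdx API as it appears in this diff:

use dfdx::arrays::HasArrayData;
use dfdx::tensor::{tensor, Tensor1D, TensorCreator};

fn main() {
    // build a tensor from an array literal with the `tensor` function
    let t: Tensor1D<3> = tensor([1.0, 2.0, 3.0]);
    // `.data()` comes from `HasArrayData`, now imported from `dfdx::arrays`
    assert_eq!(t.data(), &[1.0, 2.0, 3.0]);

    // constructors like `zeros` come from the `TensorCreator` trait
    let z: Tensor1D<3> = Tensor1D::zeros();
    assert_eq!(z.data(), &[0.0; 3]);
}
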
examples/02-ops.rs (3 changes: 2 additions & 1 deletion)
@@ -2,7 +2,8 @@

use rand::prelude::*;

-use dfdx::tensor::{HasArrayData, Tensor0D, Tensor2D, TensorCreator};
+use dfdx::arrays::HasArrayData;
+use dfdx::tensor::{Tensor0D, Tensor2D, TensorCreator};
use dfdx::tensor_ops::add;

fn main() {
examples/05-optim.rs (3 changes: 2 additions & 1 deletion)
@@ -2,11 +2,12 @@

use rand::prelude::*;

+use dfdx::arrays::HasArrayData;
use dfdx::gradients::{Gradients, OwnedTape};
use dfdx::losses::mse_loss;
use dfdx::nn::{Linear, Module, ReLU, ResetParams, Tanh};
use dfdx::optim::{Momentum, Optimizer, Sgd, SgdConfig};
-use dfdx::tensor::{HasArrayData, Tensor2D, TensorCreator};
+use dfdx::tensor::{Tensor2D, TensorCreator};

// first let's declare our neural network to optimze
type Mlp = (
examples/06-mnist.rs (1 change: 1 addition & 0 deletions)
@@ -2,6 +2,7 @@
//! to build a neural network that learns to recognize
//! the MNIST digits.

+use dfdx::data::SubsetIterator;
use dfdx::prelude::*;
use indicatif::ProgressBar;
use mnist::*;
examples/08-tensor-broadcast-reduce.rs (4 changes: 2 additions & 2 deletions)
@@ -1,8 +1,8 @@
//! Demonstrates broadcasting tensors to different sizes, and axis reductions
//! with BroadcastTo and ReduceTo

-use dfdx::arrays::Axis;
-use dfdx::tensor::{tensor, HasArrayData, Tensor1D, Tensor2D, Tensor4D};
+use dfdx::arrays::{Axis, HasArrayData};
+use dfdx::tensor::{tensor, Tensor1D, Tensor2D, Tensor4D};
use dfdx::tensor_ops::BroadcastTo;

fn main() {
examples/10-tensor-index.rs (3 changes: 2 additions & 1 deletion)
@@ -1,6 +1,7 @@
//! Demonstrates how to select sub tensors (index) from tensors

-use dfdx::tensor::{tensor, HasArrayData, Tensor2D, Tensor3D};
+use dfdx::arrays::HasArrayData;
+use dfdx::tensor::{tensor, Tensor2D, Tensor3D};
use dfdx::tensor_ops::Select1;

fn main() {
src/arrays.rs (6 changes: 6 additions & 0 deletions)
@@ -180,6 +180,12 @@ pub trait HasArrayType {
+ HasLastAxis;
}

+/// Something that has [HasArrayType], and also can return a reference to or mutate `Self::Array`.
+pub trait HasArrayData: HasArrayType {
+    fn data(&self) -> &Self::Array;
+    fn mut_data(&mut self) -> &mut Self::Array;
+}

#[cfg(test)]
mod tests {
use super::*;
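
Since `HasArrayData` is now an ordinary trait in `dfdx::arrays`, it can also serve as a bound in downstream generic code. A small sketch under that assumption (the `debug_array` helper is hypothetical, not part of dfdx):

use dfdx::arrays::HasArrayData;
use dfdx::tensor::{tensor, Tensor1D};

// Hypothetical helper: prints the underlying array of anything that exposes one.
fn debug_array<T: HasArrayData>(t: &T)
where
    T::Array: std::fmt::Debug,
{
    println!("{:?}", t.data());
}

fn main() {
    let t: Tensor1D<3> = tensor([1.0, 2.0, 3.0]);
    debug_array(&t); // prints: [1.0, 2.0, 3.0]
}
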
src/data.rs (15 changes: 8 additions & 7 deletions)
@@ -1,19 +1,21 @@
//! A collection of data utility classes such as [one_hot_encode()] and [SubsetIterator].

-use crate::prelude::*;
use rand::prelude::SliceRandom;

+use crate::arrays::HasArrayData;
+use crate::tensor::{Tensor1D, Tensor2D, TensorCreator};

/// Generates a tensor with ordered data from 0 to `N`.
///
/// Examples:
/// ```rust
-/// # use dfdx::prelude::*;
+/// # use dfdx::{prelude::*, data::arange};
/// let t = arange::<5>();
/// assert_eq!(t.data(), &[0.0, 1.0, 2.0, 3.0, 4.0]);
/// ```
///
/// ```rust
-/// # use dfdx::prelude::*;
+/// # use dfdx::{prelude::*, data::arange};
/// let t: Tensor1D<10> = arange();
/// assert_eq!(t.data(), &[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]);
/// ```
@@ -39,8 +41,7 @@ pub fn arange<const N: usize>() -> Tensor1D<N> {
///
/// Examples:
/// ```rust
-/// # use dfdx::prelude::*;
-///
+/// # use dfdx::{prelude::*, data::one_hot_encode};
/// let class_labels = [0, 1, 2, 1, 1];
/// // NOTE: 5 is the batch size, 3 is the number of classes
/// let probs = one_hot_encode::<5, 3>(&class_labels);
@@ -68,14 +69,14 @@ pub fn one_hot_encode<const B: usize, const N: usize>(class_labels: &[usize; B])
///
/// Iterating a dataset in order:
/// ```rust
-/// # use dfdx::prelude::*;
+/// # use dfdx::{prelude::*, data::SubsetIterator};
/// let mut subsets = SubsetIterator::<5>::in_order(100);
/// assert_eq!(subsets.next(), Some([0, 1, 2, 3, 4]));
/// ```
///
/// Iterating a dataset in random order:
/// ```rust
-/// # use dfdx::prelude::*;
+/// # use dfdx::{prelude::*, data::SubsetIterator};
/// # use rand::prelude::*;
/// let mut rng = StdRng::seed_from_u64(0);
/// let mut subsets = SubsetIterator::<5>::shuffled(100, &mut rng);
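
As the updated doctests indicate, the `data` utilities are no longer glob-exported by the prelude, so callers name `dfdx::data` explicitly. A short sketch in that style, assuming the `one_hot_encode` and `SubsetIterator` APIs shown above:

use dfdx::data::{one_hot_encode, SubsetIterator};
use dfdx::prelude::*;

fn main() {
    // one-hot encode 5 labels over 3 classes into a Tensor2D<5, 3>
    let class_labels = [0, 1, 2, 1, 1];
    let probs = one_hot_encode::<5, 3>(&class_labels);
    assert_eq!(probs.data()[0], [1.0, 0.0, 0.0]);

    // walk a 100-element dataset in order, 5 indices at a time
    let mut subsets = SubsetIterator::<5>::in_order(100);
    assert_eq!(subsets.next(), Some([0, 1, 2, 3, 4]));
}
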
src/devices/fill.rs (2 changes: 1 addition & 1 deletion)
@@ -32,7 +32,7 @@ where
#[cfg(test)]
mod tests {
use super::*;
-use crate::prelude::ZeroElements;
+use crate::arrays::ZeroElements;
use rand::{thread_rng, Rng};

#[test]
src/devices/foreach.rs (2 changes: 1 addition & 1 deletion)
@@ -12,7 +12,7 @@ use crate::arrays::CountElements;
///
/// Examples:
/// ```rust
-/// # use dfdx::prelude::*;
+/// # use dfdx::devices::{Cpu, ForEachElement};
/// let mut a = [[0.0; 3]; 2];
/// let b = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]];
/// Cpu::foreach_mr(&mut a, &b, &mut |x, y| {
src/devices/permute.rs (2 changes: 1 addition & 1 deletion)
@@ -217,7 +217,7 @@ permutations!([0, 1, 2, 3]);
#[cfg(test)]
mod tests {
use super::*;
-use crate::prelude::FillElements;
+use crate::devices::FillElements;
use rand::{thread_rng, Rng};

#[test]
src/gradients.rs (15 changes: 10 additions & 5 deletions)
@@ -1,8 +1,11 @@
//! Implementations of [GradientTape] and generic Nd array containers via [Gradients].

-use crate::prelude::*;
use std::collections::HashMap;

+use crate::arrays::HasArrayType;
+use crate::devices::{AllocateZeros, HasDevice};
+use crate::unique_id::{HasUniqueId, UniqueId};

/// Records gradient computations to execute later.
///
/// The only two things you can do with this are:
@@ -129,7 +132,7 @@ impl Gradients {
///
/// Examples:
/// ```rust
-/// # use dfdx::prelude::*;
+/// # use dfdx::{prelude::*, gradients::*};
/// let a = Tensor1D::new([1.0, 2.0, 3.0]);
/// let b: Tensor1D<5> = Tensor1D::zeros();
/// let mut gradients: Gradients = Default::default();
@@ -183,7 +186,7 @@ impl Gradients {
///
/// Example usage:
/// ```
-/// # use dfdx::prelude::*;
+/// # use dfdx::{prelude::*, gradients::*};
/// let t = Tensor1D::new([1.0, 2.0, 3.0]);
/// let mut gradients: Gradients = Default::default();
/// *gradients.mut_gradient(&t) = [-4.0, 5.0, -6.0];
@@ -202,7 +205,7 @@ impl Gradients {
///
/// Example usage:
/// ```
-/// # use dfdx::prelude::*;
+/// # use dfdx::{prelude::*, gradients::*};
/// let t = Tensor1D::new([1.0, 2.0, 3.0]);
/// let mut gradients: Gradients = Default::default();
/// let g: &mut [f32; 3] = gradients.mut_gradient(&t);
@@ -231,7 +234,7 @@ impl Gradients {
///
/// # Example usage:
/// ```
-/// # use dfdx::prelude::*;
+/// # use dfdx::{prelude::*, gradients::*};
/// let t = Tensor1D::new([1.0, 2.0, 3.0]);
/// let mut gradients: Gradients = Default::default();
/// gradients.mut_gradient(&t);
@@ -303,6 +306,8 @@ impl UnusedTensors {
#[cfg(test)]
mod tests {
use super::*;
+use crate::devices::Cpu;
+use crate::unique_id::unique_id;

struct Tensor {
id: UniqueId,
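
The doctests above now spell out that `Gradients` lives in `dfdx::gradients` and is no longer reachable through the prelude (only the `NoneTape`/`OwnedTape` markers remain there). A minimal sketch of the explicit-import style, using only the accessors shown in those doctests:

use dfdx::gradients::Gradients;
use dfdx::prelude::*;

fn main() {
    let t = Tensor1D::new([1.0, 2.0, 3.0]);
    let mut gradients: Gradients = Default::default();
    // `mut_gradient` hands back mutable access to the gradient tracked for `t`
    let g: &mut [f32; 3] = gradients.mut_gradient(&t);
    *g = [-4.0, 5.0, -6.0];
}
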
src/lib.rs (14 changes: 5 additions & 9 deletions)
@@ -63,7 +63,7 @@
//!
//! 6. Compute gradients with [crate::tensor_ops::backward()]. See [crate::tensor_ops].
//! ```rust
-//! # use dfdx::prelude::*;
+//! # use dfdx::{prelude::*, gradients::Gradients};
//! # let mut rng = rand::thread_rng();
//! # let model: Linear<10, 5> = Default::default();
//! # let y_true: Tensor1D<5> = Tensor1D::randn(&mut rng).softmax();
@@ -76,7 +76,7 @@
//! ```
//! 7. Use an optimizer from [crate::optim] to optimize your network!
//! ```rust
-//! # use dfdx::prelude::*;
+//! # use dfdx::{prelude::*, gradients::Gradients};
//! # let mut rng = rand::thread_rng();
//! # let mut model: Linear<10, 5> = Default::default();
//! # let x: Tensor1D<10> = Tensor1D::zeros();
@@ -108,18 +108,14 @@ pub mod unique_id;

/// Contains all public exports.
pub mod prelude {
-pub use crate::arrays::*;
-pub use crate::data::*;
-pub use crate::devices::*;
-pub use crate::gradients::*;
+pub use crate::arrays::{AllAxes, Axes2, Axes3, Axes4, Axis, HasArrayData};
+pub use crate::devices::HasDevice;
+pub use crate::gradients::{NoneTape, OwnedTape};
pub use crate::losses::*;
pub use crate::nn::*;
pub use crate::optim::*;
pub use crate::tensor::*;
pub use crate::tensor_ops::*;
-pub use crate::unique_id::*;

-pub use crate::{Assert, ConstTrue};
}

#[cfg(not(any(
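
This hunk is the heart of the breaking change: the prelude now re-exports only a curated set (`AllAxes`, the `Axes*`/`Axis` types, `HasArrayData`, `HasDevice`, `NoneTape`, `OwnedTape`, plus the `losses`, `nn`, `optim`, `tensor`, and `tensor_ops` globs), so everything else from `data`, `devices`, `gradients`, `unique_id`, or the crate root needs an explicit path. A sketch of how downstream code migrates, assuming the module layout shown in this diff:

// Previously `dfdx::prelude::*` covered all of this; now the extras are explicit.
use dfdx::data::SubsetIterator; // no longer re-exported by the prelude
use dfdx::gradients::Gradients; // only NoneTape/OwnedTape stay in the prelude
use dfdx::prelude::*;           // still covers tensors, nn, optim, losses, tensor_ops

fn main() {
    let t: Tensor1D<4> = Tensor1D::zeros();
    // `HasArrayData` is one of the items the prelude still re-exports
    assert_eq!(t.data(), &[0.0; 4]);

    let mut subsets = SubsetIterator::<2>::in_order(4);
    assert_eq!(subsets.next(), Some([0, 1]));

    let _grads: Gradients = Default::default();
}
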
src/losses.rs (10 changes: 7 additions & 3 deletions)
@@ -1,6 +1,10 @@
//! Standard loss functions such as [mse_loss()], [cross_entropy_with_logits_loss()], and more.

-use crate::prelude::*;
+use crate::arrays::{AllAxes, HasArrayType, HasLastAxis};
+use crate::tensor::Tensor;
+use crate::tensor_ops::{
+    abs, div_scalar, ln, log_softmax, mean, mul, mul_scalar, negate, sqrt, square, sub, Reduce,
+};

/// [Mean Squared Error](https://en.wikipedia.org/wiki/Mean_squared_error).
/// This computes `(&targ - pred).square().mean()`.
@@ -190,9 +194,9 @@ pub fn binary_cross_entropy_with_logits_loss<T: Reduce<AllAxes>>(

#[cfg(test)]
mod tests {
-use crate::tests::assert_close;

use super::*;
+use crate::prelude::*;
+use crate::tests::assert_close;

#[test]
fn test_mse() {
src/nn/activations.rs (4 changes: 3 additions & 1 deletion)
@@ -1,3 +1,5 @@
+use crate::arrays::{HasArrayType, HasLastAxis};
+use crate::gradients::{CanUpdateWithGradients, GradientProvider, UnusedTensors};
use crate::prelude::*;
use rand::Rng;

@@ -161,7 +163,7 @@ mod tests {

let t = Tensor2D::new([[-2.0, -1.0, 0.0], [1.0, 2.0, 3.0]]);
let r1 = Softmax.forward(t.clone());
-let r2 = t.softmax::<Axis<1>>();
+let r2 = t.softmax::<crate::arrays::Axis<1>>();
assert_eq!(r1.data(), r2.data());
}
}
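
Only crate-internal code needs the fully qualified `crate::arrays::Axis` path here; `Axis` is still re-exported by the reduced prelude, so downstream softmax calls are unaffected. A small sketch, mirroring the test above:

use dfdx::prelude::*;

fn main() {
    let t = Tensor2D::new([[-2.0, -1.0, 0.0], [1.0, 2.0, 3.0]]);
    // the Softmax module and the `softmax` op compute the same thing
    let r1 = Softmax.forward(t.clone());
    let r2 = t.softmax::<Axis<1>>();
    assert_eq!(r1.data(), r2.data());
}
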
src/nn/conv.rs (1 change: 1 addition & 0 deletions)
@@ -1,3 +1,4 @@
+use crate::gradients::{CanUpdateWithGradients, GradientProvider, Tape, UnusedTensors};
use crate::prelude::*;
use rand::Rng;
use rand_distr::Uniform;
src/nn/dropout.rs (2 changes: 2 additions & 0 deletions)
@@ -1,4 +1,6 @@
+use crate::gradients::{CanUpdateWithGradients, GradientProvider, UnusedTensors};
use crate::prelude::*;
+use crate::unique_id::unique_id;
use rand::{prelude::StdRng, Rng, SeedableRng};
use std::{cell::RefCell, ops::DerefMut};

src/nn/flatten.rs (2 changes: 2 additions & 0 deletions)
@@ -1,4 +1,6 @@
+use crate::gradients::{CanUpdateWithGradients, GradientProvider, Tape, UnusedTensors};
use crate::prelude::*;
+use crate::{Assert, ConstTrue};

/// **Requires Nightly** Flattens 3d tensors to 1d, and 4d tensors to 2d.
///
src/nn/generalized_residual.rs (1 change: 1 addition & 0 deletions)
@@ -1,3 +1,4 @@
+use crate::gradients::{CanUpdateWithGradients, GradientProvider, UnusedTensors};
use crate::prelude::*;

/// A residual connection `R` around `F`: `F(x) + R(x)`,
src/nn/impl_module_for_tuples.rs (2 changes: 2 additions & 0 deletions)
@@ -1,3 +1,4 @@
+use crate::gradients::{CanUpdateWithGradients, GradientProvider, UnusedTensors};
use crate::prelude::*;
use rand::prelude::Rng;
use std::io::{Read, Seek, Write};
@@ -93,6 +94,7 @@ tuple_impls!([A, B, C, D, E, F] [0, 1, 2, 3, 4, 5], F, [E, D, C, B, A]);
mod tests {
use super::*;
use crate::nn::tests::SimpleGradients;
+use crate::unique_id::HasUniqueId;
use rand::{prelude::StdRng, SeedableRng};
use std::fs::File;
use tempfile::NamedTempFile;
src/nn/layer_norm.rs (8 changes: 6 additions & 2 deletions)
@@ -1,3 +1,6 @@
+use crate::arrays::Axis;
+use crate::devices::{Cpu, FillElements};
+use crate::gradients::{CanUpdateWithGradients, GradientProvider, Tape, UnusedTensors};
use crate::prelude::*;
use std::io::{Read, Seek, Write};
use zip::{result::ZipResult, ZipArchive};
@@ -21,8 +24,8 @@ use zip::{result::ZipResult, ZipArchive};
/// ```
#[derive(Debug, Clone)]
pub struct LayerNorm1D<const M: usize> {
-pub gamma: Tensor1D<M, NoneTape>,
-pub beta: Tensor1D<M, NoneTape>,
+pub gamma: Tensor1D<M>,
+pub beta: Tensor1D<M>,
pub epsilon: f32,
}

@@ -125,6 +128,7 @@ impl<const M: usize> LoadFromNpz for LayerNorm1D<M> {
mod tests {
use super::*;
use crate::nn::tests::SimpleGradients;
+use crate::unique_id::HasUniqueId;
use rand::{prelude::StdRng, SeedableRng};
use rand_distr::Standard;
use std::fs::File;
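
Dropping `NoneTape` from the field types works because it is the default tape parameter on tensors, so `Tensor1D<M>` and `Tensor1D<M, NoneTape>` name the same type; `NoneTape` itself also remains in the prelude. A small sketch under that assumption:

use dfdx::prelude::*;

fn main() {
    // the explicit and the defaulted annotation describe the same type
    let a: Tensor1D<3> = Tensor1D::zeros();
    let b: Tensor1D<3, NoneTape> = Tensor1D::zeros();
    assert_eq!(a.data(), b.data());
}
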
src/nn/linear.rs (6 changes: 4 additions & 2 deletions)
@@ -1,3 +1,4 @@
+use crate::gradients::{CanUpdateWithGradients, GradientProvider, Tape, UnusedTensors};
use crate::prelude::*;
use rand::Rng;
use rand_distr::Uniform;
@@ -25,10 +26,10 @@ use zip::{result::ZipResult, ZipArchive, ZipWriter};
#[derive(Default, Debug, Clone)]
pub struct Linear<const I: usize, const O: usize> {
/// Transposed weight matrix, shape (O, I)
-pub weight: Tensor2D<O, I, NoneTape>,
+pub weight: Tensor2D<O, I>,

/// Bias vector, shape (O, )
-pub bias: Tensor1D<O, NoneTape>,
+pub bias: Tensor1D<O>,
}

impl<const I: usize, const O: usize> CanUpdateWithGradients for Linear<I, O> {
@@ -113,6 +114,7 @@ impl<const B: usize, const S: usize, const I: usize, const O: usize, H: Tape>
#[cfg(test)]
mod tests {
use super::*;
+use crate::unique_id::HasUniqueId;
use crate::{nn::tests::SimpleGradients, tests::assert_close};
use rand::{prelude::StdRng, SeedableRng};
use std::fs::File;