Adding "numpy" feature to make numpy & npz optional #241

Merged 6 commits on Oct 15, 2022
Cargo.toml (7 changes: 4 additions & 3 deletions)

@@ -21,19 +21,20 @@ keywords = [
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[package.metadata.docs.rs]
-features = ["nightly"]
+features = ["nightly", "numpy"]

[dependencies]
rand = { version = "0.8.5", features = ["std_rng"] }
rand_distr = { version = "0.4.3", features = [] }
matrixmultiply = { version = "0.3.2", features = [] }
-zip = { version = "0.6.2", features = [] }
+zip = { version = "0.6.2", features = [], optional = true }
cblas-sys = { version = "0.1.4", optional = true }
libc = { version = "0.2", optional = true }

[features]
-default = []
+default = ["numpy"]
nightly = []
+numpy = ["dep:zip"]
cblas = ["dep:cblas-sys", "dep:libc"]
intel-mkl = ["cblas"]

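With `numpy` in the default feature set, downstream crates that want to avoid the `zip` dependency now have to opt out explicitly. A minimal sketch of what that looks like in a consumer's Cargo.toml (the version string is a placeholder):

```toml
[dependencies]
# default-features = false drops "numpy" (and with it the zip dependency);
# any other default feature you still want has to be re-listed under `features`.
dfdx = { version = "...", default-features = false }
```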
examples/06-mnist.rs (1 change: 1 addition & 0 deletions)

@@ -107,6 +107,7 @@ fn main() {
}

// save our model to a .npz file
+#[cfg(feature = "numpy")]
model
.save("mnist-classifier.npz")
.expect("failed to save model");
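Note that the `#[cfg]` attribute here gates just the one save statement rather than the whole function, so the rest of the example compiles with or without the feature. A standalone sketch of the same pattern (values and messages are illustrative only):

```rust
fn main() {
    let accuracy = 0.98f32;
    println!("training finished, accuracy = {accuracy}");

    // Only compiled when the crate is built with the "numpy" feature;
    // without it, this statement is stripped out entirely.
    #[cfg(feature = "numpy")]
    println!("model would be saved to mnist-classifier.npz here");
}
```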
examples/numpy-save-load.rs (10 changes: 8 additions & 2 deletions)

@@ -1,8 +1,9 @@
//! Demonstrates how to use dfdx::numpy to save and load arrays

-use dfdx::numpy as np;
-
+#[cfg(feature = "numpy")]
fn main() {
+    use dfdx::numpy as np;
+
np::save("0d-rs.npy", &1.234).expect("Saving failed");
np::save("1d-rs.npy", &[1.0, 2.0, 3.0]).expect("Saving failed");
np::save("2d-rs.npy", &[[1.0, 2.0, 3.0], [-1.0, -2.0, -3.0]]).expect("Saving failed");
@@ -19,3 +20,8 @@ fn main() {
np::load("2d-rs.npy", &mut expected_2d).expect("Loading failed");
assert_eq!(expected_2d, [[1.0, 2.0, 3.0], [-1.0, -2.0, -3.0]]);
}

+#[cfg(not(feature = "numpy"))]
+fn main() {
+    panic!("Use the 'numpy' feature to run this example");
+}
src/feature_flags.rs (9 changes: 9 additions & 0 deletions)

@@ -24,6 +24,15 @@
//!
//! `build.rs` will fail helpfully if you don't have the correct path/environment variables.
//!
+//! # "numpy"
+//!
+//! Enables saving and loading arrays to .npy files, and saving and loading nn to .npz files.
+//!
+//! Example:
+//! ```toml
+//! dfdx = { version = "...", features = ["numpy"] }
+//! ```
+//!
//! # "nightly"
//!
//! Enables using all features that currently require the nightly rust compiler.
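For the `nn` side, the round trip enabled by this feature looks roughly like the following. This is a sketch pieced together from the APIs exercised elsewhere in this diff (`save`/`load` taking a path string, `Linear` standing in for any model), not part of the patch itself:

```rust
// Build with the "numpy" feature so the npz save/load methods are available.
use dfdx::prelude::*;

fn main() {
    let model: Linear<5, 3> = Default::default();
    model.save("linear.npz").expect("failed to save model");

    // Loading fills an already-constructed model of the same shape in place.
    let mut restored: Linear<5, 3> = Default::default();
    restored.load("linear.npz").expect("failed to load model");
    assert_eq!(restored.weight.data(), model.weight.data());
}
```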
src/lib.rs (1 change: 1 addition & 0 deletions)

@@ -101,6 +101,7 @@ pub mod feature_flags;
pub mod gradients;
pub mod losses;
pub mod nn;
+#[cfg(feature = "numpy")]
pub mod numpy;
pub mod optim;
pub mod tensor;
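Gating the module also means it disappears from generated docs unless docs.rs builds with the feature enabled, which is what the `[package.metadata.docs.rs]` change in Cargo.toml above accounts for.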
src/nn/activations.rs (6 changes: 0 additions & 6 deletions)

@@ -19,9 +19,6 @@ macro_rules! activation_impls {
fn reset_params<R: Rng>(&mut self, _: &mut R) {}
}

-impl SaveToNpz for $struct_name {}
-impl LoadFromNpz for $struct_name {}
-
impl<T: Tensor<Dtype = f32>> Module<T> for $struct_name {
type Output = T;
fn forward(&self, input: T) -> Self::Output {
@@ -66,9 +63,6 @@ impl ResetParams for Softmax {
fn reset_params<R: Rng>(&mut self, _: &mut R) {}
}

-impl SaveToNpz for Softmax {}
-impl LoadFromNpz for Softmax {}
-
impl<T> Module<T> for Softmax
where
T: Reduce<<<T as HasArrayType>::Array as HasLastAxis>::LastAxis>,
src/nn/batchnorm2d.rs (58 changes: 1 addition & 57 deletions)

@@ -1,10 +1,7 @@
-use super::{npz_fread, npz_fwrite, LoadFromNpz, NpzError, SaveToNpz};
use super::{Module, ModuleMut, ResetParams};
use crate::arrays::{HasArrayData, HasAxes};
use crate::devices::{Cpu, FillElements};
use crate::{gradients::*, tensor::*, tensor_ops::*};
-use std::io::{Read, Seek, Write};
-use zip::{result::ZipResult, ZipArchive};

/// Batch normalization for images as described in
/// [Batch Normalization: Accelerating Deep Network Training
@@ -191,34 +188,11 @@ impl<const C: usize> CanUpdateWithGradients for BatchNorm2D<C> {
}
}

-impl<const C: usize> SaveToNpz for BatchNorm2D<C> {
-    fn write<W: Write + Seek>(&self, p: &str, w: &mut zip::ZipWriter<W>) -> ZipResult<()> {
-        npz_fwrite(w, format!("{p}scale.npy"), self.scale.data())?;
-        npz_fwrite(w, format!("{p}bias.npy"), self.bias.data())?;
-        npz_fwrite(w, format!("{p}running_mean.npy"), self.running_mean.data())?;
-        npz_fwrite(w, format!("{p}running_var.npy"), self.running_var.data())?;
-        Ok(())
-    }
-}
-
-impl<const C: usize> LoadFromNpz for BatchNorm2D<C> {
-    fn read<R: Read + Seek>(&mut self, p: &str, r: &mut ZipArchive<R>) -> Result<(), NpzError> {
-        npz_fread(r, format!("{p}scale.npy"), self.scale.mut_data())?;
-        npz_fread(r, format!("{p}bias.npy"), self.bias.mut_data())?;
-        let mean = self.running_mean.mut_data();
-        npz_fread(r, format!("{p}running_mean.npy"), mean)?;
-        let var = self.running_var.mut_data();
-        npz_fread(r, format!("{p}running_var.npy"), var)?;
-        Ok(())
-    }
-}

#[cfg(test)]
mod tests {
use super::*;
-    use crate::{nn::tests::SimpleGradients, tests::assert_close};
+    use crate::tests::assert_close;
    use rand::{rngs::StdRng, SeedableRng};
-    use tempfile::NamedTempFile;

#[test]
fn test_batchnorm2d_3d_forward_mut() {
@@ -322,34 +296,4 @@ mod tests {
],
);
}

-    #[test]
-    fn test_batchnorm2d_save_load() {
-        let mut rng = StdRng::seed_from_u64(13);
-        let mut bn: BatchNorm2D<3> = Default::default();
-
-        assert_eq!(bn.running_mean.data(), &[0.0; 3]);
-        assert_eq!(bn.running_var.data(), &[1.0; 3]);
-        assert_eq!(bn.scale.data(), &[1.0; 3]);
-        assert_eq!(bn.bias.data(), &[0.0; 3]);
-
-        let x1: Tensor3D<3, 4, 5> = TensorCreator::randn(&mut rng);
-        let g = backward(bn.forward_mut(x1.trace()).exp().mean());
-        bn.update(&mut SimpleGradients(g), &mut Default::default());
-
-        assert_ne!(bn.running_mean.data(), &[0.0; 3]);
-        assert_ne!(bn.running_var.data(), &[1.0; 3]);
-        assert_ne!(bn.scale.data(), &[1.0; 3]);
-        assert_ne!(bn.bias.data(), &[0.0; 3]);
-
-        let file = NamedTempFile::new().expect("failed to create tempfile");
-        assert!(bn.save(file.path().to_str().unwrap()).is_ok());
-
-        let mut loaded: BatchNorm2D<3> = Default::default();
-        assert!(loaded.load(file.path().to_str().unwrap()).is_ok());
-        assert_eq!(loaded.scale.data(), bn.scale.data());
-        assert_eq!(loaded.bias.data(), bn.bias.data());
-        assert_eq!(loaded.running_mean.data(), bn.running_mean.data());
-        assert_eq!(loaded.running_var.data(), bn.running_var.data());
-    }
}
src/nn/conv.rs (69 changes: 0 additions & 69 deletions)

@@ -2,8 +2,6 @@ use crate::gradients::{CanUpdateWithGradients, GradientProvider, Tape, UnusedTensors};
use crate::prelude::*;
use rand::Rng;
use rand_distr::Uniform;
-use std::io::{Read, Seek, Write};
-use zip::{result::ZipResult, ZipArchive, ZipWriter};

/// **Requires Nightly** Performs 2d convolutions on 3d and 4d images.
///
@@ -57,30 +55,6 @@ impl<const I: usize, const O: usize, const K: usize, const S: usize, const P: usize>
}
}

-impl<const I: usize, const O: usize, const K: usize, const S: usize, const P: usize> SaveToNpz
-    for Conv2D<I, O, K, S, P>
-{
-    /// Saves [Self::weight] to `{pre}weight.npy` and [Self::bias] to `{pre}bias.npy`
-    /// using [npz_fwrite()].
-    fn write<W: Write + Seek>(&self, pre: &str, w: &mut ZipWriter<W>) -> ZipResult<()> {
-        npz_fwrite(w, format!("{pre}weight.npy"), self.weight.data())?;
-        npz_fwrite(w, format!("{pre}bias.npy"), self.bias.data())?;
-        Ok(())
-    }
-}
-
-impl<const I: usize, const O: usize, const K: usize, const S: usize, const P: usize> LoadFromNpz
-    for Conv2D<I, O, K, S, P>
-{
-    /// Reads [Self::weight] from `{pre}weight.npy` and [Self::bias] from `{pre}bias.npy`
-    /// using [npz_fread()].
-    fn read<R: Read + Seek>(&mut self, pre: &str, r: &mut ZipArchive<R>) -> Result<(), NpzError> {
-        npz_fread(r, format!("{pre}weight.npy"), self.weight.mut_data())?;
-        npz_fread(r, format!("{pre}bias.npy"), self.bias.mut_data())?;
-        Ok(())
-    }
-}

impl<
T: Tape,
const I: usize,
@@ -139,8 +113,6 @@ where
mod tests {
    use super::*;
    use rand::thread_rng;
-    use std::fs::File;
-    use tempfile::NamedTempFile;

#[test]
fn test_forward_3d_sizes() {
@@ -187,47 +159,6 @@ mod tests {
let _: Tensor3D<1, 8, 8> = <(A, B, C)>::default().forward_mut(Img::zeros());
}

-    #[test]
-    fn test_save_conv2d() {
-        let model: Conv2D<2, 4, 3> = Default::default();
-        let file = NamedTempFile::new().expect("failed to create tempfile");
-        model
-            .save(file.path().to_str().unwrap())
-            .expect("failed to save model");
-        let f = File::open(file.path()).expect("failed to open resulting file");
-        let mut zip = ZipArchive::new(f).expect("failed to create zip archive from file");
-        {
-            let weight_file = zip
-                .by_name("weight.npy")
-                .expect("failed to find weight.npy file");
-            assert!(weight_file.size() > 0);
-        }
-        {
-            let bias_file = zip
-                .by_name("bias.npy")
-                .expect("failed to find bias.npy file");
-            assert!(bias_file.size() > 0);
-        }
-    }
-
-    #[test]
-    fn test_load_conv() {
-        let mut rng = thread_rng();
-        let mut saved_model: Conv2D<2, 4, 3> = Default::default();
-        saved_model.reset_params(&mut rng);
-
-        let file = NamedTempFile::new().expect("failed to create tempfile");
-        assert!(saved_model.save(file.path().to_str().unwrap()).is_ok());
-
-        let mut loaded_model: Conv2D<2, 4, 3> = Default::default();
-        assert!(loaded_model.weight.data() != saved_model.weight.data());
-        assert!(loaded_model.bias.data() != saved_model.bias.data());
-
-        assert!(loaded_model.load(file.path().to_str().unwrap()).is_ok());
-        assert_eq!(loaded_model.weight.data(), saved_model.weight.data());
-        assert_eq!(loaded_model.bias.data(), saved_model.bias.data());
-    }

#[test]
fn test_conv_with_optimizer() {
let mut rng = thread_rng();
src/nn/dropout.rs (6 changes: 0 additions & 6 deletions)

@@ -68,9 +68,6 @@ impl<const N: usize> ResetParams for DropoutOneIn<N> {
fn reset_params<R: Rng>(&mut self, _: &mut R) {}
}

-impl<const N: usize> SaveToNpz for DropoutOneIn<N> {}
-impl<const N: usize> LoadFromNpz for DropoutOneIn<N> {}
-
impl<const N: usize, T: Tensor<Dtype = f32, Tape = NoneTape>> Module<T> for DropoutOneIn<N> {
type Output = T;
/// Does nothing
@@ -166,9 +163,6 @@ impl ResetParams for Dropout {
fn reset_params<R: rand::Rng>(&mut self, _: &mut R) {}
}

-impl SaveToNpz for Dropout {}
-impl LoadFromNpz for Dropout {}
-
impl<T: Tensor<Dtype = f32, Tape = NoneTape>> Module<T> for Dropout {
type Output = T;
/// Does nothing.
src/nn/flatten.rs (3 changes: 0 additions & 3 deletions)

@@ -23,9 +23,6 @@ impl CanUpdateWithGradients for FlattenImage {
fn update<G: GradientProvider>(&mut self, _: &mut G, _: &mut UnusedTensors) {}
}

-impl SaveToNpz for FlattenImage {}
-impl LoadFromNpz for FlattenImage {}
-
impl<const M: usize, const N: usize, const O: usize, H: Tape> Module<Tensor3D<M, N, O, H>>
for FlattenImage
where
src/nn/generalized_residual.rs (41 changes: 0 additions & 41 deletions)

@@ -1,7 +1,5 @@
use crate::gradients::{CanUpdateWithGradients, GradientProvider, UnusedTensors};
use crate::prelude::*;
-use std::io::{Read, Seek, Write};
-use zip::{result::ZipResult, ZipArchive, ZipWriter};

/// A residual connection `R` around `F`: `F(x) + R(x)`,
/// as introduced in [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385).
@@ -89,28 +87,11 @@
}
}

-impl<F: SaveToNpz, R: SaveToNpz> SaveToNpz for GeneralizedResidual<F, R> {
-    /// Pass through to `F`/`R`'s [SaveToNpz].
-    fn write<W: Write + Seek>(&self, p: &str, w: &mut ZipWriter<W>) -> ZipResult<()> {
-        self.f.write(&format!("{p}.f"), w)?;
-        self.r.write(&format!("{p}.r"), w)
-    }
-}
-
-impl<F: LoadFromNpz, R: LoadFromNpz> LoadFromNpz for GeneralizedResidual<F, R> {
-    /// Pass through to `F`/`R`'s [LoadFromNpz].
-    fn read<Z: Read + Seek>(&mut self, p: &str, r: &mut ZipArchive<Z>) -> Result<(), NpzError> {
-        self.f.read(&format!("{p}.f"), r)?;
-        self.r.read(&format!("{p}.r"), r)
-    }
-}

#[cfg(test)]
mod tests {
use super::*;
use crate::tests::assert_close;
use rand::{prelude::StdRng, SeedableRng};
-    use tempfile::NamedTempFile;

#[test]
fn test_reset_generalized_residual() {
@@ -148,26 +129,4 @@ mod tests {
assert_close(g.ref_gradient(&model.r.weight), &[[-0.025407, 0.155879]; 2]);
assert_close(g.ref_gradient(&model.r.bias), &[0.5; 2]);
}

-    #[test]
-    fn test_save_load_generalized_residual() {
-        let mut rng = StdRng::seed_from_u64(0);
-        let mut saved_model: GeneralizedResidual<Linear<5, 3>, Linear<5, 3>> = Default::default();
-        saved_model.reset_params(&mut rng);
-
-        let file = NamedTempFile::new().expect("failed to create tempfile");
-        assert!(saved_model.save(file.path().to_str().unwrap()).is_ok());
-
-        let mut loaded_model: GeneralizedResidual<Linear<5, 3>, Linear<5, 3>> = Default::default();
-        assert_ne!(loaded_model.f.weight.data(), saved_model.f.weight.data());
-        assert_ne!(loaded_model.f.bias.data(), saved_model.f.bias.data());
-        assert_ne!(loaded_model.r.weight.data(), saved_model.r.weight.data());
-        assert_ne!(loaded_model.r.bias.data(), saved_model.r.bias.data());
-
-        assert!(loaded_model.load(file.path().to_str().unwrap()).is_ok());
-        assert_eq!(loaded_model.f.weight.data(), saved_model.f.weight.data());
-        assert_eq!(loaded_model.f.bias.data(), saved_model.f.bias.data());
-        assert_eq!(loaded_model.r.weight.data(), saved_model.r.weight.data());
-        assert_eq!(loaded_model.r.bias.data(), saved_model.r.bias.data());
-    }
}