diff --git a/benches/pq.rs b/benches/pq.rs
index 2d87781..d2d5869 100644
--- a/benches/pq.rs
+++ b/benches/pq.rs
@@ -7,12 +7,12 @@ use rand_distr::Normal;
 use test::Bencher;
 
 use reductive::ndarray_rand::RandomExt;
-use reductive::pq::{QuantizeVector, TrainPQ, PQ};
+use reductive::pq::{QuantizeVector, TrainPq, Pq};
 
 #[bench]
 fn pq_quantize(bencher: &mut Bencher) {
     let data: Array2<f32> = Array2::random((100, 128), Normal::new(0., 1.).unwrap());
-    let pq = PQ::train_pq(16, 4, 10, 1, data.view()).unwrap();
+    let pq = Pq::train_pq(16, 4, 10, 1, data.view()).unwrap();
 
     bencher.iter(|| {
         for v in data.outer_iter() {
@@ -24,7 +24,7 @@ fn pq_quantize(bencher: &mut Bencher) {
 #[bench]
 fn pq_quantize_batch(bencher: &mut Bencher) {
     let data: Array2<f32> = Array2::random((100, 128), Normal::new(0., 1.).unwrap());
-    let pq = PQ::train_pq(16, 4, 10, 1, data.view()).unwrap();
+    let pq = Pq::train_pq(16, 4, 10, 1, data.view()).unwrap();
 
     bencher.iter(|| {
         let _: Array2<u8> = pq.quantize_batch(data.view());
diff --git a/src/pq/gaussian_opq.rs b/src/pq/gaussian_opq.rs
index 55fee85..efcbeec 100644
--- a/src/pq/gaussian_opq.rs
+++ b/src/pq/gaussian_opq.rs
@@ -6,7 +6,7 @@ use ndarray_linalg::types::Scalar;
 use num_traits::AsPrimitive;
 use rand::{CryptoRng, RngCore, SeedableRng};
 
-use super::{TrainPQ, OPQ, PQ};
+use super::{Pq, TrainPq, OPQ};
 
 /// Optimized product quantizer for Gaussian variables (Ge et al., 2013).
 ///
@@ -23,7 +23,7 @@ use super::{TrainPQ, OPQ, PQ};
 /// quantization.
 pub struct GaussianOPQ;
 
-impl<A> TrainPQ<A> for GaussianOPQ
+impl<A> TrainPq<A> for GaussianOPQ
 where
     A: Lapack + NdFloat + Scalar + Sum,
     A::Real: NdFloat,
@@ -36,12 +36,12 @@ where
         n_attempts: usize,
         instances: ArrayBase<S, Ix2>,
         rng: &mut R,
-    ) -> Result<PQ<A>, rand::Error>
+    ) -> Result<Pq<A>, rand::Error>
     where
         S: Sync + Data<Elem = A>,
         R: CryptoRng + RngCore + SeedableRng + Send,
     {
-        PQ::check_quantizer_invariants(
+        Pq::check_quantizer_invariants(
             n_subquantizers,
             n_subquantizer_bits,
             n_iterations,
@@ -51,7 +51,7 @@ where
 
         let projection = OPQ::create_projection_matrix(instances.view(), n_subquantizers);
         let rx = instances.dot(&projection);
-        let pq = PQ::train_pq_using(
+        let pq = Pq::train_pq_using(
             n_subquantizers,
             n_subquantizer_bits,
             n_iterations,
@@ -60,7 +60,7 @@ where
             rng,
         )?;
 
-        Ok(PQ {
+        Ok(Pq {
             projection: Some(projection),
             quantizers: pq.quantizers,
         })
@@ -77,12 +77,12 @@ mod tests {
     use super::GaussianOPQ;
     use crate::linalg::EuclideanDistance;
     use crate::ndarray_rand::RandomExt;
-    use crate::pq::{QuantizeVector, ReconstructVector, TrainPQ, PQ};
+    use crate::pq::{Pq, QuantizeVector, ReconstructVector, TrainPq};
 
     /// Calculate the average euclidean distances between the given
     /// instances and the instances returned by quantizing and then
     /// reconstructing the instances.
-    fn avg_euclidean_loss(instances: ArrayView2<f32>, quantizer: &PQ<f32>) -> f32 {
+    fn avg_euclidean_loss(instances: ArrayView2<f32>, quantizer: &Pq<f32>) -> f32 {
         let mut euclidean_loss = 0f32;
 
         let quantized: Array2<usize> = quantizer.quantize_batch(instances);
diff --git a/src/pq/mod.rs b/src/pq/mod.rs
index 86602b0..34a62e0 100644
--- a/src/pq/mod.rs
+++ b/src/pq/mod.rs
@@ -14,7 +14,7 @@ pub(crate) mod primitives;
 
 #[allow(clippy::module_inception)]
 mod pq;
-pub use self::pq::PQ;
+pub use self::pq::Pq;
 
 mod traits;
-pub use self::traits::{QuantizeVector, ReconstructVector, TrainPQ};
+pub use self::traits::{QuantizeVector, ReconstructVector, TrainPq};
diff --git a/src/pq/opq.rs b/src/pq/opq.rs
index 8152cbf..c2cf281 100644
--- a/src/pq/opq.rs
+++ b/src/pq/opq.rs
@@ -18,7 +18,7 @@ use crate::kmeans::KMeansIteration;
 use crate::linalg::Covariance;
 
 use super::primitives;
-use super::{TrainPQ, PQ};
+use super::{Pq, TrainPq};
 
 /// Optimized product quantizer (Ge et al., 2013).
 ///
@@ -37,7 +37,7 @@ use super::{TrainPQ, PQ};
 /// no effect.
 pub struct OPQ;
 
-impl<A> TrainPQ<A> for OPQ
+impl<A> TrainPq<A> for OPQ
 where
     A: Lapack + NdFloat + Scalar + Sum,
     A::Real: NdFloat,
@@ -50,12 +50,12 @@ where
         _n_attempts: usize,
         instances: ArrayBase<S, Ix2>,
         mut rng: &mut R,
-    ) -> Result<PQ<A>, rand::Error>
+    ) -> Result<Pq<A>, rand::Error>
     where
         S: Sync + Data<Elem = A>,
         R: RngCore,
     {
-        PQ::check_quantizer_invariants(
+        Pq::check_quantizer_invariants(
             n_subquantizers,
             n_subquantizer_bits,
             n_iterations,
@@ -92,7 +92,7 @@ where
             );
         }
 
-        Ok(PQ {
+        Ok(Pq {
             projection: Some(projection),
             quantizers,
         })
@@ -147,7 +147,7 @@ impl OPQ {
     {
         (0..n_subquantizers)
             .map(|sq| {
-                PQ::subquantizer_initial_centroids(
+                Pq::subquantizer_initial_centroids(
                     sq,
                     n_subquantizers,
                     codebook_len,
@@ -282,12 +282,12 @@ mod tests {
     use super::OPQ;
     use crate::linalg::EuclideanDistance;
     use crate::ndarray_rand::RandomExt;
-    use crate::pq::{QuantizeVector, ReconstructVector, TrainPQ, PQ};
+    use crate::pq::{Pq, QuantizeVector, ReconstructVector, TrainPq};
 
     /// Calculate the average euclidean distances between the given
     /// instances and the instances returned by quantizing and then
     /// reconstructing the instances.
-    fn avg_euclidean_loss(instances: ArrayView2<f32>, quantizer: &PQ<f32>) -> f32 {
+    fn avg_euclidean_loss(instances: ArrayView2<f32>, quantizer: &Pq<f32>) -> f32 {
         let mut euclidean_loss = 0f32;
 
         let quantized: Array2<usize> = quantizer.quantize_batch(instances);
diff --git a/src/pq/pq.rs b/src/pq/pq.rs
index cebc6bc..24daf3a 100644
--- a/src/pq/pq.rs
+++ b/src/pq/pq.rs
@@ -12,7 +12,7 @@ use rand::{Rng, RngCore, SeedableRng};
 use rayon::prelude::*;
 
 use super::primitives;
-use super::{QuantizeVector, ReconstructVector, TrainPQ};
+use super::{QuantizeVector, ReconstructVector, TrainPq};
 use crate::kmeans::{
     InitialCentroids, KMeansWithCentroids, NIterationsCondition, RandomInstanceCentroids,
 };
@@ -25,12 +25,12 @@ use rand_xorshift::XorShiftRng;
 /// *i*-th subquantizer. Vector reconstruction consists of concatenating
 /// the centroids that represent the slices.
 #[derive(Clone, Debug, PartialEq)]
-pub struct PQ<A> {
+pub struct Pq<A> {
     pub(crate) projection: Option<Array2<A>>,
     pub(crate) quantizers: Array3<A>,
 }
 
-impl<A> PQ<A>
+impl<A> Pq<A>
 where
     A: NdFloat,
 {
@@ -53,7 +53,7 @@ where
             );
         }
 
-        PQ {
+        Pq {
             projection,
             quantizers,
         }
@@ -156,7 +156,7 @@ where
         let sq_instances = instances.slice(s![.., offset..offset + sq_dims]);
 
         iter::repeat_with(|| {
-            let mut quantizer = PQ::subquantizer_initial_centroids(
+            let mut quantizer = Pq::subquantizer_initial_centroids(
                 subquantizer_idx,
                 n_subquantizers,
                 codebook_len,
@@ -183,7 +183,7 @@ where
     }
 }
 
-impl<A> TrainPQ<A> for PQ<A>
+impl<A> TrainPq<A> for Pq<A>
 where
     A: NdFloat + Sum,
     usize: AsPrimitive<A>,
@@ -195,7 +195,7 @@ where
         n_attempts: usize,
         instances: ArrayBase<S, Ix2>,
         mut rng: &mut R,
-    ) -> Result<PQ<A>, rand::Error>
+    ) -> Result<Pq<A>, rand::Error>
     where
         S: Sync + Data<Elem = A>,
         R: RngCore + SeedableRng + Send,
@@ -231,14 +231,14 @@ where
 
         let views = quantizers.iter().map(|a| a.view()).collect::<Vec<_>>();
 
-        Ok(PQ {
+        Ok(Pq {
             projection: None,
             quantizers: concatenate(Axis(0), &views).expect("Cannot concatenate subquantizers"),
         })
     }
 }
 
-impl<A> QuantizeVector<A> for PQ<A>
+impl<A> QuantizeVector<A> for Pq<A>
 where
     A: NdFloat + Sum,
 {
@@ -291,7 +291,7 @@ where
     }
 }
 
-impl<A> ReconstructVector<A> for PQ<A>
+impl<A> ReconstructVector<A> for Pq<A>
 where
     A: NdFloat + Sum,
 {
@@ -349,15 +349,15 @@ mod tests {
     use rand::SeedableRng;
     use rand_chacha::ChaCha8Rng;
 
-    use super::PQ;
+    use super::Pq;
     use crate::linalg::EuclideanDistance;
     use crate::ndarray_rand::RandomExt;
-    use crate::pq::{QuantizeVector, ReconstructVector, TrainPQ};
+    use crate::pq::{QuantizeVector, ReconstructVector, TrainPq};
 
     /// Calculate the average euclidean distances between the given
     /// instances and the instances returned by quantizing and then
     /// reconstructing the instances.
-    fn avg_euclidean_loss(instances: ArrayView2<f32>, quantizer: &PQ<f32>) -> f32 {
+    fn avg_euclidean_loss(instances: ArrayView2<f32>, quantizer: &Pq<f32>) -> f32 {
         let mut euclidean_loss = 0f32;
 
         let quantized: Array2<usize> = quantizer.quantize_batch(instances);
@@ -392,10 +392,10 @@ mod tests {
         ]
     }
 
-    fn test_pq() -> PQ<f32> {
+    fn test_pq() -> Pq<f32> {
         let quantizers = array![[[1., 0., 0.], [0., 1., 0.]], [[1., -1., 0.], [0., 1., 0.]],];
 
-        PQ {
+        Pq {
             projection: None,
             quantizers,
         }
@@ -428,7 +428,7 @@ mod tests {
         let mut rng = ChaCha8Rng::seed_from_u64(42);
         let uniform = Uniform::new(0f32, 1f32);
         let instances = Array2::random_using((256, 20), uniform, &mut rng);
-        let pq = PQ::train_pq_using(10, 7, 10, 1, instances.view(), &mut rng).unwrap();
+        let pq = Pq::train_pq_using(10, 7, 10, 1, instances.view(), &mut rng).unwrap();
         let loss = avg_euclidean_loss(instances.view(), &pq);
         // Loss is around 0.077.
         assert!(loss < 0.08);
@@ -437,7 +437,7 @@ mod tests {
     #[test]
     fn quantize_with_type() {
         let uniform = Uniform::new(0f32, 1f32);
-        let pq = PQ {
+        let pq = Pq {
             projection: None,
             quantizers: Array3::random((1, 256, 10), uniform),
         };
@@ -448,7 +448,7 @@ mod tests {
     #[should_panic]
     fn quantize_with_too_narrow_type() {
         let uniform = Uniform::new(0f32, 1f32);
-        let pq = PQ {
+        let pq = Pq {
             projection: None,
             quantizers: Array3::random((1, 257, 10), uniform),
         };
diff --git a/src/pq/traits.rs b/src/pq/traits.rs
index 4d3e4f3..e987ceb 100644
--- a/src/pq/traits.rs
+++ b/src/pq/traits.rs
@@ -3,13 +3,13 @@ use num_traits::{AsPrimitive, Bounded, Zero};
 use rand::{CryptoRng, RngCore, SeedableRng};
 use rand_chacha::ChaCha8Rng;
 
-use crate::pq::PQ;
+use crate::pq::Pq;
 
 /// Training trait for product quantizers.
 ///
 /// This trait specifies the training functions for product
 /// quantizers.
-pub trait TrainPQ<A> {
+pub trait TrainPq<A> {
     /// Train a product quantizer with the ChaCha8 PRNG.
     ///
     /// Train a product quantizer with `n_subquantizers` subquantizers
@@ -23,7 +23,7 @@ pub trait TrainPQ<A> {
         n_iterations: usize,
         n_attempts: usize,
         instances: ArrayBase<S, Ix2>,
-    ) -> Result<PQ<A>, rand::Error>
+    ) -> Result<Pq<A>, rand::Error>
     where
         S: Sync + Data<Elem = A>,
     {
@@ -56,7 +56,7 @@ pub trait TrainPQ<A> {
         n_attempts: usize,
         instances: ArrayBase<S, Ix2>,
         rng: &mut R,
-    ) -> Result<PQ<A>, rand::Error>
+    ) -> Result<Pq<A>, rand::Error>
     where
        S: Sync + Data<Elem = A>,
        R: CryptoRng + RngCore + SeedableRng + Send;
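
Since the rename is purely mechanical, downstream code only needs to swap the identifiers (`PQ` -> `Pq`, `TrainPQ` -> `TrainPq`). For reference, a minimal sketch of caller code against the renamed API, modeled on the `train_pq`/`quantize_batch` calls in benches/pq.rs above; the data shape (256 x 64) and the quantizer settings (8 subquantizers, 4 bits each) are illustrative values, not taken from this patch:

    use ndarray::Array2;
    use rand_distr::Normal;

    use reductive::ndarray_rand::RandomExt;
    use reductive::pq::{Pq, QuantizeVector, TrainPq};

    fn main() {
        // 256 standard-normal training instances of dimensionality 64.
        let data: Array2<f32> = Array2::random((256, 64), Normal::new(0., 1.).unwrap());

        // Train a product quantizer: 8 subquantizers of 4 bits
        // (2^4 = 16 centroids) each, 10 k-means iterations, 1 attempt.
        let pq = Pq::train_pq(8, 4, 10, 1, data.view()).unwrap();

        // Quantization yields one code per subquantizer for each
        // instance; u8 is wide enough to hold 4-bit codes.
        let codes: Array2<u8> = pq.quantize_batch(data.view());
        assert_eq!(codes.shape(), &[256, 8]);
    }

The same calls work with `OPQ` or `GaussianOPQ` in place of `Pq`, since all three implement `TrainPq` (see the impl blocks above); the optimized variants additionally store a learned projection matrix in the returned `Pq`.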