From 1167d19935f79320d666bf8549f053229567d778 Mon Sep 17 00:00:00 2001 From: JustForFun88 Date: Fri, 1 Sep 2023 17:39:32 +0500 Subject: [PATCH] Make allocator not clone and document all changes --- src/external_trait_impls/rayon/map.rs | 40 +- src/external_trait_impls/rayon/raw.rs | 18 +- src/external_trait_impls/rayon/set.rs | 34 +- src/external_trait_impls/serde.rs | 20 +- src/map.rs | 186 ++++--- src/raw/mod.rs | 761 +++++++++++++++++++------- src/rustc_entry.rs | 26 +- src/set.rs | 138 +++-- 8 files changed, 777 insertions(+), 446 deletions(-) diff --git a/src/external_trait_impls/rayon/map.rs b/src/external_trait_impls/rayon/map.rs index 1124bfd326..2534dc9b2b 100644 --- a/src/external_trait_impls/rayon/map.rs +++ b/src/external_trait_impls/rayon/map.rs @@ -232,11 +232,11 @@ impl fmt::Debug for ParValuesMut<'_, K, V> { /// [`into_par_iter`]: /hashbrown/struct.HashMap.html#method.into_par_iter /// [`HashMap`]: /hashbrown/struct.HashMap.html /// [`IntoParallelIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelIterator.html -pub struct IntoParIter { +pub struct IntoParIter { inner: RawIntoParIter<(K, V), A>, } -impl ParallelIterator for IntoParIter { +impl ParallelIterator for IntoParIter { type Item = (K, V); #[cfg_attr(feature = "inline-more", inline)] @@ -248,9 +248,7 @@ impl ParallelIterator for IntoPar } } -impl fmt::Debug - for IntoParIter -{ +impl fmt::Debug for IntoParIter { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { ParIter { inner: unsafe { self.inner.par_iter() }, @@ -267,11 +265,11 @@ impl fmt::Debug /// /// [`par_drain`]: /hashbrown/struct.HashMap.html#method.par_drain /// [`HashMap`]: /hashbrown/struct.HashMap.html -pub struct ParDrain<'a, K, V, A: Allocator + Clone = Global> { +pub struct ParDrain<'a, K, V, A: Allocator = Global> { inner: RawParDrain<'a, (K, V), A>, } -impl ParallelIterator for ParDrain<'_, K, V, A> { +impl ParallelIterator for ParDrain<'_, K, V, A> { type Item = (K, V); #[cfg_attr(feature = "inline-more", inline)] @@ -283,9 +281,7 @@ impl ParallelIterator for ParDrai } } -impl fmt::Debug - for ParDrain<'_, K, V, A> -{ +impl fmt::Debug for ParDrain<'_, K, V, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { ParIter { inner: unsafe { self.inner.par_iter() }, @@ -295,7 +291,7 @@ impl fmt::Debug } } -impl HashMap { +impl HashMap { /// Visits (potentially in parallel) immutably borrowed keys in an arbitrary order. #[cfg_attr(feature = "inline-more", inline)] pub fn par_keys(&self) -> ParKeys<'_, K, V> { @@ -315,7 +311,7 @@ impl HashMap { } } -impl HashMap { +impl HashMap { /// Visits (potentially in parallel) mutably borrowed values in an arbitrary order. #[cfg_attr(feature = "inline-more", inline)] pub fn par_values_mut(&mut self) -> ParValuesMut<'_, K, V> { @@ -340,7 +336,7 @@ where K: Eq + Hash + Sync, V: PartialEq + Sync, S: BuildHasher + Sync, - A: Allocator + Clone + Sync, + A: Allocator + Sync, { /// Returns `true` if the map is equal to another, /// i.e. both maps contain the same keys mapped to the same values. 
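The parallel-equality method documented above keeps its behavior; only the `A: Clone` requirement is dropped. A minimal usage sketch (assuming hashbrown is built with the `rayon` feature; `par_eq` is the method this impl block defines):

    use hashbrown::HashMap;

    fn example() {
        let a: HashMap<u32, &str> = [(1, "one"), (2, "two")].into_iter().collect();
        let b: HashMap<u32, &str> = [(2, "two"), (1, "one")].into_iter().collect();
        // Compares the maps in parallel: equal iff both contain the same
        // keys mapped to the same values.
        assert!(a.par_eq(&b));
    }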
@@ -354,9 +350,7 @@ where } } -impl IntoParallelIterator - for HashMap -{ +impl IntoParallelIterator for HashMap { type Item = (K, V); type Iter = IntoParIter; @@ -368,9 +362,7 @@ impl IntoParallelIterator } } -impl<'a, K: Sync, V: Sync, S, A: Allocator + Clone> IntoParallelIterator - for &'a HashMap -{ +impl<'a, K: Sync, V: Sync, S, A: Allocator> IntoParallelIterator for &'a HashMap { type Item = (&'a K, &'a V); type Iter = ParIter<'a, K, V>; @@ -383,9 +375,7 @@ impl<'a, K: Sync, V: Sync, S, A: Allocator + Clone> IntoParallelIterator } } -impl<'a, K: Sync, V: Send, S, A: Allocator + Clone> IntoParallelIterator - for &'a mut HashMap -{ +impl<'a, K: Sync, V: Send, S, A: Allocator> IntoParallelIterator for &'a mut HashMap { type Item = (&'a K, &'a mut V); type Iter = ParIterMut<'a, K, V>; @@ -424,7 +414,7 @@ where K: Eq + Hash + Send, V: Send, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { fn par_extend(&mut self, par_iter: I) where @@ -440,7 +430,7 @@ where K: Copy + Eq + Hash + Sync, V: Copy + Sync, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { fn par_extend(&mut self, par_iter: I) where @@ -456,7 +446,7 @@ where K: Eq + Hash, S: BuildHasher, I: IntoParallelIterator, - A: Allocator + Clone, + A: Allocator, HashMap: Extend, { let (list, len) = super::helpers::collect(par_iter); diff --git a/src/external_trait_impls/rayon/raw.rs b/src/external_trait_impls/rayon/raw.rs index 50db74c6d3..612be47a55 100644 --- a/src/external_trait_impls/rayon/raw.rs +++ b/src/external_trait_impls/rayon/raw.rs @@ -75,18 +75,18 @@ impl UnindexedProducer for ParIterProducer { } /// Parallel iterator which consumes a table and returns elements. -pub struct RawIntoParIter { +pub struct RawIntoParIter { table: RawTable, } -impl RawIntoParIter { +impl RawIntoParIter { #[cfg_attr(feature = "inline-more", inline)] pub(super) unsafe fn par_iter(&self) -> RawParIter { self.table.par_iter() } } -impl ParallelIterator for RawIntoParIter { +impl ParallelIterator for RawIntoParIter { type Item = T; #[cfg_attr(feature = "inline-more", inline)] @@ -108,23 +108,23 @@ impl ParallelIterator for RawIntoParIter { +pub struct RawParDrain<'a, T, A: Allocator = Global> { // We don't use a &'a mut RawTable because we want RawParDrain to be // covariant over T. table: NonNull>, marker: PhantomData<&'a RawTable>, } -unsafe impl Send for RawParDrain<'_, T, A> {} +unsafe impl Send for RawParDrain<'_, T, A> {} -impl RawParDrain<'_, T, A> { +impl RawParDrain<'_, T, A> { #[cfg_attr(feature = "inline-more", inline)] pub(super) unsafe fn par_iter(&self) -> RawParIter { self.table.as_ref().par_iter() } } -impl ParallelIterator for RawParDrain<'_, T, A> { +impl ParallelIterator for RawParDrain<'_, T, A> { type Item = T; #[cfg_attr(feature = "inline-more", inline)] @@ -142,7 +142,7 @@ impl ParallelIterator for RawParDrain<'_, T, A> { } } -impl Drop for RawParDrain<'_, T, A> { +impl Drop for RawParDrain<'_, T, A> { fn drop(&mut self) { // If drive_unindexed is not called then simply clear the table. unsafe { @@ -203,7 +203,7 @@ impl Drop for ParDrainProducer { } } -impl RawTable { +impl RawTable { /// Returns a parallel iterator over the elements in a `RawTable`. 
#[cfg_attr(feature = "inline-more", inline)] pub unsafe fn par_iter(&self) -> RawParIter { diff --git a/src/external_trait_impls/rayon/set.rs b/src/external_trait_impls/rayon/set.rs index ee4f6e6693..3de98fccb8 100644 --- a/src/external_trait_impls/rayon/set.rs +++ b/src/external_trait_impls/rayon/set.rs @@ -16,11 +16,11 @@ use rayon::iter::{FromParallelIterator, IntoParallelIterator, ParallelExtend, Pa /// [`into_par_iter`]: /hashbrown/struct.HashSet.html#method.into_par_iter /// [`HashSet`]: /hashbrown/struct.HashSet.html /// [`IntoParallelIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelIterator.html -pub struct IntoParIter { +pub struct IntoParIter { inner: map::IntoParIter, } -impl ParallelIterator for IntoParIter { +impl ParallelIterator for IntoParIter { type Item = T; fn drive_unindexed(self, consumer: C) -> C::Result @@ -38,11 +38,11 @@ impl ParallelIterator for IntoParIter { +pub struct ParDrain<'a, T, A: Allocator = Global> { inner: map::ParDrain<'a, T, (), A>, } -impl ParallelIterator for ParDrain<'_, T, A> { +impl ParallelIterator for ParDrain<'_, T, A> { type Item = T; fn drive_unindexed(self, consumer: C) -> C::Result @@ -85,7 +85,7 @@ impl<'a, T: Sync> ParallelIterator for ParIter<'a, T> { /// /// [`par_difference`]: /hashbrown/struct.HashSet.html#method.par_difference /// [`HashSet`]: /hashbrown/struct.HashSet.html -pub struct ParDifference<'a, T, S, A: Allocator + Clone = Global> { +pub struct ParDifference<'a, T, S, A: Allocator = Global> { a: &'a HashSet, b: &'a HashSet, } @@ -94,7 +94,7 @@ impl<'a, T, S, A> ParallelIterator for ParDifference<'a, T, S, A> where T: Eq + Hash + Sync, S: BuildHasher + Sync, - A: Allocator + Clone + Sync, + A: Allocator + Sync, { type Item = &'a T; @@ -118,7 +118,7 @@ where /// /// [`par_symmetric_difference`]: /hashbrown/struct.HashSet.html#method.par_symmetric_difference /// [`HashSet`]: /hashbrown/struct.HashSet.html -pub struct ParSymmetricDifference<'a, T, S, A: Allocator + Clone = Global> { +pub struct ParSymmetricDifference<'a, T, S, A: Allocator = Global> { a: &'a HashSet, b: &'a HashSet, } @@ -127,7 +127,7 @@ impl<'a, T, S, A> ParallelIterator for ParSymmetricDifference<'a, T, S, A> where T: Eq + Hash + Sync, S: BuildHasher + Sync, - A: Allocator + Clone + Sync, + A: Allocator + Sync, { type Item = &'a T; @@ -150,7 +150,7 @@ where /// /// [`par_intersection`]: /hashbrown/struct.HashSet.html#method.par_intersection /// [`HashSet`]: /hashbrown/struct.HashSet.html -pub struct ParIntersection<'a, T, S, A: Allocator + Clone = Global> { +pub struct ParIntersection<'a, T, S, A: Allocator = Global> { a: &'a HashSet, b: &'a HashSet, } @@ -159,7 +159,7 @@ impl<'a, T, S, A> ParallelIterator for ParIntersection<'a, T, S, A> where T: Eq + Hash + Sync, S: BuildHasher + Sync, - A: Allocator + Clone + Sync, + A: Allocator + Sync, { type Item = &'a T; @@ -181,7 +181,7 @@ where /// /// [`par_union`]: /hashbrown/struct.HashSet.html#method.par_union /// [`HashSet`]: /hashbrown/struct.HashSet.html -pub struct ParUnion<'a, T, S, A: Allocator + Clone = Global> { +pub struct ParUnion<'a, T, S, A: Allocator = Global> { a: &'a HashSet, b: &'a HashSet, } @@ -190,7 +190,7 @@ impl<'a, T, S, A> ParallelIterator for ParUnion<'a, T, S, A> where T: Eq + Hash + Sync, S: BuildHasher + Sync, - A: Allocator + Clone + Sync, + A: Allocator + Sync, { type Item = &'a T; @@ -216,7 +216,7 @@ impl HashSet where T: Eq + Hash + Sync, S: BuildHasher + Sync, - A: Allocator + Clone + Sync, + A: Allocator + Sync, { /// Visits (potentially in parallel) the values 
representing the union, /// i.e. all the values in `self` or `other`, without duplicates. @@ -289,7 +289,7 @@ where impl HashSet where T: Eq + Hash + Send, - A: Allocator + Clone + Send, + A: Allocator + Send, { /// Consumes (potentially in parallel) all values in an arbitrary order, /// while preserving the set's allocated memory for reuse. @@ -301,7 +301,7 @@ where } } -impl IntoParallelIterator for HashSet { +impl IntoParallelIterator for HashSet { type Item = T; type Iter = IntoParIter; @@ -313,7 +313,7 @@ impl IntoParallelIterator for HashSet IntoParallelIterator for &'a HashSet { +impl<'a, T: Sync, S, A: Allocator> IntoParallelIterator for &'a HashSet { type Item = &'a T; type Iter = ParIter<'a, T>; @@ -374,7 +374,7 @@ fn extend(set: &mut HashSet, par_iter: I) where T: Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, I: IntoParallelIterator, HashSet: Extend, { diff --git a/src/external_trait_impls/serde.rs b/src/external_trait_impls/serde.rs index c5f6e25910..0a76dbec25 100644 --- a/src/external_trait_impls/serde.rs +++ b/src/external_trait_impls/serde.rs @@ -27,7 +27,7 @@ mod map { K: Serialize + Eq + Hash, V: Serialize, H: BuildHasher, - A: Allocator + Clone, + A: Allocator, { #[cfg_attr(feature = "inline-more", inline)] fn serialize(&self, serializer: S) -> Result @@ -43,7 +43,7 @@ mod map { K: Deserialize<'de> + Eq + Hash, V: Deserialize<'de>, S: BuildHasher + Default, - A: Allocator + Clone + Default, + A: Allocator + Default, { fn deserialize(deserializer: D) -> Result where @@ -51,7 +51,7 @@ mod map { { struct MapVisitor where - A: Allocator + Clone, + A: Allocator, { marker: PhantomData>, } @@ -61,7 +61,7 @@ mod map { K: Deserialize<'de> + Eq + Hash, V: Deserialize<'de>, S: BuildHasher + Default, - A: Allocator + Clone + Default, + A: Allocator + Default, { type Value = HashMap; @@ -112,7 +112,7 @@ mod set { where T: Serialize + Eq + Hash, H: BuildHasher, - A: Allocator + Clone, + A: Allocator, { #[cfg_attr(feature = "inline-more", inline)] fn serialize(&self, serializer: S) -> Result @@ -127,7 +127,7 @@ mod set { where T: Deserialize<'de> + Eq + Hash, S: BuildHasher + Default, - A: Allocator + Clone + Default, + A: Allocator + Default, { fn deserialize(deserializer: D) -> Result where @@ -135,7 +135,7 @@ mod set { { struct SeqVisitor where - A: Allocator + Clone, + A: Allocator, { marker: PhantomData>, } @@ -144,7 +144,7 @@ mod set { where T: Deserialize<'de> + Eq + Hash, S: BuildHasher + Default, - A: Allocator + Clone + Default, + A: Allocator + Default, { type Value = HashSet; @@ -184,13 +184,13 @@ mod set { { struct SeqInPlaceVisitor<'a, T, S, A>(&'a mut HashSet) where - A: Allocator + Clone; + A: Allocator; impl<'a, 'de, T, S, A> Visitor<'de> for SeqInPlaceVisitor<'a, T, S, A> where T: Deserialize<'de> + Eq + Hash, S: BuildHasher + Default, - A: Allocator + Clone, + A: Allocator, { type Value = (); diff --git a/src/map.rs b/src/map.rs index 15b3c023cb..edd1977717 100644 --- a/src/map.rs +++ b/src/map.rs @@ -185,7 +185,7 @@ pub enum DefaultHashBuilder {} /// .iter().cloned().collect(); /// // use the values stored in map /// ``` -pub struct HashMap { +pub struct HashMap { pub(crate) hash_builder: S, pub(crate) table: RawTable<(K, V), A>, } @@ -324,7 +324,7 @@ impl HashMap { } #[cfg(feature = "ahash")] -impl HashMap { +impl HashMap { /// Creates an empty `HashMap` using the given allocator. 
/// /// The hash map is initially created with a capacity of 0, so it will not allocate until it @@ -505,7 +505,7 @@ impl HashMap { } } -impl HashMap { +impl HashMap { /// Returns a reference to the underlying allocator. #[inline] pub fn allocator(&self) -> &A { @@ -1069,7 +1069,7 @@ impl HashMap where K: Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { /// Reserves capacity for at least `additional` more elements to be inserted /// in the `HashMap`. The collection may reserve more space to avoid @@ -1936,7 +1936,7 @@ where } } -impl HashMap { +impl HashMap { /// Creates a raw entry builder for the HashMap. /// /// Raw entries provide the lowest level of control for searching and @@ -2167,7 +2167,7 @@ where K: Eq + Hash, V: PartialEq, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { fn eq(&self, other: &Self) -> bool { if self.len() != other.len() { @@ -2184,7 +2184,7 @@ where K: Eq + Hash, V: Eq, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { } @@ -2192,7 +2192,7 @@ impl Debug for HashMap where K: Debug, V: Debug, - A: Allocator + Clone, + A: Allocator, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_map().entries(self.iter()).finish() @@ -2202,7 +2202,7 @@ where impl Default for HashMap where S: Default, - A: Default + Allocator + Clone, + A: Default + Allocator, { /// Creates an empty `HashMap`, with the `Default` value for the hasher and allocator. /// @@ -2230,7 +2230,7 @@ where K: Eq + Hash, Q: Hash + Equivalent, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { type Output = V; @@ -2261,7 +2261,7 @@ where impl From<[(K, V); N]> for HashMap where K: Eq + Hash, - A: Default + Allocator + Clone, + A: Default + Allocator, { /// # Examples /// @@ -2406,11 +2406,11 @@ impl IterMut<'_, K, V> { /// assert_eq!(iter.next(), None); /// assert_eq!(iter.next(), None); /// ``` -pub struct IntoIter { +pub struct IntoIter { inner: RawIntoIter<(K, V), A>, } -impl IntoIter { +impl IntoIter { /// Returns a iterator of references over the remaining items. 
#[cfg_attr(feature = "inline-more", inline)] pub(super) fn iter(&self) -> Iter<'_, K, V> { @@ -2450,11 +2450,11 @@ impl IntoIter { /// assert_eq!(keys.next(), None); /// assert_eq!(keys.next(), None); /// ``` -pub struct IntoKeys { +pub struct IntoKeys { inner: IntoIter, } -impl Iterator for IntoKeys { +impl Iterator for IntoKeys { type Item = K; #[inline] @@ -2467,16 +2467,16 @@ impl Iterator for IntoKeys { } } -impl ExactSizeIterator for IntoKeys { +impl ExactSizeIterator for IntoKeys { #[inline] fn len(&self) -> usize { self.inner.len() } } -impl FusedIterator for IntoKeys {} +impl FusedIterator for IntoKeys {} -impl fmt::Debug for IntoKeys { +impl fmt::Debug for IntoKeys { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list() .entries(self.inner.iter().map(|(k, _)| k)) @@ -2512,11 +2512,11 @@ impl fmt::Debug for IntoKeys /// assert_eq!(values.next(), None); /// assert_eq!(values.next(), None); /// ``` -pub struct IntoValues { +pub struct IntoValues { inner: IntoIter, } -impl Iterator for IntoValues { +impl Iterator for IntoValues { type Item = V; #[inline] @@ -2529,16 +2529,16 @@ impl Iterator for IntoValues { } } -impl ExactSizeIterator for IntoValues { +impl ExactSizeIterator for IntoValues { #[inline] fn len(&self) -> usize { self.inner.len() } } -impl FusedIterator for IntoValues {} +impl FusedIterator for IntoValues {} -impl fmt::Debug for IntoValues { +impl fmt::Debug for IntoValues { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list() .entries(self.inner.iter().map(|(_, v)| v)) @@ -2670,11 +2670,11 @@ impl fmt::Debug for Values<'_, K, V> { /// assert_eq!(drain_iter.next(), None); /// assert_eq!(drain_iter.next(), None); /// ``` -pub struct Drain<'a, K, V, A: Allocator + Clone = Global> { +pub struct Drain<'a, K, V, A: Allocator = Global> { inner: RawDrain<'a, (K, V), A>, } -impl Drain<'_, K, V, A> { +impl Drain<'_, K, V, A> { /// Returns a iterator of references over the remaining items. 
#[cfg_attr(feature = "inline-more", inline)] pub(super) fn iter(&self) -> Iter<'_, K, V> { @@ -2717,7 +2717,7 @@ impl Drain<'_, K, V, A> { /// assert_eq!(map.len(), 1); /// ``` #[must_use = "Iterators are lazy unless consumed"] -pub struct ExtractIf<'a, K, V, F, A: Allocator + Clone = Global> +pub struct ExtractIf<'a, K, V, F, A: Allocator = Global> where F: FnMut(&K, &mut V) -> bool, { @@ -2728,7 +2728,7 @@ where impl Iterator for ExtractIf<'_, K, V, F, A> where F: FnMut(&K, &mut V) -> bool, - A: Allocator + Clone, + A: Allocator, { type Item = (K, V); @@ -2746,12 +2746,12 @@ where impl FusedIterator for ExtractIf<'_, K, V, F> where F: FnMut(&K, &mut V) -> bool {} /// Portions of `ExtractIf` shared with `set::ExtractIf` -pub(super) struct ExtractIfInner<'a, K, V, A: Allocator + Clone> { +pub(super) struct ExtractIfInner<'a, K, V, A: Allocator> { pub iter: RawIter<(K, V)>, pub table: &'a mut RawTable<(K, V), A>, } -impl ExtractIfInner<'_, K, V, A> { +impl ExtractIfInner<'_, K, V, A> { #[cfg_attr(feature = "inline-more", inline)] pub(super) fn next(&mut self, f: &mut F) -> Option<(K, V)> where @@ -2855,7 +2855,7 @@ pub struct ValuesMut<'a, K, V> { /// /// assert_eq!(map.len(), 6); /// ``` -pub struct RawEntryBuilderMut<'a, K, V, S, A: Allocator + Clone = Global> { +pub struct RawEntryBuilderMut<'a, K, V, S, A: Allocator = Global> { map: &'a mut HashMap, } @@ -2943,7 +2943,7 @@ pub struct RawEntryBuilderMut<'a, K, V, S, A: Allocator + Clone = Global> { /// vec.sort_unstable(); /// assert_eq!(vec, [('a', 10), ('b', 20), ('c', 30), ('d', 40), ('e', 50), ('f', 60)]); /// ``` -pub enum RawEntryMut<'a, K, V, S, A: Allocator + Clone = Global> { +pub enum RawEntryMut<'a, K, V, S, A: Allocator = Global> { /// An occupied entry. /// /// # Examples @@ -3034,7 +3034,7 @@ pub enum RawEntryMut<'a, K, V, S, A: Allocator + Clone = Global> { /// assert_eq!(map.get(&"b"), None); /// assert_eq!(map.len(), 1); /// ``` -pub struct RawOccupiedEntryMut<'a, K, V, S, A: Allocator + Clone = Global> { +pub struct RawOccupiedEntryMut<'a, K, V, S, A: Allocator = Global> { elem: Bucket<(K, V)>, table: &'a mut RawTable<(K, V), A>, hash_builder: &'a S, @@ -3045,7 +3045,7 @@ where K: Send, V: Send, S: Send, - A: Send + Allocator + Clone, + A: Send + Allocator, { } unsafe impl Sync for RawOccupiedEntryMut<'_, K, V, S, A> @@ -3053,7 +3053,7 @@ where K: Sync, V: Sync, S: Sync, - A: Sync + Allocator + Clone, + A: Sync + Allocator, { } @@ -3105,7 +3105,7 @@ where /// } /// assert!(map[&"c"] == 30 && map.len() == 3); /// ``` -pub struct RawVacantEntryMut<'a, K, V, S, A: Allocator + Clone = Global> { +pub struct RawVacantEntryMut<'a, K, V, S, A: Allocator = Global> { table: &'a mut RawTable<(K, V), A>, hash_builder: &'a S, } @@ -3144,11 +3144,11 @@ pub struct RawVacantEntryMut<'a, K, V, S, A: Allocator + Clone = Global> { /// assert_eq!(map.raw_entry().from_key_hashed_nocheck(hash, &k), kv); /// } /// ``` -pub struct RawEntryBuilder<'a, K, V, S, A: Allocator + Clone = Global> { +pub struct RawEntryBuilder<'a, K, V, S, A: Allocator = Global> { map: &'a HashMap, } -impl<'a, K, V, S, A: Allocator + Clone> RawEntryBuilderMut<'a, K, V, S, A> { +impl<'a, K, V, S, A: Allocator> RawEntryBuilderMut<'a, K, V, S, A> { /// Creates a `RawEntryMut` from the given key. 
/// /// # Examples @@ -3205,7 +3205,7 @@ impl<'a, K, V, S, A: Allocator + Clone> RawEntryBuilderMut<'a, K, V, S, A> { } } -impl<'a, K, V, S, A: Allocator + Clone> RawEntryBuilderMut<'a, K, V, S, A> { +impl<'a, K, V, S, A: Allocator> RawEntryBuilderMut<'a, K, V, S, A> { /// Creates a `RawEntryMut` from the given hash and matching function. /// /// # Examples @@ -3256,7 +3256,7 @@ impl<'a, K, V, S, A: Allocator + Clone> RawEntryBuilderMut<'a, K, V, S, A> { } } -impl<'a, K, V, S, A: Allocator + Clone> RawEntryBuilder<'a, K, V, S, A> { +impl<'a, K, V, S, A: Allocator> RawEntryBuilder<'a, K, V, S, A> { /// Access an immutable entry by key. /// /// # Examples @@ -3349,7 +3349,7 @@ impl<'a, K, V, S, A: Allocator + Clone> RawEntryBuilder<'a, K, V, S, A> { } } -impl<'a, K, V, S, A: Allocator + Clone> RawEntryMut<'a, K, V, S, A> { +impl<'a, K, V, S, A: Allocator> RawEntryMut<'a, K, V, S, A> { /// Sets the value of the entry, and returns a RawOccupiedEntryMut. /// /// # Examples @@ -3543,7 +3543,7 @@ impl<'a, K, V, S, A: Allocator + Clone> RawEntryMut<'a, K, V, S, A> { } } -impl<'a, K, V, S, A: Allocator + Clone> RawOccupiedEntryMut<'a, K, V, S, A> { +impl<'a, K, V, S, A: Allocator> RawOccupiedEntryMut<'a, K, V, S, A> { /// Gets a reference to the key in the entry. /// /// # Examples @@ -3942,7 +3942,7 @@ impl<'a, K, V, S, A: Allocator + Clone> RawOccupiedEntryMut<'a, K, V, S, A> { } } -impl<'a, K, V, S, A: Allocator + Clone> RawVacantEntryMut<'a, K, V, S, A> { +impl<'a, K, V, S, A: Allocator> RawVacantEntryMut<'a, K, V, S, A> { /// Sets the value of the entry with the VacantEntry's key, /// and returns a mutable reference to it. /// @@ -4088,13 +4088,13 @@ impl<'a, K, V, S, A: Allocator + Clone> RawVacantEntryMut<'a, K, V, S, A> { } } -impl Debug for RawEntryBuilderMut<'_, K, V, S, A> { +impl Debug for RawEntryBuilderMut<'_, K, V, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("RawEntryBuilder").finish() } } -impl Debug for RawEntryMut<'_, K, V, S, A> { +impl Debug for RawEntryMut<'_, K, V, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { RawEntryMut::Vacant(ref v) => f.debug_tuple("RawEntry").field(v).finish(), @@ -4103,7 +4103,7 @@ impl Debug for RawEntryMut<'_, K, V } } -impl Debug for RawOccupiedEntryMut<'_, K, V, S, A> { +impl Debug for RawOccupiedEntryMut<'_, K, V, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("RawOccupiedEntryMut") .field("key", self.key()) @@ -4112,13 +4112,13 @@ impl Debug for RawOccupiedEntryMut< } } -impl Debug for RawVacantEntryMut<'_, K, V, S, A> { +impl Debug for RawVacantEntryMut<'_, K, V, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("RawVacantEntryMut").finish() } } -impl Debug for RawEntryBuilder<'_, K, V, S, A> { +impl Debug for RawEntryBuilder<'_, K, V, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("RawEntryBuilder").finish() } @@ -4169,7 +4169,7 @@ impl Debug for RawEntryBuilder<'_, K, V, S, A> { /// ``` pub enum Entry<'a, K, V, S, A = Global> where - A: Allocator + Clone, + A: Allocator, { /// An occupied entry. 
/// @@ -4202,7 +4202,7 @@ where Vacant(VacantEntry<'a, K, V, S, A>), } -impl Debug for Entry<'_, K, V, S, A> { +impl Debug for Entry<'_, K, V, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { Entry::Vacant(ref v) => f.debug_tuple("Entry").field(v).finish(), @@ -4251,7 +4251,7 @@ impl Debug for Entry<'_, K, V, S, A /// assert_eq!(map.get(&"c"), None); /// assert_eq!(map.len(), 2); /// ``` -pub struct OccupiedEntry<'a, K, V, S = DefaultHashBuilder, A: Allocator + Clone = Global> { +pub struct OccupiedEntry<'a, K, V, S = DefaultHashBuilder, A: Allocator = Global> { hash: u64, key: Option, elem: Bucket<(K, V)>, @@ -4263,7 +4263,7 @@ where K: Send, V: Send, S: Send, - A: Send + Allocator + Clone, + A: Send + Allocator, { } unsafe impl Sync for OccupiedEntry<'_, K, V, S, A> @@ -4271,11 +4271,11 @@ where K: Sync, V: Sync, S: Sync, - A: Sync + Allocator + Clone, + A: Sync + Allocator, { } -impl Debug for OccupiedEntry<'_, K, V, S, A> { +impl Debug for OccupiedEntry<'_, K, V, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("OccupiedEntry") .field("key", self.key()) @@ -4314,13 +4314,13 @@ impl Debug for OccupiedEntry<'_, K, /// } /// assert!(map[&"b"] == 20 && map.len() == 2); /// ``` -pub struct VacantEntry<'a, K, V, S = DefaultHashBuilder, A: Allocator + Clone = Global> { +pub struct VacantEntry<'a, K, V, S = DefaultHashBuilder, A: Allocator = Global> { hash: u64, key: K, table: &'a mut HashMap, } -impl Debug for VacantEntry<'_, K, V, S, A> { +impl Debug for VacantEntry<'_, K, V, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_tuple("VacantEntry").field(self.key()).finish() } @@ -4380,7 +4380,7 @@ impl Debug for VacantEntry<'_, K, V, S, A> /// ``` pub enum EntryRef<'a, 'b, K, Q: ?Sized, V, S, A = Global> where - A: Allocator + Clone, + A: Allocator, { /// An occupied entry. 
/// @@ -4413,7 +4413,7 @@ where Vacant(VacantEntryRef<'a, 'b, K, Q, V, S, A>), } -impl, Q: ?Sized + Debug, V: Debug, S, A: Allocator + Clone> Debug +impl, Q: ?Sized + Debug, V: Debug, S, A: Allocator> Debug for EntryRef<'_, '_, K, Q, V, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { @@ -4491,7 +4491,7 @@ impl<'a, K: Borrow, Q: ?Sized> AsRef for KeyOrRef<'a, K, Q> { /// assert_eq!(map.get("c"), None); /// assert_eq!(map.len(), 2); /// ``` -pub struct OccupiedEntryRef<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone = Global> { +pub struct OccupiedEntryRef<'a, 'b, K, Q: ?Sized, V, S, A: Allocator = Global> { hash: u64, key: Option>, elem: Bucket<(K, V)>, @@ -4504,7 +4504,7 @@ where Q: Sync + ?Sized, V: Send, S: Send, - A: Send + Allocator + Clone, + A: Send + Allocator, { } unsafe impl<'a, 'b, K, Q, V, S, A> Sync for OccupiedEntryRef<'a, 'b, K, Q, V, S, A> @@ -4513,11 +4513,11 @@ where Q: Sync + ?Sized, V: Sync, S: Sync, - A: Sync + Allocator + Clone, + A: Sync + Allocator, { } -impl, Q: ?Sized + Debug, V: Debug, S, A: Allocator + Clone> Debug +impl, Q: ?Sized + Debug, V: Debug, S, A: Allocator> Debug for OccupiedEntryRef<'_, '_, K, Q, V, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { @@ -4558,13 +4558,13 @@ impl, Q: ?Sized + Debug, V: Debug, S, A: Allocator + Clone> Debug /// } /// assert!(map["b"] == 20 && map.len() == 2); /// ``` -pub struct VacantEntryRef<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone = Global> { +pub struct VacantEntryRef<'a, 'b, K, Q: ?Sized, V, S, A: Allocator = Global> { hash: u64, key: KeyOrRef<'b, K, Q>, table: &'a mut HashMap, } -impl, Q: ?Sized + Debug, V, S, A: Allocator + Clone> Debug +impl, Q: ?Sized + Debug, V, S, A: Allocator> Debug for VacantEntryRef<'_, '_, K, Q, V, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { @@ -4596,14 +4596,14 @@ impl, Q: ?Sized + Debug, V, S, A: Allocator + Clone> Debug /// } /// assert_eq!(map[&"a"], 100); /// ``` -pub struct OccupiedError<'a, K, V, S, A: Allocator + Clone = Global> { +pub struct OccupiedError<'a, K, V, S, A: Allocator = Global> { /// The entry in the map that was already occupied. pub entry: OccupiedEntry<'a, K, V, S, A>, /// The value which was not inserted, because the entry was already occupied. 
pub value: V, } -impl Debug for OccupiedError<'_, K, V, S, A> { +impl Debug for OccupiedError<'_, K, V, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("OccupiedError") .field("key", self.entry.key()) @@ -4613,9 +4613,7 @@ impl Debug for OccupiedError<'_, K, } } -impl<'a, K: Debug, V: Debug, S, A: Allocator + Clone> fmt::Display - for OccupiedError<'a, K, V, S, A> -{ +impl<'a, K: Debug, V: Debug, S, A: Allocator> fmt::Display for OccupiedError<'a, K, V, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, @@ -4627,7 +4625,7 @@ impl<'a, K: Debug, V: Debug, S, A: Allocator + Clone> fmt::Display } } -impl<'a, K, V, S, A: Allocator + Clone> IntoIterator for &'a HashMap { +impl<'a, K, V, S, A: Allocator> IntoIterator for &'a HashMap { type Item = (&'a K, &'a V); type IntoIter = Iter<'a, K, V>; @@ -4659,7 +4657,7 @@ impl<'a, K, V, S, A: Allocator + Clone> IntoIterator for &'a HashMap } } -impl<'a, K, V, S, A: Allocator + Clone> IntoIterator for &'a mut HashMap { +impl<'a, K, V, S, A: Allocator> IntoIterator for &'a mut HashMap { type Item = (&'a K, &'a mut V); type IntoIter = IterMut<'a, K, V>; @@ -4696,7 +4694,7 @@ impl<'a, K, V, S, A: Allocator + Clone> IntoIterator for &'a mut HashMap IntoIterator for HashMap { +impl IntoIterator for HashMap { type Item = (K, V); type IntoIter = IntoIter; @@ -4791,7 +4789,7 @@ where } } -impl Iterator for IntoIter { +impl Iterator for IntoIter { type Item = (K, V); #[cfg_attr(feature = "inline-more", inline)] @@ -4803,15 +4801,15 @@ impl Iterator for IntoIter { self.inner.size_hint() } } -impl ExactSizeIterator for IntoIter { +impl ExactSizeIterator for IntoIter { #[cfg_attr(feature = "inline-more", inline)] fn len(&self) -> usize { self.inner.len() } } -impl FusedIterator for IntoIter {} +impl FusedIterator for IntoIter {} -impl fmt::Debug for IntoIter { +impl fmt::Debug for IntoIter { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self.iter()).finish() } @@ -4897,7 +4895,7 @@ impl fmt::Debug for ValuesMut<'_, K, V> { } } -impl<'a, K, V, A: Allocator + Clone> Iterator for Drain<'a, K, V, A> { +impl<'a, K, V, A: Allocator> Iterator for Drain<'a, K, V, A> { type Item = (K, V); #[cfg_attr(feature = "inline-more", inline)] @@ -4909,26 +4907,26 @@ impl<'a, K, V, A: Allocator + Clone> Iterator for Drain<'a, K, V, A> { self.inner.size_hint() } } -impl ExactSizeIterator for Drain<'_, K, V, A> { +impl ExactSizeIterator for Drain<'_, K, V, A> { #[cfg_attr(feature = "inline-more", inline)] fn len(&self) -> usize { self.inner.len() } } -impl FusedIterator for Drain<'_, K, V, A> {} +impl FusedIterator for Drain<'_, K, V, A> {} impl fmt::Debug for Drain<'_, K, V, A> where K: fmt::Debug, V: fmt::Debug, - A: Allocator + Clone, + A: Allocator, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self.iter()).finish() } } -impl<'a, K, V, S, A: Allocator + Clone> Entry<'a, K, V, S, A> { +impl<'a, K, V, S, A: Allocator> Entry<'a, K, V, S, A> { /// Sets the value of the entry, and returns an OccupiedEntry. /// /// # Examples @@ -5175,7 +5173,7 @@ impl<'a, K, V, S, A: Allocator + Clone> Entry<'a, K, V, S, A> { } } -impl<'a, K, V: Default, S, A: Allocator + Clone> Entry<'a, K, V, S, A> { +impl<'a, K, V: Default, S, A: Allocator> Entry<'a, K, V, S, A> { /// Ensures a value is in the entry by inserting the default value if empty, /// and returns a mutable reference to the value in the entry. 
/// @@ -5208,7 +5206,7 @@ impl<'a, K, V: Default, S, A: Allocator + Clone> Entry<'a, K, V, S, A> { } } -impl<'a, K, V, S, A: Allocator + Clone> OccupiedEntry<'a, K, V, S, A> { +impl<'a, K, V, S, A: Allocator> OccupiedEntry<'a, K, V, S, A> { /// Gets a reference to the key in the entry. /// /// # Examples @@ -5563,7 +5561,7 @@ impl<'a, K, V, S, A: Allocator + Clone> OccupiedEntry<'a, K, V, S, A> { } } -impl<'a, K, V, S, A: Allocator + Clone> VacantEntry<'a, K, V, S, A> { +impl<'a, K, V, S, A: Allocator> VacantEntry<'a, K, V, S, A> { /// Gets a reference to the key that would be used when inserting a value /// through the `VacantEntry`. /// @@ -5650,7 +5648,7 @@ impl<'a, K, V, S, A: Allocator + Clone> VacantEntry<'a, K, V, S, A> { } } -impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> EntryRef<'a, 'b, K, Q, V, S, A> { +impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator> EntryRef<'a, 'b, K, Q, V, S, A> { /// Sets the value of the entry, and returns an OccupiedEntryRef. /// /// # Examples @@ -5897,7 +5895,7 @@ impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> EntryRef<'a, 'b, K, Q, V, } } -impl<'a, 'b, K, Q: ?Sized, V: Default, S, A: Allocator + Clone> EntryRef<'a, 'b, K, Q, V, S, A> { +impl<'a, 'b, K, Q: ?Sized, V: Default, S, A: Allocator> EntryRef<'a, 'b, K, Q, V, S, A> { /// Ensures a value is in the entry by inserting the default value if empty, /// and returns a mutable reference to the value in the entry. /// @@ -5930,7 +5928,7 @@ impl<'a, 'b, K, Q: ?Sized, V: Default, S, A: Allocator + Clone> EntryRef<'a, 'b, } } -impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> OccupiedEntryRef<'a, 'b, K, Q, V, S, A> { +impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator> OccupiedEntryRef<'a, 'b, K, Q, V, S, A> { /// Gets a reference to the key in the entry. /// /// # Examples @@ -6282,7 +6280,7 @@ impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> OccupiedEntryRef<'a, 'b, } } -impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> VacantEntryRef<'a, 'b, K, Q, V, S, A> { +impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator> VacantEntryRef<'a, 'b, K, Q, V, S, A> { /// Gets a reference to the key that would be used when inserting a value /// through the `VacantEntryRef`. /// @@ -6382,7 +6380,7 @@ impl FromIterator<(K, V)> for HashMap where K: Eq + Hash, S: BuildHasher + Default, - A: Default + Allocator + Clone, + A: Default + Allocator, { #[cfg_attr(feature = "inline-more", inline)] fn from_iter>(iter: T) -> Self { @@ -6402,7 +6400,7 @@ impl Extend<(K, V)> for HashMap where K: Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { /// Inserts all new key-values from the iterator to existing `HashMap`. /// Replace values with existing keys with new values returned from the iterator. @@ -6486,7 +6484,7 @@ where K: Eq + Hash + Copy, V: Copy, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { /// Inserts all new key-values from the iterator to existing `HashMap`. /// Replace values with existing keys with new values returned from the iterator. @@ -6551,7 +6549,7 @@ where K: Eq + Hash + Copy, V: Copy, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { /// Inserts all new key-values from the iterator to existing `HashMap`. /// Replace values with existing keys with new values returned from the iterator. 
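The `Extend` impls above keep their documented behavior — new keys are inserted, and values of already-present keys are replaced — with only the `A: Clone` bound removed. A quick sketch of that behavior:

    use hashbrown::HashMap;

    fn example() {
        let mut map: HashMap<&str, i32> = HashMap::new();
        map.insert("a", 1);
        map.extend([("a", 10), ("b", 20)]);
        // "a" was replaced, "b" was inserted.
        assert_eq!(map.get("a"), Some(&10));
        assert_eq!(map.get("b"), Some(&20));
    }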
@@ -6618,12 +6616,12 @@ fn assert_covariance() { fn iter_val<'a, 'new>(v: Iter<'a, u8, &'static str>) -> Iter<'a, u8, &'new str> { v } - fn into_iter_key<'new, A: Allocator + Clone>( + fn into_iter_key<'new, A: Allocator>( v: IntoIter<&'static str, u8, A>, ) -> IntoIter<&'new str, u8, A> { v } - fn into_iter_val<'new, A: Allocator + Clone>( + fn into_iter_val<'new, A: Allocator>( v: IntoIter, ) -> IntoIter { v @@ -8624,7 +8622,7 @@ mod test_map { ) -> Result, DefaultHashBuilder, A>, String> where I: Iterator + Clone + ExactSizeIterator, - A: Allocator + Clone, + A: Allocator, T: PartialEq + core::fmt::Debug, { use crate::scopeguard::guard; diff --git a/src/raw/mod.rs b/src/raw/mod.rs index 945705965e..96e18df14b 100644 --- a/src/raw/mod.rs +++ b/src/raw/mod.rs @@ -4,7 +4,6 @@ use crate::TryReserveError; use core::iter::FusedIterator; use core::marker::PhantomData; use core::mem; -use core::mem::ManuallyDrop; use core::mem::MaybeUninit; use core::ptr::NonNull; use core::{hint, ptr}; @@ -93,6 +92,13 @@ impl Fallibility { } } +trait SizedTypeProperties: Sized { + const IS_ZERO_SIZED: bool = mem::size_of::() == 0; + const NEEDS_DROP: bool = mem::needs_drop::(); +} + +impl SizedTypeProperties for T {} + /// Control byte value for an empty bucket. const EMPTY: u8 = 0b1111_1111; @@ -294,8 +300,6 @@ impl Clone for Bucket { } impl Bucket { - const IS_ZERO_SIZED_TYPE: bool = mem::size_of::() == 0; - /// Creates a [`Bucket`] that contain pointer to the data. /// The pointer calculation is performed by calculating the /// offset from given `base` pointer (convenience for @@ -364,7 +368,7 @@ impl Bucket { // // where: T0...Tlast - our stored data; C0...Clast - control bytes // or metadata for data. - let ptr = if Self::IS_ZERO_SIZED_TYPE { + let ptr = if T::IS_ZERO_SIZED { // won't overflow because index must be less than length (bucket_mask) // and bucket_mask is guaranteed to be less than `isize::MAX` // (see TableLayout::calculate_layout_for method) @@ -438,7 +442,7 @@ impl Bucket { // (base.as_ptr() as usize - self.ptr.as_ptr() as usize) / mem::size_of::() // // where: T0...Tlast - our stored data; C0...Clast - control bytes or metadata for data. - if Self::IS_ZERO_SIZED_TYPE { + if T::IS_ZERO_SIZED { // this can not be UB self.ptr.as_ptr() as usize - 1 } else { @@ -502,7 +506,7 @@ impl Bucket { /// ``` #[inline] pub fn as_ptr(&self) -> *mut T { - if Self::IS_ZERO_SIZED_TYPE { + if T::IS_ZERO_SIZED { // Just return an arbitrary ZST pointer which is properly aligned // invalid pointer is good enough for ZST invalid_mut(mem::align_of::()) @@ -550,7 +554,7 @@ impl Bucket { /// [`RawTableInner::buckets`]: RawTableInner::buckets #[inline] unsafe fn next_n(&self, offset: usize) -> Self { - let ptr = if Self::IS_ZERO_SIZED_TYPE { + let ptr = if T::IS_ZERO_SIZED { // invalid pointer is good enough for ZST invalid_mut(self.ptr.as_ptr() as usize + offset) } else { @@ -774,15 +778,16 @@ impl Bucket { } /// A raw hash table with an unsafe API. -pub struct RawTable { - table: RawTableInner, +pub struct RawTable { + table: RawTableInner, + alloc: A, // Tell dropck that we own instances of T. marker: PhantomData, } /// Non-generic part of `RawTable` which allows functions to be instantiated only once regardless /// of how many different key-value types are used. -struct RawTableInner { +struct RawTableInner { // Mask to get an index from a hash value. The value is one less than the // number of buckets in the table. 
bucket_mask: usize, @@ -796,8 +801,6 @@ struct RawTableInner { // Number of elements in the table, only really used by len() items: usize, - - alloc: A, } impl RawTable { @@ -809,7 +812,8 @@ impl RawTable { #[inline] pub const fn new() -> Self { Self { - table: RawTableInner::new_in(Global), + table: RawTableInner::new(), + alloc: Global, marker: PhantomData, } } @@ -828,9 +832,8 @@ impl RawTable { } } -impl RawTable { +impl RawTable { const TABLE_LAYOUT: TableLayout = TableLayout::new::(); - const DATA_NEEDS_DROP: bool = mem::needs_drop::(); /// Creates a new empty hash table without allocating any memory, using the /// given allocator. @@ -841,7 +844,8 @@ impl RawTable { #[inline] pub const fn new_in(alloc: A) -> Self { Self { - table: RawTableInner::new_in(alloc), + table: RawTableInner::new(), + alloc, marker: PhantomData, } } @@ -859,60 +863,46 @@ impl RawTable { Ok(Self { table: RawTableInner::new_uninitialized( - alloc, + &alloc, Self::TABLE_LAYOUT, buckets, fallibility, )?, + alloc, marker: PhantomData, }) } - /// Attempts to allocate a new hash table with at least enough capacity - /// for inserting the given number of elements without reallocating. - fn fallible_with_capacity( - alloc: A, - capacity: usize, - fallibility: Fallibility, - ) -> Result { + /// Attempts to allocate a new hash table using the given allocator, with at least enough + /// capacity for inserting the given number of elements without reallocating. + #[cfg(feature = "raw")] + pub fn try_with_capacity_in(capacity: usize, alloc: A) -> Result { Ok(Self { table: RawTableInner::fallible_with_capacity( - alloc, + &alloc, Self::TABLE_LAYOUT, capacity, - fallibility, + Fallibility::Fallible, )?, + alloc, marker: PhantomData, }) } - /// Attempts to allocate a new hash table using the given allocator, with at least enough - /// capacity for inserting the given number of elements without reallocating. - #[cfg(feature = "raw")] - pub fn try_with_capacity_in(capacity: usize, alloc: A) -> Result { - Self::fallible_with_capacity(alloc, capacity, Fallibility::Fallible) - } - /// Allocates a new hash table using the given allocator, with at least enough capacity for /// inserting the given number of elements without reallocating. pub fn with_capacity_in(capacity: usize, alloc: A) -> Self { - // Avoid `Result::unwrap_or_else` because it bloats LLVM IR. - match Self::fallible_with_capacity(alloc, capacity, Fallibility::Infallible) { - Ok(capacity) => capacity, - Err(_) => unsafe { hint::unreachable_unchecked() }, + Self { + table: RawTableInner::with_capacity(&alloc, Self::TABLE_LAYOUT, capacity), + alloc, + marker: PhantomData, } } /// Returns a reference to the underlying allocator. #[inline] pub fn allocator(&self) -> &A { - &self.table.alloc - } - - /// Deallocates the table without dropping any entries. - #[cfg_attr(feature = "inline-more", inline)] - unsafe fn free_buckets(&mut self) { - self.table.free_buckets(Self::TABLE_LAYOUT); + &self.alloc } /// Returns pointer to one past last element of data table. 
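The new layout above — the allocator stored in the outer `RawTable`, `RawTableInner` left non-generic — is the heart of this patch: allocation paths on the inner type now borrow the allocator per call instead of owning (and cloning) it. A condensed sketch of the ownership pattern with stand-in types (not hashbrown's real definitions):

    use core::marker::PhantomData;

    trait Allocator {} // stand-in for the real trait

    // Non-generic: instantiated once, regardless of T and A.
    struct Inner {
        items: usize,
    }

    impl Inner {
        // Allocation-related methods borrow the allocator at the call site...
        fn grow<A: Allocator>(&mut self, _alloc: &A) { /* allocate here */ }
    }

    struct Table<T, A: Allocator> {
        inner: Inner,
        alloc: A, // owned exactly once, never cloned
        marker: PhantomData<T>,
    }

    impl<T, A: Allocator> Table<T, A> {
        fn reserve(&mut self) {
            // ...which is why `A: Clone` is no longer needed anywhere.
            self.inner.grow(&self.alloc);
        }
    }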
@@ -1030,15 +1020,10 @@ impl RawTable { // Ensure that the table is reset even if one of the drops panic let mut self_ = guard(self, |self_| self_.clear_no_drop()); unsafe { - self_.drop_elements(); - } - } - - unsafe fn drop_elements(&mut self) { - if Self::DATA_NEEDS_DROP && !self.is_empty() { - for item in self.iter() { - item.drop(); - } + // SAFETY: ScopeGuard sets to zero the `items` field of the table + // even in case of panic during the dropping of the elements so + // that there will be no double drop of the elements. + self_.table.drop_elements::(); } } @@ -1049,7 +1034,16 @@ impl RawTable { // space for. let min_size = usize::max(self.table.items, min_size); if min_size == 0 { - *self = Self::new_in(self.table.alloc.clone()); + let mut old_inner = mem::replace(&mut self.table, RawTableInner::new()); + unsafe { + // SAFETY: + // 1. We call the function only once; + // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`] + // and [`TableLayout`] that were used to allocate this table. + // 3. If any elements' drop function panics, then there will only be a memory leak, + // because we have replaced the inner table with a new one. + old_inner.drop_inner_table::(&self.alloc, Self::TABLE_LAYOUT); + } return; } @@ -1066,11 +1060,25 @@ impl RawTable { if min_buckets < self.buckets() { // Fast path if the table is empty if self.table.items == 0 { - *self = Self::with_capacity_in(min_size, self.table.alloc.clone()); + let new_inner = + RawTableInner::with_capacity(&self.alloc, Self::TABLE_LAYOUT, min_size); + let mut old_inner = mem::replace(&mut self.table, new_inner); + unsafe { + // SAFETY: + // 1. We call the function only once; + // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`] + // and [`TableLayout`] that were used to allocate this table. + // 3. If any elements' drop function panics, then there will only be a memory leak, + // because we have replaced the inner table with a new one. + old_inner.drop_inner_table::(&self.alloc, Self::TABLE_LAYOUT); + } } else { // Avoid `Result::unwrap_or_else` because it bloats LLVM IR. unsafe { - // SAFETY: We check that `capacity > 0`. + // SAFETY: + // 1. We know for sure that `min_size >= self.table.items`. + // 2. The [`RawTableInner`] must already have properly initialized control bytes since + // we never exposed RawTable::new_uninitialized in a public API. if self .resize(min_size, hasher, Fallibility::Infallible) .is_err() @@ -1090,11 +1098,16 @@ impl RawTable { pub fn reserve(&mut self, additional: usize, hasher: impl Fn(&T) -> u64) { if unlikely(additional > self.table.growth_left) { // Avoid `Result::unwrap_or_else` because it bloats LLVM IR. - if self - .reserve_rehash(additional, hasher, Fallibility::Infallible) - .is_err() - { - unsafe { hint::unreachable_unchecked() } + unsafe { + // SAFETY: The [`RawTableInner`] must already have properly initialized control + // bytes since we never exposed RawTable::new_uninitialized in a public API. + if self + .reserve_rehash(additional, hasher, Fallibility::Infallible) + .is_err() + { + // SAFETY: All allocation errors will be caught inside `RawTableInner::reserve_rehash`. 
+                    hint::unreachable_unchecked()
+                }
+            }
         }
     }
@@ -1108,28 +1121,45 @@ impl RawTable {
         hasher: impl Fn(&T) -> u64,
     ) -> Result<(), TryReserveError> {
         if additional > self.table.growth_left {
-            self.reserve_rehash(additional, hasher, Fallibility::Fallible)
+            // SAFETY: The [`RawTableInner`] must already have properly initialized control
+            // bytes since we never exposed RawTable::new_uninitialized in a public API.
+            unsafe { self.reserve_rehash(additional, hasher, Fallibility::Fallible) }
         } else {
             Ok(())
         }
     }

     /// Out-of-line slow path for `reserve` and `try_reserve`.
+    ///
+    /// # Safety
+    ///
+    /// The [`RawTableInner`] must have properly initialized control bytes,
+    /// otherwise calling this function results in [`undefined behavior`].
+    ///
+    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
     #[cold]
     #[inline(never)]
-    fn reserve_rehash(
+    unsafe fn reserve_rehash(
         &mut self,
         additional: usize,
         hasher: impl Fn(&T) -> u64,
         fallibility: Fallibility,
     ) -> Result<(), TryReserveError> {
         unsafe {
+            // SAFETY:
+            // 1. We know for sure that `alloc` and `layout` match the [`Allocator`] and
+            //    [`TableLayout`] that were used to allocate this table.
+            // 2. The `drop` function is the actual drop function of the elements stored in
+            //    the table.
+            // 3. The caller ensures that the control bytes of the `RawTableInner`
+            //    are already initialized.
             self.table.reserve_rehash_inner(
+                &self.alloc,
                 additional,
                 &|table, index| hasher(table.bucket::<T>(index).as_ref()),
                 fallibility,
                 Self::TABLE_LAYOUT,
-                if Self::DATA_NEEDS_DROP {
+                if T::NEEDS_DROP {
                     Some(mem::transmute(ptr::drop_in_place::<T> as unsafe fn(*mut T)))
                 } else {
                     None
@@ -1143,13 +1173,25 @@ impl RawTable {
     ///
     /// # Safety
     ///
-    /// If `self.table.items != 0`, calling of this function with `capacity` equal to 0
-    /// (`capacity == 0`) results in [`undefined behavior`].
+    /// The [`RawTableInner`] must have properly initialized control bytes,
+    /// otherwise calling this function results in [`undefined behavior`].
     ///
-    /// Note: It is recommended (but not required) that the new table's `capacity`
-    /// be greater than or equal to `self.items`. In case if `capacity <= self.items`
-    /// this function can never return. See [`RawTableInner::find_insert_slot`] for
-    /// more information.
+    /// The caller of this function must ensure that `capacity >= self.table.items`,
+    /// otherwise:
+    ///
+    /// * If `self.table.items != 0`, calling this function with `capacity`
+    ///   equal to 0 (`capacity == 0`) results in [`undefined behavior`].
+    ///
+    /// * If `capacity_to_buckets(capacity) < Group::WIDTH` and
+    ///   `self.table.items > capacity_to_buckets(capacity)`,
+    ///   calling this function results in [`undefined behavior`].
+    ///
+    /// * If `capacity_to_buckets(capacity) >= Group::WIDTH` and
+    ///   `self.table.items > capacity_to_buckets(capacity)`,
+    ///   calling this function will never return (it will go into an
+    ///   infinite loop).
+    ///
+    /// See [`RawTableInner::find_insert_slot`] for more information.
     ///
     /// [`RawTableInner::find_insert_slot`]: RawTableInner::find_insert_slot
     /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
@@ -1160,10 +1202,13 @@ impl RawTable {
         fallibility: Fallibility,
     ) -> Result<(), TryReserveError> {
         // SAFETY:
        // 1. 
The caller of this function guarantees that `capacity >= self.table.items`. + // 2. We know for sure that `alloc` and `layout` matches the [`Allocator`] and + // [`TableLayout`] that were used to allocate this table. + // 3. The caller ensures that the control bytes of the `RawTableInner` + // are already initialized. self.table.resize_inner( + &self.alloc, capacity, &|table, index| hasher(table.bucket::(index).as_ref()), fallibility, @@ -1445,11 +1490,11 @@ impl RawTable { /// struct, we have to make the `iter` method unsafe. #[inline] pub unsafe fn iter(&self) -> RawIter { - let data = Bucket::from_base_index(self.data_end(), 0); - RawIter { - iter: RawIterRange::new(self.table.ctrl.as_ptr(), data, self.table.buckets()), - items: self.table.items, - } + // SAFETY: + // 1. The caller must uphold the safety contract for `iter` method. + // 2. The [`RawTableInner`] must already have properly initialized control bytes since + // we never exposed RawTable::new_uninitialized in a public API. + self.table.iter() } /// Returns an iterator over occupied buckets that could match a given hash. @@ -1489,8 +1534,8 @@ impl RawTable { debug_assert_eq!(iter.len(), self.len()); RawDrain { iter, - table: ManuallyDrop::new(mem::replace(self, Self::new_in(self.table.alloc.clone()))), - orig_table: NonNull::from(self), + table: mem::replace(&mut self.table, RawTableInner::new()), + orig_table: NonNull::from(&mut self.table), marker: PhantomData, } } @@ -1528,7 +1573,7 @@ impl RawTable { Some(( unsafe { NonNull::new_unchecked(self.table.ctrl.as_ptr().sub(ctrl_offset)) }, layout, - unsafe { ptr::read(&self.table.alloc) }, + unsafe { ptr::read(&self.alloc) }, )) }; mem::forget(self); @@ -1536,39 +1581,38 @@ impl RawTable { } } -unsafe impl Send for RawTable +unsafe impl Send for RawTable where T: Send, A: Send, { } -unsafe impl Sync for RawTable +unsafe impl Sync for RawTable where T: Sync, A: Sync, { } -impl RawTableInner { +impl RawTableInner { /// Creates a new empty hash table without allocating any memory. /// /// In effect this returns a table with exactly 1 bucket. However we can /// leave the data pointer dangling since that bucket is never accessed /// due to our load factor forcing us to always have at least 1 free bucket. #[inline] - const fn new_in(alloc: A) -> Self { + const fn new() -> Self { Self { // Be careful to cast the entire slice to a raw pointer. ctrl: unsafe { NonNull::new_unchecked(Group::static_empty() as *const _ as *mut u8) }, bucket_mask: 0, items: 0, growth_left: 0, - alloc, } } } -impl RawTableInner { +impl RawTableInner { /// Allocates a new [`RawTableInner`] with the given number of buckets. /// The control bytes and buckets are left uninitialized. /// @@ -1582,12 +1626,15 @@ impl RawTableInner { /// /// [`Allocator`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html #[cfg_attr(feature = "inline-more", inline)] - unsafe fn new_uninitialized( - alloc: A, + unsafe fn new_uninitialized( + alloc: &A, table_layout: TableLayout, buckets: usize, fallibility: Fallibility, - ) -> Result { + ) -> Result + where + A: Allocator, + { debug_assert!(buckets.is_power_of_two()); // Avoid `Option::ok_or_else` because it bloats LLVM IR. 
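Several wrappers in the hunks above share one shape: run the fallible path with `Fallibility::Infallible` (which handles allocation failure internally instead of returning `Err`) and mark the `Err` arm unreachable rather than calling `Result::unwrap_or_else`, which bloats LLVM IR. A minimal sketch of that pattern with illustrative names:

    #[derive(Debug)]
    struct TryReserveError;

    enum Fallibility {
        Fallible,
        Infallible,
    }

    fn fallible_grow(fallibility: Fallibility) -> Result<(), TryReserveError> {
        // In `Infallible` mode, allocation failure aborts the process inside
        // this function, so it can only ever return `Ok` in that mode.
        match fallibility {
            Fallibility::Fallible => Ok(()),   // may also return Err(...)
            Fallibility::Infallible => Ok(()), // failure already handled above
        }
    }

    fn infallible_grow() {
        // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
        match fallible_grow(Fallibility::Infallible) {
            Ok(()) => {}
            // SAFETY: all errors were handled inside `fallible_grow`.
            Err(_) => unsafe { core::hint::unreachable_unchecked() },
        }
    }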
@@ -1596,7 +1643,7 @@ impl RawTableInner { None => return Err(fallibility.capacity_overflow()), }; - let ptr: NonNull = match do_alloc(&alloc, layout) { + let ptr: NonNull = match do_alloc(alloc, layout) { Ok(block) => block.cast(), Err(_) => return Err(fallibility.alloc_err(layout)), }; @@ -1608,7 +1655,6 @@ impl RawTableInner { bucket_mask: buckets - 1, items: 0, growth_left: bucket_mask_to_capacity(buckets - 1), - alloc, }) } @@ -1617,14 +1663,17 @@ impl RawTableInner { /// /// All the control bytes are initialized with the [`EMPTY`] bytes. #[inline] - fn fallible_with_capacity( - alloc: A, + fn fallible_with_capacity( + alloc: &A, table_layout: TableLayout, capacity: usize, fallibility: Fallibility, - ) -> Result { + ) -> Result + where + A: Allocator, + { if capacity == 0 { - Ok(Self::new_in(alloc)) + Ok(Self::new()) } else { // SAFETY: We checked that we could successfully allocate the new table, and then // initialized all control bytes with the constant `EMPTY` byte. @@ -1643,6 +1692,28 @@ impl RawTableInner { } } + /// Allocates a new [`RawTableInner`] with at least enough capacity for inserting + /// the given number of elements without reallocating. + /// + /// Panics if the new capacity exceeds [`isize::MAX`] bytes and [`abort`] the program + /// in case of allocation error. Use [`fallible_with_capacity`] instead if you want to + /// handle memory allocation failure. + /// + /// All the control bytes are initialized with the [`EMPTY`] bytes. + /// + /// [`fallible_with_capacity`]: RawTableInner::fallible_with_capacity + fn with_capacity(alloc: &A, table_layout: TableLayout, capacity: usize) -> Self + where + A: Allocator, + { + // Avoid `Result::unwrap_or_else` because it bloats LLVM IR. + match Self::fallible_with_capacity(alloc, table_layout, capacity, Fallibility::Infallible) { + Ok(table_inner) => table_inner, + // SAFETY: All allocation errors will be caught inside `RawTableInner::new_uninitialized`. + Err(_) => unsafe { hint::unreachable_unchecked() }, + } + } + /// Fixes up an insertion slot due to false positives for groups smaller than the group width. /// This must only be used on insertion slots found by `find_insert_slot_in_group`. #[inline] @@ -1916,6 +1987,9 @@ impl RawTableInner { /// to do during the first insert due to tombstones). If the caller does not do /// this, then calling this function may result in a memory leak. /// + /// * The [`RawTableInner`] must have properly initialized control bytes otherwise + /// calling this function results in [`undefined behavior`]. + /// /// Calling this function on a table that has not been allocated results in /// [`undefined behavior`]. /// @@ -1963,6 +2037,155 @@ impl RawTableInner { } } + /// Returns an iterator over every element in the table. + /// + /// # Safety + /// + /// If any of the following conditions are violated, the result + /// is [`undefined behavior`]: + /// + /// * The caller has to ensure that the `RawTableInner` outlives the + /// `RawIter`. Because we cannot make the `next` method unsafe on + /// the `RawIter` struct, we have to make the `iter` method unsafe. + /// + /// * The [`RawTableInner`] must have properly initialized control bytes. + /// + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + #[inline] + unsafe fn iter(&self) -> RawIter { + // SAFETY: + // 1. 
Since the caller of this function ensures that the control bytes
+        //    are properly initialized and `self.data_end()` points to the start
+        //    of the array of control bytes, therefore: `ctrl` is valid for reads,
+        //    properly aligned to `Group::WIDTH` and points to the properly initialized
+        //    control bytes.
+        // 2. `data` bucket index in the table is equal to the `ctrl` index (i.e.
+        //    equal to zero).
+        // 3. We pass the exact value of buckets of the table to the function.
+        //
+        //                         `ctrl` points here (to the start
+        //                         of the first control byte `CT0`)
+        //                          ∨
+        // [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, Group::WIDTH
+        //                           \________  ________/
+        //                                    \/
+        //       `n = buckets - 1`, i.e. `RawTableInner::buckets() - 1`
+        //
+        // where: T0...T_n  - our stored data;
+        //        CT0...CT_n - control bytes or metadata for `data`.
+        let data = Bucket::from_base_index(self.data_end(), 0);
+        RawIter {
+            // SAFETY: See explanation above
+            iter: RawIterRange::new(self.ctrl.as_ptr(), data, self.buckets()),
+            items: self.items,
+        }
+    }
+
+    /// Executes the destructors (if any) of the values stored in the table.
+    ///
+    /// # Note
+    ///
+    /// This function does not erase the control bytes of the table and does
+    /// not make any changes to the `items` or `growth_left` fields of the
+    /// table. If necessary, the caller of this function must manually set
+    /// up these table fields, for example using the [`clear_no_drop`] function.
+    ///
+    /// Be careful when calling this function, because the drop functions of
+    /// the elements can panic, and this can leave the table in an inconsistent
+    /// state.
+    ///
+    /// # Safety
+    ///
+    /// If `T` is a type that should be dropped and **the table is not empty**,
+    /// calling this function more than once results in [`undefined behavior`].
+    ///
+    /// If `T` is not [`Copy`], attempting to use values stored in the table after
+    /// calling this function may result in [`undefined behavior`].
+    ///
+    /// It is safe to call this function on a table that has not been allocated,
+    /// on a table with uninitialized control bytes, and on a table with no actual
+    /// data but with `Full` control bytes if `self.items == 0`.
+    ///
+    /// See also the [`Bucket::drop`] / [`Bucket::as_ptr`] methods for more information
+    /// about properly removing or saving an `element` from / into the [`RawTable`] /
+    /// [`RawTableInner`].
+    ///
+    /// [`Bucket::drop`]: Bucket::drop
+    /// [`Bucket::as_ptr`]: Bucket::as_ptr
+    /// [`clear_no_drop`]: RawTableInner::clear_no_drop
+    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+    unsafe fn drop_elements<T>(&mut self) {
+        // Check that `self.items != 0`. Protects against the possibility
+        // of creating an iterator on a table with uninitialized control bytes.
+        if T::NEEDS_DROP && self.items != 0 {
+            // SAFETY: We know for sure that RawTableInner will outlive the
+            // returned `RawIter` iterator, and the caller of this function
+            // must uphold the safety contract for the `drop_elements` method.
+            for item in self.iter::<T>() {
+                // SAFETY: The caller must uphold the safety contract for
+                // the `drop_elements` method.
+                item.drop();
+            }
+        }
+    }
+
+    /// Executes the destructors (if any) of the values stored in the table and then
+    /// deallocates the table.
+    ///
+    /// # Note
+    ///
+    /// Calling this function invalidates (leaves dangling) all instances of
+    /// buckets ([`Bucket`]) and the `ctrl` field of the table. 
+    ///
+    /// This function does not make any changes to the `bucket_mask`, `items` or `growth_left`
+    /// fields of the table. If necessary, the caller of this function must manually set
+    /// up these table fields.
+    ///
+    /// # Safety
+    ///
+    /// If any of the following conditions are violated, the result is [`undefined behavior`]:
+    ///
+    /// * Calling this function more than once;
+    ///
+    /// * The `alloc` must be the same [`Allocator`] as the `Allocator` that was used
+    ///   to allocate this table.
+    ///
+    /// * The `table_layout` must be the same [`TableLayout`] as the `TableLayout` that
+    ///   was used to allocate this table.
+    ///
+    /// The caller of this function should pay attention to the possibility of the
+    /// elements' drop function panicking, because this:
+    ///
+    ///    * May leave the table in an inconsistent state;
+    ///
+    ///    * May cause a memory leak, since in that case the memory is never deallocated.
+    ///
+    /// Attempting to use (dereferencing) the `ctrl` field of the table after calling this
+    /// function results in [`undefined behavior`].
+    ///
+    /// It is safe to call this function on a table that has not been allocated,
+    /// on a table with uninitialized control bytes, and on a table with no actual
+    /// data but with `Full` control bytes if `self.items == 0`.
+    ///
+    /// See also [`RawTableInner::drop_elements`] or [`RawTableInner::free_buckets`]
+    /// for more information.
+    ///
+    /// [`RawTableInner::drop_elements`]: RawTableInner::drop_elements
+    /// [`RawTableInner::free_buckets`]: RawTableInner::free_buckets
+    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+    unsafe fn drop_inner_table<T, A: Allocator>(&mut self, alloc: &A, table_layout: TableLayout) {
+        if !self.is_empty_singleton() {
+            unsafe {
+                // SAFETY: The caller must uphold the safety contract for the `drop_inner_table` method.
+                self.drop_elements::<T>();
+                // SAFETY:
+                // 1. We have checked that our table is allocated.
+                // 2. The caller must uphold the safety contract for the `drop_inner_table` method.
+                self.free_buckets(alloc, table_layout);
+            }
+        }
+    }
+
     #[inline]
     unsafe fn bucket<T>(&self, index: usize) -> Bucket<T> {
         debug_assert_ne!(self.bucket_mask, 0);
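The `drop_elements` / `drop_inner_table` pair just added separates "run the destructors" from "free the storage". A rough sketch of the same two-phase teardown for a plain buffer (all names here are hypothetical, not hashbrown's API):

```rust
use core::{mem, ptr};

/// Two-phase teardown in the style of `drop_elements` + `free_buckets`:
/// run the destructors first, then release the storage. `dealloc` stands
/// in for the allocator call.
unsafe fn drop_then_free<T>(data: *mut T, len: usize, dealloc: impl FnOnce(*mut T)) {
    // Mirror the `T::NEEDS_DROP && self.items != 0` guard: skip the loop
    // entirely for types without drop glue and for empty tables.
    if mem::needs_drop::<T>() && len != 0 {
        for i in 0..len {
            ptr::drop_in_place(data.add(i));
        }
    }
    // If a destructor above panics, this line is never reached: the memory
    // leaks instead of being freed out from under live data -- the same
    // trade-off `drop_inner_table` documents.
    dealloc(data);
}
```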
@@ -2213,6 +2436,12 @@ impl RawTableInner {
     /// and return it inside ScopeGuard to protect against panic in the hash
     /// function.
     ///
+    /// # Safety
+    ///
+    /// The `alloc` must be the same [`Allocator`] as the `Allocator` used
+    /// to allocate this table, otherwise calling this function may result in
+    /// [`undefined behavior`].
+    ///
     /// # Note
     ///
     /// It is recommended (but not required):
@@ -2229,21 +2458,21 @@ impl RawTableInner {
     /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
     #[allow(clippy::mut_mut)]
     #[inline]
-    fn prepare_resize(
+    unsafe fn prepare_resize<'a, A>(
         &self,
+        alloc: &'a A,
         table_layout: TableLayout,
         capacity: usize,
         fallibility: Fallibility,
-    ) -> Result<crate::scopeguard::ScopeGuard<Self, impl FnMut(&mut Self)>, TryReserveError> {
+    ) -> Result<crate::scopeguard::ScopeGuard<Self, impl FnMut(&mut Self) + 'a>, TryReserveError>
+    where
+        A: Allocator,
+    {
         debug_assert!(self.items <= capacity);
 
         // Allocate and initialize the new table.
-        let new_table = RawTableInner::fallible_with_capacity(
-            self.alloc.clone(),
-            table_layout,
-            capacity,
-            fallibility,
-        )?;
+        let new_table =
+            RawTableInner::fallible_with_capacity(alloc, table_layout, capacity, fallibility)?;
 
         // The hash function may panic, in which case we simply free the new
         // table without dropping any elements that may have been copied into
         // it.
        //
        // This guard is also used to free the old table on success, see
        // the comment at the bottom of this function.
         Ok(guard(new_table, move |self_| {
             if !self_.is_empty_singleton() {
                 // SAFETY:
                 // 1. We have checked that our table is allocated.
-                // 2. We know for sure that `table_layout` matches the [`TableLayout`]
-                //    that was used to allocate this table.
-                unsafe { self_.free_buckets(table_layout) };
+                // 2. The caller of this function ensures that `alloc` is the
+                //    same [`Allocator`] used to allocate this table.
+                // 3. We know for sure that `table_layout` matches the [`TableLayout`]
+                //    used to allocate this table.
+                self_.free_buckets(alloc, table_layout);
             }
         }))
     }
@@ -2266,16 +2497,38 @@ impl RawTableInner {
     ///
     /// This uses dynamic dispatch to reduce the amount of
     /// code generated, but it is eliminated by LLVM optimizations when inlined.
+    ///
+    /// # Safety
+    ///
+    /// If any of the following conditions are violated, the result is
+    /// [`undefined behavior`]:
+    ///
+    /// * The `alloc` must be the same [`Allocator`] as the `Allocator` used
+    ///   to allocate this table.
+    ///
+    /// * The `layout` must be the same [`TableLayout`] as the `TableLayout`
+    ///   used to allocate this table.
+    ///
+    /// * The `drop` function (`fn(*mut u8)`) must be the actual drop function of
+    ///   the elements stored in the table.
+    ///
+    /// * The [`RawTableInner`] must have properly initialized control bytes.
+    ///
+    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
     #[allow(clippy::inline_always)]
     #[inline(always)]
-    unsafe fn reserve_rehash_inner(
+    unsafe fn reserve_rehash_inner<A>(
         &mut self,
+        alloc: &A,
         additional: usize,
         hasher: &dyn Fn(&mut Self, usize) -> u64,
         fallibility: Fallibility,
         layout: TableLayout,
         drop: Option<fn(*mut u8)>,
-    ) -> Result<(), TryReserveError> {
+    ) -> Result<(), TryReserveError>
+    where
+        A: Allocator,
+    {
         // Avoid `Option::ok_or_else` because it bloats LLVM IR.
         let new_items = match self.items.checked_add(additional) {
             Some(new_items) => new_items,
@@ -2285,6 +2538,16 @@ impl RawTableInner {
         if new_items <= full_capacity / 2 {
             // Rehash in-place without re-allocating if we have plenty of spare
             // capacity that is locked up due to DELETED entries.
+
+            // SAFETY:
+            // 1. We know for sure that the [`RawTableInner`] has already been allocated
+            //    (since `new_items <= full_capacity / 2`);
+            // 2. The caller ensures that the `drop` function is the actual drop function of
+            //    the elements stored in the table;
+            // 3. The caller ensures that `layout` matches the [`TableLayout`] that was
+            //    used to allocate this table;
+            // 4. The caller ensures that the control bytes of the `RawTableInner`
+            //    are already initialized.
             self.rehash_in_place(hasher, layout.size, drop);
             Ok(())
         } else {
             // Otherwise, conservatively resize to at least the next size up
             // to avoid churning deletes into frequent rehashes.
             //
             // SAFETY:
-            // 1. The `capacity` is guaranteed to be greater than zero (`capacity > 0`).
-            // 2. The caller ensures that `table_layout` matches the [`TableLayout`]
-            //    that was used to allocate this table.
+            // 1. We know for sure that `capacity >= self.items`.
+            // 2. The caller ensures that `alloc` and `layout` match the [`Allocator`] and
+            //    [`TableLayout`] that were used to allocate this table.
+            // 3. The caller ensures that the control bytes of the `RawTableInner`
+            //    are already initialized.
             self.resize_inner(
+                alloc,
                 usize::max(new_items, full_capacity + 1),
                 hasher,
                 fallibility,
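The branch in `reserve_rehash_inner` above encodes the usual tombstone policy: if at least half of the usable capacity would still be free after the insertions, the occupancy is dominated by `DELETED` entries and an in-place rehash suffices; otherwise the table grows. A schematic sketch of just that decision (the 7/8 load factor mirrors `bucket_mask_to_capacity` for larger tables, but the helper below is a stand-in, not hashbrown's code):

```rust
/// Stand-in for hashbrown's load-factor arithmetic: tables are kept
/// at most 7/8 full.
fn full_capacity(buckets: usize) -> usize {
    buckets / 8 * 7
}

/// Schematic version of the `reserve_rehash_inner` policy.
fn plan(items: usize, additional: usize, buckets: usize) -> &'static str {
    let new_items = items.checked_add(additional).expect("capacity overflow");
    let full = full_capacity(buckets);
    if new_items <= full / 2 {
        // Plenty of capacity is locked up in DELETED entries:
        // reclaim it without reallocating.
        "rehash in place"
    } else {
        // Grow to at least the next size up, so deletions do not
        // churn into frequent rehashes.
        "resize to max(new_items, full + 1)"
    }
}
```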
@@ -2308,17 +2574,22 @@ impl RawTableInner {
     ///
     /// # Safety
     ///
-    /// The caller has to ensure that the `RawTableInner` outlives the
-    /// `FullBucketsIndices`. Because we cannot make the `next` method
-    /// unsafe on the `FullBucketsIndices` struct, we have to make the
-    /// `full_buckets_indices` method unsafe.
+    /// Behavior is undefined if any of the following conditions are violated:
+    ///
+    /// * The caller has to ensure that the `RawTableInner` outlives the
+    ///   `FullBucketsIndices`. Because we cannot make the `next` method
+    ///   unsafe on the `FullBucketsIndices` struct, we have to make the
+    ///   `full_buckets_indices` method unsafe.
+    ///
+    /// * The [`RawTableInner`] must have properly initialized control bytes.
     #[inline(always)]
     unsafe fn full_buckets_indices(&self) -> FullBucketsIndices {
         // SAFETY:
-        // 1. The first `self.ctrl` pointed to the start of the array of control
-        //    bytes, and therefore: `ctrl` is valid for reads, properly aligned
-        //    to `Group::WIDTH` and points to the properly initialized control
-        //    bytes.
+        // 1. The caller of this function ensures that the control bytes are
+        //    properly initialized and that `self.ctrl(0)` points to the start
+        //    of the array of control bytes; therefore `ctrl` is valid for reads,
+        //    properly aligned to `Group::WIDTH`, and points to the properly
+        //    initialized control bytes.
         // 2. The value of `items` is equal to the amount of data (values) added
         //    to the table.
         //
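`FullBucketsIndices` walks the control bytes group by group and yields the indices whose byte marks a full bucket. Stripped of the SIMD grouping, the underlying test is just the high bit of each control byte (`EMPTY` is `0xFF`, `DELETED` is `0x80`, and full bytes carry a 7-bit `h2` hash with the high bit clear). A scalar sketch of that scan:

```rust
/// Scalar stand-in for the SIMD scan performed by `FullBucketsIndices`:
/// a control byte denotes a full bucket exactly when its high bit is clear.
fn full_bucket_indices(ctrl: &[u8]) -> impl Iterator<Item = usize> + '_ {
    ctrl.iter()
        .enumerate()
        .filter(|&(_, &byte)| byte & 0x80 == 0)
        .map(|(index, _)| index)
}

// E.g. for [0x23, 0xFF, 0x80, 0x51] this yields the indices 0 and 3.
```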
In case if `capacity <= self.items`
@@ -2370,17 +2657,24 @@ impl RawTableInner {
     /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
     #[allow(clippy::inline_always)]
     #[inline(always)]
-    unsafe fn resize_inner(
+    unsafe fn resize_inner<A>(
         &mut self,
+        alloc: &A,
         capacity: usize,
         hasher: &dyn Fn(&mut Self, usize) -> u64,
         fallibility: Fallibility,
         layout: TableLayout,
-    ) -> Result<(), TryReserveError> {
-        let mut new_table = self.prepare_resize(layout, capacity, fallibility)?;
+    ) -> Result<(), TryReserveError>
+    where
+        A: Allocator,
+    {
+        // SAFETY: We know for sure that `alloc` and `layout` match the [`Allocator`]
+        // and [`TableLayout`] that were used to allocate this table.
+        let mut new_table = self.prepare_resize(alloc, layout, capacity, fallibility)?;
 
-        // SAFETY: We know for sure that `RawTableInner` will outlive
-        // the returned `FullBucketsIndices` iterator.
+        // SAFETY: We know for sure that RawTableInner will outlive the
+        // returned `FullBucketsIndices` iterator, and the caller of this
+        // function ensures that the control bytes are properly initialized.
         for full_byte_index in self.full_buckets_indices() {
             // This may panic.
             let hash = hasher(self, full_byte_index);
@@ -2443,6 +2737,21 @@ impl RawTableInner {
     ///
     /// This uses dynamic dispatch to reduce the amount of
     /// code generated, but it is eliminated by LLVM optimizations when inlined.
+    ///
+    /// # Safety
+    ///
+    /// If any of the following conditions are violated, the result is [`undefined behavior`]:
+    ///
+    /// * The `size_of` must be equal to the size of the elements stored in the table;
+    ///
+    /// * The `drop` function (`fn(*mut u8)`) must be the actual drop function of
+    ///   the elements stored in the table;
+    ///
+    /// * The [`RawTableInner`] has already been allocated;
+    ///
+    /// * The [`RawTableInner`] must have properly initialized control bytes.
+    ///
+    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
     #[allow(clippy::inline_always)]
     #[cfg_attr(feature = "inline-more", inline(always))]
     #[cfg_attr(not(feature = "inline-more"), inline)]
@@ -2532,7 +2841,8 @@ impl RawTableInner {
     ///
     /// This function must be called only after [`drop_elements`](RawTable::drop_elements),
     /// else it can lead to leaking of memory. Also calling this function automatically
-    /// makes invalid (dangling) all instances of buckets ([`Bucket`]) and the table itself.
+    /// invalidates (leaves dangling) all instances of buckets ([`Bucket`]) and the
+    /// `ctrl` field of the table.
     ///
     /// # Safety
     ///
@@ -2540,6 +2850,9 @@ impl RawTableInner {
     ///
     /// * The [`RawTableInner`] has already been allocated;
     ///
+    /// * The `alloc` must be the same [`Allocator`] as the `Allocator` that was used
+    ///   to allocate this table.
+    ///
     /// * The `table_layout` must be the same [`TableLayout`] as the `TableLayout` that was used
     ///   to allocate this table.
     ///
@@ -2549,11 +2862,14 @@ impl RawTableInner {
     /// [`GlobalAlloc::dealloc`]: https://doc.rust-lang.org/alloc/alloc/trait.GlobalAlloc.html#tymethod.dealloc
     /// [`Allocator::deallocate`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html#tymethod.deallocate
     #[inline]
-    unsafe fn free_buckets(&mut self, table_layout: TableLayout) {
+    unsafe fn free_buckets<A>(&mut self, alloc: &A, table_layout: TableLayout)
+    where
+        A: Allocator,
+    {
         // SAFETY: The caller must uphold the safety contract for `free_buckets`
         // method.
        let (ptr, layout) = self.allocation_info(table_layout);
-        self.alloc.deallocate(ptr, layout);
+        alloc.deallocate(ptr, layout);
     }
 
     /// Returns a pointer to the allocated memory and the layout that was used to
@@ -2731,7 +3047,7 @@ impl RawTableInner {
 impl<T: Clone, A: Allocator + Clone> Clone for RawTable<T, A> {
     fn clone(&self) -> Self {
         if self.table.is_empty_singleton() {
-            Self::new_in(self.table.alloc.clone())
+            Self::new_in(self.alloc.clone())
         } else {
             unsafe {
                 // Avoid `Result::ok_or_else` because it bloats LLVM IR.
@@ -2740,7 +3056,7 @@ impl Clone for RawTable {
                 // and therefore сapacity overflow cannot occur, `self.table.buckets()` is power
                 // of two and all allocator errors will be caught inside `RawTableInner::new_uninitialized`.
                 let mut new_table = match Self::new_uninitialized(
-                    self.table.alloc.clone(),
+                    self.alloc.clone(),
                     self.table.buckets(),
                     Fallibility::Infallible,
                 ) {
@@ -2764,8 +3080,16 @@ impl Clone for RawTable {
 
     fn clone_from(&mut self, source: &Self) {
         if source.table.is_empty_singleton() {
-            // Dereference drops old `self` table
-            *self = Self::new_in(self.table.alloc.clone());
+            let mut old_inner = mem::replace(&mut self.table, RawTableInner::new());
+            unsafe {
+                // SAFETY:
+                // 1. We call the function only once;
+                // 2. We know for sure that `alloc` and `table_layout` match the [`Allocator`]
+                //    and [`TableLayout`] that were used to allocate this table;
+                // 3. If any element's drop function panics, then there will only be a memory leak,
+                //    because we have replaced the inner table with a new one.
+                old_inner.drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
+            }
         } else {
             unsafe {
                 // Make sure that if any panics occurs, we clear the table and
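The new `clone_from` path above leans on a small idiom worth spelling out: detach the old state with `mem::replace` before running destructors, so a panicking `drop` can at most leak, never leave `self` pointing at freed memory. In miniature (a stand-in type, not hashbrown code):

```rust
use core::mem;

struct Holder {
    inner: Vec<String>, // stand-in for `RawTableInner`
}

impl Holder {
    /// Sketch of the `mem::replace`-then-drop idiom from `clone_from`.
    fn reset(&mut self) {
        // Detach first: `self.inner` is a valid empty value from here on.
        let old_inner = mem::replace(&mut self.inner, Vec::new());
        // Dropping `old_inner` runs the element destructors; if one of
        // them panics, `self` is already a coherent empty value.
        drop(old_inner);
    }
}
```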
@@ -2784,38 +3108,29 @@ impl Clone for RawTable {
                 // SAFETY: If something gets wrong we clear our table right after
                 // dropping the elements, so there is no double drop, since `items`
                 // will be equal to zero.
-                self_.drop_elements();
+                self_.table.drop_elements::<T>();
 
                 // If necessary, resize our table to match the source.
                 if self_.buckets() != source.buckets() {
-                    // Skip our drop by using ptr::write.
-                    if !self_.table.is_empty_singleton() {
-                        // SAFETY: We have verified that the table is allocated.
-                        self_.free_buckets();
+                    let new_inner = match RawTableInner::new_uninitialized(
+                        &self_.alloc,
+                        Self::TABLE_LAYOUT,
+                        source.buckets(),
+                        Fallibility::Infallible,
+                    ) {
+                        Ok(table) => table,
+                        Err(_) => hint::unreachable_unchecked(),
+                    };
+                    // Replace the old inner with the new uninitialized one. This is fine,
+                    // since if something goes wrong the `ScopeGuard` will initialize all
+                    // control bytes and leave an empty table.
+                    let mut old_inner = mem::replace(&mut self_.table, new_inner);
+                    if !old_inner.is_empty_singleton() {
+                        // SAFETY:
+                        // 1. We have checked that our table is allocated.
+                        // 2. We know for sure that `alloc` and `table_layout` match
+                        //    the [`Allocator`] and [`TableLayout`] that were used to
+                        //    allocate this table.
+                        old_inner.free_buckets(&self_.alloc, Self::TABLE_LAYOUT);
                     }
-                    // Let's read `alloc` for reusing in new table allocator
-                    // SAFETY:
-                    // * `&mut self_.table.alloc` is valid for reading, properly
-                    //   aligned, and points to a properly initialized value as
-                    //   it is derived from a reference.
-                    //
-                    // * We want to overwrite our own table.
-                    let alloc = ptr::read(&self_.table.alloc);
-                    (&mut **self_ as *mut Self).write(
-                        // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
-                        //
-                        // SAFETY: This is safe as we are taking the size of an already allocated table
-                        // and therefore сapacity overflow cannot occur, `self.table.buckets()` is power
-                        // of two and all allocator errors will be caught inside `RawTableInner::new_uninitialized`.
-                        match Self::new_uninitialized(
-                            alloc,
-                            source.buckets(),
-                            Fallibility::Infallible,
-                        ) {
-                            Ok(table) => table,
-                            Err(_) => hint::unreachable_unchecked(),
-                        },
-                    );
                 }
 
             // Cloning elements may fail (the clone function may panic), but the `ScopeGuard`
@@ -2877,7 +3192,7 @@ impl RawTable {
             // to make sure we drop only the elements that have been
             // cloned so far.
             let mut guard = guard((0, &mut *self), |(index, self_)| {
-                if Self::DATA_NEEDS_DROP {
+                if T::NEEDS_DROP {
                     for i in 0..=*index {
                         if self_.is_bucket_full(i) {
                             self_.bucket(i).drop();
@@ -2947,7 +3262,7 @@ impl RawTable {
     }
 }
 
-impl<T, A: Allocator + Clone + Default> Default for RawTable<T, A> {
+impl<T, A: Allocator + Default> Default for RawTable<T, A> {
     #[inline]
     fn default() -> Self {
         Self::new_in(Default::default())
     }
 }
 
 #[cfg(feature = "nightly")]
-unsafe impl<#[may_dangle] T, A: Allocator + Clone> Drop for RawTable<T, A> {
+unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawTable<T, A> {
     #[cfg_attr(feature = "inline-more", inline)]
     fn drop(&mut self) {
-        if !self.table.is_empty_singleton() {
-            unsafe {
-                self.drop_elements();
-                self.free_buckets();
-            }
+        unsafe {
+            // SAFETY:
+            // 1. We call the function only once;
+            // 2. We know for sure that `alloc` and `table_layout` match the [`Allocator`]
+            //    and [`TableLayout`] that were used to allocate this table;
+            // 3. If the drop function of any element panics, then only a memory leak will occur,
+            //    and we don't care because we are inside the `Drop` function of the `RawTable`,
+            //    so there won't be any table left in an inconsistent state.
+            self.table
+                .drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
         }
     }
 }
 #[cfg(not(feature = "nightly"))]
-impl<T, A: Allocator + Clone> Drop for RawTable<T, A> {
+impl<T, A: Allocator> Drop for RawTable<T, A> {
     #[cfg_attr(feature = "inline-more", inline)]
     fn drop(&mut self) {
-        if !self.table.is_empty_singleton() {
-            unsafe {
-                self.drop_elements();
-                self.free_buckets();
-            }
+        unsafe {
+            // SAFETY:
+            // 1. We call the function only once;
+            // 2. We know for sure that `alloc` and `table_layout` match the [`Allocator`]
+            //    and [`TableLayout`] that were used to allocate this table;
+            // 3. If the drop function of any element panics, then only a memory leak will occur,
+            //    and we don't care because we are inside the `Drop` function of the `RawTable`,
+            //    so there won't be any table left in an inconsistent state.
+            self.table
+                .drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
         }
     }
 }
 
-impl<T, A: Allocator + Clone> IntoIterator for RawTable<T, A> {
+impl<T, A: Allocator> IntoIterator for RawTable<T, A> {
     type Item = T;
     type IntoIter = RawIntoIter<T, A>;
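The `T::NEEDS_DROP` used in the clone guard above (and in `RawIter::drop_elements` below) replaces the iterator-local `DATA_NEEDS_DROP` constant. That form comes from a small blanket trait in the spirit of std's private `SizedTypeProperties`; a sketch of the pattern (trait name assumed, hashbrown's actual helper may differ in detail):

```rust
use core::mem;

// Blanket trait making `T::NEEDS_DROP` available on every sized type.
trait SizedTypeProperties: Sized {
    const NEEDS_DROP: bool = mem::needs_drop::<Self>();
}

impl<T> SizedTypeProperties for T {}

fn main() {
    // The constant folds at compile time, so guards like
    // `if T::NEEDS_DROP { ... }` cost nothing for plain-data types.
    assert!(!u32::NEEDS_DROP);
    assert!(String::NEEDS_DROP);
}
```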
@@ -3013,14 +3338,38 @@ pub(crate) struct RawIterRange {
 impl<T> RawIterRange<T> {
     /// Returns a `RawIterRange` covering a subset of a table.
     ///
-    /// The control byte address must be aligned to the group size.
+    /// # Safety
+    ///
+    /// If any of the following conditions are violated, the result is
+    /// [`undefined behavior`]:
+    ///
+    /// * `ctrl` must be [valid] for reads, i.e. the table outlives the `RawIterRange`;
+    ///
+    /// * `ctrl` must be properly aligned to the group size (`Group::WIDTH`);
+    ///
+    /// * `ctrl` must point to the array of properly initialized control bytes;
+    ///
+    /// * `data` must be the [`Bucket`] at the `ctrl` index in the table;
+    ///
+    /// * The value of `len` must be less than or equal to the number of table buckets,
+    ///   and the returned value of `ctrl.as_ptr().add(len).offset_from(ctrl.as_ptr())`
+    ///   must be positive;
+    ///
+    /// * The `ctrl.add(len)` pointer must be either in bounds or one
+    ///   byte past the end of the same [allocated table];
+    ///
+    /// * The `len` must be a power of two.
+    ///
+    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
     #[cfg_attr(feature = "inline-more", inline)]
     unsafe fn new(ctrl: *const u8, data: Bucket<T>, len: usize) -> Self {
         debug_assert_ne!(len, 0);
         debug_assert_eq!(ctrl as usize % Group::WIDTH, 0);
+        // SAFETY: The caller must uphold the safety rules for [`RawIterRange::new`]
         let end = ctrl.add(len);
 
         // Load the first group and advance ctrl to point to the next group
+        // SAFETY: The caller must uphold the safety rules for [`RawIterRange::new`]
         let current_group = Group::load_aligned(ctrl).match_full();
         let next_ctrl = ctrl.add(Group::WIDTH);
@@ -3164,8 +3513,6 @@ pub struct RawIter {
 }
 
 impl<T> RawIter<T> {
-    const DATA_NEEDS_DROP: bool = mem::needs_drop::<T>();
-
     /// Refresh the iterator so that it reflects a removal from the given bucket.
     ///
     /// For the iterator to remain valid, this method must be called once
@@ -3281,7 +3628,7 @@ impl RawIter {
     }
 
     unsafe fn drop_elements(&mut self) {
-        if Self::DATA_NEEDS_DROP && self.len() != 0 {
+        if T::NEEDS_DROP && self.items != 0 {
             for item in self {
                 item.drop();
             }
         }
     }
@@ -3450,26 +3797,26 @@ impl ExactSizeIterator for FullBucketsIndices {}
 impl FusedIterator for FullBucketsIndices {}
 
 /// Iterator which consumes a table and returns elements.
-pub struct RawIntoIter<T, A: Allocator + Clone = Global> {
+pub struct RawIntoIter<T, A: Allocator = Global> {
     iter: RawIter<T>,
     allocation: Option<(NonNull<u8>, Layout, A)>,
     marker: PhantomData<T>,
 }
 
-impl<T, A: Allocator + Clone> RawIntoIter<T, A> {
+impl<T, A: Allocator> RawIntoIter<T, A> {
     #[cfg_attr(feature = "inline-more", inline)]
     pub fn iter(&self) -> RawIter<T> {
         self.iter.clone()
     }
 }
 
-unsafe impl<T, A: Allocator + Clone> Send for RawIntoIter<T, A>
+unsafe impl<T, A: Allocator> Send for RawIntoIter<T, A>
 where
     T: Send,
     A: Send,
 {
 }
-unsafe impl<T, A: Allocator + Clone> Sync for RawIntoIter<T, A>
+unsafe impl<T, A: Allocator> Sync for RawIntoIter<T, A>
 where
     T: Sync,
     A: Sync,
 {
 }
 
 #[cfg(feature = "nightly")]
-unsafe impl<#[may_dangle] T, A: Allocator + Clone> Drop for RawIntoIter<T, A> {
+unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawIntoIter<T, A> {
     #[cfg_attr(feature = "inline-more", inline)]
     fn drop(&mut self) {
         unsafe {
@@ -3492,7 +3839,7 @@ unsafe impl<#[may_dangle] T, A: Allocator + Clone> Drop for RawIntoIter {
     }
 }
 #[cfg(not(feature = "nightly"))]
-impl<T, A: Allocator + Clone> Drop for RawIntoIter<T, A> {
+impl<T, A: Allocator> Drop for RawIntoIter<T, A> {
     #[cfg_attr(feature = "inline-more", inline)]
     fn drop(&mut self) {
         unsafe {
@@ -3507,7 +3854,7 @@ impl Drop for RawIntoIter {
     }
 }
 
-impl<T, A: Allocator + Clone> Iterator for RawIntoIter<T, A> {
+impl<T, A: Allocator> Iterator for RawIntoIter<T, A> {
     type Item = T;
 
     #[cfg_attr(feature = "inline-more", inline)]
@@ -3521,45 +3868,45 @@ impl Iterator for RawIntoIter {
     }
 }
 
-impl<T, A: Allocator + Clone> ExactSizeIterator for RawIntoIter<T, A> {}
-impl<T, A: Allocator + Clone> FusedIterator for RawIntoIter<T, A> {}
+impl<T, A: Allocator> ExactSizeIterator for RawIntoIter<T, A> {}
+impl<T, A: Allocator> FusedIterator for RawIntoIter<T, A> {}
 
 /// Iterator which consumes elements without freeing the table storage.
-pub struct RawDrain<'a, T, A: Allocator + Clone = Global> { +pub struct RawDrain<'a, T, A: Allocator = Global> { iter: RawIter, // The table is moved into the iterator for the duration of the drain. This // ensures that an empty table is left if the drain iterator is leaked // without dropping. - table: ManuallyDrop>, - orig_table: NonNull>, + table: RawTableInner, + orig_table: NonNull, // We don't use a &'a mut RawTable because we want RawDrain to be // covariant over T. marker: PhantomData<&'a RawTable>, } -impl RawDrain<'_, T, A> { +impl RawDrain<'_, T, A> { #[cfg_attr(feature = "inline-more", inline)] pub fn iter(&self) -> RawIter { self.iter.clone() } } -unsafe impl Send for RawDrain<'_, T, A> +unsafe impl Send for RawDrain<'_, T, A> where T: Send, A: Send, { } -unsafe impl Sync for RawDrain<'_, T, A> +unsafe impl Sync for RawDrain<'_, T, A> where T: Sync, A: Sync, { } -impl Drop for RawDrain<'_, T, A> { +impl Drop for RawDrain<'_, T, A> { #[cfg_attr(feature = "inline-more", inline)] fn drop(&mut self) { unsafe { @@ -3573,12 +3920,12 @@ impl Drop for RawDrain<'_, T, A> { // Move the now empty table back to its original location. self.orig_table .as_ptr() - .copy_from_nonoverlapping(&*self.table, 1); + .copy_from_nonoverlapping(&self.table, 1); } } } -impl Iterator for RawDrain<'_, T, A> { +impl Iterator for RawDrain<'_, T, A> { type Item = T; #[cfg_attr(feature = "inline-more", inline)] @@ -3595,8 +3942,8 @@ impl Iterator for RawDrain<'_, T, A> { } } -impl ExactSizeIterator for RawDrain<'_, T, A> {} -impl FusedIterator for RawDrain<'_, T, A> {} +impl ExactSizeIterator for RawDrain<'_, T, A> {} +impl FusedIterator for RawDrain<'_, T, A> {} /// Iterator over occupied buckets that could match a given hash. /// @@ -3641,7 +3988,7 @@ struct RawIterHashInner { impl RawIterHash { #[cfg_attr(feature = "inline-more", inline)] #[cfg(feature = "raw")] - unsafe fn new(table: &RawTable, hash: u64) -> Self { + unsafe fn new(table: &RawTable, hash: u64) -> Self { RawIterHash { inner: RawIterHashInner::new(&table.table, hash), _marker: PhantomData, @@ -3651,7 +3998,7 @@ impl RawIterHash { impl RawIterHashInner { #[cfg_attr(feature = "inline-more", inline)] #[cfg(feature = "raw")] - unsafe fn new(table: &RawTableInner, hash: u64) -> Self { + unsafe fn new(table: &RawTableInner, hash: u64) -> Self { let h2_hash = h2(hash); let probe_seq = table.probe_seq(hash); let group = Group::load(table.ctrl(probe_seq.pos)); diff --git a/src/rustc_entry.rs b/src/rustc_entry.rs index 89447d27db..defbd4bb88 100644 --- a/src/rustc_entry.rs +++ b/src/rustc_entry.rs @@ -9,7 +9,7 @@ impl HashMap where K: Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { /// Gets the given key's corresponding entry in the map for in-place manipulation. /// @@ -62,7 +62,7 @@ where /// [`rustc_entry`]: struct.HashMap.html#method.rustc_entry pub enum RustcEntry<'a, K, V, A = Global> where - A: Allocator + Clone, + A: Allocator, { /// An occupied entry. 
Occupied(RustcOccupiedEntry<'a, K, V, A>), @@ -71,7 +71,7 @@ where Vacant(RustcVacantEntry<'a, K, V, A>), } -impl Debug for RustcEntry<'_, K, V, A> { +impl Debug for RustcEntry<'_, K, V, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { Vacant(ref v) => f.debug_tuple("Entry").field(v).finish(), @@ -86,7 +86,7 @@ impl Debug for RustcEntry<'_, K, V, A> /// [`RustcEntry`]: enum.RustcEntry.html pub struct RustcOccupiedEntry<'a, K, V, A = Global> where - A: Allocator + Clone, + A: Allocator, { key: Option, elem: Bucket<(K, V)>, @@ -97,18 +97,18 @@ unsafe impl Send for RustcOccupiedEntry<'_, K, V, A> where K: Send, V: Send, - A: Allocator + Clone + Send, + A: Allocator + Send, { } unsafe impl Sync for RustcOccupiedEntry<'_, K, V, A> where K: Sync, V: Sync, - A: Allocator + Clone + Sync, + A: Allocator + Sync, { } -impl Debug for RustcOccupiedEntry<'_, K, V, A> { +impl Debug for RustcOccupiedEntry<'_, K, V, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("OccupiedEntry") .field("key", self.key()) @@ -123,20 +123,20 @@ impl Debug for RustcOccupiedEntry<'_, /// [`RustcEntry`]: enum.RustcEntry.html pub struct RustcVacantEntry<'a, K, V, A = Global> where - A: Allocator + Clone, + A: Allocator, { hash: u64, key: K, table: &'a mut RawTable<(K, V), A>, } -impl Debug for RustcVacantEntry<'_, K, V, A> { +impl Debug for RustcVacantEntry<'_, K, V, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_tuple("VacantEntry").field(self.key()).finish() } } -impl<'a, K, V, A: Allocator + Clone> RustcEntry<'a, K, V, A> { +impl<'a, K, V, A: Allocator> RustcEntry<'a, K, V, A> { /// Sets the value of the entry, and returns a RustcOccupiedEntry. /// /// # Examples @@ -265,7 +265,7 @@ impl<'a, K, V, A: Allocator + Clone> RustcEntry<'a, K, V, A> { } } -impl<'a, K, V: Default, A: Allocator + Clone> RustcEntry<'a, K, V, A> { +impl<'a, K, V: Default, A: Allocator> RustcEntry<'a, K, V, A> { /// Ensures a value is in the entry by inserting the default value if empty, /// and returns a mutable reference to the value in the entry. /// @@ -293,7 +293,7 @@ impl<'a, K, V: Default, A: Allocator + Clone> RustcEntry<'a, K, V, A> { } } -impl<'a, K, V, A: Allocator + Clone> RustcOccupiedEntry<'a, K, V, A> { +impl<'a, K, V, A: Allocator> RustcOccupiedEntry<'a, K, V, A> { /// Gets a reference to the key in the entry. /// /// # Examples @@ -518,7 +518,7 @@ impl<'a, K, V, A: Allocator + Clone> RustcOccupiedEntry<'a, K, V, A> { } } -impl<'a, K, V, A: Allocator + Clone> RustcVacantEntry<'a, K, V, A> { +impl<'a, K, V, A: Allocator> RustcVacantEntry<'a, K, V, A> { /// Gets a reference to the key that would be used when inserting a value /// through the `RustcVacantEntry`. /// diff --git a/src/set.rs b/src/set.rs index 52f6fdaf21..0b8d9849b3 100644 --- a/src/set.rs +++ b/src/set.rs @@ -112,7 +112,7 @@ use crate::raw::{Allocator, Global}; /// [`HashMap`]: struct.HashMap.html /// [`PartialEq`]: https://doc.rust-lang.org/std/cmp/trait.PartialEq.html /// [`RefCell`]: https://doc.rust-lang.org/std/cell/struct.RefCell.html -pub struct HashSet { +pub struct HashSet { pub(crate) map: HashMap, } @@ -193,7 +193,7 @@ impl HashSet { } #[cfg(feature = "ahash")] -impl HashSet { +impl HashSet { /// Creates an empty `HashSet`. /// /// The hash set is initially created with a capacity of 0, so it will not allocate until it @@ -256,7 +256,7 @@ impl HashSet { } } -impl HashSet { +impl HashSet { /// Returns the number of elements the set can hold without reallocating. 
/// /// # Examples @@ -511,7 +511,7 @@ impl HashSet { impl HashSet where - A: Allocator + Clone, + A: Allocator, { /// Returns a reference to the underlying allocator. #[inline] @@ -619,7 +619,7 @@ impl HashSet where T: Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { /// Reserves capacity for at least `additional` more elements to be inserted /// in the `HashSet`. The collection may reserve more space to avoid @@ -1223,7 +1223,7 @@ where } } -impl HashSet { +impl HashSet { /// Returns a reference to the [`RawTable`] used underneath [`HashSet`]. /// This function is only available if the `raw` feature of the crate is enabled. /// @@ -1269,7 +1269,7 @@ impl PartialEq for HashSet where T: Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { fn eq(&self, other: &Self) -> bool { if self.len() != other.len() { @@ -1284,14 +1284,14 @@ impl Eq for HashSet where T: Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { } impl fmt::Debug for HashSet where T: fmt::Debug, - A: Allocator + Clone, + A: Allocator, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_set().entries(self.iter()).finish() @@ -1300,7 +1300,7 @@ where impl From> for HashSet where - A: Allocator + Clone, + A: Allocator, { fn from(map: HashMap) -> Self { Self { map } @@ -1311,7 +1311,7 @@ impl FromIterator for HashSet where T: Eq + Hash, S: BuildHasher + Default, - A: Default + Allocator + Clone, + A: Default + Allocator, { #[cfg_attr(feature = "inline-more", inline)] fn from_iter>(iter: I) -> Self { @@ -1326,7 +1326,7 @@ where impl From<[T; N]> for HashSet where T: Eq + Hash, - A: Default + Allocator + Clone, + A: Default + Allocator, { /// # Examples /// @@ -1346,7 +1346,7 @@ impl Extend for HashSet where T: Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { #[cfg_attr(feature = "inline-more", inline)] fn extend>(&mut self, iter: I) { @@ -1370,7 +1370,7 @@ impl<'a, T, S, A> Extend<&'a T> for HashSet where T: 'a + Eq + Hash + Copy, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { #[cfg_attr(feature = "inline-more", inline)] fn extend>(&mut self, iter: I) { @@ -1393,7 +1393,7 @@ where impl Default for HashSet where S: Default, - A: Default + Allocator + Clone, + A: Default + Allocator, { /// Creates an empty `HashSet` with the `Default` value for the hasher. 
#[cfg_attr(feature = "inline-more", inline)] @@ -1408,7 +1408,7 @@ impl BitOr<&HashSet> for &HashSet where T: Eq + Hash + Clone, S: BuildHasher + Default, - A: Allocator + Clone, + A: Allocator, { type Output = HashSet; @@ -1441,7 +1441,7 @@ impl BitAnd<&HashSet> for &HashSet where T: Eq + Hash + Clone, S: BuildHasher + Default, - A: Allocator + Clone, + A: Allocator, { type Output = HashSet; @@ -1552,7 +1552,7 @@ pub struct Iter<'a, K> { /// /// [`HashSet`]: struct.HashSet.html /// [`into_iter`]: struct.HashSet.html#method.into_iter -pub struct IntoIter { +pub struct IntoIter { iter: map::IntoIter, } @@ -1563,7 +1563,7 @@ pub struct IntoIter { /// /// [`HashSet`]: struct.HashSet.html /// [`drain`]: struct.HashSet.html#method.drain -pub struct Drain<'a, K, A: Allocator + Clone = Global> { +pub struct Drain<'a, K, A: Allocator = Global> { iter: map::Drain<'a, K, (), A>, } @@ -1575,7 +1575,7 @@ pub struct Drain<'a, K, A: Allocator + Clone = Global> { /// [`extract_if`]: struct.HashSet.html#method.extract_if /// [`HashSet`]: struct.HashSet.html #[must_use = "Iterators are lazy unless consumed"] -pub struct ExtractIf<'a, K, F, A: Allocator + Clone = Global> +pub struct ExtractIf<'a, K, F, A: Allocator = Global> where F: FnMut(&K) -> bool, { @@ -1590,7 +1590,7 @@ where /// /// [`HashSet`]: struct.HashSet.html /// [`intersection`]: struct.HashSet.html#method.intersection -pub struct Intersection<'a, T, S, A: Allocator + Clone = Global> { +pub struct Intersection<'a, T, S, A: Allocator = Global> { // iterator of the first set iter: Iter<'a, T>, // the second set @@ -1604,7 +1604,7 @@ pub struct Intersection<'a, T, S, A: Allocator + Clone = Global> { /// /// [`HashSet`]: struct.HashSet.html /// [`difference`]: struct.HashSet.html#method.difference -pub struct Difference<'a, T, S, A: Allocator + Clone = Global> { +pub struct Difference<'a, T, S, A: Allocator = Global> { // iterator of the first set iter: Iter<'a, T>, // the second set @@ -1618,7 +1618,7 @@ pub struct Difference<'a, T, S, A: Allocator + Clone = Global> { /// /// [`HashSet`]: struct.HashSet.html /// [`symmetric_difference`]: struct.HashSet.html#method.symmetric_difference -pub struct SymmetricDifference<'a, T, S, A: Allocator + Clone = Global> { +pub struct SymmetricDifference<'a, T, S, A: Allocator = Global> { iter: Chain, Difference<'a, T, S, A>>, } @@ -1629,11 +1629,11 @@ pub struct SymmetricDifference<'a, T, S, A: Allocator + Clone = Global> { /// /// [`HashSet`]: struct.HashSet.html /// [`union`]: struct.HashSet.html#method.union -pub struct Union<'a, T, S, A: Allocator + Clone = Global> { +pub struct Union<'a, T, S, A: Allocator = Global> { iter: Chain, Difference<'a, T, S, A>>, } -impl<'a, T, S, A: Allocator + Clone> IntoIterator for &'a HashSet { +impl<'a, T, S, A: Allocator> IntoIterator for &'a HashSet { type Item = &'a T; type IntoIter = Iter<'a, T>; @@ -1643,7 +1643,7 @@ impl<'a, T, S, A: Allocator + Clone> IntoIterator for &'a HashSet { } } -impl IntoIterator for HashSet { +impl IntoIterator for HashSet { type Item = T; type IntoIter = IntoIter; @@ -1709,7 +1709,7 @@ impl fmt::Debug for Iter<'_, K> { } } -impl Iterator for IntoIter { +impl Iterator for IntoIter { type Item = K; #[cfg_attr(feature = "inline-more", inline)] @@ -1725,22 +1725,22 @@ impl Iterator for IntoIter { self.iter.size_hint() } } -impl ExactSizeIterator for IntoIter { +impl ExactSizeIterator for IntoIter { #[cfg_attr(feature = "inline-more", inline)] fn len(&self) -> usize { self.iter.len() } } -impl FusedIterator for IntoIter {} +impl FusedIterator 
for IntoIter {} -impl fmt::Debug for IntoIter { +impl fmt::Debug for IntoIter { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let entries_iter = self.iter.iter().map(|(k, _)| k); f.debug_list().entries(entries_iter).finish() } } -impl Iterator for Drain<'_, K, A> { +impl Iterator for Drain<'_, K, A> { type Item = K; #[cfg_attr(feature = "inline-more", inline)] @@ -1756,22 +1756,22 @@ impl Iterator for Drain<'_, K, A> { self.iter.size_hint() } } -impl ExactSizeIterator for Drain<'_, K, A> { +impl ExactSizeIterator for Drain<'_, K, A> { #[cfg_attr(feature = "inline-more", inline)] fn len(&self) -> usize { self.iter.len() } } -impl FusedIterator for Drain<'_, K, A> {} +impl FusedIterator for Drain<'_, K, A> {} -impl fmt::Debug for Drain<'_, K, A> { +impl fmt::Debug for Drain<'_, K, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let entries_iter = self.iter.iter().map(|(k, _)| k); f.debug_list().entries(entries_iter).finish() } } -impl Iterator for ExtractIf<'_, K, F, A> +impl Iterator for ExtractIf<'_, K, F, A> where F: FnMut(&K) -> bool, { @@ -1790,9 +1790,9 @@ where } } -impl FusedIterator for ExtractIf<'_, K, F, A> where F: FnMut(&K) -> bool {} +impl FusedIterator for ExtractIf<'_, K, F, A> where F: FnMut(&K) -> bool {} -impl Clone for Intersection<'_, T, S, A> { +impl Clone for Intersection<'_, T, S, A> { #[cfg_attr(feature = "inline-more", inline)] fn clone(&self) -> Self { Intersection { @@ -1806,7 +1806,7 @@ impl<'a, T, S, A> Iterator for Intersection<'a, T, S, A> where T: Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { type Item = &'a T; @@ -1831,7 +1831,7 @@ impl fmt::Debug for Intersection<'_, T, S, A> where T: fmt::Debug + Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self.clone()).finish() @@ -1842,11 +1842,11 @@ impl FusedIterator for Intersection<'_, T, S, A> where T: Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { } -impl Clone for Difference<'_, T, S, A> { +impl Clone for Difference<'_, T, S, A> { #[cfg_attr(feature = "inline-more", inline)] fn clone(&self) -> Self { Difference { @@ -1860,7 +1860,7 @@ impl<'a, T, S, A> Iterator for Difference<'a, T, S, A> where T: Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { type Item = &'a T; @@ -1885,7 +1885,7 @@ impl FusedIterator for Difference<'_, T, S, A> where T: Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { } @@ -1893,14 +1893,14 @@ impl fmt::Debug for Difference<'_, T, S, A> where T: fmt::Debug + Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self.clone()).finish() } } -impl Clone for SymmetricDifference<'_, T, S, A> { +impl Clone for SymmetricDifference<'_, T, S, A> { #[cfg_attr(feature = "inline-more", inline)] fn clone(&self) -> Self { SymmetricDifference { @@ -1913,7 +1913,7 @@ impl<'a, T, S, A> Iterator for SymmetricDifference<'a, T, S, A> where T: Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { type Item = &'a T; @@ -1931,7 +1931,7 @@ impl FusedIterator for SymmetricDifference<'_, T, S, A> where T: Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { } @@ -1939,14 +1939,14 @@ impl fmt::Debug for SymmetricDifference<'_, T, S, A> where T: fmt::Debug + Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> 
fmt::Result { f.debug_list().entries(self.clone()).finish() } } -impl Clone for Union<'_, T, S, A> { +impl Clone for Union<'_, T, S, A> { #[cfg_attr(feature = "inline-more", inline)] fn clone(&self) -> Self { Union { @@ -1959,7 +1959,7 @@ impl FusedIterator for Union<'_, T, S, A> where T: Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { } @@ -1967,7 +1967,7 @@ impl fmt::Debug for Union<'_, T, S, A> where T: fmt::Debug + Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self.clone()).finish() @@ -1978,7 +1978,7 @@ impl<'a, T, S, A> Iterator for Union<'a, T, S, A> where T: Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { type Item = &'a T; @@ -2030,7 +2030,7 @@ where /// ``` pub enum Entry<'a, T, S, A = Global> where - A: Allocator + Clone, + A: Allocator, { /// An occupied entry. /// @@ -2063,7 +2063,7 @@ where Vacant(VacantEntry<'a, T, S, A>), } -impl fmt::Debug for Entry<'_, T, S, A> { +impl fmt::Debug for Entry<'_, T, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { Entry::Vacant(ref v) => f.debug_tuple("Entry").field(v).finish(), @@ -2108,11 +2108,11 @@ impl fmt::Debug for Entry<'_, T, S, A> { /// assert_eq!(set.get(&"c"), None); /// assert_eq!(set.len(), 2); /// ``` -pub struct OccupiedEntry<'a, T, S, A: Allocator + Clone = Global> { +pub struct OccupiedEntry<'a, T, S, A: Allocator = Global> { inner: map::OccupiedEntry<'a, T, (), S, A>, } -impl fmt::Debug for OccupiedEntry<'_, T, S, A> { +impl fmt::Debug for OccupiedEntry<'_, T, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("OccupiedEntry") .field("value", self.get()) @@ -2146,17 +2146,17 @@ impl fmt::Debug for OccupiedEntry<'_, T, /// } /// assert!(set.contains("b") && set.len() == 2); /// ``` -pub struct VacantEntry<'a, T, S, A: Allocator + Clone = Global> { +pub struct VacantEntry<'a, T, S, A: Allocator = Global> { inner: map::VacantEntry<'a, T, (), S, A>, } -impl fmt::Debug for VacantEntry<'_, T, S, A> { +impl fmt::Debug for VacantEntry<'_, T, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_tuple("VacantEntry").field(self.get()).finish() } } -impl<'a, T, S, A: Allocator + Clone> Entry<'a, T, S, A> { +impl<'a, T, S, A: Allocator> Entry<'a, T, S, A> { /// Sets the value of the entry, and returns an OccupiedEntry. /// /// # Examples @@ -2233,7 +2233,7 @@ impl<'a, T, S, A: Allocator + Clone> Entry<'a, T, S, A> { } } -impl OccupiedEntry<'_, T, S, A> { +impl OccupiedEntry<'_, T, S, A> { /// Gets a reference to the value in the entry. /// /// # Examples @@ -2320,7 +2320,7 @@ impl OccupiedEntry<'_, T, S, A> { } } -impl<'a, T, S, A: Allocator + Clone> VacantEntry<'a, T, S, A> { +impl<'a, T, S, A: Allocator> VacantEntry<'a, T, S, A> { /// Gets a reference to the value that would be used when inserting /// through the `VacantEntry`. 
/// @@ -2400,34 +2400,30 @@ fn assert_covariance() { fn iter<'a, 'new>(v: Iter<'a, &'static str>) -> Iter<'a, &'new str> { v } - fn into_iter<'new, A: Allocator + Clone>( - v: IntoIter<&'static str, A>, - ) -> IntoIter<&'new str, A> { + fn into_iter<'new, A: Allocator>(v: IntoIter<&'static str, A>) -> IntoIter<&'new str, A> { v } - fn difference<'a, 'new, A: Allocator + Clone>( + fn difference<'a, 'new, A: Allocator>( v: Difference<'a, &'static str, DefaultHashBuilder, A>, ) -> Difference<'a, &'new str, DefaultHashBuilder, A> { v } - fn symmetric_difference<'a, 'new, A: Allocator + Clone>( + fn symmetric_difference<'a, 'new, A: Allocator>( v: SymmetricDifference<'a, &'static str, DefaultHashBuilder, A>, ) -> SymmetricDifference<'a, &'new str, DefaultHashBuilder, A> { v } - fn intersection<'a, 'new, A: Allocator + Clone>( + fn intersection<'a, 'new, A: Allocator>( v: Intersection<'a, &'static str, DefaultHashBuilder, A>, ) -> Intersection<'a, &'new str, DefaultHashBuilder, A> { v } - fn union<'a, 'new, A: Allocator + Clone>( + fn union<'a, 'new, A: Allocator>( v: Union<'a, &'static str, DefaultHashBuilder, A>, ) -> Union<'a, &'new str, DefaultHashBuilder, A> { v } - fn drain<'new, A: Allocator + Clone>( - d: Drain<'static, &'static str, A>, - ) -> Drain<'new, &'new str, A> { + fn drain<'new, A: Allocator>(d: Drain<'static, &'static str, A>) -> Drain<'new, &'new str, A> { d } }
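Stepping back from the diff: the net effect of the patch is visible at the API edge, where `A: Allocator` bounds no longer drag along `+ Clone`, so a map can own a single, deliberately non-`Clone` allocator. A sketch of what that enables (this assumes a nightly toolchain with `allocator_api` and hashbrown's `nightly` feature; under default features, hashbrown's `allocator-api2` traits play the same role, and the exact paths may differ):

```rust
#![feature(allocator_api)]

use std::alloc::{AllocError, Allocator, Global, Layout};
use std::ptr::NonNull;
use std::sync::atomic::{AtomicUsize, Ordering};

// Deliberately NOT `Clone`: delegates to `Global`, counting live allocations.
struct CountingAlloc {
    live: AtomicUsize,
}

unsafe impl Allocator for CountingAlloc {
    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        self.live.fetch_add(1, Ordering::Relaxed);
        Global.allocate(layout)
    }

    unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
        self.live.fetch_sub(1, Ordering::Relaxed);
        Global.deallocate(ptr, layout)
    }
}

fn main() {
    let alloc = CountingAlloc { live: AtomicUsize::new(0) };
    // Before this patch, storing `alloc` in the map required `Clone`;
    // now one owned instance moved into the map is enough.
    let mut map = hashbrown::HashMap::with_capacity_in(8, alloc);
    map.insert("key", 1);
    assert_eq!(map.get("key"), Some(&1));
}
```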