diff --git a/library/alloc/benches/btree/map.rs b/library/alloc/benches/btree/map.rs
index 7c2e5694a62fc..ccb04d09180d3 100644
--- a/library/alloc/benches/btree/map.rs
+++ b/library/alloc/benches/btree/map.rs
@@ -283,6 +283,8 @@ pub fn iter_1m(b: &mut Bencher) {
     bench_iter(b, 1_000, 1_000_000);
 }
 
+// On Windows, this is a factor 11 away from causing stack overflow,
+// where node size grows to roughly `node::CAPACITY * FAT * 8` = 300 KB.
 const FAT: usize = 256;
 
 // The returned map has small keys and values.
@@ -296,11 +298,6 @@ fn fat_val_map(n: usize) -> BTreeMap<usize, [usize; FAT]> {
     (0..n).map(|i| (i, [i; FAT])).collect::<BTreeMap<_, _>>()
 }
 
-// The returned map has large keys and values.
-fn fat_map(n: usize) -> BTreeMap<[usize; FAT], [usize; FAT]> {
-    (0..n).map(|i| ([i; FAT], [i; FAT])).collect::<BTreeMap<_, _>>()
-}
-
 #[bench]
 pub fn clone_slim_100(b: &mut Bencher) {
     let src = slim_map(100);
@@ -513,74 +510,3 @@ pub fn clone_fat_val_100_and_remove_half(b: &mut Bencher) {
         map
     })
 }
-
-#[bench]
-pub fn clone_fat_100(b: &mut Bencher) {
-    let src = fat_map(100);
-    b.iter(|| src.clone())
-}
-
-#[bench]
-pub fn clone_fat_100_and_clear(b: &mut Bencher) {
-    let src = fat_map(100);
-    b.iter(|| src.clone().clear())
-}
-
-#[bench]
-pub fn clone_fat_100_and_drain_all(b: &mut Bencher) {
-    let src = fat_map(100);
-    b.iter(|| src.clone().drain_filter(|_, _| true).count())
-}
-
-#[bench]
-pub fn clone_fat_100_and_drain_half(b: &mut Bencher) {
-    let src = fat_map(100);
-    b.iter(|| {
-        let mut map = src.clone();
-        assert_eq!(map.drain_filter(|i, _| i[0] % 2 == 0).count(), 100 / 2);
-        assert_eq!(map.len(), 100 / 2);
-    })
-}
-
-#[bench]
-pub fn clone_fat_100_and_into_iter(b: &mut Bencher) {
-    let src = fat_map(100);
-    b.iter(|| src.clone().into_iter().count())
-}
-
-#[bench]
-pub fn clone_fat_100_and_pop_all(b: &mut Bencher) {
-    let src = fat_map(100);
-    b.iter(|| {
-        let mut map = src.clone();
-        while map.pop_first().is_some() {}
-        map
-    });
-}
-
-#[bench]
-pub fn clone_fat_100_and_remove_all(b: &mut Bencher) {
-    let src = fat_map(100);
-    b.iter(|| {
-        let mut map = src.clone();
-        while let Some(elt) = map.iter().map(|(&i, _)| i).next() {
-            let v = map.remove(&elt);
-            debug_assert!(v.is_some());
-        }
-        map
-    });
-}
-
-#[bench]
-pub fn clone_fat_100_and_remove_half(b: &mut Bencher) {
-    let src = fat_map(100);
-    b.iter(|| {
-        let mut map = src.clone();
-        for i in (0..100).step_by(2) {
-            let v = map.remove(&[i; FAT]);
-            debug_assert!(v.is_some());
-        }
-        assert_eq!(map.len(), 100 / 2);
-        map
-    })
-}
diff --git a/library/alloc/src/collections/btree/map/tests.rs b/library/alloc/src/collections/btree/map/tests.rs
index f92aed8ce15bf..dcf88daae31ba 100644
--- a/library/alloc/src/collections/btree/map/tests.rs
+++ b/library/alloc/src/collections/btree/map/tests.rs
@@ -137,8 +137,9 @@ impl<'a, K: 'a, V: 'a> NodeRef<marker::Immut<'a>, K, V, marker::LeafOrInternal>
     }
 }
 
-// Tests our value of MIN_INSERTS_HEIGHT_2. It may change according to the
-// implementation of insertion, but it's best to be aware of when it does.
+// Tests our value of MIN_INSERTS_HEIGHT_2. Failure may mean you just need to
+// adapt that value to match a change in node::CAPACITY or the choices made
+// during insertion, otherwise other test cases may fail or be less useful.
 #[test]
 fn test_levels() {
     let mut map = BTreeMap::new();
diff --git a/library/alloc/src/collections/btree/node/tests.rs b/library/alloc/src/collections/btree/node/tests.rs
index 48ce9f2bd89c8..43d24809251dc 100644
--- a/library/alloc/src/collections/btree/node/tests.rs
+++ b/library/alloc/src/collections/btree/node/tests.rs
@@ -113,7 +113,7 @@ fn test_partial_cmp_eq() {
 #[cfg(target_arch = "x86_64")]
 fn test_sizes() {
     assert_eq!(core::mem::size_of::<LeafNode<(), ()>>(), 16);
-    assert_eq!(core::mem::size_of::<LeafNode<i64, i64>>(), 16 + CAPACITY * 8 * 2);
-    assert_eq!(core::mem::size_of::<InternalNode<(), ()>>(), 112);
-    assert_eq!(core::mem::size_of::<InternalNode<i64, i64>>(), 112 + CAPACITY * 8 * 2);
+    assert_eq!(core::mem::size_of::<LeafNode<i64, i64>>(), 16 + CAPACITY * 2 * 8);
+    assert_eq!(core::mem::size_of::<InternalNode<(), ()>>(), 16 + (CAPACITY + 1) * 8);
+    assert_eq!(core::mem::size_of::<InternalNode<i64, i64>>(), 16 + (CAPACITY * 3 + 1) * 8);
 }
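
As a sanity check on the rewritten `test_sizes` formulas, here is a minimal sketch that reproduces the arithmetic with simplified stand-ins for the private `LeafNode` and `InternalNode` types. It assumes `B = 6` (hence `CAPACITY = 2 * B - 1 = 11`) and 8-byte pointers as on x86_64; the field names and exact layout are illustrative, not the real private definitions:

```rust
use core::mem::{size_of, MaybeUninit};
use core::ptr::NonNull;

const B: usize = 6; // assumed branching factor, matching std at the time
const CAPACITY: usize = 2 * B - 1; // 11 key-value pairs per node

// Illustrative stand-in for the leaf node layout: a 16-byte header
// (parent pointer, parent index, length, padding) followed by
// CAPACITY keys and CAPACITY values.
#[allow(dead_code)]
struct LeafNode<K, V> {
    parent: Option<NonNull<InternalNode<K, V>>>, // 8 bytes on 64-bit
    parent_idx: MaybeUninit<u16>,                // 2 bytes
    len: u16,                                    // 2 bytes (+ 4 padding)
    keys: [MaybeUninit<K>; CAPACITY],
    vals: [MaybeUninit<V>; CAPACITY],
}

// An internal node is a leaf plus CAPACITY + 1 edge pointers.
#[allow(dead_code)]
struct InternalNode<K, V> {
    data: LeafNode<K, V>,
    edges: [MaybeUninit<NonNull<LeafNode<K, V>>>; 2 * B],
}

fn main() {
    // Zero-sized keys and values leave only the 16-byte header.
    assert_eq!(size_of::<LeafNode<(), ()>>(), 16);
    // i64 keys and values add CAPACITY * 2 eight-byte words.
    assert_eq!(size_of::<LeafNode<i64, i64>>(), 16 + CAPACITY * 2 * 8);
    // The edge array adds CAPACITY + 1 more words, so with i64 keys and
    // values an internal node carries CAPACITY * 3 + 1 words past the header.
    assert_eq!(size_of::<InternalNode<(), ()>>(), 16 + (CAPACITY + 1) * 8);
    assert_eq!(size_of::<InternalNode<i64, i64>>(), 16 + (CAPACITY * 3 + 1) * 8);
}
```

Note that `16 + (CAPACITY + 1) * 8` with `CAPACITY = 11` is exactly the 112 that was previously hard-coded, so the rewritten assertions check the same sizes while surviving a change of `B` or `node::CAPACITY`, in the same spirit as the updated MIN_INSERTS_HEIGHT_2 comment above.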