diff --git a/agate_bench/src/main.rs b/agate_bench/src/main.rs
index cf83f042..17e3b4d9 100644
--- a/agate_bench/src/main.rs
+++ b/agate_bench/src/main.rs
@@ -55,6 +55,7 @@ fn main() {
             (@arg key_nums: --key_nums +takes_value default_value("1024") "key numbers")
             (@arg batch_size: --batch_size +takes_value default_value("1000") "pairs in one txn")
             (@arg value_size: --value_size +takes_value default_value("1024") "value size")
+            (@arg seq: --seq +takes_value default_value("true") "write sequentially")
         )
         (@subcommand randread =>
             (about: "randomly read from database")
@@ -79,6 +80,7 @@ fn main() {
             (@arg key_nums: --key_nums +takes_value default_value("1024") "key numbers")
             (@arg batch_size: --batch_size +takes_value default_value("1000") "pairs in one txn")
             (@arg value_size: --value_size +takes_value default_value("1024") "value size")
+            (@arg seq: --seq +takes_value default_value("true") "write sequentially")
         )
         (@subcommand rocks_randread =>
             (about: "randomly read from database")
@@ -119,13 +121,14 @@ fn main() {
             let key_nums: u64 = sub_matches.value_of("key_nums").unwrap().parse().unwrap();
             let batch_size: u64 = sub_matches.value_of("batch_size").unwrap().parse().unwrap();
             let value_size: usize = sub_matches.value_of("value_size").unwrap().parse().unwrap();
+            let seq: bool = sub_matches.value_of("seq").unwrap().parse().unwrap();
 
             let agate = Arc::new(agate_opts.open().unwrap());
             let chunk_size = key_nums / threads;
 
             let begin = Instant::now();
 
-            agate_populate(agate, key_nums, chunk_size, batch_size, value_size);
+            agate_populate(agate, key_nums, chunk_size, batch_size, value_size, seq);
 
             let cost = begin.elapsed();
 
@@ -199,13 +202,14 @@ fn main() {
             let key_nums: u64 = sub_matches.value_of("key_nums").unwrap().parse().unwrap();
             let batch_size: u64 = sub_matches.value_of("batch_size").unwrap().parse().unwrap();
             let value_size: usize = sub_matches.value_of("value_size").unwrap().parse().unwrap();
+            let seq: bool = sub_matches.value_of("seq").unwrap().parse().unwrap();
 
             let db = Arc::new(rocksdb::DB::open(&rocks_opts, &directory).unwrap());
             let chunk_size = key_nums / threads;
 
             let begin = Instant::now();
 
-            rocks_populate(db, key_nums, chunk_size, batch_size, value_size);
+            rocks_populate(db, key_nums, chunk_size, batch_size, value_size, seq);
 
             let cost = begin.elapsed();
 
diff --git a/benches/benches_agate_rocks.rs b/benches/benches_agate_rocks.rs
index b5ee3089..4abd3424 100644
--- a/benches/benches_agate_rocks.rs
+++ b/benches/benches_agate_rocks.rs
@@ -35,7 +35,7 @@ fn bench_agate(c: &mut Criterion) {
         ..Default::default()
     };
 
-    c.bench_function("agate populate small value", |b| {
+    c.bench_function("agate sequentially populate small value", |b| {
         b.iter_custom(|iters| {
             let mut total = Duration::new(0, 0);
 
@@ -44,7 +44,38 @@ fn bench_agate(c: &mut Criterion) {
                 let agate = Arc::new(opts.open().unwrap());
 
                 let now = Instant::now();
-                agate_populate(agate, KEY_NUMS, CHUNK_SIZE, BATCH_SIZE, SMALL_VALUE_SIZE);
+                agate_populate(
+                    agate,
+                    KEY_NUMS,
+                    CHUNK_SIZE,
+                    BATCH_SIZE,
+                    SMALL_VALUE_SIZE,
+                    true,
+                );
+                total = total.add(now.elapsed());
+            });
+
+            total
+        });
+    });
+
+    c.bench_function("agate randomly populate small value", |b| {
+        b.iter_custom(|iters| {
+            let mut total = Duration::new(0, 0);
+
+            (0..iters).into_iter().for_each(|_| {
+                remove_files(dir_path);
+                let agate = Arc::new(opts.open().unwrap());
+
+                let now = Instant::now();
+                agate_populate(
+                    agate,
+                    KEY_NUMS,
+                    CHUNK_SIZE,
+                    BATCH_SIZE,
+                    SMALL_VALUE_SIZE,
+                    false,
+                );
                 total = total.add(now.elapsed());
             });
 
@@ -72,7 +103,31 @@ fn bench_agate(c: &mut Criterion) {
     opts.dir = dir_path.to_path_buf();
     opts.value_dir = dir_path.to_path_buf();
 
-    c.bench_function("agate populate large value", |b| {
+    c.bench_function("agate sequentially populate large value", |b| {
+        b.iter_custom(|iters| {
+            let mut total = Duration::new(0, 0);
+
+            (0..iters).into_iter().for_each(|_| {
+                remove_files(dir_path);
+                let agate = Arc::new(opts.open().unwrap());
+
+                let now = Instant::now();
+                agate_populate(
+                    agate,
+                    KEY_NUMS,
+                    CHUNK_SIZE,
+                    BATCH_SIZE,
+                    LARGE_VALUE_SIZE,
+                    true,
+                );
+                total = total.add(now.elapsed());
+            });
+
+            total
+        });
+    });
+
+    c.bench_function("agate randomly populate large value", |b| {
         b.iter_custom(|iters| {
             let mut total = Duration::new(0, 0);
 
@@ -81,7 +136,14 @@ fn bench_agate(c: &mut Criterion) {
                 let agate = Arc::new(opts.open().unwrap());
 
                 let now = Instant::now();
-                agate_populate(agate, KEY_NUMS, CHUNK_SIZE, BATCH_SIZE, LARGE_VALUE_SIZE);
+                agate_populate(
+                    agate,
+                    KEY_NUMS,
+                    CHUNK_SIZE,
+                    BATCH_SIZE,
+                    LARGE_VALUE_SIZE,
+                    false,
+                );
                 total = total.add(now.elapsed());
             });
 
@@ -113,7 +175,7 @@ fn bench_rocks(c: &mut Criterion) {
     opts.create_if_missing(true);
     opts.set_compression_type(rocksdb::DBCompressionType::None);
 
-    c.bench_function("rocks populate small value", |b| {
+    c.bench_function("rocks sequentially populate small value", |b| {
         b.iter_custom(|iters| {
             let mut total = Duration::new(0, 0);
 
@@ -122,7 +184,31 @@ fn bench_rocks(c: &mut Criterion) {
                 let db = Arc::new(rocksdb::DB::open(&opts, &dir).unwrap());
 
                 let now = Instant::now();
-                rocks_populate(db, KEY_NUMS, CHUNK_SIZE, BATCH_SIZE, SMALL_VALUE_SIZE);
+                rocks_populate(db, KEY_NUMS, CHUNK_SIZE, BATCH_SIZE, SMALL_VALUE_SIZE, true);
+                total = total.add(now.elapsed());
+            });
+
+            total
+        });
+    });
+
+    c.bench_function("rocks randomly populate small value", |b| {
+        b.iter_custom(|iters| {
+            let mut total = Duration::new(0, 0);
+
+            (0..iters).into_iter().for_each(|_| {
+                remove_files(dir_path);
+                let db = Arc::new(rocksdb::DB::open(&opts, &dir).unwrap());
+
+                let now = Instant::now();
+                rocks_populate(
+                    db,
+                    KEY_NUMS,
+                    CHUNK_SIZE,
+                    BATCH_SIZE,
+                    SMALL_VALUE_SIZE,
+                    false,
+                );
                 total = total.add(now.elapsed());
             });
 
@@ -146,7 +232,24 @@ fn bench_rocks(c: &mut Criterion) {
     let dir = TempDir::new("rocks-bench-large-value").unwrap();
     let dir_path = dir.path();
 
-    c.bench_function("rocks populate large value", |b| {
+    c.bench_function("rocks sequentially populate large value", |b| {
+        b.iter_custom(|iters| {
+            let mut total = Duration::new(0, 0);
+
+            (0..iters).into_iter().for_each(|_| {
+                remove_files(dir_path);
+                let db = Arc::new(rocksdb::DB::open(&opts, &dir).unwrap());
+
+                let now = Instant::now();
+                rocks_populate(db, KEY_NUMS, CHUNK_SIZE, BATCH_SIZE, LARGE_VALUE_SIZE, true);
+                total = total.add(now.elapsed());
+            });
+
+            total
+        });
+    });
+
+    c.bench_function("rocks randomly populate large value", |b| {
         b.iter_custom(|iters| {
             let mut total = Duration::new(0, 0);
 
@@ -155,7 +258,14 @@ fn bench_rocks(c: &mut Criterion) {
                 let db = Arc::new(rocksdb::DB::open(&opts, &dir).unwrap());
 
                 let now = Instant::now();
-                rocks_populate(db, KEY_NUMS, CHUNK_SIZE, BATCH_SIZE, LARGE_VALUE_SIZE);
+                rocks_populate(
+                    db,
+                    KEY_NUMS,
+                    CHUNK_SIZE,
+                    BATCH_SIZE,
+                    LARGE_VALUE_SIZE,
+                    false,
+                );
                 total = total.add(now.elapsed());
             });
 
diff --git a/benches/common.rs b/benches/common.rs
index cf60ac45..44e83e22 100644
--- a/benches/common.rs
+++ b/benches/common.rs
@@ -104,6 +104,7 @@ pub fn agate_populate(
     chunk_size: u64,
     batch_size: u64,
     value_size: usize,
+    seq: bool,
 ) {
     let mut handles = vec![];
 
@@ -111,13 +112,18 @@ pub fn agate_populate(
         let agate = agate.clone();
 
        handles.push(std::thread::spawn(move || {
+            let mut rng = rand::thread_rng();
            let range = chunk_start..chunk_start + chunk_size;
 
            for batch_start in range.step_by(batch_size as usize) {
                let mut txn = agate.new_transaction_at(unix_time(), true);
 
                (batch_start..batch_start + batch_size).for_each(|key| {
-                    let (key, value) = gen_kv_pair(key, value_size);
+                    let (key, value) = if seq {
+                        gen_kv_pair(key, value_size)
+                    } else {
+                        gen_kv_pair(rng.gen_range(0, key_nums), value_size)
+                    };
                    txn.set(key, value).unwrap();
                });
 
@@ -205,6 +211,7 @@ pub fn rocks_populate(
     chunk_size: u64,
     batch_size: u64,
     value_size: usize,
+    seq: bool,
 ) {
     let mut write_options = rocksdb::WriteOptions::default();
     write_options.set_sync(true);
@@ -221,10 +228,15 @@ pub fn rocks_populate(
            let range = chunk_start..chunk_start + chunk_size;
 
            for batch_start in range.step_by(batch_size as usize) {
+                let mut rng = rand::thread_rng();
                let mut batch = rocksdb::WriteBatch::default();
 
                (batch_start..batch_start + batch_size).for_each(|key| {
-                    let (key, value) = gen_kv_pair(key, value_size);
+                    let (key, value) = if seq {
+                        gen_kv_pair(key, value_size)
+                    } else {
+                        gen_kv_pair(rng.gen_range(0, key_nums), value_size)
+                    };
                    batch.put(key, value);
                });