Skip to content

Commit

Permalink
ci(benchmarks): simplify benchmarks (#7939)
Browse files Browse the repository at this point in the history
Pure refactor. Use `bench_function` instead of `bench_with_input` and
just borrow the data from outside the closure. This shortens the code and (I
think) makes it easier to read.
  • Loading branch information
overlookmotel authored Dec 16, 2024
1 parent 3c73e86 commit 6f8bb1c
Show file tree
Hide file tree
Showing 6 changed files with 74 additions and 85 deletions.
3 changes: 2 additions & 1 deletion tasks/benchmark/benches/isolated_declarations.rs
Original file line number Diff line number Diff line change
Expand Up @@ -13,9 +13,10 @@ fn bench_isolated_declarations(criterion: &mut Criterion) {
);

let id = BenchmarkId::from_parameter(&file.file_name);
let source_text = file.source_text.as_str();
let source_type = SourceType::from_path(&file.file_name).unwrap();

group.bench_with_input(id, &file.source_text, |b, source_text| {
group.bench_function(id, |b| {
b.iter_with_large_drop(|| {
let allocator = Allocator::default();
let ParserReturn { program, .. } =
Expand Down
28 changes: 13 additions & 15 deletions tasks/benchmark/benches/lexer.rs
Original file line number Diff line number Diff line change
Expand Up @@ -23,22 +23,20 @@ fn bench_lexer(criterion: &mut Criterion) {
.collect::<Vec<_>>();

for file in files {
let id = BenchmarkId::from_parameter(&file.file_name);
let source_text = file.source_text.as_str();
let source_type = SourceType::from_path(&file.file_name).unwrap();
group.bench_with_input(
BenchmarkId::from_parameter(&file.file_name),
&file.source_text,
|b, source_text| {
// Do not include initializing allocator in benchmark.
// User code would likely reuse the same allocator over and over to parse multiple files,
// so we do the same here.
let mut allocator = Allocator::default();
b.iter(|| {
let mut lexer = Lexer::new_for_benchmarks(&allocator, source_text, source_type);
while lexer.next_token().kind != Kind::Eof {}
allocator.reset();
});
},
);
group.bench_function(id, |b| {
// Do not include initializing allocator in benchmark.
// User code would likely reuse the same allocator over and over to parse multiple files,
// so we do the same here.
let mut allocator = Allocator::default();
b.iter(|| {
let mut lexer = Lexer::new_for_benchmarks(&allocator, source_text, source_type);
while lexer.next_token().kind != Kind::Eof {}
allocator.reset();
});
});
}
group.finish();
}
Expand Down
37 changes: 17 additions & 20 deletions tasks/benchmark/benches/linter.rs
Original file line number Diff line number Diff line change
Expand Up @@ -24,27 +24,24 @@ fn bench_linter(criterion: &mut Criterion) {
}

for file in test_files {
let id = BenchmarkId::from_parameter(&file.file_name);
let source_text = file.source_text.as_str();
let source_type = SourceType::from_path(&file.file_name).unwrap();
group.bench_with_input(
BenchmarkId::from_parameter(&file.file_name),
&file.source_text,
|b, source_text| {
let allocator = Allocator::default();
let ret = Parser::new(&allocator, source_text, source_type).parse();
let path = Path::new("");
let semantic_ret = SemanticBuilder::new()
.with_build_jsdoc(true)
.with_scope_tree_child_ids(true)
.with_cfg(true)
.build(&ret.program);
let semantic = semantic_ret.semantic;
let module_record =
Arc::new(ModuleRecord::new(path, &ret.module_record, &semantic));
let semantic = Rc::new(semantic);
let linter = LinterBuilder::all().with_fix(FixKind::All).build();
b.iter(|| linter.run(path, Rc::clone(&semantic), Arc::clone(&module_record)));
},
);
group.bench_function(id, |b| {
let allocator = Allocator::default();
let ret = Parser::new(&allocator, source_text, source_type).parse();
let path = Path::new("");
let semantic_ret = SemanticBuilder::new()
.with_build_jsdoc(true)
.with_scope_tree_child_ids(true)
.with_cfg(true)
.build(&ret.program);
let semantic = semantic_ret.semantic;
let module_record = Arc::new(ModuleRecord::new(path, &ret.module_record, &semantic));
let semantic = Rc::new(semantic);
let linter = LinterBuilder::all().with_fix(FixKind::All).build();
b.iter(|| linter.run(path, Rc::clone(&semantic), Arc::clone(&module_record)));
});
}
group.finish();
}
Expand Down
36 changes: 17 additions & 19 deletions tasks/benchmark/benches/parser.rs
Original file line number Diff line number Diff line change
Expand Up @@ -7,26 +7,24 @@ use oxc_tasks_common::TestFiles;
fn bench_parser(criterion: &mut Criterion) {
let mut group = criterion.benchmark_group("parser");
for file in TestFiles::complicated().files() {
let id = BenchmarkId::from_parameter(&file.file_name);
let source_text = file.source_text.as_str();
let source_type = SourceType::from_path(&file.file_name).unwrap();
group.bench_with_input(
BenchmarkId::from_parameter(&file.file_name),
&file.source_text,
|b, source_text| {
// Do not include initializing allocator in benchmark.
// User code would likely reuse the same allocator over and over to parse multiple files,
// so we do the same here.
let mut allocator = Allocator::default();
b.iter(|| {
Parser::new(&allocator, source_text, source_type)
.with_options(ParseOptions {
parse_regular_expression: true,
..ParseOptions::default()
})
.parse();
allocator.reset();
});
},
);
group.bench_function(id, |b| {
// Do not include initializing allocator in benchmark.
// User code would likely reuse the same allocator over and over to parse multiple files,
// so we do the same here.
let mut allocator = Allocator::default();
b.iter(|| {
Parser::new(&allocator, source_text, source_type)
.with_options(ParseOptions {
parse_regular_expression: true,
..ParseOptions::default()
})
.parse();
allocator.reset();
});
});
}
group.finish();
}
Expand Down
23 changes: 10 additions & 13 deletions tasks/benchmark/benches/prettier.rs
Original file line number Diff line number Diff line change
Expand Up @@ -8,20 +8,17 @@ use oxc_tasks_common::TestFiles;
fn bench_prettier(criterion: &mut Criterion) {
let mut group = criterion.benchmark_group("prettier");
for file in TestFiles::minimal().files() {
let id = BenchmarkId::from_parameter(&file.file_name);
let source_text = file.source_text.as_str();
let source_type = SourceType::from_path(&file.file_name).unwrap();
group.bench_with_input(
BenchmarkId::from_parameter(&file.file_name),
&file.source_text,
|b, source_text| {
b.iter(|| {
let allocator1 = Allocator::default();
let allocator2 = Allocator::default();
let ret = Parser::new(&allocator1, source_text, source_type).parse();
let _ =
Prettier::new(&allocator2, PrettierOptions::default()).build(&ret.program);
});
},
);
group.bench_function(id, |b| {
b.iter(|| {
let allocator1 = Allocator::default();
let allocator2 = Allocator::default();
let ret = Parser::new(&allocator1, source_text, source_type).parse();
let _ = Prettier::new(&allocator2, PrettierOptions::default()).build(&ret.program);
});
});
}
group.finish();
}
Expand Down
32 changes: 15 additions & 17 deletions tasks/benchmark/benches/semantic.rs
Original file line number Diff line number Diff line change
Expand Up @@ -8,24 +8,22 @@ use oxc_tasks_common::TestFiles;
fn bench_semantic(criterion: &mut Criterion) {
let mut group = criterion.benchmark_group("semantic");
for file in TestFiles::complicated().files() {
let id = BenchmarkId::from_parameter(&file.file_name);
let source_text = file.source_text.as_str();
let source_type = SourceType::from_path(&file.file_name).unwrap();
group.bench_with_input(
BenchmarkId::from_parameter(&file.file_name),
&file.source_text,
|b, source_text| {
let allocator = Allocator::default();
let ret = Parser::new(&allocator, source_text, source_type).parse();
b.iter_with_large_drop(|| {
// We drop `Semantic` inside this closure as drop time is part of cost of using this API.
// We return `error`s to be dropped outside of the measured section, as usually
// code would have no errors. One of our benchmarks `cal.com.tsx` has a lot of errors,
// but that's atypical, so don't want to include it in benchmark time.
let ret = SemanticBuilder::new().with_build_jsdoc(true).build(&ret.program);
let ret = black_box(ret);
ret.errors
});
},
);
group.bench_function(id, |b| {
let allocator = Allocator::default();
let ret = Parser::new(&allocator, source_text, source_type).parse();
b.iter_with_large_drop(|| {
// We drop `Semantic` inside this closure as drop time is part of cost of using this API.
// We return `error`s to be dropped outside of the measured section, as usually
// code would have no errors. One of our benchmarks `cal.com.tsx` has a lot of errors,
// but that's atypical, so don't want to include it in benchmark time.
let ret = SemanticBuilder::new().with_build_jsdoc(true).build(&ret.program);
let ret = black_box(ret);
ret.errors
});
});
}
group.finish();
}
Expand Down

0 comments on commit 6f8bb1c

Please sign in to comment.