Skip to content

Commit

Permalink
introduce re_tracing (#2283)
Browse files Browse the repository at this point in the history
Finally biting the bullet, this has been eating me alive for way too
long 😈

The first commit is what matters, the second is all side effects.

(~1.4 K/D ratio btw 😎)
  • Loading branch information
teh-cmc authored May 31, 2023
1 parent 880aff0 commit ff3723b
Show file tree
Hide file tree
Showing 129 changed files with 427 additions and 597 deletions.
30 changes: 19 additions & 11 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

1 change: 1 addition & 0 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,7 @@ re_smart_channel = { path = "crates/re_smart_channel", version = "0.7.0-alpha.0"
re_string_interner = { path = "crates/re_string_interner", version = "0.7.0-alpha.0", default-features = false }
re_tensor_ops = { path = "crates/re_tensor_ops", version = "0.7.0-alpha.0", default-features = false }
re_time_panel = { path = "crates/re_time_panel", version = "=0.7.0-alpha.0", default-features = false }
re_tracing = { path = "crates/re_tracing", version = "0.7.0-alpha.0", default-features = false }
re_tuid = { path = "crates/re_tuid", version = "0.7.0-alpha.0", default-features = false }
re_ui = { path = "crates/re_ui", version = "0.7.0-alpha.0", default-features = false }
re_viewer = { path = "crates/re_viewer", version = "0.7.0-alpha.0", default-features = false }
Expand Down
5 changes: 1 addition & 4 deletions crates/re_arrow_store/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,7 @@ core_benchmarks_only = []
re_format.workspace = true
re_log_types.workspace = true
re_log.workspace = true
re_tracing.workspace = true

# External dependencies:
ahash.workspace = true
Expand All @@ -48,10 +49,6 @@ parking_lot.workspace = true
smallvec.workspace = true
thiserror.workspace = true

# Native dependencies:
[target.'cfg(not(target_arch = "wasm32"))'.dependencies]
puffin.workspace = true

# Optional dependencies:
polars-core = { workspace = true, optional = true, features = [
"diagonal_concat",
Expand Down
22 changes: 0 additions & 22 deletions crates/re_arrow_store/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -56,25 +56,3 @@ pub use re_log_types::{TimeInt, TimeRange, TimeType, Timeline}; // for politenes
pub mod external {
pub use arrow2;
}

// ---

/// Native-only profiling macro for puffin.
#[doc(hidden)]
#[macro_export]
macro_rules! profile_function {
($($arg: tt)*) => {
#[cfg(not(target_arch = "wasm32"))]
puffin::profile_function!($($arg)*);
};
}

/// Native-only profiling macro for puffin.
#[doc(hidden)]
#[macro_export]
macro_rules! profile_scope {
($($arg: tt)*) => {
#[cfg(not(target_arch = "wasm32"))]
puffin::profile_scope!($($arg)*);
};
}
2 changes: 1 addition & 1 deletion crates/re_arrow_store/src/store.rs
Original file line number Diff line number Diff line change
Expand Up @@ -286,7 +286,7 @@ impl DataStore {
///
/// Useful to call after a gc.
pub fn oldest_time_per_timeline(&self) -> BTreeMap<Timeline, TimeInt> {
crate::profile_function!();
re_tracing::profile_function!();

let mut oldest_time_per_timeline = BTreeMap::default();

Expand Down
10 changes: 5 additions & 5 deletions crates/re_arrow_store/src/store_arrow.rs
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ impl IndexedBucket {
/// - `$cluster_key`
/// - rest of component columns in ascending lexical order
pub fn serialize(&self) -> DataTableResult<(Schema, Chunk<Box<dyn Array>>)> {
crate::profile_function!();
re_tracing::profile_function!();

let Self {
timeline,
Expand Down Expand Up @@ -63,7 +63,7 @@ impl PersistentIndexedTable {
/// - `$cluster_key`
/// - rest of component columns in ascending lexical order
pub fn serialize(&self) -> DataTableResult<(Schema, Chunk<Box<dyn Array>>)> {
crate::profile_function!();
re_tracing::profile_function!();

let Self {
ent_path: _,
Expand Down Expand Up @@ -95,7 +95,7 @@ fn serialize(
col_num_instances: &[u32],
table: &IntMap<ComponentName, DataCellColumn>,
) -> DataTableResult<(Schema, Chunk<Box<dyn Array>>)> {
crate::profile_function!();
re_tracing::profile_function!();

let mut schema = Schema::default();
let mut columns = Vec::new();
Expand Down Expand Up @@ -129,7 +129,7 @@ fn serialize_control_columns(
col_row_id: &[RowId],
col_num_instances: &[u32],
) -> DataTableResult<(Schema, Vec<Box<dyn Array>>)> {
crate::profile_function!();
re_tracing::profile_function!();

let mut schema = Schema::default();
let mut columns = Vec::new();
Expand Down Expand Up @@ -175,7 +175,7 @@ fn serialize_data_columns(
cluster_key: &ComponentName,
table: &IntMap<ComponentName, DataCellColumn>,
) -> DataTableResult<(Schema, Vec<Box<dyn Array>>)> {
crate::profile_function!();
re_tracing::profile_function!();

let mut schema = Schema::default();
let mut columns = Vec::new();
Expand Down
10 changes: 5 additions & 5 deletions crates/re_arrow_store/src/store_dump.rs
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@ impl DataStore {

fn dump_timeless_tables(&self) -> impl Iterator<Item = DataTable> + '_ {
self.timeless_tables.values().map(|table| {
crate::profile_scope!("timeless_table");
re_tracing::profile_scope!("timeless_table");

let PersistentIndexedTable {
ent_path,
Expand All @@ -58,10 +58,10 @@ impl DataStore {

fn dump_temporal_tables(&self) -> impl Iterator<Item = DataTable> + '_ {
self.tables.values().flat_map(|table| {
crate::profile_scope!("temporal_table");
re_tracing::profile_scope!("temporal_table");

table.buckets.values().map(move |bucket| {
crate::profile_scope!("temporal_bucket");
re_tracing::profile_scope!("temporal_bucket");

bucket.sort_indices_if_needed();

Expand Down Expand Up @@ -105,14 +105,14 @@ impl DataStore {
self.tables
.values()
.filter_map(move |table| {
crate::profile_scope!("temporal_table_filtered");
re_tracing::profile_scope!("temporal_table_filtered");

if table.timeline != timeline_filter {
return None;
}

Some(table.buckets.values().filter_map(move |bucket| {
crate::profile_scope!("temporal_bucket_filtered");
re_tracing::profile_scope!("temporal_bucket_filtered");

bucket.sort_indices_if_needed();

Expand Down
8 changes: 4 additions & 4 deletions crates/re_arrow_store/src/store_gc.rs
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,7 @@ impl DataStore {
//
// TODO(#1823): Workload specific optimizations.
pub fn gc(&mut self, target: GarbageCollectionTarget) -> (Vec<RowId>, DataStoreStats) {
crate::profile_function!();
re_tracing::profile_function!();

self.gc_id += 1;

Expand Down Expand Up @@ -126,7 +126,7 @@ impl DataStore {
///
/// Returns the list of `RowId`s that were purged from the store.
fn gc_drop_at_least_num_bytes(&mut self, mut num_bytes_to_drop: f64) -> Vec<RowId> {
crate::profile_function!();
re_tracing::profile_function!();

let mut row_ids = Vec::new();

Expand Down Expand Up @@ -165,7 +165,7 @@ impl IndexedTable {
///
/// Returns how many bytes were actually dropped, or zero if the row wasn't found.
fn try_drop_row(&mut self, row_id: RowId, time: i64) -> u64 {
crate::profile_function!();
re_tracing::profile_function!();

let table_has_more_than_one_bucket = self.buckets.len() > 1;

Expand Down Expand Up @@ -211,7 +211,7 @@ impl IndexedBucketInner {
///
/// Returns how many bytes were actually dropped, or zero if the row wasn't found.
fn try_drop_row(&mut self, row_id: RowId, time: i64) -> u64 {
crate::profile_function!();
re_tracing::profile_function!();

self.sort();

Expand Down
Loading

1 comment on commit ff3723b

@github-actions
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Performance Alert ⚠️

Possible performance regression was detected for benchmark 'Rust Benchmark'.
The benchmark result of this commit is worse than the previous benchmark result, exceeding the threshold ratio of 1.25.

Benchmark suite Current: ff3723b Previous: 880aff0 Ratio
datastore/num_rows=1000/num_instances=1000/packed=false/latest_at_missing/primary/default 289 ns/iter (± 17) 226 ns/iter (± 5) 1.28
datastore/num_rows=1000/num_instances=1000/packed=false/latest_at_missing/secondaries/default 448 ns/iter (± 2) 351 ns/iter (± 2) 1.28
datastore/num_rows=1000/num_instances=1000/gc/default 2584090 ns/iter (± 12199) 1728512 ns/iter (± 2896) 1.49
mono_points_arrow/generate_message_bundles 37472995 ns/iter (± 739336) 28474068 ns/iter (± 804916) 1.32
mono_points_arrow/decode_message_bundles 80694839 ns/iter (± 718628) 60119110 ns/iter (± 407525) 1.34
mono_points_arrow_batched/generate_messages 5410000 ns/iter (± 299573) 3582443 ns/iter (± 20373) 1.51
mono_points_arrow_batched/encode_total 33621759 ns/iter (± 2390778) 23610641 ns/iter (± 112337) 1.42
mono_points_arrow_batched/decode_log_msg 506050 ns/iter (± 2286) 327527 ns/iter (± 677) 1.55
mono_points_arrow_batched/decode_total 9773770 ns/iter (± 331537) 7734531 ns/iter (± 26036) 1.26
batch_points_arrow/decode_log_msg 75044 ns/iter (± 250) 47140 ns/iter (± 117) 1.59
batch_points_arrow/decode_total 81861 ns/iter (± 233) 52488 ns/iter (± 119) 1.56
arrow_mono_points/insert 2717570098 ns/iter (± 13336823) 1767197306 ns/iter (± 7580273) 1.54
arrow_mono_points/query 1347242 ns/iter (± 22906) 951900 ns/iter (± 1816) 1.42
arrow_batch_points/query 16100 ns/iter (± 114) 12723 ns/iter (± 9) 1.27
arrow_batch_vecs/query 446250 ns/iter (± 1202) 321502 ns/iter (± 416) 1.39

This comment was automatically generated by a workflow using github-action-benchmark.

Please sign in to comment.