From e26a967318a66cdd08aa0b797a86632e61cef11e Mon Sep 17 00:00:00 2001 From: Clement Rey Date: Thu, 30 Mar 2023 13:08:48 +0200 Subject: [PATCH] wip --- Cargo.lock | 2 + crates/re_arrow_store/Cargo.toml | 2 + crates/re_arrow_store/benches/data_store.rs | 53 +- crates/re_arrow_store/src/lib.rs | 8 +- crates/re_arrow_store/src/polars_util.rs | 36 +- crates/re_arrow_store/src/store.rs | 978 ++++++++---------- crates/re_arrow_store/src/store_arrow.rs | 225 ++++ crates/re_arrow_store/src/store_format.rs | 319 +----- crates/re_arrow_store/src/store_gc.rs | 197 +--- crates/re_arrow_store/src/store_polars.rs | 144 ++- crates/re_arrow_store/src/store_read.rs | 886 +++++++--------- crates/re_arrow_store/src/store_sanity.rs | 386 +++---- crates/re_arrow_store/src/store_stats.rs | 355 +------ crates/re_arrow_store/src/store_write.rs | 957 ++++++----------- crates/re_arrow_store/src/test_util.rs | 73 +- crates/re_arrow_store/tests/correctness.rs | 151 ++- crates/re_arrow_store/tests/data_store.rs | 82 +- crates/re_arrow_store/tests/internals.rs | 12 +- crates/re_data_store/src/entity_properties.rs | 15 +- crates/re_data_store/src/log_db.rs | 66 +- .../re_log_types/src/component_types/mod.rs | 2 +- .../src/component_types/msg_id.rs | 4 + crates/re_log_types/src/data_cell.rs | 187 ++++ crates/re_log_types/src/data_row.rs | 34 +- crates/re_log_types/src/data_table.rs | 409 +++++--- crates/re_log_types/src/lib.rs | 9 +- crates/re_query/src/query.rs | 11 +- crates/re_query/src/range.rs | 28 +- crates/re_viewer/src/ui/memory_panel.rs | 44 +- 29 files changed, 2349 insertions(+), 3326 deletions(-) create mode 100644 crates/re_arrow_store/src/store_arrow.rs diff --git a/Cargo.lock b/Cargo.lock index 00e8f03c12947..d1010e7a110e5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3788,6 +3788,7 @@ dependencies = [ "ahash 0.8.2", "anyhow", "arrow2", + "arrow2_convert", "criterion", "document-features", "indent", @@ -3802,6 +3803,7 @@ dependencies = [ "re_format", "re_log", "re_log_types", + "smallvec", "static_assertions", "thiserror", ] diff --git a/crates/re_arrow_store/Cargo.toml b/crates/re_arrow_store/Cargo.toml index 8487f656b17f3..3d1e798ffe234 100644 --- a/crates/re_arrow_store/Cargo.toml +++ b/crates/re_arrow_store/Cargo.toml @@ -39,11 +39,13 @@ arrow2 = { workspace = true, features = [ "compute_concatenate", "compute_aggregate", ] } +arrow2_convert.workspace = true document-features = "0.2" indent = "0.1" itertools = { workspace = true } nohash-hasher = "0.2" parking_lot.workspace = true +smallvec = { version = "1.0", features = ["const_generics"]} static_assertions = "1.1" thiserror.workspace = true diff --git a/crates/re_arrow_store/benches/data_store.rs b/crates/re_arrow_store/benches/data_store.rs index cb85171147433..34531e8aef9d0 100644 --- a/crates/re_arrow_store/benches/data_store.rs +++ b/crates/re_arrow_store/benches/data_store.rs @@ -1,14 +1,15 @@ #[global_allocator] static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc; -use arrow2::array::{Array, UnionArray}; +use arrow2::array::UnionArray; use criterion::{criterion_group, criterion_main, Criterion}; use re_arrow_store::{DataStore, DataStoreConfig, LatestAtQuery, RangeQuery, TimeInt, TimeRange}; use re_log_types::{ component_types::{InstanceKey, Rect2D}, datagen::{build_frame_nr, build_some_instances, build_some_rects}, - Component as _, ComponentName, DataRow, DataTable, EntityPath, MsgId, TimeType, Timeline, + Component as _, ComponentName, DataCell, DataRow, DataTable, EntityPath, MsgId, TimeType, + Timeline, }; criterion_group!(benches, 
insert, latest_at, latest_at_missing, range); @@ -52,10 +53,7 @@ fn insert(c: &mut Criterion) { b.iter(|| { insert_table( DataStoreConfig { - index_bucket_nb_rows: num_rows_per_bucket, - component_bucket_nb_rows: num_rows_per_bucket, - index_bucket_size_bytes: u64::MAX, - component_bucket_size_bytes: u64::MAX, + indexed_bucket_num_rows: num_rows_per_bucket, ..Default::default() }, InstanceKey::name(), @@ -80,10 +78,11 @@ fn latest_at(c: &mut Criterion) { group.bench_function("default", |b| { let store = insert_table(Default::default(), InstanceKey::name(), &table); b.iter(|| { - let results = latest_data_at(&store, Rect2D::name(), &[Rect2D::name()]); - let rects = results[0] + let cells = latest_data_at(&store, Rect2D::name(), &[Rect2D::name()]); + let rects = cells[0] .as_ref() .unwrap() + .as_arrow_ref() .as_any() .downcast_ref::() .unwrap(); @@ -96,10 +95,7 @@ fn latest_at(c: &mut Criterion) { for num_rows_per_bucket in num_rows_per_bucket { let store = insert_table( DataStoreConfig { - index_bucket_nb_rows: num_rows_per_bucket, - component_bucket_nb_rows: num_rows_per_bucket, - index_bucket_size_bytes: u64::MAX, - component_bucket_size_bytes: u64::MAX, + indexed_bucket_num_rows: num_rows_per_bucket, ..Default::default() }, InstanceKey::name(), @@ -107,10 +103,11 @@ fn latest_at(c: &mut Criterion) { ); group.bench_function(format!("bucketsz={num_rows_per_bucket}"), |b| { b.iter(|| { - let results = latest_data_at(&store, Rect2D::name(), &[Rect2D::name()]); - let rects = results[0] + let cells = latest_data_at(&store, Rect2D::name(), &[Rect2D::name()]); + let rects = cells[0] .as_ref() .unwrap() + .as_arrow_ref() .as_any() .downcast_ref::() .unwrap(); @@ -161,10 +158,7 @@ fn latest_at_missing(c: &mut Criterion) { for num_rows_per_bucket in num_rows_per_bucket { let store = insert_table( DataStoreConfig { - index_bucket_nb_rows: num_rows_per_bucket, - component_bucket_nb_rows: num_rows_per_bucket, - index_bucket_size_bytes: u64::MAX, - component_bucket_size_bytes: u64::MAX, + indexed_bucket_num_rows: num_rows_per_bucket, ..Default::default() }, InstanceKey::name(), @@ -218,10 +212,7 @@ fn range(c: &mut Criterion) { for num_rows_per_bucket in num_rows_per_bucket { let store = insert_table( DataStoreConfig { - index_bucket_nb_rows: num_rows_per_bucket, - component_bucket_nb_rows: num_rows_per_bucket, - index_bucket_size_bytes: u64::MAX, - component_bucket_size_bytes: u64::MAX, + indexed_bucket_num_rows: num_rows_per_bucket, ..Default::default() }, InstanceKey::name(), @@ -229,14 +220,15 @@ fn range(c: &mut Criterion) { ); group.bench_function(format!("bucketsz={num_rows_per_bucket}"), |b| { b.iter(|| { - let msgs = range_data(&store, [Rect2D::name()]); - for (cur_time, (time, results)) in msgs.enumerate() { + let rows = range_data(&store, [Rect2D::name()]); + for (cur_time, (time, cells)) in rows.enumerate() { let time = time.unwrap(); assert_eq!(cur_time as i64, time.as_i64()); - let rects = results[0] + let rects = cells[0] .as_ref() .unwrap() + .as_arrow_ref() .as_any() .downcast_ref::() .unwrap(); @@ -287,26 +279,25 @@ fn latest_data_at( store: &DataStore, primary: ComponentName, secondaries: &[ComponentName; N], -) -> [Option>; N] { +) -> [Option; N] { let timeline_frame_nr = Timeline::new("frame_nr", TimeType::Sequence); let timeline_query = LatestAtQuery::new(timeline_frame_nr, (NUM_ROWS / 2).into()); let ent_path = EntityPath::from("rects"); - let row_indices = store + store .latest_at(&timeline_query, &ent_path, primary, secondaries) - .unwrap_or_else(|| [(); N].map(|_| None)); - 
store.get(secondaries, &row_indices) + .unwrap_or_else(|| [(); N].map(|_| None)) } fn range_data( store: &DataStore, components: [ComponentName; N], -) -> impl Iterator, [Option>; N])> + '_ { +) -> impl Iterator, [Option; N])> + '_ { let timeline_frame_nr = Timeline::new("frame_nr", TimeType::Sequence); let query = RangeQuery::new(timeline_frame_nr, TimeRange::new(0.into(), NUM_ROWS.into())); let ent_path = EntityPath::from("rects"); store .range(&query, &ent_path, components) - .map(move |(time, _, row_indices)| (time, store.get(&components, &row_indices))) + .map(move |(time, _, cells)| (time, cells)) } diff --git a/crates/re_arrow_store/src/lib.rs b/crates/re_arrow_store/src/lib.rs index 3f2e5454caaa1..978a8b0b4a8b1 100644 --- a/crates/re_arrow_store/src/lib.rs +++ b/crates/re_arrow_store/src/lib.rs @@ -16,6 +16,7 @@ mod arrow_util; mod store; +mod store_arrow; mod store_format; mod store_gc; mod store_read; @@ -33,17 +34,14 @@ pub mod polars_util; pub mod test_util; pub use self::arrow_util::ArrayExt; -pub use self::store::{ - DataStore, DataStoreConfig, IndexBucket, IndexRowNr, IndexTable, RowIndex, RowIndexKind, -}; +pub use self::store::{DataStore, DataStoreConfig}; pub use self::store_gc::GarbageCollectionTarget; pub use self::store_read::{LatestAtQuery, RangeQuery}; pub use self::store_stats::DataStoreStats; pub use self::store_write::{WriteError, WriteResult}; pub(crate) use self::store::{ - ComponentBucket, ComponentTable, IndexBucketIndices, PersistentComponentTable, - PersistentIndexTable, SecondaryIndex, TimeIndex, + IndexedBucket, IndexedBucketInner, IndexedTable, PersistentIndexedTable, }; // Re-exports diff --git a/crates/re_arrow_store/src/polars_util.rs b/crates/re_arrow_store/src/polars_util.rs index d98a6c59e0815..02642c123d04d 100644 --- a/crates/re_arrow_store/src/polars_util.rs +++ b/crates/re_arrow_store/src/polars_util.rs @@ -1,8 +1,7 @@ -use arrow2::array::Array; use itertools::Itertools; use polars_core::{prelude::*, series::Series}; use polars_ops::prelude::*; -use re_log_types::{ComponentName, EntityPath, TimeInt}; +use re_log_types::{ComponentName, DataCell, EntityPath, TimeInt}; use crate::{ArrayExt, DataStore, LatestAtQuery, RangeQuery}; @@ -38,12 +37,11 @@ pub fn latest_component( let cluster_key = store.cluster_key(); let components = &[cluster_key, primary]; - let row_indices = store + let cells = store .latest_at(query, ent_path, primary, components) - .unwrap_or([None; 2]); - let results = store.get(components, &row_indices); + .unwrap_or([(); 2].map(|_| None)); - dataframe_from_results(components, results) + dataframe_from_cells(&cells) } /// Queries any number of components and their cluster keys from their respective point-of-views, @@ -161,12 +159,11 @@ pub fn range_components<'a, const N: usize>( .chain( store .range(query, ent_path, components) - .map(move |(time, _, row_indices)| { - let results = store.get(&components, &row_indices); + .map(move |(time, _, cells)| { ( time, - row_indices[primary_col].is_some(), // is_primary - dataframe_from_results(&components, results), + cells[primary_col].is_some(), // is_primary + dataframe_from_cells(&cells), ) }), ) @@ -200,16 +197,19 @@ pub fn range_components<'a, const N: usize>( // --- Joins --- -pub fn dataframe_from_results( - components: &[ComponentName; N], - results: [Option>; N], +// TODO: none of this mess should be here + +pub fn dataframe_from_cells( + cells: &[Option; N], ) -> SharedResult { - let series: Result, _> = components + let series: Result, _> = cells .iter() - .zip(results) - 
.filter_map(|(component, col)| col.map(|col| (component, col))) - .map(|(&component, col)| { - Series::try_from((component.as_str(), col.as_ref().clean_for_polars())) + .flatten() + .map(|cell| { + Series::try_from(( + cell.component_name().as_str(), + cell.as_arrow_ref().clean_for_polars(), + )) }) .collect(); diff --git a/crates/re_arrow_store/src/store.rs b/crates/re_arrow_store/src/store.rs index 4d92abdd80a7d..09094bddcaf31 100644 --- a/crates/re_arrow_store/src/store.rs +++ b/crates/re_arrow_store/src/store.rs @@ -1,156 +1,36 @@ -use std::collections::{BTreeMap, HashMap, VecDeque}; -use std::num::NonZeroU64; +use std::collections::BTreeMap; use std::sync::atomic::AtomicU64; -use arrow2::array::{Array, Int64Array}; +use ahash::HashMap; +use arrow2::array::Int64Array; use arrow2::datatypes::{DataType, TimeUnit}; +use smallvec::SmallVec; use nohash_hasher::{IntMap, IntSet}; use parking_lot::RwLock; use re_log_types::{ - ComponentName, EntityPath, EntityPathHash, MsgId, TimeInt, TimePoint, TimeRange, Timeline, + ComponentName, DataCell, DataCellColumn, EntityPath, EntityPathHash, ErasedTimeVec, + NumInstancesVec, RowId, RowIdVec, TimeInt, TimePoint, TimeRange, Timeline, }; -// --- Indices & offsets --- - -/// A vector of times. Our primary column, always densely filled. -pub type TimeIndex = Vec; - -/// A vector of references into the component tables. None = null. -// TODO(cmc): keeping a separate validity might be a better option, maybe. -pub type SecondaryIndex = Vec>; -static_assertions::assert_eq_size!(u64, Option); - -// TODO(#639): We desperately need to work on the terminology here: -// -// - `TimeIndex` is a vector of `TimeInt`s. -// It's the primary column and it's always dense. -// It's used to search the datastore by time. -// -// - `ComponentIndex` (currently `SecondaryIndex`) is a vector of `ComponentRowNr`s. -// It's the secondary column and is sparse. -// It's used to search the datastore by component once the search by time is complete. -// -// - `ComponentRowNr` (currently `RowIndex`) is a row offset into a component table. -// It only makes sense when associated with a component name. -// It is absolute. -// It's used to fetch actual data from the datastore. -// -// - `IndexRowNr` is a row offset into an index bucket. -// It only makes sense when associated with an entity path and a specific time. -// It is relative per bucket. -// It's used to tiebreak results with an identical time, should you need too. - -#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] -#[repr(u64)] -pub enum RowIndexKind { - Temporal = 0, - Timeless = 1, -} - -/// An opaque type that directly refers to a row of data within the datastore, iff it is -/// associated with a component name. -/// -/// See [`DataStore::latest_at`], [`DataStore::range`] & [`DataStore::get`]. -#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] -pub struct RowIndex(pub(crate) NonZeroU64); - -impl RowIndex { - const KIND_MASK: u64 = 0x8000_0000_0000_0000; - - /// Panics if `v` is 0. - /// In debug, panics if `v` has its most significant bit set. 
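// Illustrative sketch of the arrow2 -> polars conversion that `dataframe_from_cells`
// relies on: each cell's arrow array becomes a `Series` via
// `Series::try_from((name, array))` (the same call used above, minus the
// `clean_for_polars` pass), and the resulting series are gathered into a `DataFrame`.
// Column names and values here are made up for illustration only.
use arrow2::array::{Array, Int64Array};
use polars_core::prelude::*;

fn example_arrays_to_dataframe() -> Result<DataFrame, PolarsError> {
    // Stand-ins for `cell.as_arrow_ref().clean_for_polars()`.
    let instances: Box<dyn Array> = Int64Array::from_vec(vec![0, 1, 2]).boxed();
    let rects: Box<dyn Array> = Int64Array::from_vec(vec![10, 20, 30]).boxed();

    let series = vec![
        Series::try_from(("rerun.instance_key", instances))?,
        Series::try_from(("rerun.rect2d", rects))?,
    ];

    DataFrame::new(series)
}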
- pub(crate) fn from_u63(kind: RowIndexKind, v: u64) -> Self { - debug_assert!(v & Self::KIND_MASK == 0); - - let v = v | ((kind as u64) << 63); - Self(v.try_into().unwrap()) - } - - pub(crate) fn as_u64(self) -> u64 { - self.0.get() & !Self::KIND_MASK - } - - pub(crate) fn kind(self) -> RowIndexKind { - match self.0.get() & Self::KIND_MASK > 0 { - false => RowIndexKind::Temporal, - true => RowIndexKind::Timeless, - } - } -} - -#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] -pub struct IndexRowNr(pub(crate) u64); +// TODO(cmc): Do a big pass for superfluous DataCell allocations all over the place. // --- Data store --- #[derive(Debug, Clone)] pub struct DataStoreConfig { - /// The maximum size of a component bucket before triggering a split. - /// Does not apply to timeless data. - /// - /// ⚠ When configuring this threshold, do keep in mind that component tables are shared - /// across all timelines and all entities! - /// - /// This effectively controls how fine grained the garbage collection of components is. - /// The lower the size, the more fine-grained the garbage collection is, at the cost of more - /// metadata overhead. - /// - /// Note that this cannot split a single huge row: if a user inserts a single row that's - /// larger than the threshold, then that bucket will become larger than the threshold, and - /// we will split from there on. - /// - /// See [`Self::DEFAULT`] for defaults. - pub component_bucket_size_bytes: u64, - - /// The maximum number of rows in a component bucket before triggering a split. + /// The maximum number of rows in an indexed bucket before triggering a split. /// Does not apply to timeless data. /// - /// ⚠ When configuring this threshold, do keep in mind that component tables are shared - /// across all timelines and all entities! - /// - /// This effectively controls how fine grained the garbage collection of components is. - /// The lower the number, the more fine-grained the garbage collection is, at the cost of more - /// metadata overhead. - /// - /// Note: since component buckets aren't sorted, the number of rows isn't necessarily a great - /// metric to use as a threshold, although we do expose it if only for symmetry. - /// Prefer using [`Self::component_bucket_size_bytes`], or both. - /// - /// See [`Self::DEFAULT`] for defaults. - pub component_bucket_nb_rows: u64, - - /// The maximum size of an index bucket before triggering a split. - /// Does not apply to timeless data. - /// - /// ⚠ When configuring this threshold, do keep in mind that index tables are always scoped + /// ⚠ When configuring this threshold, do keep in mind that indexed tables are always scoped /// to a specific timeline _and_ a specific entity. /// - /// This effectively controls two aspects of the runtime: - /// - how fine grained the garbage collection of indices is, - /// - and how many rows will have to be sorted in the worst case when an index gets out - /// of order. - /// The lower the size, the more fine-grained the garbage collection is and smaller the - /// number of rows to sort gets, at the cost of more metadata overhead. + /// This effectively puts an upper bound on the number of rows that need to be sorted when an + /// indexed bucket gets out of order. + /// This is a tradeoff: less rows means faster sorts at the cost of more metadata overhead. /// /// See [`Self::DEFAULT`] for defaults. - pub index_bucket_size_bytes: u64, - - /// The maximum number of rows in an index bucket before triggering a split. - /// Does not apply to timeless data. 
- /// - /// ⚠ When configuring this threshold, do keep in mind that index tables are always scoped - /// to a specific timeline _and_ a specific entity. - /// - /// This effectively controls two aspects of the runtime: - /// - how fine grained the garbage collection of indices is, - /// - and how many rows will have to be sorted in the worst case when an index gets out - /// of order. - /// The lower the size, the more fine-grained the garbage collection is and smaller the - /// number of rows to sort gets, at the cost of more metadata overhead. - /// - /// See [`Self::DEFAULT`] for defaults. - pub index_bucket_nb_rows: u64, + pub indexed_bucket_num_rows: u64, /// If enabled, will store the ID of the write request alongside the inserted data. /// @@ -158,15 +38,17 @@ pub struct DataStoreConfig { /// `u64` value stored per row. /// /// Enabled by default in debug builds. - /// - /// See [`DataStore::insert_id_key`]. pub store_insert_ids: bool, - /// Should soon-to-be inactive buckets be compacted before being archived? - pub enable_compaction: bool, + /// If enabled, the store will throw an error if and when it notices that a single component + /// type maps to more than one arrow datatype. + /// + /// Enabled by default in debug builds. + pub enable_typecheck: bool, } impl Default for DataStoreConfig { + #[inline] fn default() -> Self { Self::DEFAULT } @@ -174,21 +56,59 @@ impl Default for DataStoreConfig { impl DataStoreConfig { pub const DEFAULT: Self = Self { - component_bucket_size_bytes: 32 * 1024 * 1024, // 32MiB - component_bucket_nb_rows: u64::MAX, - index_bucket_size_bytes: 32 * 1024, // 32kiB - index_bucket_nb_rows: 1024, + indexed_bucket_num_rows: 1024, store_insert_ids: cfg!(debug_assertions), - // TODO(cmc): Compaction is disabled until we implement batching. - // See https://github.com/rerun-io/rerun/pull/1535 for rationale. - // - // This has no noticeable impact on performance. - enable_compaction: false, + enable_typecheck: cfg!(debug_assertions), }; } // --- +pub type InsertIdVec = SmallVec<[u64; 4]>; + +/// Keeps track of datatype information for all component types that have been written to the store +/// so far. +/// +/// See also [`DataStore::lookup_datatype`]. +#[derive(Default)] +pub struct DataTypeRegistry(IntMap); + +impl std::ops::Deref for DataTypeRegistry { + type Target = IntMap; + + #[inline] + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl std::ops::DerefMut for DataTypeRegistry { + #[inline] + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +/// Keeps track of arbitrary per-row metadata. +#[derive(Default)] +pub struct MetadataRegistry(HashMap); + +impl std::ops::Deref for MetadataRegistry { + type Target = HashMap; + + #[inline] + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl std::ops::DerefMut for MetadataRegistry { + #[inline] + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + /// A complete data store: covers all timelines, all entities, everything. /// /// ## Debugging @@ -213,48 +133,45 @@ pub struct DataStore { /// doing so as efficiently as possible. /// /// See [`Self::insert_row`] for more information. - pub(crate) cluster_key: ComponentName, + pub cluster_key: ComponentName, /// The configuration of the data store (e.g. bucket sizes). - pub(crate) config: DataStoreConfig, + pub config: DataStoreConfig, - /// Maps `MsgId`s to some metadata (just timepoints at the moment). 
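// A short sketch of how the slimmed-down `DataStoreConfig` is meant to be used,
// mirroring the benchmarks and the `datastore_docgen_indexed_table` test elsewhere
// in this patch. The bucket size of 128 is an arbitrary illustrative value.
fn example_store_with_small_buckets() -> DataStore {
    use re_log_types::{component_types::InstanceKey, Component as _};

    DataStore::new(
        InstanceKey::name(),
        DataStoreConfig {
            // Smaller indexed buckets mean cheaper re-sorts when rows arrive out of
            // order, at the cost of more per-bucket metadata.
            indexed_bucket_num_rows: 128,
            ..DataStoreConfig::DEFAULT
        },
    )
}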
+ /// Keeps track of datatype information for all component types that have been written to + /// the store so far. /// - /// `BTreeMap` because of garbage collection. - pub(crate) messages: BTreeMap, + /// See also [`Self::lookup_datatype`]. + pub type_registry: DataTypeRegistry, - /// Used to cache auto-generated cluster components, i.e. `[0]`, `[0, 1]`, `[0, 1, 2]`, etc - /// so that they can be properly deduplicated. - pub(crate) cluster_comp_cache: IntMap, - - /// Dedicated index tables for timeless data. Never garbage collected. + /// Keeps track of arbitrary per-row metadata. /// - /// See also `Self::indices`. - pub(crate) timeless_indices: IntMap, + /// Only used to map `RowId`s to their original [`TimePoint`]s at the moment. + pub metadata_registry: MetadataRegistry, - /// Dedicated component tables for timeless data. Never garbage collected. - /// - /// See also `Self::components`. - pub(crate) timeless_components: IntMap, + /// Used to cache auto-generated cluster cells (`[0]`, `[0, 1]`, `[0, 1, 2]`, ...) + /// so that they can be properly deduplicated on insertion. + pub cluster_cell_cache: IntMap, - /// Maps an entity to its index, for a specific timeline. + /// All temporal [`IndexedTable`]s for all entities on all timelines. /// - /// An index maps specific points in time to rows in component tables. - pub(crate) indices: HashMap<(Timeline, EntityPathHash), IndexTable>, + /// See also [`Self::timeless_tables`]. + pub tables: HashMap<(Timeline, EntityPathHash), IndexedTable>, - /// Maps a component name to its associated table, for all timelines and all entities. + /// All timeless indexed tables for all entities. Never garbage collected. /// - /// A component table holds all the values ever inserted for a given component. - pub(crate) components: IntMap, + /// See also [`Self::tables`]. + pub timeless_tables: IntMap, /// Monotonically increasing ID for insertions. - pub(crate) insert_id: u64, + pub insert_id: u64, /// Monotonically increasing ID for queries. - pub(crate) query_id: AtomicU64, + pub query_id: AtomicU64, /// Monotonically increasing ID for GCs. - pub(crate) gc_id: u64, + #[allow(dead_code)] + pub gc_id: u64, } impl DataStore { @@ -263,22 +180,19 @@ impl DataStore { Self { cluster_key, config, - cluster_comp_cache: Default::default(), - messages: Default::default(), - indices: Default::default(), - components: Default::default(), - timeless_indices: Default::default(), - timeless_components: Default::default(), + cluster_cell_cache: Default::default(), + metadata_registry: Default::default(), + type_registry: Default::default(), + tables: Default::default(), + timeless_tables: Default::default(), insert_id: 0, query_id: AtomicU64::new(0), gc_id: 0, } } - /// The column name used for storing insert requests' IDs alongside the data. - /// - /// The insert IDs are stored as-is directly into the index tables, this is _not_ an - /// indirection into an associated component table! + /// The column name used for storing insert requests' IDs alongside the data when manipulating + /// dataframes. /// /// See [`DataStoreConfig::store_insert_ids`]. pub fn insert_id_key() -> ComponentName { @@ -290,156 +204,69 @@ impl DataStore { self.cluster_key } - /// Lookup the arrow `DataType` of a `Component` - pub fn lookup_data_type(&self, component: &ComponentName) -> Option<&DataType> { - self.components.get(component).map(|c| &c.datatype) + /// Lookup the arrow [`DataType`] of a [`re_log_types::Component`] in the internal + /// [`DataTypeRegistry`]. 
+ pub fn lookup_datatype(&self, component: &ComponentName) -> Option<&DataType> { + self.type_registry.get(component) } -} - -// --- Persistent Indices --- -/// A `PersistentIndexTable` maps specific entries to rows in persistent component tables. -/// -/// See also `DataStore::IndexTable`. -#[derive(Debug)] -pub struct PersistentIndexTable { - /// The entity this table is related to, for debugging purposes. - pub(crate) ent_path: EntityPath, + /// The oldest time for which we have any data. + /// + /// Ignores timeless data. + /// + /// Useful to call after a gc. + pub fn oldest_time_per_timeline(&self) -> BTreeMap { + crate::profile_function!(); - /// Carrying the cluster key around to help with assertions and sanity checks all over the - /// place. - pub(crate) cluster_key: ComponentName, + let mut oldest_time_per_timeline = BTreeMap::default(); + + for index in self.tables.values() { + if let Some(bucket) = index.buckets.values().next() { + let entry = oldest_time_per_timeline + .entry(bucket.timeline) + .or_insert(TimeInt::MAX); + if let Some(time) = bucket.inner.read().col_time.first() { + *entry = TimeInt::min(*entry, (*time).into()); + } + } + } - /// The number of rows in the table: all indices should always be exactly of that length. - pub(crate) num_rows: u64, + oldest_time_per_timeline + } - /// All component indices for this bucket. + /// Returns a read-only iterator over the raw indexed tables. /// - /// One index per component: new components (and as such, new indices) can be added at any - /// time! - /// When that happens, they will be retro-filled with nulls until they are [`Self::num_rows`] - /// long. - pub(crate) indices: IntMap, - - /// Track all of the components that have been written to. - pub(crate) all_components: IntSet, + /// Do _not_ use this to try and assert the internal state of the datastore. + pub fn iter_indices( + &self, + ) -> impl ExactSizeIterator { + self.tables.iter().map(|((timeline, _), table)| { + ((*timeline, table.ent_path.clone() /* shallow */), table) + }) + } } -// --- Indices --- +// --- Temporal --- -/// An `IndexTable` maps specific points in time to rows in component tables. +/// An `IndexedTable` is an ever-growing, arbitrary large [`re_log_types::DataTable`] that is +/// optimized for time-based insertions and queries (which means a lot of bucketing). 
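// Illustrative only: how the read-only accessors above can be used to inspect a
// store. The component used (`Rect2D`) is just an example.
fn example_inspect_store(store: &DataStore) {
    use re_log_types::{component_types::Rect2D, Component as _};

    // Datatype lookup: `None` until at least one `Rect2D` has been written.
    if let Some(datatype) = store.lookup_datatype(&Rect2D::name()) {
        println!("rects are stored as {datatype:?}");
    }

    // Per-timeline lower bound of the temporal data (timeless data is ignored).
    for (timeline, time) in store.oldest_time_per_timeline() {
        println!("{}: oldest time {}", timeline.name(), timeline.typ().format(time));
    }

    // Read-only walk over the indexed tables, e.g. to count rows per entity.
    for ((timeline, ent_path), table) in store.iter_indices() {
        println!("{ent_path} on {}: {} rows", timeline.name(), table.total_rows);
    }
}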
/// -/// Example of a time-based index table (`MAX_ROWS_PER_BUCKET=2`): -/// ```text -/// IndexTable { -/// timeline: log_time -/// entity: this/that -/// size: 3 buckets for a total of 152 B across 5 total rows -/// buckets: [ -/// IndexBucket { -/// index time bound: >= +0.000s -/// size: 64 B across 2 rows -/// - log_time: from 19:37:35.713798Z to 19:37:35.713798Z (all inclusive) -/// data (sorted=true): -/// +-------------------------------+--------------+---------------+--------------------+ -/// | log_time | rerun.rect2d | rerun.point2d | rerun.instance_key | -/// +-------------------------------+--------------+---------------+--------------------+ -/// | 2022-12-20 19:37:35.713798552 | | 2 | 2 | -/// | 2022-12-20 19:37:35.713798552 | 4 | | 2 | -/// +-------------------------------+--------------+---------------+--------------------+ -/// -/// } -/// IndexBucket { -/// index time bound: >= 19:37:36.713798Z -/// size: 64 B across 2 rows -/// - log_time: from 19:37:36.713798Z to 19:37:36.713798Z (all inclusive) -/// data (sorted=true): -/// +-------------------------------+--------------+--------------------+---------------+ -/// | log_time | rerun.rect2d | rerun.instance_key | rerun.point2d | -/// +-------------------------------+--------------+--------------------+---------------+ -/// | 2022-12-20 19:37:36.713798552 | 1 | 2 | | -/// | 2022-12-20 19:37:36.713798552 | | 4 | | -/// +-------------------------------+--------------+--------------------+---------------+ -/// -/// } -/// IndexBucket { -/// index time bound: >= 19:37:37.713798Z -/// size: 24 B across 1 rows -/// - log_time: from 19:37:37.713798Z to 19:37:37.713798Z (all inclusive) -/// data (sorted=true): -/// +-------------------------------+--------------+--------------------+ -/// | log_time | rerun.rect2d | rerun.instance_key | -/// +-------------------------------+--------------+--------------------+ -/// | 2022-12-20 19:37:37.713798552 | 2 | 3 | -/// +-------------------------------+--------------+--------------------+ -/// -/// } -/// ] -/// } -/// ``` +/// See also [`IndexedBucket`]. 
/// -/// Example of a sequence-based index table (`MAX_ROWS_PER_BUCKET=2`): -/// ```text -/// IndexTable { -/// timeline: frame_nr -/// entity: this/that -/// size: 3 buckets for a total of 256 B across 8 total rows -/// buckets: [ -/// IndexBucket { -/// index time bound: >= #0 -/// size: 96 B across 3 rows -/// - frame_nr: from #41 to #41 (all inclusive) -/// data (sorted=true): -/// +----------+---------------+--------------+--------------------+ -/// | frame_nr | rerun.point2d | rerun.rect2d | rerun.instance_key | -/// +----------+---------------+--------------+--------------------+ -/// | 41 | | | 1 | -/// | 41 | 1 | | 2 | -/// | 41 | | 3 | 2 | -/// +----------+---------------+--------------+--------------------+ -/// -/// } -/// IndexBucket { -/// index time bound: >= #42 -/// size: 96 B across 3 rows -/// - frame_nr: from #42 to #42 (all inclusive) -/// data (sorted=true): -/// +----------+--------------+--------------------+---------------+ -/// | frame_nr | rerun.rect2d | rerun.instance_key | rerun.point2d | -/// +----------+--------------+--------------------+---------------+ -/// | 42 | 1 | 2 | | -/// | 42 | | 4 | | -/// | 42 | | 2 | 2 | -/// +----------+--------------+--------------------+---------------+ -/// -/// } -/// IndexBucket { -/// index time bound: >= #43 -/// size: 64 B across 2 rows -/// - frame_nr: from #43 to #44 (all inclusive) -/// data (sorted=true): -/// +----------+--------------+---------------+--------------------+ -/// | frame_nr | rerun.rect2d | rerun.point2d | rerun.instance_key | -/// +----------+--------------+---------------+--------------------+ -/// | 43 | 4 | | 2 | -/// | 44 | | 3 | 2 | -/// +----------+--------------+---------------+--------------------+ -/// -/// } -/// ] -/// } -/// ``` -/// -/// See also: [`IndexBucket`]. +/// TODO #[derive(Debug)] -pub struct IndexTable { +pub struct IndexedTable { /// The timeline this table operates in, for debugging purposes. - pub(crate) timeline: Timeline, + pub timeline: Timeline, /// The entity this table is related to, for debugging purposes. - pub(crate) ent_path: EntityPath, + pub ent_path: EntityPath, + + /// Carrying the cluster key around to help with assertions and sanity checks all over the + /// place. + pub cluster_key: ComponentName, - /// The actual buckets, where the indices are stored. + /// The actual buckets, where the data is stored. /// /// The keys of this `BTreeMap` represent the lower bounds of the time-ranges covered by /// their associated buckets, _as seen from an indexing rather than a data standpoint_! @@ -447,90 +274,178 @@ pub struct IndexTable { /// This means that e.g. for the initial bucket, this will always be `-∞`, as from an /// indexing standpoint, all reads and writes with a time `t >= -∞` should go there, even /// though the bucket doesn't actually contains data with a timestamp of `-∞`! - pub(crate) buckets: BTreeMap, - - /// Carrying the cluster key around to help with assertions and sanity checks all over the - /// place. - pub(crate) cluster_key: ComponentName, + pub buckets: BTreeMap, /// Track all of the components that have been written to. /// - /// Note that this set will never be purged and will continue to return - /// components that may have been set in the past even if all instances of - /// that component have since been purged to free up space. 
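// Standalone sketch of the lower-bound indexing scheme described above: the bucket
// responsible for a given time is the one with the greatest key <= that time, which
// a `BTreeMap` range query answers directly. This is not the store's actual lookup
// code, just the idiom it builds on.
fn example_find_bucket(t: i64) -> Option<&'static str> {
    use std::collections::BTreeMap;

    let mut buckets = BTreeMap::new();
    buckets.insert(i64::MIN, "initial bucket"); // always present: covers t >= -∞
    buckets.insert(100, "second bucket");
    buckets.insert(200, "third bucket");

    // Greatest lower bound: all reads and writes with time `t` land in this bucket.
    buckets.range(..=t).next_back().map(|(_, name)| *name)
}

// example_find_bucket(50)  == Some("initial bucket")
// example_find_bucket(150) == Some("second bucket")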
- pub(crate) all_components: IntSet, + /// Note that this set will never be purged and will continue to return components that may + /// have been set in the past even if all instances of that component have since been purged + /// to free up space. + pub all_components: IntSet, + + /// The total number of rows in this indexed table, accounting for all buckets. + pub total_rows: u64, + + /// The size of this table in bytes across all of its buckets, accounting for both data and + /// metadata. + /// + /// Accurately computing the size of arrow arrays is surprisingly costly, which is why we + /// cache this. + /// Also: there are many buckets. + pub total_size_bytes: u64, } -impl IndexTable { - pub fn entity_path(&self) -> &EntityPath { - &self.ent_path +impl IndexedTable { + pub fn new(cluster_key: ComponentName, timeline: Timeline, ent_path: EntityPath) -> Self { + Self { + timeline, + ent_path, + buckets: [(i64::MIN.into(), IndexedBucket::new(cluster_key, timeline))].into(), + cluster_key, + all_components: Default::default(), + total_rows: 0, + total_size_bytes: 0, // TODO + } + } + + /// Recomputes the size of the table from scratch. + /// + /// Beware: this is costly! + pub fn compute_total_size_bytes(&mut self) -> u64 { + let Self { + timeline, + ent_path, + cluster_key, + buckets, + all_components, + total_rows, + total_size_bytes, + } = self; + + let buckets_size_bytes = buckets + .values_mut() + .map(|bucket| bucket.compute_total_size_bytes()) + .sum::(); + + let size_bytes = std::mem::size_of_val(timeline) + + std::mem::size_of_val(ent_path) + + std::mem::size_of_val(cluster_key) + + (all_components.len() * std::mem::size_of::()) + + std::mem::size_of_val(total_rows) + + std::mem::size_of_val(total_size_bytes); + + *total_size_bytes = size_bytes as u64 + buckets_size_bytes; + + *total_size_bytes + } + + /// Returns a read-only iterator over the raw buckets. + /// + /// Do _not_ use this to try and test the internal state of the datastore. + #[doc(hidden)] + pub fn iter_buckets(&self) -> impl ExactSizeIterator { + self.buckets.values() } } -/// An `IndexBucket` holds a size-delimited (data size and/or number of rows) chunk of a -/// [`IndexTable`]. -/// -/// - The data size limit is for garbage collection purposes. -/// - The number of rows limit is to bound sorting costs on the read path. -/// -/// See [`IndexTable`] to get an idea of what an `IndexBucket` looks like in practice. +/// An `IndexedBucket` holds a chunk of rows from an [`IndexedTable`] +/// (see [`DataStoreConfig::indexed_bucket_num_rows`]). #[derive(Debug)] -pub struct IndexBucket { +pub struct IndexedBucket { /// The timeline the bucket's parent table operates in, for debugging purposes. - pub(crate) timeline: Timeline, - - pub(crate) indices: RwLock, + pub timeline: Timeline, /// Carrying the cluster key around to help with assertions and sanity checks all over the /// place. - pub(crate) cluster_key: ComponentName, + pub cluster_key: ComponentName, + + // To simplify interior mutability. + pub inner: RwLock, } -/// Just the indices, to simplify interior mutability. +impl IndexedBucket { + fn new(cluster_key: ComponentName, timeline: Timeline) -> Self { + Self { + timeline, + inner: RwLock::new(IndexedBucketInner::default()), + cluster_key, + } + } + + /// Recomputes the size of the bucket from scratch. + /// + /// Beware: this is costly! 
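// Tiny illustration of the size accounting used by the `compute_total_size_bytes`
// implementations in this patch: the plain fixed-width columns are measured with
// `std::mem::size_of_val(col.as_slice())`, which for a slice is exactly
// `len * size_of::<T>()`, i.e. the column's heap payload. The arrow-backed cells
// cannot be measured that way, which is why they go through their own `size_bytes()`.
fn example_column_payload_size() {
    let col_time: Vec<i64> = vec![1, 2, 3, 4];
    assert_eq!(
        std::mem::size_of_val(col_time.as_slice()),
        4 * std::mem::size_of::<i64>()
    );
}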
+ pub fn compute_total_size_bytes(&mut self) -> u64 { + self.inner.write().compute_total_size_bytes() + } +} + +/// See [`IndexedBucket`]; this is a helper struct to simplify interior mutability. #[derive(Debug)] -pub struct IndexBucketIndices { - /// Whether the indices (all of them!) are currently sorted. +pub struct IndexedBucketInner { + /// Are the rows in this table chunk sorted? + /// + /// Querying an [`IndexedBucket`] will always trigger a sort if the rows within aren't already + /// sorted. + pub is_sorted: bool, + + /// The time range covered by the primary time column (see [`Self::col_time`]). /// - /// Querying an `IndexBucket` will always trigger a sort if the indices aren't already sorted. - pub(crate) is_sorted: bool, + /// For an empty bucket, this defaults to `[+∞,-∞]`. + pub time_range: TimeRange, - /// The time range covered by the primary time index. + // The primary time column, which is what drives the ordering of every other column. + pub col_time: ErasedTimeVec, + + /// The entire column of insertion IDs, if enabled in [`DataStoreConfig`]. /// - /// This is the actual time range that's covered by the indexed data! - /// For an empty bucket, this defaults to [+∞,-∞]. - pub(crate) time_range: TimeRange, + /// Keeps track of insertion order from the point-of-view of the [`DataStore`]. + pub col_insert_id: InsertIdVec, - // The primary time index, which is guaranteed to be dense, and "drives" all other indices. - // - // All secondary indices are guaranteed to follow the same sort order and be the same length. - pub(crate) times: TimeIndex, + /// The entire column of `RowId`s. + /// + /// Keeps track of the unique identifier for each row that was generated by the clients. + pub col_row_id: RowIdVec, - /// All secondary indices for this bucket (i.e. everything but time). + /// The entire column of `num_instances`. /// - /// One index per component: new components (and as such, new indices) can be added at any - /// time! - /// When that happens, they will be retro-filled with nulls so that they share the same - /// length as the primary index ([`Self::times`]). - pub(crate) indices: IntMap, + /// Keeps track of the expected number of instances in each row. + pub col_num_instances: NumInstancesVec, + + /// All the rows for all the component columns. + /// + /// The cells are optional since not all rows will have data for every single component + /// (i.e. the table is sparse). + pub columns: IntMap, + + /// The size of this bucket in bytes, accounting for both data and metadata. + /// + /// Accurately computing the size of arrow arrays is surprisingly costly, which is why we + /// cache this. + pub total_size_bytes: u64, } -impl Default for IndexBucketIndices { +impl Default for IndexedBucketInner { fn default() -> Self { Self { is_sorted: true, time_range: TimeRange::new(i64::MAX.into(), i64::MIN.into()), - times: Default::default(), - indices: Default::default(), + col_time: Default::default(), + col_insert_id: Default::default(), + col_row_id: Default::default(), + col_num_instances: Default::default(), + columns: Default::default(), + total_size_bytes: std::mem::size_of::() as _, } } } -impl IndexBucket { - /// Returns an (name, [`Int64Array`]) with a logical type matching the timeline. +impl IndexedBucket { + /// Returns a (name, [`Int64Array`]) with a logical type matching the timeline. 
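// Generic sketch of the "sort every column by the time column" step that an
// unsorted bucket needs before it can be queried (see `is_sorted` above). The real
// code operates on `IndexedBucketInner`'s columns and may differ; plain vectors are
// used here for illustration.
fn example_sort_bucket_columns(col_time: &mut Vec<i64>, col_row_id: &mut Vec<u64>) {
    // Compute the permutation that sorts the time column...
    let mut swaps: Vec<usize> = (0..col_time.len()).collect();
    swaps.sort_by_key(|&i| col_time[i]);

    // ...then apply that same permutation to every other (parallel) column.
    let sorted_time: Vec<i64> = swaps.iter().map(|&i| col_time[i]).collect();
    let sorted_row_id: Vec<u64> = swaps.iter().map(|&i| col_row_id[i]).collect();
    *col_time = sorted_time;
    *col_row_id = sorted_row_id;
}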
pub fn times(&self) -> (String, Int64Array) { crate::profile_function!(); - let times = Int64Array::from_vec(self.indices.read().times.clone()); + let times = Int64Array::from_slice(self.inner.read().col_time.as_slice()); let logical_type = match self.timeline.typ() { re_log_types::TimeType::Time => DataType::Timestamp(TimeUnit::Nanosecond, None), re_log_types::TimeType::Sequence => DataType::Int64, @@ -539,215 +454,150 @@ impl IndexBucket { } } -// --- Persistent Components --- - -/// A `PersistentComponentTable` holds all the timeless values ever inserted for a given component. -/// -/// See also `DataStore::ComponentTable`. -#[derive(Debug)] -pub struct PersistentComponentTable { - /// Name of the underlying component, for debugging purposes. - pub(crate) name: ComponentName, - - /// Type of the underlying component. - pub(crate) datatype: DataType, - - /// All the data for this table: many rows of a single column. - /// - /// Each chunk is a list of arrays of structs, i.e. `ListArray`: - /// - the list layer corresponds to the different rows, - /// - the array layer corresponds to the different instances within a single row, - /// - and finally the struct layer holds the components themselves. - /// E.g.: - /// ```text - /// [ - /// [{x: 8.687487, y: 1.9590926}, {x: 2.0559108, y: 0.1494348}, {x: 7.09219, y: 0.9616637}], - /// [{x: 7.158843, y: 0.68897724}, {x: 8.934421, y: 2.8420508}], - /// ] - /// ``` - /// - /// This can contain any number of chunks, depending on how the data was inserted (e.g. single - /// insertions vs. batches). - /// - /// Note that, as of today, we do not actually support batched insertion nor do we support - /// chunks of non-unit length (batches are inserted on a per-row basis internally). - /// As a result, chunks always contain one and only one row's worth of data, at least until - /// the bucket is compacted one or more times. - /// See also #589. - // - // TODO(cmc): compact timeless tables once in a while - pub(crate) chunks: Vec>, - - /// The total number of rows present in this bucket, across all chunks. - pub(crate) total_rows: u64, - - /// The size of this bucket in bytes, across all chunks. - /// - /// Accurately computing the size of arrow arrays is surprisingly costly, which is why we - /// cache this. - pub(crate) total_size_bytes: u64, +impl IndexedBucketInner { + /// Recomputes the size of the bucket from scratch. + /// + /// Beware: this is costly! + pub fn compute_total_size_bytes(&mut self) -> u64 { + let Self { + is_sorted, + time_range, + col_time, + col_insert_id, + col_row_id, + col_num_instances, + columns, + total_size_bytes, + } = self; + + let size_bytes = std::mem::size_of_val(is_sorted) + + std::mem::size_of_val(time_range) + + std::mem::size_of_val(col_time.as_slice()) + + std::mem::size_of_val(col_insert_id.as_slice()) + + std::mem::size_of_val(col_row_id.as_slice()) + + std::mem::size_of_val(col_num_instances.as_slice()) + + std::mem::size_of_val(total_size_bytes); + + *total_size_bytes = size_bytes as u64 + compute_table_size_bytes(columns); + + *total_size_bytes + } } -// --- Components --- - -/// A `ComponentTable` holds all the values ever inserted for a given component (provided they -/// are still alive, i.e. not GC'd). +/// A simple example to look at the internal representation of the a [`DataStore`]. 
/// -/// Example of a component table holding instances: +/// Run with: /// ```text -/// ComponentTable { -/// name: rerun.instance_key -/// size: 2 buckets for a total of 128 B across 5 total rows -/// buckets: [ -/// ComponentBucket { -/// size: 64 B across 3 rows -/// row range: from 0 to 0 (all inclusive) -/// archived: true -/// time ranges: -/// - frame_nr: from #41 to #41 (all inclusive) -/// +------------------------------------------------------------------+ -/// | rerun.instance_key | -/// +------------------------------------------------------------------+ -/// | [] | -/// | [2382325256275464629, 9801782006807296871, 13644487945655724411] | -/// | [0, 1, 2] | -/// +------------------------------------------------------------------+ -/// } -/// ComponentBucket { -/// size: 64 B across 2 rows -/// row range: from 3 to 4 (all inclusive) -/// archived: false -/// time ranges: -/// - frame_nr: from #42 to #42 (all inclusive) -/// - log_time: from 19:37:36.713798Z to 19:37:37.713798Z (all inclusive) -/// +-------------------------------------------------------------------+ -/// | rerun.instance_key | -/// +-------------------------------------------------------------------+ -/// | [8907162807054976021, 14953141369327162382, 15742885776230395882] | -/// | [165204472818569687, 3210188998985913268, 13675065411448304501] | -/// +-------------------------------------------------------------------+ -/// } -/// ] -/// } +/// cargo test -p re_arrow_store -- --nocapture datastore_docgen_indexed_table /// ``` -/// -/// Example of a component-table holding 2D positions: -/// ```text -/// ComponentTable { -/// name: rerun.point2d -/// size: 2 buckets for a total of 96 B across 4 total rows -/// buckets: [ -/// ComponentBucket { -/// size: 64 B across 3 rows -/// row range: from 0 to 0 (all inclusive) -/// archived: true -/// time ranges: -/// - log_time: from 19:37:35.713798Z to 19:37:35.713798Z (all inclusive) -/// - frame_nr: from #41 to #42 (all inclusive) -/// +-------------------------------------------------------------------+ -/// | rerun.point2d | -/// +-------------------------------------------------------------------+ -/// | [] | -/// | [{x: 2.4033058, y: 8.535466}, {x: 4.051945, y: 7.6194324} | -/// | [{x: 1.4975989, y: 6.17476}, {x: 2.4128711, y: 1.853013} | -/// +-------------------------------------------------------------------+ -/// } -/// ComponentBucket { -/// size: 32 B across 1 rows -/// row range: from 3 to 3 (all inclusive) -/// archived: false -/// time ranges: -/// - frame_nr: from #44 to #44 (all inclusive) -/// +-------------------------------------------------------------------+ -/// | rerun.point2d | -/// +-------------------------------------------------------------------+ -/// | [{x: 0.6296742, y: 6.7517242}, {x: 2.3393118, y: 8.770799} | -/// +-------------------------------------------------------------------+ -/// } -/// ] -/// } -/// ``` -#[derive(Debug)] -pub struct ComponentTable { - /// Name of the underlying component. - pub(crate) name: ComponentName, - - /// Type of the underlying component. - pub(crate) datatype: DataType, - - /// The actual buckets, where the component data is stored. - /// - /// Component buckets are append-only, they can never be written to in an out of order - /// fashion. 
- /// As such, a double-ended queue covers all our needs: - /// - popping from the front for garbage collection - /// - pushing to the back for insertions - /// - binary search for queries - pub(crate) buckets: VecDeque, +#[test] +fn datastore_docgen_indexed_table() { + use re_log_types::{component_types::InstanceKey, Component as _, DataTable}; + + let table = DataTable::example(false); + + let mut store = DataStore::new( + InstanceKey::name(), + DataStoreConfig { + indexed_bucket_num_rows: 0, + store_insert_ids: true, + enable_typecheck: true, + }, + ); + + eprintln!("{table}"); + store.insert_table(&table).unwrap(); + + store.sanity_check().unwrap(); + eprintln!("{store}"); } -/// A `ComponentBucket` holds a size-delimited (data size) chunk of a [`ComponentTable`]. +// --- Timeless --- + +/// The timeless specialization of an [`IndexedTable`]. +/// +/// TODO #[derive(Debug)] -pub struct ComponentBucket { - /// The component's name, for debugging purposes. - pub(crate) name: ComponentName, - - /// The offset of this bucket in the global table. - pub(crate) row_offset: u64, - - /// Has this bucket been archived yet? - /// - /// For every `ComponentTable`, there can only be one active bucket at a time (i.e. the bucket - /// that is currently accepting write requests), all the others are archived. - /// When the currently active bucket is full, it is archived in turn, and a new bucket is - /// created to take its place. - /// - /// Archiving a bucket is a good opportunity to run some maintenance tasks on it, e.g. - /// compaction (concatenating all chunks down to a single one). - /// Currently, an archived bucket is guaranteed to have these properties: - /// - the bucket is full (it has reached the maximum allowed length and/or size), - /// - the bucket has been compacted, - /// - the bucket is only used for reads. - pub(crate) archived: bool, - - /// The time ranges (plural!) covered by this bucket. - /// Buckets are never sorted over time, so these time ranges can grow arbitrarily large. - /// - /// These are only used for garbage collection. - pub(crate) time_ranges: HashMap, - - /// All the data for this bucket: many rows of a single column. - /// - /// Each chunk is a list of arrays of structs, i.e. `ListArray`: - /// - the list layer corresponds to the different rows, - /// - the array layer corresponds to the different instances within a single row, - /// - and finally the struct layer holds the components themselves. - /// E.g.: - /// ```text - /// [ - /// [{x: 8.687487, y: 1.9590926}, {x: 2.0559108, y: 0.1494348}, {x: 7.09219, y: 0.9616637}], - /// [{x: 7.158843, y: 0.68897724}, {x: 8.934421, y: 2.8420508}], - /// ] - /// ``` - /// - /// During the active lifespan of the bucket, this can contain any number of chunks, - /// depending on how the data was inserted (e.g. single insertions vs. batches). - /// All of these chunks get compacted into one contiguous array when the bucket is archived, - /// i.e. when the bucket is full and a new one is created. - /// - /// Note that, as of today, we do not actually support batched insertion nor do we support - /// chunks of non-unit length (batches are inserted on a per-row basis internally). - /// As a result, chunks always contain one and only one row's worth of data, at least until - /// the bucket is archived and compacted. - /// See also #589. - pub(crate) chunks: Vec>, - - /// The total number of rows present in this bucket, across all chunks. - pub(crate) total_rows: u64, - - /// The size of this bucket in bytes, across all chunks. 
+pub struct PersistentIndexedTable { + /// The entity this table is related to, for debugging purposes. + pub ent_path: EntityPath, + + /// Carrying the cluster key around to help with assertions and sanity checks all over the + /// place. + pub cluster_key: ComponentName, + + /// The entire column of insertion IDs, if enabled in [`DataStoreConfig`]. + /// + /// Keeps track of insertion order from the point-of-view of the [`DataStore`]. + pub col_insert_id: InsertIdVec, + + /// The entire column of `RowId`s. + /// + /// Keeps track of the unique identifier for each row that was generated by the clients. + pub col_row_id: RowIdVec, + + /// The entire column of `num_instances`. + /// + /// Keeps track of the expected number of instances in each row. + pub col_num_instances: NumInstancesVec, + + /// All the rows for all the component columns. + /// + /// The cells are optional since not all rows will have data for every single component + /// (i.e. the table is sparse). + pub columns: IntMap, + + /// The size of this indexed table in bytes, accounting for both data and metadata. /// /// Accurately computing the size of arrow arrays is surprisingly costly, which is why we /// cache this. - pub(crate) total_size_bytes: u64, + pub total_size_bytes: u64, +} + +impl PersistentIndexedTable { + /// Recomputes the size of the table from scratch. + /// + /// Beware: this is costly! + pub fn compute_total_size_bytes(&mut self) -> u64 { + let Self { + ent_path, + cluster_key, + col_insert_id, + col_row_id, + col_num_instances, + columns, + total_size_bytes, + } = self; + + let size_bytes = std::mem::size_of_val(ent_path) + + std::mem::size_of_val(cluster_key) + + std::mem::size_of_val(col_insert_id.as_slice()) + + std::mem::size_of_val(col_row_id.as_slice()) + + std::mem::size_of_val(col_num_instances.as_slice()) + + std::mem::size_of_val(total_size_bytes); + + *total_size_bytes = size_bytes as u64 + compute_table_size_bytes(columns); + + *total_size_bytes + } + + pub fn is_empty(&self) -> bool { + self.col_num_instances.is_empty() + } +} + +// --- Common --- + +/// Computes the size in bytes of an entire table's worth of data. +/// +/// Beware: this is costly! +fn compute_table_size_bytes(columns: &IntMap) -> u64 { + let keys = (columns.keys().len() * std::mem::size_of::()) as u64; + let cells = columns + .values() + .flat_map(|column| column.iter()) + .flatten() // option + .map(|cell| cell.size_bytes()) + .sum::(); + keys + cells } diff --git a/crates/re_arrow_store/src/store_arrow.rs b/crates/re_arrow_store/src/store_arrow.rs new file mode 100644 index 0000000000000..70a84eaaf32db --- /dev/null +++ b/crates/re_arrow_store/src/store_arrow.rs @@ -0,0 +1,225 @@ +use arrow2::{ + array::Array, + chunk::Chunk, + datatypes::{DataType, Field, Schema}, +}; +use re_log_types::{ + DataTable, DataTableResult, COLUMN_NUM_INSTANCES, COLUMN_ROW_ID, METADATA_KIND, + METADATA_KIND_CONTROL, +}; + +use crate::store::{IndexedBucket, IndexedBucketInner, PersistentIndexedTable}; + +// --- + +// TODO: sort columns + +pub const COLUMN_INSERT_ID: &str = "rerun.insert_id"; + +impl IndexedBucket { + /// Serializes the entire bucket into an arrow payload and schema. 
+ pub fn serialize(&self) -> DataTableResult<(Schema, Chunk>)> { + crate::profile_function!(); + + let mut schema = Schema::default(); + let mut columns = Vec::new(); + + { + let (control_schema, control_columns) = self.serialize_control_columns()?; + schema.fields.extend(control_schema.fields); + schema.metadata.extend(control_schema.metadata); + columns.extend(control_columns.into_iter()); + } + + { + let (data_schema, data_columns) = self.serialize_data_columns()?; + schema.fields.extend(data_schema.fields); + schema.metadata.extend(data_schema.metadata); + columns.extend(data_columns.into_iter()); + } + + Ok((schema, Chunk::new(columns))) + } + + fn serialize_control_columns(&self) -> DataTableResult<(Schema, Vec>)> { + crate::profile_function!(); + + let Self { + timeline: _, + cluster_key: _, + inner, + } = self; + + // TODO + let (time_field, time_column) = { + let (name, data) = self.times(); + + let mut field = Field::new(name, data.data_type().clone(), false).with_metadata( + [(METADATA_KIND.to_owned(), METADATA_KIND_CONTROL.to_owned())].into(), + ); + + // TODO(cmc): why do we have to do this manually on the way out, but it's done + // automatically on our behalf on the way in...? + if let DataType::Extension(name, _, _) = data.data_type() { + field + .metadata + .extend([("ARROW:extension:name".to_owned(), name.clone())]); + } + + (field, data.boxed()) + }; + + let IndexedBucketInner { + is_sorted: _, + time_range: _, + col_time: _, + col_insert_id, + col_row_id, + col_num_instances, + columns: _, + total_size_bytes: _, + } = &*inner.read(); + + let mut schema = Schema::default(); + let mut columns = Vec::new(); + + schema.fields.push(time_field); + columns.push(time_column); + + let (insert_id_field, insert_id_column) = + DataTable::serialize_control_column(COLUMN_INSERT_ID, col_insert_id)?; + schema.fields.push(insert_id_field); + columns.push(insert_id_column); + + let (row_id_field, row_id_column) = + DataTable::serialize_control_column(COLUMN_ROW_ID, col_row_id)?; + schema.fields.push(row_id_field); + columns.push(row_id_column); + + // TODO(#1712): This is unnecessarily slow... + let (num_instances_field, num_instances_column) = + DataTable::serialize_control_column(COLUMN_NUM_INSTANCES, col_num_instances)?; + schema.fields.push(num_instances_field); + columns.push(num_instances_column); + + Ok((schema, columns)) + } + + fn serialize_data_columns(&self) -> DataTableResult<(Schema, Vec>)> { + crate::profile_function!(); + + let Self { + timeline: _, + cluster_key: _, + inner, + } = self; + + let IndexedBucketInner { + is_sorted: _, + time_range: _, + col_time: _, + col_insert_id: _, + col_row_id: _, + col_num_instances: _, + columns: table, + total_size_bytes: _, + } = &*inner.read(); + + let mut schema = Schema::default(); + let mut columns = Vec::new(); + + for (component, column) in table { + let (field, column) = DataTable::serialize_data_column(component.as_str(), column)?; + schema.fields.push(field); + columns.push(column); + } + + Ok((schema, columns)) + } +} + +impl PersistentIndexedTable { + /// Serializes the entire table into an arrow payload and schema. 
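// Freestanding sketch of the (Schema, Chunk) assembly pattern used by the
// serializers in this file, with made-up column names and metadata keys standing in
// for the crate's COLUMN_*/METADATA_* constants.
fn example_schema_and_chunk() -> (Schema, Chunk<Box<dyn Array>>) {
    use arrow2::array::Int64Array;
    use arrow2::datatypes::Field;

    let mut schema = Schema::default();
    let mut columns: Vec<Box<dyn Array>> = Vec::new();

    // A "control" column, tagged through field-level metadata.
    let row_ids = Int64Array::from_vec(vec![1, 2, 3]);
    schema.fields.push(
        Field::new("example.row_id", row_ids.data_type().clone(), false)
            .with_metadata([("kind".to_owned(), "control".to_owned())].into()),
    );
    columns.push(row_ids.boxed());

    // A data column: same row count, nullable this time.
    let values = Int64Array::from(vec![Some(10), None, Some(30)]);
    schema
        .fields
        .push(Field::new("example.value", values.data_type().clone(), true));
    columns.push(values.boxed());

    // Every column must have the same length; `Chunk::new` asserts as much.
    (schema, Chunk::new(columns))
}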
+ pub fn serialize(&self) -> DataTableResult<(Schema, Chunk>)> { + crate::profile_function!(); + + let mut schema = Schema::default(); + let mut columns = Vec::new(); + + { + let (control_schema, control_columns) = self.serialize_control_columns()?; + schema.fields.extend(control_schema.fields); + schema.metadata.extend(control_schema.metadata); + columns.extend(control_columns.into_iter()); + } + + { + let (data_schema, data_columns) = self.serialize_data_columns()?; + schema.fields.extend(data_schema.fields); + schema.metadata.extend(data_schema.metadata); + columns.extend(data_columns.into_iter()); + } + + Ok((schema, Chunk::new(columns))) + } + + fn serialize_control_columns(&self) -> DataTableResult<(Schema, Vec>)> { + crate::profile_function!(); + + let Self { + ent_path: _, + cluster_key: _, + col_insert_id, + col_row_id, + col_num_instances, + columns: _, + total_size_bytes: _, + } = self; + + let mut schema = Schema::default(); + let mut columns = Vec::new(); + + let (insert_id_field, insert_id_column) = + DataTable::serialize_control_column(COLUMN_INSERT_ID, col_insert_id)?; + schema.fields.push(insert_id_field); + columns.push(insert_id_column); + + let (row_id_field, row_id_column) = + DataTable::serialize_control_column(COLUMN_ROW_ID, col_row_id)?; + schema.fields.push(row_id_field); + columns.push(row_id_column); + + // TODO(#1712): This is unnecessarily slow... + let (num_instances_field, num_instances_column) = + DataTable::serialize_control_column(COLUMN_NUM_INSTANCES, col_num_instances)?; + schema.fields.push(num_instances_field); + columns.push(num_instances_column); + + Ok((schema, columns)) + } + + fn serialize_data_columns(&self) -> DataTableResult<(Schema, Vec>)> { + crate::profile_function!(); + + let Self { + ent_path: _, + cluster_key: _, + col_insert_id: _, + col_row_id: _, + col_num_instances: _, + columns: table, + total_size_bytes: _, + } = self; + + let mut schema = Schema::default(); + let mut columns = Vec::new(); + + for (component, column) in table { + let (field, column) = DataTable::serialize_data_column(component.as_str(), column)?; + schema.fields.push(field); + columns.push(column); + } + + Ok((schema, columns)) + } +} diff --git a/crates/re_arrow_store/src/store_format.rs b/crates/re_arrow_store/src/store_format.rs index 731def279d290..40bfa3c9e736d 100644 --- a/crates/re_arrow_store/src/store_format.rs +++ b/crates/re_arrow_store/src/store_format.rs @@ -1,27 +1,8 @@ -use arrow2::array::UInt64Array; -use re_format::{arrow, format_bytes, format_number}; +use re_format::{format_bytes, format_number}; -use crate::{ - ComponentBucket, ComponentTable, DataStore, IndexBucket, IndexRowNr, IndexTable, - PersistentComponentTable, PersistentIndexTable, RowIndex, RowIndexKind, -}; +use crate::{DataStore, IndexedBucket, IndexedTable, PersistentIndexedTable}; -// --- Indices & offsets --- - -impl std::fmt::Display for RowIndex { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self.kind() { - RowIndexKind::Temporal => f.write_fmt(format_args!("Temporal({})", self.0)), - RowIndexKind::Timeless => f.write_fmt(format_args!("Timeless({})", self.0)), - } - } -} - -impl std::fmt::Display for IndexRowNr { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.write_fmt(format_args!("{}", self.0)) - } -} +// TODO: displaying is slow, makes sense to recompute sizes and sort // --- Data store --- @@ -31,12 +12,11 @@ impl std::fmt::Display for DataStore { let Self { cluster_key, config, - cluster_comp_cache: _, - messages: 
_, - indices, - components, - timeless_indices, - timeless_components, + cluster_cell_cache: _, + metadata_registry: _, + type_registry: _, + tables, + timeless_tables, insert_id: _, query_id: _, gc_id: _, @@ -54,33 +34,15 @@ impl std::fmt::Display for DataStore { f.write_str(&indent::indent_all_by( 4, format!( - "{} timeless index tables, for a total of {} across {} total rows\n", - timeless_indices.len(), + "{} timeless indexed tables, for a total of {} across {} total rows\n", + timeless_tables.len(), format_bytes(self.total_timeless_index_size_bytes() as _), format_number(self.total_timeless_index_rows() as _) ), ))?; - f.write_str(&indent::indent_all_by(4, "timeless_indices: [\n"))?; - for table in timeless_indices.values() { - f.write_str(&indent::indent_all_by(8, "PersistentIndexTable {\n"))?; - f.write_str(&indent::indent_all_by(12, table.to_string() + "\n"))?; - f.write_str(&indent::indent_all_by(8, "}\n"))?; - } - f.write_str(&indent::indent_all_by(4, "]\n"))?; - } - { - f.write_str(&indent::indent_all_by( - 4, - format!( - "{} persistent component tables, for a total of {} across {} total rows\n", - timeless_components.len(), - format_bytes(self.total_timeless_component_size_bytes() as _), - format_number(self.total_timeless_component_rows() as _) - ), - ))?; - f.write_str(&indent::indent_all_by(4, "timeless_components: [\n"))?; - for table in timeless_components.values() { - f.write_str(&indent::indent_all_by(8, "PersistentComponentTable {\n"))?; + f.write_str(&indent::indent_all_by(4, "timeless_tables: [\n"))?; + for table in timeless_tables.values() { + f.write_str(&indent::indent_all_by(8, "PersistentIndexedTable {\n"))?; f.write_str(&indent::indent_all_by(12, table.to_string() + "\n"))?; f.write_str(&indent::indent_all_by(8, "}\n"))?; } @@ -91,33 +53,15 @@ impl std::fmt::Display for DataStore { f.write_str(&indent::indent_all_by( 4, format!( - "{} index tables, for a total of {} across {} total rows\n", - indices.len(), + "{} indexed tables, for a total of {} across {} total rows\n", + tables.len(), format_bytes(self.total_temporal_index_size_bytes() as _), format_number(self.total_temporal_index_rows() as _) ), ))?; - f.write_str(&indent::indent_all_by(4, "indices: [\n"))?; - for table in indices.values() { - f.write_str(&indent::indent_all_by(8, "IndexTable {\n"))?; - f.write_str(&indent::indent_all_by(12, table.to_string() + "\n"))?; - f.write_str(&indent::indent_all_by(8, "}\n"))?; - } - f.write_str(&indent::indent_all_by(4, "]\n"))?; - } - { - f.write_str(&indent::indent_all_by( - 4, - format!( - "{} component tables, for a total of {} across {} total rows\n", - components.len(), - format_bytes(self.total_temporal_component_size_bytes() as _), - format_number(self.total_temporal_component_rows() as _) - ), - ))?; - f.write_str(&indent::indent_all_by(4, "components: [\n"))?; - for table in components.values() { - f.write_str(&indent::indent_all_by(8, "ComponentTable {\n"))?; + f.write_str(&indent::indent_all_by(4, "tables: [\n"))?; + for table in tables.values() { + f.write_str(&indent::indent_all_by(8, "IndexedTable {\n"))?; f.write_str(&indent::indent_all_by(12, table.to_string() + "\n"))?; f.write_str(&indent::indent_all_by(8, "}\n"))?; } @@ -130,56 +74,9 @@ impl std::fmt::Display for DataStore { } } -// --- Persistent Indices --- - -impl std::fmt::Display for PersistentIndexTable { - #[allow(clippy::string_add)] - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let Self { - ent_path, - cluster_key: _, - num_rows: _, - indices: _, - 
all_components: _, - } = self; - - f.write_fmt(format_args!("entity: {ent_path}\n"))?; - - f.write_fmt(format_args!( - "size: {} across {} rows\n", - format_bytes(self.total_size_bytes() as _), - format_number(self.total_rows() as _), - ))?; - - let (col_names, cols): (Vec<_>, Vec<_>) = { - self.indices - .iter() - .map(|(name, index)| { - ( - name.to_string(), - UInt64Array::from( - index - .iter() - .map(|row_idx| row_idx.map(|row_idx| row_idx.as_u64())) - .collect::>(), - ), - ) - }) - .unzip() - }; - - let values = cols.into_iter().map(|c| c.boxed()); - let table = arrow::format_table(values, col_names); - - f.write_fmt(format_args!("data:\n{table}\n"))?; - - Ok(()) - } -} - -// --- Indices --- +// --- Temporal --- -impl std::fmt::Display for IndexTable { +impl std::fmt::Display for IndexedTable { #[allow(clippy::string_add)] fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let Self { @@ -188,6 +85,8 @@ impl std::fmt::Display for IndexTable { buckets, cluster_key: _, all_components: _, + total_rows: _, + total_size_bytes: _, } = self; f.write_fmt(format_args!("timeline: {}\n", timeline.name()))?; @@ -201,7 +100,7 @@ impl std::fmt::Display for IndexTable { ))?; f.write_str("buckets: [\n")?; for (time, bucket) in buckets.iter() { - f.write_str(&indent::indent_all_by(4, "IndexBucket {\n"))?; + f.write_str(&indent::indent_all_by(4, "IndexedBucket {\n"))?; f.write_str(&indent::indent_all_by( 8, format!("index time bound: >= {}\n", timeline.typ().format(*time),), @@ -215,7 +114,7 @@ impl std::fmt::Display for IndexTable { } } -impl std::fmt::Display for IndexBucket { +impl std::fmt::Display for IndexedBucket { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.write_fmt(format_args!( "size: {} across {} rows\n", @@ -224,7 +123,7 @@ impl std::fmt::Display for IndexBucket { ))?; let time_range = { - let time_range = &self.indices.read().time_range; + let time_range = &self.inner.read().time_range; if time_range.min.as_i64() != i64::MAX && time_range.max.as_i64() != i64::MIN { self.timeline.format_time_range(time_range) } else { @@ -233,157 +132,53 @@ impl std::fmt::Display for IndexBucket { }; f.write_fmt(format_args!("{time_range}\n"))?; - let (timeline_name, times) = self.times(); - let (col_names, cols): (Vec<_>, Vec<_>) = { - self.indices - .read() - .indices - .iter() - .map(|(name, index)| { - ( - name.to_string(), - UInt64Array::from( - index - .iter() - .map(|row_idx| row_idx.map(|row_idx| row_idx.as_u64())) - .collect::>(), - ), - ) - }) - .unzip() - }; - - let names = std::iter::once(timeline_name).chain(col_names); - let values = std::iter::once(times.boxed()).chain(cols.into_iter().map(|c| c.boxed())); - let table = arrow::format_table(values, names); - - let is_sorted = self.is_sorted(); - f.write_fmt(format_args!("data (sorted={is_sorted}):\n{table}\n"))?; - - Ok(()) - } -} - -// --- Persistent Components --- - -impl std::fmt::Display for PersistentComponentTable { - #[allow(clippy::string_add)] - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let Self { - name, - datatype, - chunks, - total_rows, - total_size_bytes, - } = self; - - f.write_fmt(format_args!("name: {name}\n"))?; - if matches!( - std::env::var("RERUN_DATA_STORE_DISPLAY_SCHEMAS").as_deref(), - Ok("1") - ) { - f.write_fmt(format_args!("datatype: {datatype:#?}\n"))?; - } - - f.write_fmt(format_args!( - "size: {} across {} total rows\n", - format_bytes(*total_size_bytes as _), - format_number(*total_rows as _), - ))?; - - let data = { - use 
arrow2::compute::concatenate::concatenate; - let chunks = chunks.iter().map(|chunk| &**chunk).collect::>(); - concatenate(&chunks).unwrap() - }; - - let table = arrow::format_table([data], [self.name.as_str()]); - f.write_fmt(format_args!("{table}\n"))?; - - Ok(()) + let (schema, columns) = self.serialize().map_err(|err| { + re_log::error_once!("couldn't display indexed bucket: {err}"); + std::fmt::Error + })?; + re_format::arrow::format_table( + columns.columns(), + schema.fields.iter().map(|field| field.name.as_str()), + ) + .fmt(f)?; + + writeln!(f) } } -// --- Components --- +// --- Timeless --- -impl std::fmt::Display for ComponentTable { +impl std::fmt::Display for PersistentIndexedTable { #[allow(clippy::string_add)] fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let Self { - name, - datatype, - buckets, + ent_path, + cluster_key: _, + col_insert_id: _, + col_row_id: _, + col_num_instances: _, + columns: _, + total_size_bytes: _, } = self; - f.write_fmt(format_args!("name: {name}\n"))?; - if matches!( - std::env::var("RERUN_DATA_STORE_DISPLAY_SCHEMAS").as_deref(), - Ok("1") - ) { - f.write_fmt(format_args!("datatype: {datatype:#?}\n"))?; - } - - f.write_fmt(format_args!( - "size: {} buckets for a total of {} across {} total rows\n", - self.buckets.len(), - format_bytes(self.total_size_bytes() as _), - format_number(self.total_rows() as _), - ))?; - f.write_str("buckets: [\n")?; - for bucket in buckets { - f.write_str(&indent::indent_all_by(4, "ComponentBucket {\n"))?; - f.write_str(&indent::indent_all_by(8, bucket.to_string()))?; - f.write_str(&indent::indent_all_by(4, "}\n"))?; - } - f.write_str("]")?; - - Ok(()) - } -} + f.write_fmt(format_args!("entity: {ent_path}\n"))?; -impl std::fmt::Display for ComponentBucket { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.write_fmt(format_args!( "size: {} across {} rows\n", format_bytes(self.total_size_bytes() as _), format_number(self.total_rows() as _), ))?; - f.write_fmt(format_args!( - "row range: from {} to {} (all inclusive)\n", - self.row_offset, - // Component buckets can never be empty at the moment: - // - the first bucket is always initialized with a single empty row - // - all buckets that follow are lazily instantiated when data get inserted - // - // TODO(#439): is that still true with deletion? 
- // TODO(#589): support for non-unit-length chunks - self.row_offset - + self - .chunks - .len() - .checked_sub(1) - .expect("buckets are never empty") as u64, - ))?; - - f.write_fmt(format_args!("archived: {}\n", self.archived))?; - f.write_str("time ranges:\n")?; - for (timeline, time_range) in &self.time_ranges { - f.write_fmt(format_args!( - "{}\n", - &timeline.format_time_range(time_range) - ))?; - } - - let data = { - use arrow2::compute::concatenate::concatenate; - let chunks = self.chunks.iter().map(|chunk| &**chunk).collect::>(); - concatenate(&chunks).unwrap() - }; - - let table = arrow::format_table([data], [self.name.as_str()]); - f.write_fmt(format_args!("{table}\n"))?; - - Ok(()) + let (schema, columns) = self.serialize().map_err(|err| { + re_log::error_once!("couldn't display timeless indexed table: {err}"); + std::fmt::Error + })?; + re_format::arrow::format_table( + columns.columns(), + schema.fields.iter().map(|field| field.name.as_str()), + ) + .fmt(f)?; + + writeln!(f) } } diff --git a/crates/re_arrow_store/src/store_gc.rs b/crates/re_arrow_store/src/store_gc.rs index 5eca6a872ce1f..26fee70956278 100644 --- a/crates/re_arrow_store/src/store_gc.rs +++ b/crates/re_arrow_store/src/store_gc.rs @@ -1,14 +1,3 @@ -use std::collections::HashMap; - -use arrow2::array::{Array, ListArray}; - -use re_log::trace; -use re_log_types::{ComponentName, TimeRange, Timeline}; - -use crate::{ComponentBucket, DataStore}; - -// --- - #[derive(Debug, Clone, Copy)] pub enum GarbageCollectionTarget { /// Try to drop _at least_ the given percentage. @@ -28,188 +17,4 @@ impl std::fmt::Display for GarbageCollectionTarget { } } -impl DataStore { - /// Triggers a garbage collection according to the desired `target`, driven by the specified - /// `primary_component` and `primary_timeline`. - /// Returns all the raw data that was removed from the store for the given `primary_component`. - /// - /// This only affects component tables, indices are left as-is, effectively behaving as - /// tombstones. - /// - /// The garbage collection is based on _insertion order_, which makes it both very efficient - /// and very simple from an implementation standpoint. - /// The tradeoff is that the given `primary_timeline` is expected to roughly follow insertion - /// order, otherwise the behaviour is essentially undefined. 
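The old insertion-order GC is being removed here (its doc comment above, its body just below), and its replacement is still a `TODO` in `store_gc.rs`. For orientation, a minimal sketch of how a `GarbageCollectionTarget::DropAtLeastPercentage` target turns into a byte budget, mirroring the arithmetic in the code being deleted; the `total_size_bytes` argument is a stand-in for whatever size accounting the rewritten store exposes, and the crate-root re-export is an assumption:

```rust
use re_arrow_store::GarbageCollectionTarget; // assumption: re-exported at the crate root

/// Illustrative only: convert a GC target into a "bytes to drop" budget.
fn gc_byte_budget(target: GarbageCollectionTarget, total_size_bytes: u64) -> f64 {
    match target {
        GarbageCollectionTarget::DropAtLeastPercentage(p) => {
            assert!((0.0..=1.0).contains(&p));
            total_size_bytes as f64 * p
        }
    }
}
```

The removed implementation then kept popping the oldest component buckets until that budget was exhausted.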
- pub fn gc( - &mut self, - target: GarbageCollectionTarget, - primary_timeline: Timeline, - primary_component: ComponentName, - ) -> Vec> { - crate::profile_function!(); - - self.gc_id += 1; - - let initial_nb_rows = self.total_temporal_component_rows(); - let initial_size_bytes = self.total_temporal_component_size_bytes() as f64; - - let res = match target { - GarbageCollectionTarget::DropAtLeastPercentage(p) => { - assert!((0.0..=1.0).contains(&p)); - - let drop_at_least_size_bytes = initial_size_bytes * p; - let target_size_bytes = initial_size_bytes - drop_at_least_size_bytes; - - re_log::debug!( - kind = "gc", - id = self.gc_id, - %target, - timeline = %primary_timeline.name(), - %primary_component, - initial_nb_rows = re_format::format_large_number(initial_nb_rows as _), - initial_size_bytes = re_format::format_bytes(initial_size_bytes), - target_size_bytes = re_format::format_bytes(target_size_bytes), - drop_at_least_size_bytes = re_format::format_bytes(drop_at_least_size_bytes), - "starting GC" - ); - - self.gc_drop_at_least_size_bytes( - primary_timeline, - primary_component, - drop_at_least_size_bytes, - ) - } - }; - - #[cfg(debug_assertions)] - self.sanity_check().unwrap(); - - let new_nb_rows = self.total_temporal_component_rows(); - let new_size_bytes = self.total_temporal_component_size_bytes() as f64; - - re_log::debug!( - kind = "gc", - id = self.gc_id, - %target, - timeline = %primary_timeline.name(), - %primary_component, - initial_nb_rows = re_format::format_large_number(initial_nb_rows as _), - initial_size_bytes = re_format::format_bytes(initial_size_bytes), - new_nb_rows = re_format::format_large_number(new_nb_rows as _), - new_size_bytes = re_format::format_bytes(new_size_bytes), - "GC done" - ); - - res - } - - fn gc_drop_at_least_size_bytes( - &mut self, - primary_timeline: Timeline, - primary_component: ComponentName, - mut drop_at_least_size_bytes: f64, - ) -> Vec> { - let mut dropped = Vec::>::new(); - - let mut i = 0usize; - while drop_at_least_size_bytes > 0.0 { - // Find and drop the earliest (in terms of _insertion order_) primary component bucket - // that we can find. - let Some(primary_bucket) = self - .components - .get_mut(&primary_component) - .and_then(|table| (table.buckets.len() > 1).then(|| table.buckets.pop_front())) - .flatten() - else { - trace!( - kind = "gc", - id = self.gc_id, - timeline = %primary_timeline.name(), - %primary_component, - iter = i, - remaining = re_format::format_bytes(drop_at_least_size_bytes), - "no more primary buckets, giving up" - ); - break; - }; - - drop_at_least_size_bytes -= primary_bucket.total_size_bytes() as f64; - - trace!( - kind = "gc", - id = self.gc_id, - timeline = %primary_timeline.name(), - %primary_component, - iter = i, - reclaimed = re_format::format_bytes(primary_bucket.total_size_bytes() as f64), - remaining = re_format::format_bytes(drop_at_least_size_bytes), - "found & dropped primary component bucket" - ); - - // From there, find and drop all component buckets (in _insertion order_) that do not - // contain any data more recent than the time range covered by the primary - // component bucket (for the primary timeline!). 
- for table in self - .components - .iter_mut() - .filter_map(|(component, table)| (*component != primary_component).then_some(table)) - { - while table.buckets.len() > 1 { - let bucket = table.buckets.front().unwrap(); - if primary_bucket.encompasses(primary_timeline, &bucket.time_ranges) { - let bucket = table.buckets.pop_front().unwrap(); - drop_at_least_size_bytes -= bucket.total_size_bytes() as f64; - trace!( - kind = "gc", - id = self.gc_id, - timeline = %primary_timeline.name(), - %primary_component, - iter = i, - reclaimed = re_format::format_bytes(bucket.total_size_bytes() as f64), - remaining = re_format::format_bytes(drop_at_least_size_bytes), - "found & dropped secondary component bucket" - ); - } else { - break; - } - } - - i += 1; - } - - // We don't collect indices: they behave as tombstones. - - dropped.extend(primary_bucket.chunks.into_iter().map(|chunk| { - chunk - .as_any() - .downcast_ref::>() - .unwrap() - .values() - .clone() - })); - } - - dropped - } -} - -impl ComponentBucket { - /// Does `self` fully encompass `time_ranges` for the given `primary_timeline`? - fn encompasses( - &self, - primary_timeline: Timeline, - time_ranges: &HashMap, - ) -> bool { - if let (Some(time_range1), Some(time_range2)) = ( - self.time_ranges.get(&primary_timeline), - time_ranges.get(&primary_timeline), - ) { - return time_range1.max >= time_range2.max; - } - - // There's only one way this can happen: this is a bucket that only holds the fake row at - // offset #0. - // Ignore it. - true - } -} +// TODO: impl GC diff --git a/crates/re_arrow_store/src/store_polars.rs b/crates/re_arrow_store/src/store_polars.rs index fb095fcfb62fc..97290470f3538 100644 --- a/crates/re_arrow_store/src/store_polars.rs +++ b/crates/re_arrow_store/src/store_polars.rs @@ -1,21 +1,23 @@ +#![allow(clippy::all, unused_variables, dead_code)] + use std::collections::BTreeSet; use arrow2::{ - array::{new_empty_array, Array, BooleanArray, ListArray, UInt64Array, Utf8Array}, + array::{new_empty_array, Array, BooleanArray, ListArray, Utf8Array}, bitmap::Bitmap, compute::concatenate::concatenate, offset::Offsets, }; -use nohash_hasher::IntMap; use polars_core::{functions::diag_concat_df, prelude::*}; -use re_log_types::ComponentName; +use re_log_types::{ComponentName, DataCell}; use crate::{ - store::SecondaryIndex, ArrayExt, DataStore, DataStoreConfig, IndexBucket, IndexBucketIndices, - PersistentIndexTable, RowIndex, + store::InsertIdVec, ArrayExt, DataStore, DataStoreConfig, IndexedBucket, IndexedBucketInner, + PersistentIndexedTable, }; // TODO(#1692): all of this stuff should be defined by Data{Cell,Row,Table}, not the store. +// TODO: remove this and reimplement it on top of store serialization // --- @@ -29,7 +31,7 @@ impl DataStore { const TIMELESS_COL: &str = "_is_timeless"; - let timeless_dfs = self.timeless_indices.values().map(|index| { + let timeless_dfs = self.timeless_tables.values().map(|index| { let ent_path = index.ent_path.clone(); let mut df = index.to_dataframe(self, &self.config); @@ -45,7 +47,7 @@ impl DataStore { (ent_path, df.clone()) }); - let temporal_dfs = self.indices.values().map(|index| { + let temporal_dfs = self.tables.values().map(|index| { let dfs: Vec<_> = index .buckets .values() @@ -109,7 +111,7 @@ impl DataStore { // Arrange the columns in the order that makes the most sense as a user. 
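Per the doc comment on `sort_df_columns` further down, the intended order is: insert IDs (when enabled), then lexically sorted timelines, then the entity path, then the component columns. A minimal sketch of that priority as a sort key; the helper is hypothetical, and `insert_id_key` stands in for `DataStore::insert_id_key().as_str()`:

```rust
use std::collections::BTreeSet;

/// Illustrative only: rank a dataframe column name according to the display
/// order described above; sorting by the returned key yields roughly the same
/// arrangement that `sort_df_columns` builds by hand.
fn column_rank(name: &str, insert_id_key: &str, timelines: &BTreeSet<&str>) -> (u8, String) {
    if name == insert_id_key {
        (0, String::new())
    } else if timelines.contains(name) {
        (1, name.to_owned())
    } else if name == "entity" {
        (2, String::new())
    } else {
        (3, name.to_owned())
    }
}
```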
let timelines: BTreeSet<&str> = self - .indices + .tables .keys() .map(|(timeline, _)| timeline.name().as_str()) .collect(); @@ -159,7 +161,7 @@ impl DataStore { } } -impl PersistentIndexTable { +impl PersistentIndexedTable { /// Dumps the entire table as a flat, denormalized dataframe. /// /// This cannot fail: it always tries to yield as much valuable information as it can, even in @@ -170,24 +172,27 @@ impl PersistentIndexTable { let Self { ent_path: _, cluster_key: _, - num_rows, - indices, - all_components: _, + col_insert_id, + col_row_id, + col_num_instances, + columns, + total_size_bytes: _, } = self; + let num_rows = self.total_rows() as usize; + + // TODO: these are not arrowified anymore let insert_ids = config .store_insert_ids - .then(|| insert_ids_as_series(*num_rows as usize, indices)) - .flatten(); + .then(|| insert_ids_as_series(&col_insert_id)); let comp_series = // One column for insert IDs, if they are available. std::iter::once(insert_ids) .flatten() // filter options - // One column for each component index. - .chain(indices.iter().filter_map(|(component, comp_row_nrs)| { - let datatype = find_component_datatype(store, component)?; - component_as_series(store, *num_rows as usize, datatype, *component, comp_row_nrs).into() + .chain(columns.iter().filter_map(|(component, cells)| { + let datatype = store.lookup_datatype(component)?.clone(); + column_as_series(store, num_rows, datatype, *component, cells).into() })); DataFrame::new(comp_series.collect::>()) @@ -197,7 +202,7 @@ impl PersistentIndexTable { } } -impl IndexBucket { +impl IndexedBucket { /// Dumps the entire bucket as a flat, denormalized dataframe. /// /// This cannot fail: it always tries to yield as much valuable information as it can, even in @@ -208,17 +213,21 @@ impl IndexBucket { let (_, times) = self.times(); let num_rows = times.len(); - let IndexBucketIndices { + let IndexedBucketInner { is_sorted: _, time_range: _, - times: _, - indices, - } = &*self.indices.read(); - + col_time: _, + col_insert_id, + col_row_id, + col_num_instances, + columns, + total_size_bytes: _, + } = &*self.inner.read(); + + // TODO let insert_ids = config .store_insert_ids - .then(|| insert_ids_as_series(num_rows, indices)) - .flatten(); + .then(|| insert_ids_as_series(&col_insert_id)); // Need to create one `Series` for the time index and one for each component index. let comp_series = [ @@ -234,9 +243,9 @@ impl IndexBucket { .into_iter() .flatten() // filter options // One column for each component index. 
- .chain(indices.iter().filter_map(|(component, comp_row_nrs)| { - let datatype = find_component_datatype(store, component)?; - component_as_series(store, num_rows, datatype, *component, comp_row_nrs).into() + .chain(columns.iter().filter_map(|(component, cells)| { + let datatype = store.lookup_datatype(component)?.clone(); + column_as_series(store, num_rows, datatype, *component, cells).into() })); DataFrame::new(comp_series.collect::>()) @@ -248,67 +257,44 @@ impl IndexBucket { // --- -fn insert_ids_as_series( - num_rows: usize, - indices: &IntMap, -) -> Option { - crate::profile_function!(); +// TODO: mess - indices.get(&DataStore::insert_id_key()).map(|insert_ids| { - let insert_ids = insert_ids - .iter() - .map(|id| id.map(|id| id.0.get())) - .collect::>(); - let insert_ids = UInt64Array::from(insert_ids); - new_infallible_series(DataStore::insert_id_key().as_str(), &insert_ids, num_rows) - }) -} - -fn find_component_datatype( - store: &DataStore, - component: &ComponentName, -) -> Option { +fn insert_ids_as_series(col_insert_id: &InsertIdVec) -> Series { crate::profile_function!(); - let timeless = store - .timeless_components - .get(component) - .map(|table| table.datatype.clone()); - let temporal = store - .components - .get(component) - .map(|table| table.datatype.clone()); - timeless.or(temporal) + let insert_ids = arrow2::array::UInt64Array::from_slice(col_insert_id.as_slice()); + new_infallible_series( + DataStore::insert_id_key().as_str(), + &insert_ids, + insert_ids.len(), + ) } -fn component_as_series( +// TODO: that belongs to DataCellColumn +fn column_as_series( store: &DataStore, num_rows: usize, datatype: arrow2::datatypes::DataType, component: ComponentName, - comp_row_nrs: &[Option], + cells: &[Option], ) -> Series { crate::profile_function!(); - let components = &[component]; - - // For each row in the index, grab the associated data from the component tables. - let comp_rows: Vec> = comp_row_nrs - .iter() - .cloned() - .map(|comp_row_nr| store.get(components, &[comp_row_nr])[0].clone()) - .collect(); - // Computing the validity bitmap is just a matter of checking whether the data was // available in the component tables. - let comp_validity: Vec<_> = comp_rows.iter().map(|row| row.is_some()).collect(); + let comp_validity: Vec<_> = cells.iter().map(|cell| cell.is_some()).collect(); // Each cell is actually a list, so we need to compute offsets one cell at a time. - let comp_lengths = comp_rows - .iter() - .map(|row| row.as_ref().map_or(0, |row| row.len())); + let comp_lengths = cells.iter().map(|cell| { + cell.as_ref() + .map_or(0, |cell| cell.num_instances() as usize) + }); - let comp_values: Vec<_> = comp_rows.iter().flatten().map(|row| row.as_ref()).collect(); + let comp_values: Vec<_> = cells + .iter() + .flatten() + .map(|cell| cell.as_arrow_ref()) + .collect(); // Bring everything together into one big list. 
let comp_values = ListArray::::new( @@ -330,6 +316,8 @@ fn component_as_series( // --- +// TODO: mess + fn new_infallible_series(name: &str, data: &dyn Array, len: usize) -> Series { crate::profile_function!(); @@ -339,6 +327,7 @@ fn new_infallible_series(name: &str, data: &dyn Array, len: usize) -> Series { }) } +// TODO: needs all the new control columns in there /// Sorts the columns of the given dataframe according to the following rules: // - insert ID comes first if it's available, // - followed by lexically sorted timelines, @@ -358,13 +347,6 @@ fn sort_df_columns( all.remove(all.binary_search(&"entity").expect("has to exist")); - if store_insert_ids { - all.remove( - all.binary_search(&DataStore::insert_id_key().as_str()) - .expect("has to exist"), - ); - } - let timelines = timelines.iter().copied().map(Some).collect::>(); let native_components = all @@ -382,7 +364,7 @@ fn sort_df_columns( .collect::>(); [ - vec![store_insert_ids.then(|| DataStore::insert_id_key().as_str())], + // vec![store_insert_ids.then(|| DataStore::insert_id_key().as_str())], timelines, vec![Some("entity")], native_components, diff --git a/crates/re_arrow_store/src/store_read.rs b/crates/re_arrow_store/src/store_read.rs index 00dce413215ff..7fe12895d2edd 100644 --- a/crates/re_arrow_store/src/store_read.rs +++ b/crates/re_arrow_store/src/store_read.rs @@ -1,20 +1,21 @@ use std::{ops::RangeBounds, sync::atomic::Ordering}; -use arrow2::array::{Array, ListArray}; - use itertools::Itertools; +use nohash_hasher::IntSet; use re_log::trace; -use re_log_types::{ComponentName, EntityPath, MsgId, TimeInt, TimePoint, TimeRange, Timeline}; - -use crate::{ - ComponentBucket, ComponentTable, DataStore, IndexBucket, IndexBucketIndices, IndexRowNr, - IndexTable, PersistentComponentTable, PersistentIndexTable, RowIndex, RowIndexKind, - SecondaryIndex, +use re_log_types::{ + ComponentName, DataCell, EntityPath, MsgId, RowId, TimeInt, TimePoint, TimeRange, Timeline, }; +use smallvec::SmallVec; + +use crate::{DataStore, IndexedBucket, IndexedBucketInner, IndexedTable, PersistentIndexedTable}; + +// TODO: +// - LatestAtResult, RangeResult // --- Queries --- -/// A query a given time, for a given timeline. +/// A query at a given time, for a given timeline. /// /// Get the latest version of the data available at this time. #[derive(Clone)] @@ -76,12 +77,12 @@ impl RangeQuery { // --- Data store --- impl DataStore { - /// Retrieve all the `ComponentName`s that have been written to for a given `EntityPath` on - /// a specific `Timeline`. + /// Retrieve all the [`ComponentName`]s that have been written to for a given [`EntityPath`] on + /// a specific [`Timeline`]. /// /// # Temporal semantics /// - /// In addition to the temporal results, this also includes all `ComponentName`s present in + /// In addition to the temporal results, this also includes all [`ComponentName`]s present in /// the timeless indices for this entity. pub fn all_components( &self, @@ -103,15 +104,15 @@ impl DataStore { "query started..." 
); - let timeless = self - .timeless_indices + let timeless: Option> = self + .timeless_tables .get(&ent_path_hash) - .map(|index| &index.all_components); + .map(|table| table.columns.keys().copied().collect()); let temporal = self - .indices + .tables .get(&(*timeline, ent_path_hash)) - .map(|index| &index.all_components); + .map(|table| &table.all_components); let components = match (timeless, temporal) { (None, Some(temporal)) => temporal.iter().cloned().collect_vec(), @@ -132,19 +133,14 @@ impl DataStore { Some(components) } - /// Queries the datastore for the internal row indices of the specified `components`, as seen - /// from the point of view of the so-called `primary` component. + /// Queries the datastore for the cells of the specified `components`, as seen from the point + /// of view of the so-called `primary` component. /// - /// Returns an array of row indices on success, or `None` otherwise. - /// Success is defined by one thing and thing only: whether a row index could be found for the + /// Returns an array of [`DataCell`]s on success, or `None` otherwise. + /// Success is defined by one thing and thing only: whether a cell could be found for the /// `primary` component. /// The presence or absence of secondary components has no effect on the success criteria. /// - /// * On success, the returned array is filled with the internal row index of each and every - /// component in `components`, or `None` if said component is not available in that row. - /// - /// To actually retrieve the data associated with these indices, see [`Self::get`]. - /// /// # Temporal semantics /// /// Temporal indices take precedence, then timeless indices are queried to fill the holes left @@ -152,15 +148,15 @@ impl DataStore { /// /// ## Example /// - /// The following example demonstrate how to fetch the latest row indices for a given - /// component and the associated cluster key, then get the corresponding data using these row - /// indices, and finally turn everything into a nice-to-work-with polars's dataframe. + /// The following example demonstrate how to fetch the latest cells for a given component + /// and its associated cluster key, and wrap the result into a nice-to-work-with polars's + /// dataframe. 
/// /// ```rust /// # use polars_core::{prelude::*, series::Series}; - /// # use re_log_types::{ComponentName, EntityPath as EntityPath, TimeInt}; + /// # use re_log_types::{ComponentName, EntityPath, TimeInt}; /// # use re_arrow_store::{DataStore, LatestAtQuery, RangeQuery}; - /// + /// # /// pub fn latest_component( /// store: &DataStore, /// query: &LatestAtQuery, @@ -170,16 +166,19 @@ impl DataStore { /// let cluster_key = store.cluster_key(); /// /// let components = &[cluster_key, primary]; - /// let row_indices = store + /// let cells = store /// .latest_at(query, ent_path, primary, components) - /// .unwrap_or([None; 2]); - /// let results = store.get(components, &row_indices); + /// .unwrap_or([(); 2].map(|_| None)); /// - /// let series: Result, _> = components + /// let series: Result, _> = cells /// .iter() - /// .zip(results) - /// .filter_map(|(component, col)| col.map(|col| (component, col))) - /// .map(|(&component, col)| Series::try_from((component.as_str(), col))) + /// .flatten() + /// .map(|cell| { + /// Series::try_from(( + /// cell.component_name().as_str(), + /// cell.as_arrow(), + /// )) + /// }) /// .collect(); /// /// DataFrame::new(series?).map_err(Into::into) @@ -197,7 +196,7 @@ impl DataStore { ent_path: &EntityPath, primary: ComponentName, components: &[ComponentName; N], - ) -> Option<[Option; N]> { + ) -> Option<[Option; N]> { crate::profile_function!(); // TODO(cmc): kind & query_id need to somehow propagate through the span system. @@ -215,62 +214,64 @@ impl DataStore { "query started..." ); - let row_indices = self - .indices + let cells = self + .tables .get(&(query.timeline, ent_path_hash)) - .and_then(|index| { - let row_indices = index.latest_at(query.at, primary, components); + .and_then(|table| { + let cells = table.latest_at(query.at, primary, components); trace!( kind = "latest_at", query = ?query, entity = %ent_path, %primary, ?components, - ?row_indices, timeless = false, - "row indices fetched" + "row cells fetched" ); - row_indices + cells }); - // If we've found everything we were looking for in the temporal index, then we can + // If we've found everything we were looking for in the temporal table, then we can // return the results immediately. - if row_indices.map_or(false, |row_indices| row_indices.iter().all(Option::is_some)) { - return row_indices; + if cells + .as_ref() + .map_or(false, |cells| cells.iter().all(Option::is_some)) + { + return cells; } - let row_indices_timeless = self.timeless_indices.get(&ent_path_hash).and_then(|index| { - let row_indices = index.latest_at(primary, components); + let cells_timeless = self.timeless_tables.get(&ent_path_hash).and_then(|table| { + let cells = table.latest_at(primary, components); trace!( kind = "latest_at", query = ?query, entity = %ent_path, %primary, ?components, - ?row_indices, + ?cells, timeless = true, - "row indices fetched" + "cells fetched" ); - row_indices + cells }); - // Otherwise, let's see what's in the timeless index, and then..: - match (row_indices, row_indices_timeless) { - // nothing in the timeless index: return those partial row indices we got. - (Some(row_indices), None) => return Some(row_indices), - // no temporal row indices, but some timeless ones: return those as-is. 
- (None, Some(row_indices_timeless)) => return Some(row_indices_timeless), - // we have both temporal & timeless indices: let's merge the two when it makes sense + // Otherwise, let's see what's in the timeless table, and then..: + match (cells, cells_timeless) { + // nothing in the timeless table: return those partial cells we got. + (Some(cells), None) => return Some(cells), + // no temporal cells, but some timeless ones: return those as-is. + (None, Some(cells_timeless)) => return Some(cells_timeless), + // we have both temporal & timeless cells: let's merge the two when it makes sense // and return the end result. - (Some(mut row_indices), Some(row_indices_timeless)) => { - for (i, row_idx) in row_indices_timeless.into_iter().enumerate() { - if row_indices[i].is_none() { - row_indices[i] = row_idx; + (Some(mut cells), Some(cells_timeless)) => { + for (i, row_idx) in cells_timeless.into_iter().enumerate() { + if cells[i].is_none() { + cells[i] = row_idx; } } - return Some(row_indices); + return Some(cells); } - // no row indices at all. + // no cells at all. (None, None) => {} } @@ -286,20 +287,16 @@ impl DataStore { None } - /// Iterates the datastore in order to return the internal row indices of the the specified - /// `components`, as seen from the point of view of the so-called `primary` component, for the - /// given time range. + /// Iterates the datastore in order to return the cells of the the specified `components`, + /// as seen from the point of view of the so-called `primary` component, for the given time + /// range. /// /// For each and every relevant row that is found, the returned iterator will yield an array - /// that is filled with the internal row index of each and every component in `components`, - /// or `None` if said component is not available in that row. + /// that is filled with the cekks of each and every component in `components`, or `None` if + /// said component is not available in that row. /// A row is considered iff it contains data for the `primary` component. /// - /// This method cannot fail! If there's no data to return (whether that's due to a missing - /// primary index, missing secondary components, an empty point-of-view...), then an empty - /// iterator is returned. - /// - /// To actually retrieve the data associated with these indices, see [`Self::get`]. + /// This method cannot fail! If there's no data to return, an empty iterator is returned. /// /// ⚠ Contrary to latest-at queries, range queries can and will yield multiple rows for a /// single timestamp if that timestamp happens to hold multiple entries for the `primary` @@ -317,33 +314,35 @@ impl DataStore { /// /// ## Example /// - /// The following example demonstrate how to range over the row indices of a given - /// component and its associated cluster key, then get the corresponding data using these - /// row indices, and finally turn everything into a nice-to-work-with iterator of - /// polars's dataframe. - /// Additionally, it yields the latest-at state of the component a the start of the time range, + /// The following example demonstrate how to range over the cells of a given + /// component and its associated cluster key, and turn the results into a nice-to-work-with + /// iterator of polars's dataframe. + /// Additionally, it yields the latest-at state of the component at the start of the time range, /// if available. 
/// /// ```rust /// # use arrow2::array::Array; /// # use polars_core::{prelude::*, series::Series}; - /// # use re_log_types::{ComponentName, EntityPath as EntityPath, TimeInt}; + /// # use re_log_types::{ComponentName, DataCell, EntityPath, TimeInt}; /// # use re_arrow_store::{DataStore, LatestAtQuery, RangeQuery}; - /// - /// # pub fn dataframe_from_results( - /// # components: &[ComponentName; N], - /// # results: [Option>; N], + /// # + /// # pub fn dataframe_from_cells( + /// # cells: [Option; N], /// # ) -> anyhow::Result { - /// # let series: Result, _> = components + /// # let series: Result, _> = cells /// # .iter() - /// # .zip(results) - /// # .filter_map(|(component, col)| col.map(|col| (component, col))) - /// # .map(|(&component, col)| Series::try_from((component.as_str(), col))) + /// # .flatten() + /// # .map(|cell| { + /// # Series::try_from(( + /// # cell.component_name().as_str(), + /// # cell.as_arrow(), + /// # )) + /// # }) /// # .collect(); /// # /// # DataFrame::new(series?).map_err(Into::into) /// # } - /// + /// # /// pub fn range_component<'a>( /// store: &'a DataStore, /// query: &'a RangeQuery, @@ -358,11 +357,10 @@ impl DataStore { /// let latest_time = query.range.min.as_i64().saturating_sub(1).into(); /// let df_latest = { /// let query = LatestAtQuery::new(query.timeline, latest_time); - /// let row_indices = store + /// let cells = store /// .latest_at(&query, ent_path, primary, &components) - /// .unwrap_or([None; 2]); - /// let results = store.get(&components, &row_indices); - /// dataframe_from_results(&components, results) + /// .unwrap_or([(); 2].map(|_| None)); + /// dataframe_from_cells(cells) /// }; /// /// // Send the latest-at state before anything else.. @@ -370,10 +368,7 @@ impl DataStore { /// // ..but only if it's not an empty dataframe. /// .filter(|df| df.as_ref().map_or(true, |(_, df)| !df.is_empty())) /// .chain(store.range(query, ent_path, components).map( - /// move |(time, _, row_indices)| { - /// let results = store.get(&components, &row_indices); - /// dataframe_from_results(&components, results).map(|df| (time, df)) - /// }, + /// move |(time, _, cells)| dataframe_from_cells(cells).map(|df| (time, df)) /// )) /// } /// ``` @@ -389,7 +384,7 @@ impl DataStore { query: &RangeQuery, ent_path: &EntityPath, components: [ComponentName; N], - ) -> impl Iterator, IndexRowNr, [Option; N])> + 'a { + ) -> impl Iterator, RowId, [Option; N])> + 'a { // Beware! This merely measures the time it takes to gather all the necessary metadata // for building the returned iterator. crate::profile_function!(); @@ -409,21 +404,21 @@ impl DataStore { ); let temporal = self - .indices + .tables .get(&(query.timeline, ent_path_hash)) .map(|index| index.range(query.range, components)) .into_iter() .flatten() - .map(|(time, idx_row_nr, row_indices)| (Some(time), idx_row_nr, row_indices)); + .map(|(time, row_id, cells)| (Some(time), row_id, cells)); if query.range.min == TimeInt::MIN { let timeless = self - .timeless_indices + .timeless_tables .get(&ent_path_hash) .map(|index| { index .range(components) - .map(|(idx_row_nr, row_indices)| (None, idx_row_nr, row_indices)) + .map(|(row_id, cells)| (None, row_id, cells)) }) .into_iter() .flatten(); @@ -433,226 +428,30 @@ impl DataStore { } } - /// Retrieves the data associated with a list of `components` at the specified `indices`. - /// - /// If the associated data is found, it will be written into the returned array at the - /// appropriate index, or `None` otherwise. 
- /// - /// `row_indices` takes a list of options so that one can easily re-use the results obtained - /// from [`Self::latest_at`] & [`Self::range`]. - pub fn get( - &self, - components: &[ComponentName; N], - row_indices: &[Option; N], - ) -> [Option>; N] { - crate::profile_function!(); - - let mut results = [(); N].map(|_| None); // work around non-Copy const initialization limitations - - for (i, &component, row_idx) in components - .iter() - .zip(row_indices) - .enumerate() - .filter_map(|(i, (comp, row_idx))| row_idx.map(|row_idx| (i, comp, row_idx))) - { - match row_idx.kind() { - RowIndexKind::Timeless => { - let row = self - .timeless_components - .get(&component) - .map(|table| table.get(row_idx)); - results[i] = row; - } - RowIndexKind::Temporal => { - let row = self - .components - .get(&component) - .and_then(|table| table.get(row_idx)); - results[i] = row; - } - } - } - - results - } - pub fn get_msg_metadata(&self, msg_id: &MsgId) -> Option<&TimePoint> { crate::profile_function!(); - self.messages.get(msg_id) + self.metadata_registry.get(msg_id) } /// Sort all unsorted indices in the store. pub fn sort_indices_if_needed(&mut self) { - for index in self.indices.values_mut() { + for index in self.tables.values_mut() { index.sort_indices_if_needed(); } } - - /// Returns a read-only iterator over the raw index tables. - /// - /// Do _not_ use this to try and test the internal state of the datastore. - pub fn iter_indices( - &self, - ) -> impl ExactSizeIterator { - self.indices.iter().map(|((timeline, _), table)| { - ((*timeline, table.ent_path.clone() /* shallow */), table) - }) - } -} - -// --- Persistent Indices --- - -impl PersistentIndexTable { - /// Returns `None` iff no row index could be found for the `primary` component. - pub fn latest_at( - &self, - primary: ComponentName, - components: &[ComponentName; N], - ) -> Option<[Option; N]> { - if self.num_rows == 0 { - return None; - } - - // Early-exit if this bucket is unaware of this component. - let index = self.indices.get(&primary)?; - - crate::profile_function!(); - - trace!( - kind = "latest_at", - %primary, - ?components, - timeless = true, - "searching for primary & secondary row indices..." - ); - - // find the primary index's row. - let primary_idx = self.num_rows - 1; - - trace!( - kind = "latest_at", - %primary, - ?components, - %primary_idx, - timeless = true, - "found primary index", - ); - - // find the secondary indices' rows, and the associated row indices. - let mut secondary_idx = primary_idx as i64; - while index[secondary_idx as usize].is_none() { - secondary_idx -= 1; - if secondary_idx < 0 { - trace!( - kind = "latest_at", - %primary, - ?components, - timeless = true, - %primary_idx, - "no secondary index found", - ); - return None; - } - } - - trace!( - kind = "latest_at", - %primary, - ?components, - timeless = true, - %primary_idx, %secondary_idx, - "found secondary index", - ); - debug_assert!(index[secondary_idx as usize].is_some()); - - let mut row_indices = [None; N]; - for (i, component) in components.iter().enumerate() { - if let Some(index) = self.indices.get(component) { - if let Some(row_idx) = index[secondary_idx as usize] { - trace!( - kind = "latest_at", - %primary, - %component, - timeless = true, - %primary_idx, %secondary_idx, %row_idx, - "found row index", - ); - row_indices[i] = Some(row_idx); - } - } - } - - Some(row_indices) - } - - /// Returns an empty iterator if no data could be found for any reason. 
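With `DataStore::get` and the row-index indirection gone (removed above), reads collapse into a single step: `latest_at` hands back the `DataCell`s directly. A hedged usage sketch; the helper and the choice of `Rect2D` are purely illustrative:

```rust
use re_arrow_store::{DataStore, LatestAtQuery};
use re_log_types::{component_types::Rect2D, Component as _, DataCell, EntityPath};

/// Illustrative only: fetch the latest `Rect2D` cell for an entity, if any.
fn latest_rects(
    store: &DataStore,
    query: &LatestAtQuery,
    ent_path: &EntityPath,
) -> Option<DataCell> {
    let components = [Rect2D::name()];
    let [cell] = store.latest_at(query, ent_path, Rect2D::name(), &components)?;
    cell
}
```

From there, `cell.as_arrow_ref()` (or `as_arrow()`) exposes the underlying arrow array, as the benchmarks and doc examples do.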
- pub fn range( - &self, - components: [ComponentName; N], - ) -> impl Iterator; N])> + '_ { - // Early-exit if the table is unaware of any of our components of interest. - if components - .iter() - .all(|component| self.indices.get(component).is_none()) - { - return itertools::Either::Right(std::iter::empty()); - } - - // Beware! This merely measures the time it takes to gather all the necessary metadata - // for building the returned iterator. - crate::profile_function!(); - - // TODO(cmc): Cloning these is obviously not great and will need to be addressed at - // some point. - // But, really, it's not _that_ bad either: these are integers and e.g. with the default - // configuration there are only 1024 of them (times the number of components). - let comp_indices = self.indices.clone(); - - let row_indices = (0..self.num_rows).filter_map(move |comp_idx_row_nr| { - let comp_idx_row_nr = IndexRowNr(comp_idx_row_nr); - - let mut row_indices = [None; N]; - for (i, component) in components.iter().enumerate() { - if let Some(index) = comp_indices.get(component) { - if let Some(row_idx) = index[comp_idx_row_nr.0 as usize] { - row_indices[i] = Some(row_idx); - } - } - } - - // We only yield rows that contain data for at least one of the components of - // interest. - if row_indices.iter().all(Option::is_none) { - return None; - } - - trace!( - kind = "range", - ?components, - timeless = true, - %comp_idx_row_nr, - ?row_indices, - "yielding row indices", - ); - - Some((comp_idx_row_nr, row_indices)) - }); - - itertools::Either::Left(row_indices) - } } -// --- Indices --- +// --- Temporal --- -impl IndexTable { - /// Returns `None` iff no row index could be found for the `primary` component. +impl IndexedTable { + /// Returns `None` iff no cell could be found for the `primary` component. pub fn latest_at( &self, time: TimeInt, primary: ComponentName, components: &[ComponentName; N], - ) -> Option<[Option; N]> { + ) -> Option<[Option; N]> { crate::profile_function!(); // Early-exit if this entire table is unaware of this component. @@ -665,9 +464,9 @@ impl IndexTable { // The time we're looking for gives us an upper bound: all components must be indexed // in either this bucket _or any of those that come before_! // - // That is because secondary indices allow for null values, which forces us to not only - // walk backwards within an index bucket, but sometimes even walk backwards across - // multiple index buckets within the same table! + // That is because secondary columns allow for null values, which forces us to not only + // walk backwards within an indexed bucket, but sometimes even walk backwards across + // multiple indexed buckets within the same table! let buckets = self .range_buckets_rev(..=time) @@ -681,11 +480,11 @@ impl IndexTable { %primary, ?components, attempt, - bucket_time_range = timeline.typ().format_range(bucket.indices.read().time_range), + bucket_time_range = timeline.typ().format_range(bucket.inner.read().time_range), "found candidate bucket" ); - if let row_indices @ Some(_) = bucket.latest_at(time, primary, components) { - return row_indices; // found at least the primary component! + if let cells @ Some(_) = bucket.latest_at(time, primary, components) { + return cells; // found at least the primary component! } } @@ -697,7 +496,7 @@ impl IndexTable { &self, time_range: TimeRange, components: [ComponentName; N], - ) -> impl Iterator; N])> + '_ { + ) -> impl Iterator; N])> + '_ { // Beware! 
This merely measures the time it takes to gather all the necessary metadata // for building the returned iterator. crate::profile_function!(); @@ -715,7 +514,7 @@ impl IndexTable { kind = "range", bucket_nr, bucket_time_range = - timeline.typ().format_range(bucket.indices.read().time_range), + timeline.typ().format_range(bucket.inner.read().time_range), timeline = %timeline.name(), ?time_range, ?components, @@ -726,29 +525,31 @@ impl IndexTable { }) } - /// Returns the index bucket whose time range covers the given `time`. + /// Returns the indexed bucket whose time range covers the given `time`. /// - /// In addition to returning a reference to the `IndexBucket` itself, this also returns its + /// In addition to returning a reference to the `IndexedBucket` itself, this also returns its /// _indexing time_, which is different from its minimum time range bound! - /// See `IndexTable::buckets` for more information. - pub fn find_bucket(&self, time: TimeInt) -> (TimeInt, &IndexBucket) { + /// + /// See [`IndexedTable::buckets`] for more information. + pub fn find_bucket(&self, time: TimeInt) -> (TimeInt, &IndexedBucket) { crate::profile_function!(); // This cannot fail, `iter_bucket` is guaranteed to always yield at least one bucket, - // since index tables always spawn with a default bucket that covers [-∞;+∞]. + // since indexed tables always spawn with a default bucket that covers [-∞;+∞]. self.range_buckets_rev(..=time).next().unwrap() } - /// Returns the index bucket whose time range covers the given `time`. + /// Returns the indexed bucket whose time range covers the given `time`. /// - /// In addition to returning a reference to the `IndexBucket` itself, this also returns its + /// In addition to returning a reference to the `IndexedBucket` itself, this also returns its /// _indexing time_, which is different from its minimum time range bound! - /// See `IndexTable::buckets` for more information. - pub fn find_bucket_mut(&mut self, time: TimeInt) -> (TimeInt, &mut IndexBucket) { + /// + /// See [`IndexedTable::buckets`] for more information. + pub fn find_bucket_mut(&mut self, time: TimeInt) -> (TimeInt, &mut IndexedBucket) { crate::profile_function!(); // This cannot fail, `iter_bucket_mut` is guaranteed to always yield at least one bucket, - // since index tables always spawn with a default bucket that covers [-∞;+∞]. + // since indexed tables always spawn with a default bucket that covers [-∞;+∞]. self.range_bucket_rev_mut(..=time).next().unwrap() } @@ -757,13 +558,14 @@ impl IndexTable { /// /// It then continues yielding buckets until it runs out, in increasing time range order. /// - /// In addition to yielding references to the `IndexBucket`s themselves, this also returns + /// In addition to yielding references to the `IndexedBucket`s themselves, this also returns /// their _indexing times_, which are different from their minimum time range bounds! - /// See `IndexTable::buckets` for more information. + /// + /// See [`IndexedTable::buckets`] for more information. pub fn range_buckets( &self, time_range: impl RangeBounds, - ) -> impl Iterator { + ) -> impl Iterator { // Beware! This merely measures the time it takes to gather all the necessary metadata // for building the returned iterator. crate::profile_function!(); @@ -778,13 +580,14 @@ impl IndexTable { /// /// It then continues yielding buckets until it runs out, in decreasing time range order. 
/// - /// In addition to yielding references to the `IndexBucket`s themselves, this also returns + /// In addition to yielding references to the `IndexedBucket`s themselves, this also returns /// their _indexing times_, which are different from their minimum time range bounds! - /// See `IndexTable::buckets` for more information. + /// + /// See [`IndexedTable::buckets`] for more information. pub fn range_buckets_rev( &self, time_range: impl RangeBounds, - ) -> impl Iterator { + ) -> impl Iterator { // Beware! This merely measures the time it takes to gather all the necessary metadata // for building the returned iterator. crate::profile_function!(); @@ -800,13 +603,14 @@ impl IndexTable { /// /// It then continues yielding buckets until it runs out, in decreasing time range order. /// - /// In addition to yielding references to the `IndexBucket`s themselves, this also returns + /// In addition to yielding references to the `IndexedBucket`s themselves, this also returns /// their _indexing times_, which are different from their minimum time range bounds! - /// See `IndexTable::buckets` for more information. + /// + /// See [`IndexedTable::buckets`] for more information. pub fn range_bucket_rev_mut( &mut self, time_range: impl RangeBounds, - ) -> impl Iterator { + ) -> impl Iterator { // Beware! This merely measures the time it takes to gather all the necessary metadata // for building the returned iterator. crate::profile_function!(); @@ -817,52 +621,49 @@ impl IndexTable { .map(|(time, bucket)| (*time, bucket)) } - /// Sort all unsorted index buckets in this table. + /// Sort all unsorted indexed buckets in this table. pub fn sort_indices_if_needed(&self) { for bucket in self.buckets.values() { bucket.sort_indices_if_needed(); } } - - /// Returns a read-only iterator over the raw buckets. - /// - /// Do _not_ use this to try and test the internal state of the datastore. - pub fn iter_buckets(&self) -> impl ExactSizeIterator { - self.buckets.values() - } } -impl IndexBucket { +impl IndexedBucket { /// Sort all component indices by time, provided that's not already the case. pub fn sort_indices_if_needed(&self) { - if self.indices.read().is_sorted { + if self.inner.read().is_sorted { return; // early read-only exit } crate::profile_scope!("sort"); - self.indices.write().sort(); + self.inner.write().sort(); } - /// Returns `None` iff no row index could be found for the `primary` component. + /// Returns `None` iff no cell could be found for the `primary` component. pub fn latest_at( &self, time: TimeInt, primary: ComponentName, components: &[ComponentName; N], - ) -> Option<[Option; N]> { + ) -> Option<[Option; N]> { crate::profile_function!(); self.sort_indices_if_needed(); - let IndexBucketIndices { + let IndexedBucketInner { is_sorted, time_range: _, - times, - indices, - } = &*self.indices.read(); + col_time, + col_insert_id: _, + col_row_id: _, + col_num_instances: _, + columns, + total_size_bytes: _, // TODO + } = &*self.inner.read(); debug_assert!(is_sorted); // Early-exit if this bucket is unaware of this component. - let index = indices.get(&primary)?; + let column = columns.get(&primary)?; crate::profile_function!(); @@ -872,45 +673,44 @@ impl IndexBucket { ?components, timeline = %self.timeline.name(), time = self.timeline.typ().format(time), - "searching for primary & secondary row indices..." + "searching for primary & secondary cells..." ); - // find the primary index's row. 
- let primary_idx = times.partition_point(|t| *t <= time.as_i64()) as i64; + let time_row_nr = col_time.partition_point(|t| *t <= time.as_i64()) as i64; // The partition point is always _beyond_ the index that we're looking for. // A partition point of 0 thus means that we're trying to query for data that lives // _before_ the beginning of time... there's nothing to be found there. - if primary_idx == 0 { + if time_row_nr == 0 { return None; } // The partition point is always _beyond_ the index that we're looking for; we need // to step back to find what we came for. - let primary_idx = primary_idx - 1; + let primary_row_nr = time_row_nr - 1; trace!( kind = "latest_at", %primary, ?components, timeline = %self.timeline.name(), time = self.timeline.typ().format(time), - %primary_idx, - "found primary index", + %primary_row_nr, + "found primary row number", ); - // find the secondary indices' rows, and the associated row indices. - let mut secondary_idx = primary_idx; - while index[secondary_idx as usize].is_none() { - secondary_idx -= 1; - if secondary_idx < 0 { + // find the secondary row number, and the associated cells. + let mut secondary_row_nr = primary_row_nr; + while column[secondary_row_nr as usize].is_none() { + secondary_row_nr -= 1; + if secondary_row_nr < 0 { trace!( kind = "latest_at", %primary, ?components, timeline = %self.timeline.name(), time = self.timeline.typ().format(time), - %primary_idx, - "no secondary index found", + %primary_row_nr, + "no secondary row number found", ); return None; } @@ -922,30 +722,30 @@ impl IndexBucket { ?components, timeline = %self.timeline.name(), time = self.timeline.typ().format(time), - %primary_idx, %secondary_idx, - "found secondary index", + %primary_row_nr, %secondary_row_nr, + "found secondary row number", ); - debug_assert!(index[secondary_idx as usize].is_some()); + debug_assert!(column[secondary_row_nr as usize].is_some()); - let mut row_indices = [None; N]; + let mut cells = [(); N].map(|_| None); for (i, component) in components.iter().enumerate() { - if let Some(index) = indices.get(component) { - if let Some(row_idx) = index[secondary_idx as usize] { + if let Some(column) = columns.get(component) { + if let Some(cell) = &column[secondary_row_nr as usize] { trace!( kind = "latest_at", %primary, %component, timeline = %self.timeline.name(), time = self.timeline.typ().format(time), - %primary_idx, %secondary_idx, %row_idx, - "found row index", + %primary_row_nr, %secondary_row_nr, + "found cell", ); - row_indices[i] = Some(row_idx); + cells[i] = Some(cell.clone() /* shallow */); } } } - Some(row_indices) + Some(cells) } /// Returns an empty iterator if no data could be found for any reason. @@ -953,15 +753,19 @@ impl IndexBucket { &self, time_range: TimeRange, components: [ComponentName; N], - ) -> impl Iterator; N])> + '_ { + ) -> impl Iterator; N])> + '_ { self.sort_indices_if_needed(); - let IndexBucketIndices { + let IndexedBucketInner { is_sorted, time_range: bucket_time_range, - times, - indices, - } = &*self.indices.read(); + col_time, + col_insert_id: _, + col_row_id, + col_num_instances: _, + columns, + total_size_bytes: _, // TODO + } = &*self.inner.read(); debug_assert!(is_sorted); let bucket_time_range = *bucket_time_range; @@ -969,7 +773,7 @@ impl IndexBucket { // Early-exit if this bucket is unaware of any of our components of interest. 
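The primary/secondary search implemented above reduces to a standard pattern: binary-search the sorted time column with `partition_point`, then walk backwards through the (possibly sparse) component column until a non-null cell turns up. A store-agnostic sketch of just that pattern, over plain slices:

```rust
/// Illustrative only: latest non-null value at or before `time`, given a sorted
/// `times` column and a parallel, possibly sparse `column`.
fn latest_at_sparse<T: Clone>(times: &[i64], column: &[Option<T>], time: i64) -> Option<T> {
    debug_assert_eq!(times.len(), column.len());
    // `partition_point` returns the index of the first row strictly after `time`…
    let end = times.partition_point(|&t| t <= time);
    // …so the candidates live in `[0, end)`; scan them from newest to oldest.
    (0..end).rev().find_map(|row| column[row].clone())
}
```

The real code walks back over the primary component's column and then reads every requested component at the row it lands on, but the search itself is the same.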
if components .iter() - .all(|component| indices.get(component).is_none()) + .all(|component| columns.get(component).is_none()) { return itertools::Either::Right(std::iter::empty()); } @@ -984,12 +788,10 @@ impl IndexBucket { ?components, timeline = %self.timeline.name(), time_range = self.timeline.typ().format_range(time_range), - "searching for time & component row index numbers..." + "searching for time & component cell numbers..." ); - // find the time index's row number - let time_idx_row_nr: IndexRowNr = - IndexRowNr(times.partition_point(|t| *t < time_range.min.as_i64()) as u64); + let time_row_nr = col_time.partition_point(|t| *t < time_range.min.as_i64()) as u64; trace!( kind = "range", @@ -997,46 +799,50 @@ impl IndexBucket { ?components, timeline = %self.timeline.name(), time_range = self.timeline.typ().format_range(time_range), - %time_idx_row_nr, - "found time index row number", + %time_row_nr, + "found time row number", ); // TODO(cmc): Cloning these is obviously not great and will need to be addressed at // some point. - // But, really, it's not _that_ bad either: these are integers and e.g. with the default - // configuration there are only 1024 of them (times the number of components). - let time_idx = times.clone(); - let comp_indices = indices.clone(); + // But, really, it's not _that_ bad either: these are either integers or erased pointers, + // and e.g. with the default configuration there are only 1024 of them (times the number + // of components). + let col_time = col_time.clone(); + let col_row_id = col_row_id.clone(); + let columns = columns.clone(); // shallow // We have found the index of the first row that possibly contains data for any single one // of the components we're interested in. // // Now we need to iterate through every remaining rows in the bucket and yield any that // contains data for these components and is still within the time range. - let row_indices = time_idx + let cells = col_time .into_iter() - .skip(time_idx_row_nr.0 as usize) + .skip(time_row_nr as usize) // don't go beyond the time range we're interested in! .filter(move |time| time_range.contains((*time).into())) .enumerate() - .filter_map(move |(time_idx_offset, time)| { - let comp_idx_row_nr = IndexRowNr(time_idx_row_nr.0 + time_idx_offset as u64); + .filter_map(move |(time_row_offset, time)| { + let row_nr = time_row_nr + time_row_offset as u64; - let mut row_indices = [None; N]; + let mut cells = [(); N].map(|_| None); for (i, component) in components.iter().enumerate() { - if let Some(index) = comp_indices.get(component) { - if let Some(row_idx) = index[comp_idx_row_nr.0 as usize] { - row_indices[i] = Some(row_idx); + if let Some(column) = columns.get(component) { + if let Some(cell) = &column[row_nr as usize] { + cells[i] = Some(cell.clone() /* shallow */); } } } // We only yield rows that contain data for at least one of the components of // interest. 
- if row_indices.iter().all(Option::is_none) { + if cells.iter().all(Option::is_none) { return None; } + let row_id = col_row_id[row_nr as usize]; + trace!( kind = "range", bucket_time_range = @@ -1044,30 +850,35 @@ impl IndexBucket { ?components, timeline = %self.timeline.name(), time_range = self.timeline.typ().format_range(time_range), - %comp_idx_row_nr, - ?row_indices, - "yielding row indices", + %row_nr, + %row_id, + ?cells, + "yielding cells", ); - Some((time.into(), comp_idx_row_nr, row_indices)) + Some((time.into(), row_id, cells)) }); - itertools::Either::Left(row_indices) + itertools::Either::Left(cells) } - /// Whether the indices in this `IndexBucket` are sorted + /// Whether the indices in this `IndexedBucket` are sorted pub fn is_sorted(&self) -> bool { - self.indices.read().is_sorted + self.inner.read().is_sorted } } -impl IndexBucketIndices { +impl IndexedBucketInner { pub fn sort(&mut self) { let Self { is_sorted, time_range: _, - times, - indices, + col_time, + col_insert_id, + col_row_id, + col_num_instances, + columns, + total_size_bytes: _, } = self; if *is_sorted { @@ -1077,8 +888,8 @@ impl IndexBucketIndices { crate::profile_function!(); let swaps = { - let mut swaps = (0..times.len()).collect::>(); - swaps.sort_by_key(|&i| ×[i]); + let mut swaps = (0..col_time.len()).collect::>(); + swaps.sort_by_key(|&i| &col_time[i]); swaps .iter() .copied() @@ -1090,135 +901,174 @@ impl IndexBucketIndices { // Yep, the reshuffle implementation is very dumb and very slow :) // TODO(#442): re_datastore: implement efficient shuffling on the read path. - // shuffle time index back into a sorted state - { - let source = times.clone(); + fn reshuffle_control_column( + column: &mut SmallVec<[T; N]>, + swaps: &[(usize, usize)], + ) { + let source = column.clone(); for (from, to) in swaps.iter().copied() { - times[to] = source[from]; + column[to] = source[from]; } } - - fn reshuffle_index(index: &mut SecondaryIndex, swaps: &[(usize, usize)]) { - // shuffle data - { - let source = index.clone(); - for (from, to) in swaps.iter().copied() { - index[to] = source[from]; - } - } + reshuffle_control_column(col_time, &swaps); + if !col_insert_id.is_empty() { + reshuffle_control_column(col_insert_id, &swaps); } + reshuffle_control_column(col_row_id, &swaps); + reshuffle_control_column(col_num_instances, &swaps); - // shuffle component indices back into a sorted state - for index in indices.values_mut() { - reshuffle_index(index, &swaps); + // shuffle component columns back into a sorted state + for column in columns.values_mut() { + let mut source = column.clone(); + for (from, to) in swaps.iter().copied() { + column[to] = source[from].take(); + } } *is_sorted = true; } } -// --- Persistent Components --- +// --- Timeless --- -impl PersistentComponentTable { - /// Returns a shallow clone of the row data present at the given `row_idx`. - /// - /// Panics if `row_idx` is out of bounds. - pub fn get(&self, row_idx: RowIndex) -> Box { - crate::profile_function!(); +impl PersistentIndexedTable { + /// Returns `None` iff no cell could be found for the `primary` component. + fn latest_at( + &self, + primary: ComponentName, + components: &[ComponentName; N], + ) -> Option<[Option; N]> { + if self.is_empty() { + return None; + } - self.chunks[row_idx.as_u64() as usize] - .as_any() - .downcast_ref::>() - .unwrap() - .value(0) - } -} + // Early-exit if this bucket is unaware of this component. 
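// Sketch of the sort-by-swaps reshuffle above: compute the sorting permutation from the time
// column once, then apply that same permutation to every other column so all columns stay
// row-aligned. Plain `Vec`s stand in for the store's control and component columns.
fn sort_bucket_sketch(col_time: &mut Vec<i64>, columns: &mut [Vec<Option<u32>>]) {
    // The sorting permutation: `order[to] == from` means row `to` of the sorted bucket comes
    // from row `from` of the unsorted one.
    let mut order: Vec<usize> = (0..col_time.len()).collect();
    order.sort_by_key(|&i| col_time[i]);

    let apply = |column: &mut Vec<Option<u32>>| {
        let source = column.clone();
        for (to, &from) in order.iter().enumerate() {
            column[to] = source[from];
        }
    };

    // Reorder the time column itself...
    let source = col_time.clone();
    for (to, &from) in order.iter().enumerate() {
        col_time[to] = source[from];
    }
    // ...and every component column with the exact same permutation.
    for column in columns {
        apply(column);
    }
}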
+ let column = self.columns.get(&primary)?; -// --- Components --- + crate::profile_function!(); -impl ComponentTable { - pub fn get(&self, row_idx: RowIndex) -> Option> { - let bucket_nr = self - .buckets - .partition_point(|bucket| row_idx.as_u64() >= bucket.row_offset); + trace!( + kind = "latest_at", + %primary, + ?components, + timeless = true, + "searching for primary & secondary cells..." + ); - // The partition point will give us the index of the first bucket that has a row offset - // strictly greater than the row index we're looking for, therefore we need to take a - // step back to find what we're looking for. - // - // Component tables always spawn with a default bucket at offset 0, so the smallest - // partition point that can ever be returned is one, making this operation always - // overflow-safe... unless the garbage collector has ever run, in which case all bets are - // off! - let Some(bucket_nr) = bucket_nr.checked_sub(1) else { return None }; + // find the primary row number's row. + let primary_row_nr = self.total_rows() - 1; - if let Some(bucket) = self.buckets.get(bucket_nr) { - trace!( - kind = "get", - component = self.name.as_str(), - %row_idx, - bucket_nr, - %bucket.row_offset, - "fetching component data" - ); - bucket.get(row_idx) - } else { - trace!( - kind = "get", - component = self.name.as_str(), - %row_idx, - bucket_nr, - "row index is out of bounds" - ); - None + trace!( + kind = "latest_at", + %primary, + ?components, + %primary_row_nr, + timeless = true, + "found primary row number", + ); + + // find the secondary indices' rows, and the associated cells. + let mut secondary_row_nr = primary_row_nr as i64; + while column[secondary_row_nr as usize].is_none() { + secondary_row_nr -= 1; + if secondary_row_nr < 0 { + trace!( + kind = "latest_at", + %primary, + ?components, + timeless = true, + %primary_row_nr, + "no secondary row number found", + ); + return None; + } } - } - /// Returns an iterator over the `ComponentBucket` in this table - #[allow(dead_code)] - pub fn iter_buckets(&self) -> impl ExactSizeIterator { - self.buckets.iter() - } -} + trace!( + kind = "latest_at", + %primary, + ?components, + timeless = true, + %primary_row_nr, %secondary_row_nr, + "found secondary row number", + ); + debug_assert!(column[secondary_row_nr as usize].is_some()); + + let mut cells = [(); N].map(|_| None); + for (i, component) in components.iter().enumerate() { + if let Some(column) = self.columns.get(component) { + if let Some(cell) = &column[secondary_row_nr as usize] { + trace!( + kind = "latest_at", + %primary, + %component, + timeless = true, + %primary_row_nr, %secondary_row_nr, + "found cell", + ); + cells[i] = Some(cell.clone() /* shallow */); + } + } + } -impl ComponentBucket { - /// Returns the name of the component stored in this bucket. - #[allow(dead_code)] - pub fn name(&self) -> &str { - &self.name + Some(cells) } - /// Returns a shallow clone of the row data present at the given `row_idx`. - pub fn get(&self, row_idx: RowIndex) -> Option> { - let row_idx = row_idx.as_u64() - self.row_offset; - // This has to be safe to unwrap, otherwise it would never have made it past insertion. 
- if self.archived { - debug_assert_eq!(self.chunks.len(), 1); - let list = self.chunks[0] - .as_any() - .downcast_ref::>() - .unwrap(); - (row_idx < list.len() as u64).then(|| list.value(row_idx as _)) - } else { - self.chunks.get(row_idx as usize).map(|chunk| { - chunk - .as_any() - .downcast_ref::>() - .unwrap() - .value(0) - }) + /// Returns an empty iterator if no data could be found for any reason. + pub fn range( + &self, + components: [ComponentName; N], + ) -> impl Iterator; N])> + '_ { + // Early-exit if the table is unaware of any of our components of interest. + if components + .iter() + .all(|component| self.columns.get(component).is_none()) + { + return itertools::Either::Right(std::iter::empty()); } - } - /// Returns a shallow clone of all the chunks in this bucket. - #[allow(dead_code)] - pub fn data(&self) -> Vec> { - self.chunks.clone() // shallow - } + // Beware! This merely measures the time it takes to gather all the necessary metadata + // for building the returned iterator. + crate::profile_function!(); + + // TODO(cmc): Cloning these is obviously not great and will need to be addressed at + // some point. + // But, really, it's not _that_ bad either: these are either integers or erased pointers, + // and e.g. with the default configuration there are only 1024 of them (times the number + // of components). + let row_id = self.col_row_id.clone(); + let columns = self.columns.clone(); // shallow + + let cells = (0..self.total_rows()).filter_map(move |row_nr| { + let mut cells = [(); N].map(|_| None); + for (i, component) in components.iter().enumerate() { + if let Some(column) = columns.get(component) { + if let Some(cell) = &column[row_nr as usize] { + cells[i] = Some(cell.clone() /* shallow */); + } + } + } + + // We only yield rows that contain data for at least one of the components of + // interest. + if cells.iter().all(Option::is_none) { + return None; + } + + let row_id = row_id[row_nr as usize]; + + trace!( + kind = "range", + ?components, + timeless = true, + %row_nr, + ?cells, + "yielding cells", + ); + + Some((row_id, cells)) + }); - /// Return an iterator over the time ranges in this bucket. 
- #[allow(dead_code)] - pub fn iter_time_ranges(&self) -> impl Iterator { - self.time_ranges.iter() + itertools::Either::Left(cells) } } diff --git a/crates/re_arrow_store/src/store_sanity.rs b/crates/re_arrow_store/src/store_sanity.rs index f002f1c13d8ce..5a741c57be522 100644 --- a/crates/re_arrow_store/src/store_sanity.rs +++ b/crates/re_arrow_store/src/store_sanity.rs @@ -1,15 +1,36 @@ -use std::collections::BTreeMap; +use re_log_types::{ + ComponentName, DataCellColumn, COLUMN_NUM_INSTANCES, COLUMN_ROW_ID, COLUMN_TIMEPOINT, +}; -use anyhow::{anyhow, ensure}; -use nohash_hasher::IntMap; -use re_log_types::{TimeInt, Timeline}; +use crate::{DataStore, IndexedBucket, IndexedBucketInner, IndexedTable, PersistentIndexedTable}; -use crate::{ - ComponentBucket, ComponentTable, DataStore, IndexBucket, IndexBucketIndices, IndexTable, - PersistentComponentTable, PersistentIndexTable, -}; +// --- + +#[derive(thiserror::Error, Debug)] +pub enum SanityError { + #[error("Column '{component}' has too few/many rows: got {got} instead of {expected}")] + ColumnLengthMismatch { + component: ComponentName, + expected: u64, + got: u64, + }, + + #[error("Couldn't find any column for the configured cluster key ('{cluster_key}')")] + ClusterColumnMissing { cluster_key: ComponentName }, + + #[error("The cluster column must be dense, found holes: {cluster_column:?}")] + ClusterColumnSparse { cluster_column: Box }, + + #[error("Found overlapping indexed buckets: {t1_max_formatted} ({t1_max}) <-> {t2_max_formatted} ({t2_max})")] + OverlappingBuckets { + t1_max: i64, + t1_max_formatted: String, + t2_max: i64, + t2_max_formatted: String, + }, +} -// TODO(#527): Typed errors. +pub type SanityResult = ::std::result::Result; // --- Data store --- @@ -20,160 +41,95 @@ impl DataStore { pub fn sanity_check(&self) -> anyhow::Result<()> { crate::profile_function!(); - // Row indices should be continuous across all index tables. - if self.gc_id == 0 { - let mut row_indices: IntMap<_, Vec> = IntMap::default(); - for table in self.indices.values() { - for bucket in table.buckets.values() { - for (comp, index) in &bucket.indices.read().indices { - let row_indices = row_indices.entry(*comp).or_default(); - row_indices.extend(index.iter().flatten().map(|row_idx| row_idx.as_u64())); - } - } - } - - for (comp, mut row_indices) in row_indices { - // Not an actual row index! - if comp == DataStore::insert_id_key() { - continue; - } - - row_indices.sort(); - row_indices.dedup(); - for pair in row_indices.windows(2) { - let &[i1, i2] = pair else { unreachable!() }; - ensure!( - i1 + 1 == i2, - "found hole in index coverage for {comp:?}: \ - in {row_indices:?}, {i1} -> {i2}" - ); - } - } - } - - // Row indices should be continuous across all timeless index tables. - { - let mut row_indices: IntMap<_, Vec> = IntMap::default(); - for table in self.timeless_indices.values() { - for (comp, index) in &table.indices { - let row_indices = row_indices.entry(*comp).or_default(); - row_indices.extend(index.iter().flatten().map(|row_idx| row_idx.as_u64())); - } - } - - for (comp, mut row_indices) in row_indices { - // Not an actual row index! 
- if comp == DataStore::insert_id_key() { - continue; - } - - row_indices.sort(); - row_indices.dedup(); - for pair in row_indices.windows(2) { - let &[i1, i2] = pair else { unreachable!() }; - ensure!( - i1 + 1 == i2, - "found hole in timeless index coverage for {comp:?}: \ - in {row_indices:?}, {i1} -> {i2}" - ); - } - } - } - - for table in self.timeless_indices.values() { - table.sanity_check()?; - } - for table in self.timeless_components.values() { + for table in self.timeless_tables.values() { table.sanity_check()?; } - for table in self.indices.values() { - table.sanity_check()?; - } - for table in self.components.values() { + for table in self.tables.values() { table.sanity_check()?; } Ok(()) } - - /// The oldest time for which we have any data. - /// - /// Ignores timeless data. - /// - /// Useful to call after a gc. - pub fn oldest_time_per_timeline(&self) -> BTreeMap { - crate::profile_function!(); - - let mut oldest_time_per_timeline = BTreeMap::default(); - - for component_table in self.components.values() { - for bucket in &component_table.buckets { - for (timeline, time_range) in &bucket.time_ranges { - let entry = oldest_time_per_timeline - .entry(*timeline) - .or_insert(TimeInt::MAX); - *entry = time_range.min.min(*entry); - } - } - } - - oldest_time_per_timeline - } } // --- Persistent Indices --- -impl PersistentIndexTable { +impl PersistentIndexedTable { /// Runs the sanity check suite for the entire table. /// /// Returns an error if anything looks wrong. - pub fn sanity_check(&self) -> anyhow::Result<()> { + pub fn sanity_check(&self) -> SanityResult<()> { crate::profile_function!(); let Self { ent_path: _, cluster_key, - num_rows, - indices, - all_components: _, + col_insert_id, + col_row_id, + col_num_instances, + columns, + total_size_bytes: _, // TODO } = self; - // All indices should be `Self::num_rows` long. + // All columns should be `Self::num_rows` long. { - for (comp, index) in indices { - let secondary_len = index.len() as u64; - ensure!( - *num_rows == secondary_len, - "found rogue secondary index for {comp:?}: \ - expected {num_rows} rows, got {secondary_len} instead", - ); + let num_rows = self.total_rows(); + + let column_lengths = [ + (!col_insert_id.is_empty()) + .then(|| (DataStore::insert_id_key(), col_insert_id.len())), // + Some((COLUMN_ROW_ID.into(), col_row_id.len())), + Some((COLUMN_NUM_INSTANCES.into(), col_num_instances.len())), + ] + .into_iter() + .flatten() + .chain( + columns + .iter() + .map(|(component, column)| (*component, column.len())), + ) + .map(|(component, len)| (component, len as u64)); + + for (component, len) in column_lengths { + if len != num_rows { + return Err(SanityError::ColumnLengthMismatch { + component, + expected: num_rows, + got: len, + }); + } } } - // The cluster index must be fully dense. + // The cluster column must be fully dense. { - let cluster_idx = indices - .get(cluster_key) - .ok_or_else(|| anyhow!("no index found for cluster key: {cluster_key:?}"))?; - ensure!( - cluster_idx.iter().all(|row| row.is_some()), - "the cluster index ({cluster_key:?}) must be fully dense: \ - got {cluster_idx:?}", - ); + let cluster_column = + columns + .get(cluster_key) + .ok_or(SanityError::ClusterColumnMissing { + cluster_key: *cluster_key, + })?; + if !cluster_column.iter().all(|cell| cell.is_some()) { + return Err(SanityError::ClusterColumnSparse { + cluster_column: cluster_column.clone().into(), + }); + } } + // TODO: recomputing shouldnt change the size! 
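// Sketch of the column-length check above: every control and component column of a table must
// be exactly as long as the table itself, otherwise a typed error is returned instead of an
// `anyhow` string. The names below are illustrative stand-ins for the store's own types.
#[derive(Debug)]
enum SanityErrorSketch {
    ColumnLengthMismatch { name: String, expected: usize, got: usize },
}

fn check_column_lengths(
    expected_rows: usize,
    columns: &[(&str, usize)], // (column name, column length)
) -> Result<(), SanityErrorSketch> {
    for &(name, len) in columns {
        if len != expected_rows {
            return Err(SanityErrorSketch::ColumnLengthMismatch {
                name: name.to_owned(),
                expected: expected_rows,
                got: len,
            });
        }
    }
    Ok(())
}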
+ Ok(()) } } // --- Indices --- -impl IndexTable { +impl IndexedTable { /// Runs the sanity check suite for the entire table. /// /// Returns an error if anything looks wrong. - pub fn sanity_check(&self) -> anyhow::Result<()> { + pub fn sanity_check(&self) -> SanityResult<()> { crate::profile_function!(); // No two buckets should ever overlap time-range-wise. @@ -181,18 +137,18 @@ impl IndexTable { let time_ranges = self .buckets .values() - .map(|bucket| bucket.indices.read().time_range) + .map(|bucket| bucket.inner.read().time_range) .collect::>(); for time_ranges in time_ranges.windows(2) { let &[t1, t2] = time_ranges else { unreachable!() }; - ensure!( - t1.max.as_i64() < t2.min.as_i64(), - "found overlapping index buckets: {} ({}) <-> {} ({})", - self.timeline.typ().format(t1.max), - t1.max.as_i64(), - self.timeline.typ().format(t2.min), - t2.min.as_i64(), - ); + if t1.max.as_i64() >= t2.min.as_i64() { + return Err(SanityError::OverlappingBuckets { + t1_max: t1.max.as_i64(), + t1_max_formatted: self.timeline.typ().format(t1.max), + t2_max: t2.max.as_i64(), + t2_max_formatted: self.timeline.typ().format(t2.max), + }); + } } } @@ -205,122 +161,82 @@ impl IndexTable { } } -impl IndexBucket { +impl IndexedBucket { /// Runs the sanity check suite for the entire bucket. /// /// Returns an error if anything looks wrong. - pub fn sanity_check(&self) -> anyhow::Result<()> { + pub fn sanity_check(&self) -> SanityResult<()> { crate::profile_function!(); - let IndexBucketIndices { + let Self { + timeline: _, + cluster_key, + inner, + } = self; + + let IndexedBucketInner { is_sorted: _, time_range: _, - times, - indices, - } = &*self.indices.read(); - - // All indices should contain the exact same number of rows as the time index. + col_time, + col_insert_id, + col_row_id, + col_num_instances, + columns, + total_size_bytes: _, // TODO + } = &*inner.read(); + + // dbg!(&col_time); + // dbg!(&columns); + + // All columns should be `Self::num_rows` long. { - let primary_len = times.len(); - for (comp, index) in indices { - let secondary_len = index.len(); - ensure!( - primary_len == secondary_len, - "found rogue secondary index for {comp:?}: \ - expected {primary_len} rows, got {secondary_len} instead", - ); - } - } - - // The cluster index must be fully dense. - { - let cluster_key = self.cluster_key; - let cluster_idx = indices - .get(&cluster_key) - .ok_or_else(|| anyhow!("no index found for cluster key: {cluster_key:?}"))?; - ensure!( - cluster_idx.iter().all(|row| row.is_some()), - "the cluster index ({cluster_key:?}) must be fully dense: \ - got {cluster_idx:?}", - ); - } - - Ok(()) - } -} - -// --- Persistent Components --- - -impl PersistentComponentTable { - /// Runs the sanity check suite for the entire table. - /// - /// Returns an error if anything looks wrong. 
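// Sketch of the bucket-overlap check above: buckets are keyed by their lower time bound, so
// walking consecutive pairs of (min, max) time ranges with `windows(2)` and requiring
// `previous.max < next.min` is enough to prove that no two buckets ever overlap.
fn check_no_overlap(time_ranges: &[(i64, i64)]) -> Result<(), String> {
    for pair in time_ranges.windows(2) {
        let [(_, t1_max), (t2_min, _)] = [pair[0], pair[1]];
        if t1_max >= t2_min {
            return Err(format!("overlapping buckets: {t1_max} >= {t2_min}"));
        }
    }
    Ok(())
}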
- pub fn sanity_check(&self) -> anyhow::Result<()> { - crate::profile_function!(); - - // All chunks should always be dense - { - for chunk in &self.chunks { - ensure!( - chunk.validity().is_none(), - "persistent component chunks should always be dense", - ); + let num_rows = self.total_rows(); + + use itertools::Itertools as _; + let column_lengths = [ + (!col_insert_id.is_empty()) + .then(|| (DataStore::insert_id_key(), col_insert_id.len())), // + Some((COLUMN_TIMEPOINT.into(), col_time.len())), + Some((COLUMN_ROW_ID.into(), col_row_id.len())), + Some((COLUMN_NUM_INSTANCES.into(), col_num_instances.len())), + ] + .into_iter() + .flatten() + .chain( + columns + .iter() + .map(|(component, column)| (*component, column.len())), + ) + .map(|(component, len)| (component, len as u64)) + .collect_vec(); // TODO + + for (component, len) in column_lengths { + if len != num_rows { + return Err(SanityError::ColumnLengthMismatch { + component, + expected: num_rows, + got: len, + }); + } } } - Ok(()) - } -} - -// --- Components --- - -impl ComponentTable { - /// Runs the sanity check suite for the entire table. - /// - /// Returns an error if anything looks wrong. - pub fn sanity_check(&self) -> anyhow::Result<()> { - crate::profile_function!(); - - // No two buckets should ever overlap row-range-wise. + // The cluster column must be fully dense. { - let row_ranges = self - .buckets - .iter() - .map(|bucket| bucket.row_offset..bucket.row_offset + bucket.total_rows()) - .collect::>(); - for row_ranges in row_ranges.windows(2) { - let &[r1, r2] = &row_ranges else { unreachable!() }; - ensure!( - !r1.contains(&r2.start), - "found overlapping component buckets: {r1:?} <-> {r2:?}" - ); + let cluster_column = + columns + .get(cluster_key) + .ok_or(SanityError::ClusterColumnMissing { + cluster_key: *cluster_key, + })?; + if !cluster_column.iter().all(|cell| cell.is_some()) { + return Err(SanityError::ClusterColumnSparse { + cluster_column: cluster_column.clone().into(), + }); } } - for bucket in &self.buckets { - bucket.sanity_check()?; - } - - Ok(()) - } -} - -impl ComponentBucket { - /// Runs the sanity check suite for the entire table. - /// - /// Returns an error if anything looks wrong. - pub fn sanity_check(&self) -> anyhow::Result<()> { - crate::profile_function!(); - - // All chunks should always be dense - { - for chunk in &self.chunks { - ensure!( - chunk.validity().is_none(), - "component bucket chunks should always be dense", - ); - } - } + // TODO: recomputing shouldnt change the size! 
Ok(()) } diff --git a/crates/re_arrow_store/src/store_stats.rs b/crates/re_arrow_store/src/store_stats.rs index 10111073064a2..fb836133d5da1 100644 --- a/crates/re_arrow_store/src/store_stats.rs +++ b/crates/re_arrow_store/src/store_stats.rs @@ -1,7 +1,4 @@ -use crate::{ - ComponentBucket, ComponentTable, DataStore, DataStoreConfig, IndexBucket, IndexBucketIndices, - IndexTable, PersistentComponentTable, PersistentIndexTable, -}; +use crate::{DataStore, DataStoreConfig, IndexedBucket, IndexedTable, PersistentIndexedTable}; // --- @@ -10,20 +7,13 @@ use crate::{ pub struct DataStoreStats { pub total_timeless_index_rows: u64, pub total_timeless_index_size_bytes: u64, - pub total_timeless_component_rows: u64, - pub total_timeless_component_size_bytes: u64, pub total_temporal_index_rows: u64, pub total_temporal_index_size_bytes: u64, pub total_temporal_index_buckets: u64, - pub total_temporal_component_rows: u64, - pub total_temporal_component_size_bytes: u64, - pub total_temporal_component_buckets: u64, pub total_index_rows: u64, pub total_index_size_bytes: u64, - pub total_component_rows: u64, - pub total_component_size_bytes: u64, pub config: DataStoreConfig, } @@ -34,40 +24,25 @@ impl DataStoreStats { let total_timeless_index_rows = store.total_timeless_index_rows(); let total_timeless_index_size_bytes = store.total_timeless_index_size_bytes(); - let total_timeless_component_rows = store.total_timeless_component_rows(); - let total_timeless_component_size_bytes = store.total_timeless_component_size_bytes(); let total_temporal_index_rows = store.total_temporal_index_rows(); let total_temporal_index_size_bytes = store.total_temporal_index_size_bytes(); let total_temporal_index_buckets = store.total_temporal_index_buckets(); - let total_temporal_component_rows = store.total_temporal_component_rows(); - let total_temporal_component_size_bytes = store.total_temporal_component_size_bytes(); - let total_temporal_component_buckets = store.total_temporal_component_buckets(); let total_index_rows = total_timeless_index_rows + total_temporal_index_rows; let total_index_size_bytes = total_timeless_index_size_bytes + total_temporal_index_size_bytes; - let total_component_rows = total_timeless_component_rows + total_temporal_component_rows; - let total_component_size_bytes = - total_timeless_component_size_bytes + total_temporal_component_size_bytes; Self { total_timeless_index_rows, total_timeless_index_size_bytes, - total_timeless_component_rows, - total_timeless_component_size_bytes, total_temporal_index_rows, total_temporal_index_size_bytes, total_temporal_index_buckets, - total_temporal_component_rows, - total_temporal_component_size_bytes, - total_temporal_component_buckets, total_index_rows, total_index_size_bytes, - total_component_rows, - total_component_size_bytes, config: store.config.clone(), } @@ -78,95 +53,51 @@ impl DataStoreStats { impl DataStore { /// Returns the number of timeless index rows stored across this entire store, i.e. the sum of - /// the number of rows across all of its timeless index tables. + /// the number of rows across all of its timeless indexed tables. + #[inline] pub fn total_timeless_index_rows(&self) -> u64 { crate::profile_function!(); - self.timeless_indices + self.timeless_tables .values() .map(|table| table.total_rows()) .sum() } /// Returns the size of the timeless index data stored across this entire store, i.e. the sum - /// of the size of the data stored across all of its timeless index tables, in bytes. 
+ /// of the size of the data stored across all of its timeless indexed tables, in bytes. + #[inline] pub fn total_timeless_index_size_bytes(&self) -> u64 { crate::profile_function!(); - self.timeless_indices - .values() - .map(|table| table.total_size_bytes()) - .sum() - } - - /// Returns the number of timeless component rows stored across this entire store, i.e. the - /// sum of the number of rows across all of its timeless component tables. - pub fn total_timeless_component_rows(&self) -> u64 { - crate::profile_function!(); - self.timeless_components - .values() - .map(|table| table.total_rows()) - .sum() - } - - /// Returns the size of the timeless component data stored across this entire store, i.e. the - /// sum of the size of the data stored across all of its timeless component tables, in bytes. - pub fn total_timeless_component_size_bytes(&self) -> u64 { - crate::profile_function!(); - self.timeless_components + self.timeless_tables .values() .map(|table| table.total_size_bytes()) .sum() } /// Returns the number of temporal index rows stored across this entire store, i.e. the sum of - /// the number of rows across all of its temporal index tables. + /// the number of rows across all of its temporal indexed tables. + #[inline] pub fn total_temporal_index_rows(&self) -> u64 { crate::profile_function!(); - self.indices.values().map(|table| table.total_rows()).sum() + self.tables.values().map(|table| table.total_rows()).sum() } /// Returns the size of the temporal index data stored across this entire store, i.e. the sum - /// of the size of the data stored across all of its temporal index tables, in bytes. + /// of the size of the data stored across all of its temporal indexed tables, in bytes. + #[inline] pub fn total_temporal_index_size_bytes(&self) -> u64 { crate::profile_function!(); - self.indices + self.tables .values() .map(|table| table.total_size_bytes()) .sum() } - /// Returns the number of temporal index buckets stored across this entire store. + /// Returns the number of temporal indexed buckets stored across this entire store. + #[inline] pub fn total_temporal_index_buckets(&self) -> u64 { crate::profile_function!(); - self.indices - .values() - .map(|table| table.total_buckets()) - .sum() - } - - /// Returns the number of temporal component rows stored across this entire store, i.e. the - /// sum of the number of rows across all of its temporal component tables. - pub fn total_temporal_component_rows(&self) -> u64 { - crate::profile_function!(); - self.components - .values() - .map(|table| table.total_rows()) - .sum() - } - - /// Returns the size of the temporal component data stored across this entire store, i.e. the - /// sum of the size of the data stored across all of its temporal component tables, in bytes. - pub fn total_temporal_component_size_bytes(&self) -> u64 { - crate::profile_function!(); - self.components - .values() - .map(|table| table.total_size_bytes()) - .sum() - } - - /// Returns the number of temporal component buckets stored across this entire store. - pub fn total_temporal_component_buckets(&self) -> u64 { - crate::profile_function!(); - self.components + self.tables .values() .map(|table| table.total_buckets()) .sum() @@ -175,278 +106,54 @@ impl DataStore { // --- Persistent Indices --- -impl PersistentIndexTable { +impl PersistentIndexedTable { /// Returns the number of rows stored across this table. 
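// Tiny sketch of how the store-wide totals above are computed: each table already tracks its
// own row count and byte size, and the store simply sums those per-table totals (illustrative
// struct, not the store's own).
struct TableStatsSketch {
    rows: u64,
    size_bytes: u64,
}

fn store_totals(tables: &[TableStatsSketch]) -> (u64, u64) {
    (
        tables.iter().map(|t| t.rows).sum(),
        tables.iter().map(|t| t.size_bytes).sum(),
    )
}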
+ #[inline] pub fn total_rows(&self) -> u64 { - self.num_rows + self.col_num_instances.len() as _ } /// Returns the size of the data stored across this table, in bytes. + #[inline] pub fn total_size_bytes(&self) -> u64 { - self.indices - .values() - .map(|index| std::mem::size_of_val(index.as_slice()) as u64) - .sum::() + self.total_size_bytes } } // --- Indices --- -impl IndexTable { +impl IndexedTable { /// Returns the number of rows stored across this entire table, i.e. the sum of the number /// of rows stored across all of its buckets. - pub fn total_rows(&self) -> u64 { - self.buckets - .values() - .map(|bucket| bucket.total_rows()) - .sum() - } - - /// Returns the size of data stored across this entire table, i.e. the sum of the size of - /// the data stored across all of its buckets, in bytes. - pub fn total_size_bytes(&self) -> u64 { - self.buckets - .values() - .map(|bucket| bucket.total_size_bytes()) - .sum() - } - - /// Returns the number of buckets stored across this entire table. - pub fn total_buckets(&self) -> u64 { - self.buckets.len() as _ - } -} - -impl IndexBucket { - /// Returns the number of rows stored across this bucket. - pub fn total_rows(&self) -> u64 { - self.indices.read().times.len() as u64 - } - - /// Returns the size of the data stored across this bucket, in bytes. - pub fn total_size_bytes(&self) -> u64 { - let IndexBucketIndices { - is_sorted: _, - time_range: _, - times, - indices, - } = &*self.indices.read(); - - std::mem::size_of_val(times.as_slice()) as u64 - + indices - .values() - .map(|index| std::mem::size_of_val(index.as_slice()) as u64) - .sum::() - } -} - -// --- Persistent Components --- - -impl PersistentComponentTable { - /// Returns the number of rows stored across this table. + #[inline] pub fn total_rows(&self) -> u64 { self.total_rows } - /// Returns the size of the data stored across this table, in bytes. - pub fn total_size_bytes(&self) -> u64 { - self.total_size_bytes - } -} - -// --- Components --- - -impl ComponentTable { - /// Returns the number of rows stored across this entire table, i.e. the sum of the number - /// of rows stored across all of its buckets. - pub fn total_rows(&self) -> u64 { - self.buckets.iter().map(|bucket| bucket.total_rows()).sum() - } - /// Returns the size of data stored across this entire table, i.e. the sum of the size of /// the data stored across all of its buckets, in bytes. + #[inline] pub fn total_size_bytes(&self) -> u64 { - self.buckets - .iter() - .map(|bucket| bucket.total_size_bytes()) - .sum() + self.total_size_bytes } /// Returns the number of buckets stored across this entire table. + #[inline] pub fn total_buckets(&self) -> u64 { self.buckets.len() as _ } } -impl ComponentBucket { +impl IndexedBucket { /// Returns the number of rows stored across this bucket. + #[inline] pub fn total_rows(&self) -> u64 { - self.total_rows + self.inner.read().col_time.len() as u64 } /// Returns the size of the data stored across this bucket, in bytes. + #[inline] pub fn total_size_bytes(&self) -> u64 { - self.total_size_bytes - } -} - -// This test exists because the documentation and online discussions revolving around -// arrow2's `estimated_bytes_size()` function indicate that there's a lot of limitations and -// edge cases to be aware of. -// -// Also, it's just plain hard to be sure that the answer you get is the answer you're looking -// for with these kinds of tools. When in doubt.. test everything we're going to need from it. 
-// -// In many ways, this is a specification of what we mean when we ask "what's the size of this -// Arrow array?". -#[test] -#[allow(clippy::from_iter_instead_of_collect)] -fn test_arrow_estimated_size_bytes() { - use arrow2::{ - array::{Array, Float64Array, ListArray, StructArray, UInt64Array, Utf8Array}, - compute::aggregate::estimated_bytes_size, - datatypes::{DataType, Field}, - offset::Offsets, - }; - - // simple primitive array - { - let data = vec![42u64; 100]; - let array = UInt64Array::from_vec(data.clone()).boxed(); - assert_eq!( - std::mem::size_of_val(data.as_slice()), - estimated_bytes_size(&*array) - ); - } - - // utf8 strings array - { - let data = vec![Some("some very, very, very long string indeed"); 100]; - let array = Utf8Array::::from(data.clone()).to_boxed(); - - let raw_size_bytes = data - .iter() - // headers + bodies! - .map(|s| std::mem::size_of_val(s) + std::mem::size_of_val(s.unwrap().as_bytes())) - .sum::(); - let arrow_size_bytes = estimated_bytes_size(&*array); - - assert_eq!(5600, raw_size_bytes); - assert_eq!(4404, arrow_size_bytes); // smaller because validity bitmaps instead of opts - } - - // simple primitive list array - { - let data = std::iter::repeat(vec![42u64; 100]) - .take(50) - .collect::>(); - let array = { - let array_flattened = - UInt64Array::from_vec(data.clone().into_iter().flatten().collect()).boxed(); - - ListArray::::new( - ListArray::::default_datatype(DataType::UInt64), - Offsets::try_from_lengths(std::iter::repeat(50).take(50)) - .unwrap() - .into(), - array_flattened, - None, - ) - .boxed() - }; - - let raw_size_bytes = data - .iter() - // headers + bodies! - .map(|s| std::mem::size_of_val(s) + std::mem::size_of_val(s.as_slice())) - .sum::(); - let arrow_size_bytes = estimated_bytes_size(&*array); - - assert_eq!(41200, raw_size_bytes); - assert_eq!(40200, arrow_size_bytes); // smaller because smaller inner headers - } - - // compound type array - { - #[derive(Clone, Copy)] - struct Point { - x: f64, - y: f64, - } - - impl Default for Point { - fn default() -> Self { - Self { x: 42.0, y: 666.0 } - } - } - - let data = vec![Point::default(); 100]; - let array = { - let x = Float64Array::from_vec(data.iter().map(|p| p.x).collect()).boxed(); - let y = Float64Array::from_vec(data.iter().map(|p| p.y).collect()).boxed(); - let fields = vec![ - Field::new("x", DataType::Float64, false), - Field::new("y", DataType::Float64, false), - ]; - StructArray::new(DataType::Struct(fields), vec![x, y], None).boxed() - }; - - let raw_size_bytes = std::mem::size_of_val(data.as_slice()); - let arrow_size_bytes = estimated_bytes_size(&*array); - - assert_eq!(1600, raw_size_bytes); - assert_eq!(1600, arrow_size_bytes); - } - - // compound type list array - { - #[derive(Clone, Copy)] - struct Point { - x: f64, - y: f64, - } - - impl Default for Point { - fn default() -> Self { - Self { x: 42.0, y: 666.0 } - } - } - - let data = std::iter::repeat(vec![Point::default(); 100]) - .take(50) - .collect::>(); - let array: Box = { - let array = { - let x = - Float64Array::from_vec(data.iter().flatten().map(|p| p.x).collect()).boxed(); - let y = - Float64Array::from_vec(data.iter().flatten().map(|p| p.y).collect()).boxed(); - let fields = vec![ - Field::new("x", DataType::Float64, false), - Field::new("y", DataType::Float64, false), - ]; - StructArray::new(DataType::Struct(fields), vec![x, y], None) - }; - - ListArray::::new( - ListArray::::default_datatype(array.data_type().clone()), - Offsets::try_from_lengths(std::iter::repeat(50).take(50)) - .unwrap() - 
.into(), - array.boxed(), - None, - ) - .boxed() - }; - - let raw_size_bytes = data - .iter() - // headers + bodies! - .map(|s| std::mem::size_of_val(s) + std::mem::size_of_val(s.as_slice())) - .sum::(); - let arrow_size_bytes = estimated_bytes_size(&*array); - - assert_eq!(81200, raw_size_bytes); - assert_eq!(80200, arrow_size_bytes); // smaller because smaller inner headers + self.inner.read().total_size_bytes } } diff --git a/crates/re_arrow_store/src/store_write.rs b/crates/re_arrow_store/src/store_write.rs index 1585ac537021f..01177b0289ef9 100644 --- a/crates/re_arrow_store/src/store_write.rs +++ b/crates/re_arrow_store/src/store_write.rs @@ -1,23 +1,26 @@ use arrow2::datatypes::DataType; use itertools::Itertools as _; -use nohash_hasher::IntMap; +use nohash_hasher::{IntMap, IntSet}; use parking_lot::RwLock; +use smallvec::SmallVec; use re_log::{debug, trace}; use re_log_types::{ - component_types::InstanceKey, ComponentName, DataCell, DataCellError, DataRow, DataTable, - EntityPath, MsgId, TimeInt, TimePoint, TimeRange, Timeline, + component_types::InstanceKey, ComponentName, DataCell, DataCellColumn, DataCellError, DataRow, + DataTable, EntityPath, TimeInt, TimeRange, }; use crate::{ - ComponentBucket, ComponentTable, DataStore, DataStoreConfig, IndexBucket, IndexBucketIndices, - IndexTable, PersistentComponentTable, PersistentIndexTable, RowIndex, RowIndexKind, TimeIndex, + DataStore, DataStoreConfig, IndexedBucket, IndexedBucketInner, IndexedTable, + PersistentIndexedTable, }; // TODO(#1619): // - The store should insert column-per-column rather than row-per-row (purely a performance // matter) -// - The store shouldn't ever deal with raw arrow arrays, use cells/rows/tables instead +// - None of these APIs should be taking references to cells?? (LogMsg storage...) + +// TODO: incremental size bytes? // --- Data store --- @@ -26,7 +29,6 @@ pub enum WriteError { #[error("Error with one or more the underlying data cells")] DataCell(#[from] DataCellError), - // Clustering key #[error("The cluster component must be dense, got {0:?}")] SparseClusteringComponent(DataCell), @@ -36,7 +38,16 @@ pub enum WriteError { )] InvalidClusteringComponent(DataCell), - // Misc + #[error( + "Component '{component}' failed to typecheck: expected {expected:#?} but got {got:#?}" + )] + TypeCheck { + component: ComponentName, + expected: DataType, + got: DataType, + }, + + // TODO #[error("Other error")] Other(#[from] anyhow::Error), } @@ -64,7 +75,7 @@ impl DataStore { /// /// If the bundle doesn't carry a payload for the cluster key, one will be auto-generated /// based on the length of the components in the payload, in the form of an array of - /// monotonically increasing u64s going from `0` to `N-1`. + /// monotonically increasing `u64`s going from `0` to `N-1`. pub fn insert_row(&mut self, row: &DataRow) -> WriteResult<()> { // TODO(cmc): kind & insert_id need to somehow propagate through the span system. 
self.insert_id += 1; @@ -75,15 +86,42 @@ impl DataStore { crate::profile_function!(); + // Update type registry and do typechecking if enabled + if self.config.enable_typecheck { + for cell in row.cells().iter() { + use std::collections::hash_map::Entry; + match self.type_registry.entry(cell.component_name()) { + Entry::Occupied(entry) => { + if entry.get() != cell.datatype() { + return Err(WriteError::TypeCheck { + component: cell.component_name(), + expected: entry.get().clone(), + got: cell.datatype().clone(), + }); + } + } + Entry::Vacant(entry) => { + entry.insert(cell.datatype().clone()); + } + } + } + } else { + for cell in row.cells().iter() { + self.type_registry + .insert(cell.component_name(), cell.datatype().clone()); + } + } + let DataRow { row_id, timepoint, entity_path: ent_path, - num_instances: _, + num_instances, cells, } = row; let ent_path_hash = ent_path.hash(); + let num_instances = *num_instances; trace!( kind = "insert", @@ -102,150 +140,8 @@ impl DataStore { .find_position(|cell| cell.component_name() == self.cluster_key) .map(|(pos, _)| pos); - if timepoint.is_timeless() { - let mut row_indices = IntMap::default(); - - self.insert_timeless_row_helper(cluster_cell_pos, cells, &mut row_indices)?; - - let index = self - .timeless_indices - .entry(ent_path_hash) - .or_insert_with(|| PersistentIndexTable::new(self.cluster_key, ent_path.clone())); - index.insert(&row_indices)?; - } else { - let mut row_indices = IntMap::default(); - - self.insert_row_helper(timepoint, cluster_cell_pos, cells, &mut row_indices)?; - - for (timeline, time) in timepoint.iter() { - let ent_path = ent_path.clone(); // shallow - let index = self - .indices - .entry((*timeline, ent_path_hash)) - .or_insert_with(|| IndexTable::new(self.cluster_key, *timeline, ent_path)); - index.insert(&self.config, *time, &row_indices)?; - } - } - - // This is valuable information, even for a timeless timepoint! - self.messages.insert(*row_id, timepoint.clone()); - - Ok(()) - } - - fn insert_timeless_row_helper( - &mut self, - cluster_cell_pos: Option, - cells: &[DataCell], - row_indices: &mut IntMap, - ) -> WriteResult<()> { - crate::profile_function!(); - - let cluster_row_idx = - self.get_or_create_cluster_component(cluster_cell_pos, cells, &TimePoint::default())?; - - // Always insert the cluster component. - row_indices.insert(self.cluster_key, cluster_row_idx); - - if self.config.store_insert_ids { - // Store the ID of the write request alongside the data. - // - // This is _not_ an actual `RowIndex`, there isn't even a component table associated - // with insert IDs! - // We're just abusing the fact that any value we push here as a `RowIndex` will end up - // as-is in the index. 
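// Sketch of the opt-in typecheck above: the store keeps a registry mapping each component name
// to the first Arrow datatype it ever saw for that component, and rejects rows whose cells
// disagree with the registered type. Plain `String`s stand in for the real component name and
// datatype types here.
use std::collections::{hash_map::Entry, HashMap};

fn typecheck_sketch(
    registry: &mut HashMap<String, String>, // component name -> datatype description
    component: &str,
    datatype: &str,
) -> Result<(), String> {
    match registry.entry(component.to_owned()) {
        Entry::Occupied(entry) => {
            if entry.get() != datatype {
                return Err(format!(
                    "component '{component}' expected {} but got {datatype}",
                    entry.get()
                ));
            }
        }
        Entry::Vacant(entry) => {
            entry.insert(datatype.to_owned());
        }
    }
    Ok(())
}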
- row_indices.insert( - Self::insert_id_key(), - RowIndex::from_u63(RowIndexKind::Temporal, self.insert_id), - ); - } - - for cell in cells - .iter() - .filter(|cell| cell.component_name() != self.cluster_key) - { - let component = cell.component_name(); - - let table = self - .timeless_components - .entry(cell.component_name()) - .or_insert_with(|| PersistentComponentTable::new(component, cell.datatype())); - - let row_idx = table.push_cell(cell); - row_indices.insert(component, row_idx); - } - - Ok(()) - } - - fn insert_row_helper( - &mut self, - time_point: &TimePoint, - cluster_cell_pos: Option, - cells: &[DataCell], - row_indices: &mut IntMap, - ) -> WriteResult<()> { - crate::profile_function!(); - - let cluster_row_idx = - self.get_or_create_cluster_component(cluster_cell_pos, cells, time_point)?; - - // Always insert the cluster component. - row_indices.insert(self.cluster_key, cluster_row_idx); - - if self.config.store_insert_ids { - // Store the ID of the write request alongside the data. - // - // This is _not_ an actual `RowIndex`, there isn't even a component table associated - // with insert IDs! - // We're just abusing the fact that any value we push here as a `RowIndex` will end up - // as-is in the index. - row_indices.insert( - Self::insert_id_key(), - RowIndex::from_u63(RowIndexKind::Temporal, self.insert_id), - ); - } - - for cell in cells - .iter() - .filter(|cell| cell.component_name() != self.cluster_key) - { - let component = cell.component_name(); - - let table = self - .components - .entry(component) - .or_insert_with(|| ComponentTable::new(component, cell.datatype())); - - let row_idx = table.push_cell(&self.config, time_point, cell); - row_indices.insert(component, row_idx); - } - - Ok(()) - } - - /// Tries to find the cluster component for the current row, or creates it if the caller hasn't - /// specified any. - /// - /// When creating an auto-generated cluster component of a specific length for the first time, - /// this will keep track of its assigned row index and re-use it later on as a mean of - /// deduplication. - fn get_or_create_cluster_component( - &mut self, - cluster_cell_pos: Option, - cells: &[DataCell], - time_point: &TimePoint, - ) -> WriteResult { - crate::profile_function!(); - - enum ClusterData<'a> { - Cached(RowIndex), - GenData(DataCell), - UserData(&'a DataCell), - } - - let (cluster_len, cluster_data) = if let Some(cluster_cell_pos) = cluster_cell_pos { - // We found a component with a name matching the cluster key's, let's make sure it's + let generated_cluster_cell = if let Some(cluster_cell_pos) = cluster_cell_pos { + // We found a column with a name matching the cluster key's, let's make sure it's // valid (dense, sorted, no duplicates) and use that if so. let cluster_cell = &cells[cluster_cell_pos]; @@ -259,191 +155,120 @@ impl DataStore { return Err(WriteError::InvalidClusteringComponent(cluster_cell.clone())); } - ( - cluster_cell.num_instances(), - ClusterData::UserData(cluster_cell), - ) + None } else { // The caller has not specified any cluster component, and so we'll have to generate // one... unless we've already generated one of this exact length in the past, - // in which case we can simply re-use that row index. - - // Use the length of any other component in the batch, they are guaranteed to all - // share the same length at this point anyway. - let len = cells.first().map_or(0, |comp| comp.num_instances()); + // in which case we can simply re-use that cell. 
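// Sketch of the cluster-cell cache described above: an auto-generated cluster key column is
// always just `0..num_instances`, so a cell generated once for a given length can be shared
// (shallow clone) by every later row of the same length. A reference-counted `Vec<u64>` stands
// in here for the real Arrow-backed cell.
use std::{collections::HashMap, sync::Arc};

fn generate_cluster_cell_sketch(
    cache: &mut HashMap<u32, Arc<Vec<u64>>>,
    num_instances: u32,
) -> Arc<Vec<u64>> {
    cache
        .entry(num_instances)
        .or_insert_with(|| Arc::new((0..num_instances as u64).collect()))
        .clone() // cheap: bumps the refcount, doesn't copy the data
}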
- if let Some(row_idx) = self.cluster_comp_cache.get(&len) { - // Cache hit! Re-use that row index. - (len, ClusterData::Cached(*row_idx)) - } else { - // Cache miss! Craft a new instance keys from the ground up. - - // TODO(#1712): That's exactly how one should create a cell of instance keys... but - // it turns out that running `TryIntoArrow` on a primitive type is orders of - // magnitude slower than manually creating the equivalent primitive array for some - // reason... - // let cell = DataCell::from_component::(0..len as u64); - - // ...so we create it manually instead. - use re_log_types::Component as _; - let values = - arrow2::array::UInt64Array::from_vec((0..len as u64).collect_vec()).boxed(); - let cell = DataCell::from_arrow(InstanceKey::name(), values); - - (len, ClusterData::GenData(cell)) - } + Some(self.generate_cluster_cell(num_instances)) }; - match cluster_data { - ClusterData::Cached(row_idx) => Ok(row_idx), - ClusterData::GenData(cell) => { - // We had to generate a cluster component of the given length for the first time, - // let's store it forever. - - let table = self - .timeless_components - .entry(self.cluster_key) - .or_insert_with(|| { - PersistentComponentTable::new(self.cluster_key, cell.datatype()) - }); - let row_idx = table.push_cell(&cell); + let insert_id = self.config.store_insert_ids.then_some(self.insert_id); - self.cluster_comp_cache.insert(cluster_len, row_idx); + if timepoint.is_timeless() { + let index = self + .timeless_tables + .entry(ent_path_hash) + .or_insert_with(|| PersistentIndexedTable::new(self.cluster_key, ent_path.clone())); - Ok(row_idx) - } - ClusterData::UserData(cell) => { - // If we didn't hit the cache, then we have to insert this cluster component in - // the right tables, just like any other component. - - let row_idx = if time_point.is_timeless() { - let table = self - .timeless_components - .entry(self.cluster_key) - .or_insert_with(|| { - PersistentComponentTable::new(self.cluster_key, cell.datatype()) - }); - table.push_cell(cell) - } else { - let table = self - .components - .entry(self.cluster_key) - .or_insert_with(|| ComponentTable::new(self.cluster_key, cell.datatype())); - table.push_cell(&self.config, time_point, cell) - }; - - Ok(row_idx) + index.insert_row(insert_id, generated_cluster_cell, row); + } else { + for (timeline, time) in timepoint.iter() { + let ent_path = ent_path.clone(); // shallow + let index = self + .tables + .entry((*timeline, ent_path_hash)) + .or_insert_with(|| IndexedTable::new(self.cluster_key, *timeline, ent_path)); + + index.insert_row( + &self.config, + insert_id, + *time, + generated_cluster_cell.clone(), /* shallow */ + row, + ); } } - } - - pub fn clear_msg_metadata(&mut self, drop_msg_ids: &ahash::HashSet) { - crate::profile_function!(); - - self.messages - .retain(|msg_id, _| !drop_msg_ids.contains(msg_id)); - } -} -// --- Persistent Indices --- + // This is valuable information even for a timeless timepoint! + self.metadata_registry.insert(*row_id, timepoint.clone()); -impl PersistentIndexTable { - pub fn new(cluster_key: ComponentName, ent_path: EntityPath) -> Self { - Self { - cluster_key, - ent_path, - indices: Default::default(), - num_rows: 0, - all_components: Default::default(), - } + Ok(()) } - #[allow(clippy::unnecessary_wraps)] - pub fn insert(&mut self, row_indices: &IntMap) -> anyhow::Result<()> { + /// Auto-generates an approriate cluster cell for the specified number of instances and + /// transparently handles caching. 
+ fn generate_cluster_cell(&mut self, num_instances: u32) -> DataCell { crate::profile_function!(); - // 2-way merge, step1: left-to-right - // - // push new row indices to their associated secondary index - for (name, row_idx) in row_indices { - let index = self - .indices - .entry(*name) - .or_insert_with(|| vec![None; self.num_rows as usize]); - index.push(Some(*row_idx)); - } + if let Some(cell) = self.cluster_cell_cache.get(&num_instances) { + // Cache hit! - // 2-way merge, step2: right-to-left - // - // fill unimpacted secondary indices with null values - for (name, index) in &mut self.indices { - if !row_indices.contains_key(name) { - index.push(None); - } - } + cell.clone() // shallow + } else { + // Cache miss! Craft a new instance keys from the ground up. - self.num_rows += 1; + // TODO(#1712): That's exactly how one should create a cell of instance keys... + // but it turns out that running `TryIntoArrow` on a primitive type is orders of + // magnitude slower than manually creating the equivalent primitive array for some + // reason... + // let cell = DataCell::from_component::(0..len as u64); - #[cfg(debug_assertions)] - self.sanity_check().unwrap(); + // ...so we create it manually instead. + use re_log_types::Component as _; + let values = + arrow2::array::UInt64Array::from_vec((0..num_instances as u64).collect_vec()) + .boxed(); + let cell = DataCell::from_arrow(InstanceKey::name(), values); - // Insert components last, only if bucket-insert succeeded. - self.all_components.extend(row_indices.keys()); + self.cluster_cell_cache + .insert(num_instances, cell.clone() /* shallow */); - Ok(()) + cell + } } } -// --- Indices --- +// --- Temporal --- -impl IndexTable { - pub fn new(cluster_key: ComponentName, timeline: Timeline, ent_path: EntityPath) -> Self { - Self { - timeline, - ent_path, - buckets: [(i64::MIN.into(), IndexBucket::new(cluster_key, timeline))].into(), - cluster_key, - all_components: Default::default(), - } - } - - pub fn insert( +impl IndexedTable { + pub fn insert_row( &mut self, config: &DataStoreConfig, + insert_id: Option, time: TimeInt, - indices: &IntMap, - ) -> anyhow::Result<()> { + generated_cluster_cell: Option, + row: &DataRow, + ) { crate::profile_function!(); + let components: IntSet<_> = row.component_names().collect(); + // borrowck workaround let timeline = self.timeline; let ent_path = self.ent_path.clone(); // shallow let (_, bucket) = self.find_bucket_mut(time); - let size = bucket.total_size_bytes(); - let size_overflow = bucket.total_size_bytes() > config.index_bucket_size_bytes; - let len = bucket.total_rows(); - let len_overflow = len > config.index_bucket_nb_rows; + let len_overflow = len > config.indexed_bucket_num_rows; - if size_overflow || len_overflow { + if len_overflow { if let Some((min, second_half)) = bucket.split() { trace!( kind = "insert", timeline = %timeline.name(), time = timeline.typ().format(time), entity = %ent_path, - size_limit = config.component_bucket_size_bytes, - len_limit = config.component_bucket_nb_rows, - size, size_overflow, + len_limit = config.indexed_bucket_num_rows, len, len_overflow, new_time_bound = timeline.typ().format(min), - "splitting off index bucket following overflow" + "splitting off indexed bucket following overflow" ); self.buckets.insert(min, second_half); - return self.insert(config, time, indices); + return self.insert_row(config, insert_id, time, generated_cluster_cell, row); } // We couldn't split the bucket, either because it's already too small, or because it @@ -462,16 +287,16 @@ 
impl IndexTable { // covers a time range which includes this timepoint (if such a bucket existed, then // we would have stumbled upon it before ever finding the current one!). // This gives us an opportunity to create a new bucket that starts at the upper - // bound of the current one _excluded_ and that ranges all the way up to the timepoint - // that we're inserting. + // bound of the current one _excluded_ and that ranges all the way up to the + // timepoint that we're inserting. // Not only is this a great opportunity to naturally split things up, it's actually // mandatory to avoid a nasty edge case where one keeps inserting into a full, // unsplittable bucket and indefinitely creates new single-entry buckets, leading // to the worst-possible case of fragmentation. let (bucket_upper_bound, bucket_len) = { - let guard = bucket.indices.read(); - (guard.times.last().copied(), guard.times.len()) + let guard = bucket.inner.read(); + (guard.col_time.last().copied(), guard.col_time.len()) }; if let Some(upper_bound) = bucket_upper_bound { @@ -482,27 +307,23 @@ impl IndexTable { timeline = %timeline.name(), time = timeline.typ().format(time), entity = %ent_path, - size_limit = config.component_bucket_size_bytes, - len_limit = config.component_bucket_nb_rows, - size, size_overflow, + len_limit = config.indexed_bucket_num_rows, len, len_overflow, new_time_bound = timeline.typ().format(new_time_bound.into()), - "creating brand new index bucket following overflow" + "creating brand new indexed bucket following overflow" ); self.buckets.insert( (new_time_bound).into(), - IndexBucket { + IndexedBucket { timeline, - indices: RwLock::new(IndexBucketIndices { - is_sorted: true, + cluster_key: self.cluster_key, + inner: RwLock::new(IndexedBucketInner { time_range: TimeRange::new(time, time), - times: Default::default(), - indices: Default::default(), + ..Default::default() }), - cluster_key: self.cluster_key, }, ); - return self.insert(config, time, indices); + return self.insert_row(config, insert_id, time, generated_cluster_cell, row); } } @@ -511,11 +332,9 @@ impl IndexTable { timeline = %timeline.name(), time = timeline.typ().format(time), entity = %ent_path, - size_limit = config.component_bucket_size_bytes, - len_limit = config.component_bucket_nb_rows, - size, size_overflow, + len_limit = config.indexed_bucket_num_rows, len, len_overflow, - "couldn't split index bucket, proceeding to ignore limits" + "couldn't split indexed bucket, proceeding to ignore limits" ); } @@ -524,66 +343,82 @@ impl IndexTable { timeline = %timeline.name(), time = timeline.typ().format(time), entity = %ent_path, - components = ?indices.iter().collect::>(), - "inserted into index table" + ?components, + "inserted into indexed tables" ); - bucket.insert(time, indices)?; + bucket.insert_row(insert_id, time, generated_cluster_cell, row, &components); // Insert components last, only if bucket-insert succeeded. 
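// Sketch of the overflow handling above, with a BTreeMap<i64, Vec<i64>> standing in for the
// table's buckets (key = lower time bound, value = the bucket's time column, and the map is
// assumed to always hold at least one bucket, like the real table does): on overflow we first
// try to split the bucket, and only if it is unsplittable (a single repeated timepoint) do we
// open a brand-new bucket one past its upper bound, so a full bucket never grows forever.
use std::collections::BTreeMap;

fn insert_time_sketch(buckets: &mut BTreeMap<i64, Vec<i64>>, max_rows: usize, time: i64) {
    // The active bucket is the one with the greatest lower bound <= `time`.
    let lower = *buckets.range(..=time).next_back().expect("always at least one bucket").0;

    if buckets[&lower].len() >= max_rows {
        // The real store sorts lazily; sort here so the split below is meaningful.
        buckets.get_mut(&lower).unwrap().sort_unstable();

        // Pick a split point that never separates identical timestamps; `None` means the
        // bucket holds a single repeated timepoint and cannot be split at all. (The real code
        // also centers the split around the midpoint; any valid boundary works for a sketch.)
        let split_idx = {
            let bucket = &buckets[&lower];
            (1..bucket.len()).find(|&i| bucket[i - 1] != bucket[i])
        };

        if let Some(split_idx) = split_idx {
            // Splittable: move the upper part out into its own bucket, then retry the insert.
            let second_half = buckets.get_mut(&lower).unwrap().split_off(split_idx);
            buckets.insert(second_half[0], second_half);
            return insert_time_sketch(buckets, max_rows, time);
        }

        let upper = *buckets[&lower].last().unwrap();
        if time > upper {
            // Unsplittable, but the new row lands past the bucket: open a brand-new bucket
            // starting right after the current upper bound instead of growing this one forever.
            buckets.insert(upper + 1, vec![time]);
            return;
        }
        // Otherwise there is no choice but to ignore the limit and keep growing this bucket.
    }

    buckets.get_mut(&lower).unwrap().push(time);
}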
- self.all_components.extend(indices.keys()); - - Ok(()) + self.all_components.extend(components); } } -impl IndexBucket { - pub fn new(cluster_key: ComponentName, timeline: Timeline) -> Self { - Self { - timeline, - indices: RwLock::new(IndexBucketIndices::default()), - cluster_key, - } - } - - #[allow(clippy::unnecessary_wraps)] - pub fn insert( +impl IndexedBucket { + fn insert_row( &mut self, + insert_id: Option, time: TimeInt, - row_indices: &IntMap, - ) -> anyhow::Result<()> { + generated_cluster_cell: Option, + row: &DataRow, + components: &IntSet, + ) { crate::profile_function!(); - let mut guard = self.indices.write(); - let IndexBucketIndices { + let num_rows = self.total_rows() as usize; + + let mut guard = self.inner.write(); + let IndexedBucketInner { is_sorted, time_range, - times, - indices, + col_time, + col_insert_id, + col_row_id, + col_num_instances, + columns, + total_size_bytes: _, // TODO } = &mut *guard; - // append time to primary index and update time range appropriately - times.push(time.as_i64()); + // append time to primary column and update time range appropriately + col_time.push(time.as_i64()); *time_range = TimeRange::new(time_range.min.min(time), time_range.max.max(time)); - // append components to secondary indices (2-way merge) + // update all control columns + if let Some(insert_id) = insert_id { + col_insert_id.push(insert_id); + } + col_row_id.push(row.row_id()); + col_num_instances.push(row.num_instances()); + + // insert auto-generated cluster cell if present + if let Some(cluster_cell) = generated_cluster_cell { + let column = columns + .entry(cluster_cell.component_name()) + .or_insert_with(|| DataCellColumn::empty(num_rows)); + column.0.push(Some(cluster_cell)); + } - // 2-way merge, step1: left-to-right - // - // push new row indices to their associated secondary index - for (name, row_idx) in row_indices { - let index = indices - .entry(*name) - .or_insert_with(|| vec![None; times.len().saturating_sub(1)]); - index.push(Some(*row_idx)); + // append components to their respective columns (2-way merge) + + // 2-way merge, step 1: left-to-right + for cell in row.cells().iter() { + let column = columns + .entry(cell.component_name()) + .or_insert_with(|| DataCellColumn::empty(col_time.len().saturating_sub(1))); + column.0.push(Some(cell.clone() /* shallow */)); // TODO } - // 2-way merge, step2: right-to-left + // 2-way merge, step 2: right-to-left // - // fill unimpacted secondary indices with null values - for (name, index) in &mut *indices { - if !row_indices.contains_key(name) { - index.push(None); + // fill unimpacted columns with null values + for (component, column) in &mut *columns { + // The cluster key always gets added one way or another, don't try to force fill it! + if *component == self.cluster_key { + continue; + } + + if !components.contains(component) { + column.0.push(None); } } @@ -595,8 +430,6 @@ impl IndexBucket { drop(guard); // sanity checking will grab the lock! self.sanity_check().unwrap(); } - - Ok(()) } /// Splits the bucket into two, potentially uneven parts. @@ -612,21 +445,22 @@ impl IndexBucket { /// /// # Unsplittable buckets /// - /// The datastore and query path operate under the general assumption that _all of the - /// index data_ for a given timepoint will reside in _one and only one_ bucket. + /// The datastore and query path operate under the general assumption that _all of the data_ + /// for a given timepoint will reside in _one and only one_ bucket. 
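// Sketch of the 2-way merge above, which keeps every component column exactly as long as the
// time column: components present in the incoming row push `Some(cell)`, every other known
// column is padded with `None`. `&str` column names and `u32` cells stand in for the real types.
use std::collections::BTreeMap;

fn push_row_sketch(
    columns: &mut BTreeMap<&'static str, Vec<Option<u32>>>,
    num_existing_rows: usize,
    row: &[(&'static str, u32)],
) {
    // Step 1 (left-to-right): push the incoming cells, back-filling brand-new columns with
    // `None` for all the rows that existed before this component was ever seen.
    for &(component, cell) in row {
        let column = columns
            .entry(component)
            .or_insert_with(|| vec![None; num_existing_rows]);
        column.push(Some(cell));
    }

    // Step 2 (right-to-left): pad every column the row did *not* touch, so all columns stay
    // aligned with the time column.
    for (component, column) in columns.iter_mut() {
        if !row.iter().any(|(c, _)| c == component) {
            column.push(None);
        }
    }
}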
/// This function makes sure to uphold that restriction, which sometimes means splitting the /// bucket into two uneven parts, or even not splitting it at all. /// - /// Here's an example of an index table configured to have a maximum of 2 rows per bucket: one + /// Here's an example of an indexed tables configured to have a maximum of 2 rows per bucket: one /// can see that the 1st and 2nd buckets exceed this maximum in order to uphold the restriction /// described above: + /// TODO /// ```text - /// IndexTable { + /// IndexedTable { /// timeline: frame_nr /// entity: this/that /// size: 3 buckets for a total of 256 B across 8 total rows /// buckets: [ - /// IndexBucket { + /// IndexedBucket { /// index time bound: >= #0 /// size: 96 B across 3 rows /// - frame_nr: from #41 to #41 (all inclusive) @@ -640,21 +474,21 @@ impl IndexBucket { /// +----------+---------------+--------------+--------------------+ /// /// } - /// IndexBucket { + /// IndexedBucket { /// index time bound: >= #42 /// size: 96 B across 3 rows /// - frame_nr: from #42 to #42 (all inclusive) /// data (sorted=true): - /// +----------+--------------+--------------------+--------------------+ + /// +----------+--------------+--------------------+---------------+ /// | frame_nr | rerun.rect2d | rerun.instance_key | rerun.point2d | - /// +----------+--------------+--------------------+-------------------+ - /// | 42 | 1 | 2 | | - /// | 42 | | 4 | | - /// | 42 | | 2 | 2 | - /// +----------+--------------+--------------------+-------------------+ + /// +----------+--------------+--------------------+---------------+ + /// | 42 | 1 | 2 | | + /// | 42 | | 4 | | + /// | 42 | | 2 | 2 | + /// +----------+--------------+--------------------+---------------+ /// /// } - /// IndexBucket { + /// IndexedBucket { /// index time bound: >= #43 /// size: 64 B across 2 rows /// - frame_nr: from #43 to #44 (all inclusive) @@ -670,26 +504,32 @@ impl IndexBucket { /// ] /// } /// ``` - pub fn split(&self) -> Option<(TimeInt, Self)> { + fn split(&self) -> Option<(TimeInt, Self)> { let Self { - timeline, indices, .. + timeline, + cluster_key: _, + inner, } = self; - let mut indices = indices.write(); - indices.sort(); + let mut inner = inner.write(); + inner.sort(); - let IndexBucketIndices { + let IndexedBucketInner { is_sorted: _, time_range: time_range1, - times: times1, - indices: indices1, - } = &mut *indices; - - if times1.len() < 2 { + col_time: col_time1, + col_insert_id: col_insert_id1, + col_row_id: col_row_id1, + col_num_instances: col_num_instances1, + columns: columns1, + total_size_bytes: _, // NOTE: recomputed from scratch for both halves + } = &mut *inner; + + if col_time1.len() < 2 { return None; // early exit: can't split the unsplittable } - if times1.first() == times1.last() { + if col_time1.first() == col_time1.last() { // The entire bucket contains only one timepoint, thus it's impossible to find // a split index to begin with. return None; @@ -698,49 +538,87 @@ impl IndexBucket { crate::profile_function!(); let timeline = *timeline; - // Used down the line to assert that we've left everything in a sane state. - let _total_rows = times1.len(); + // Used in debug builds to assert that we've left everything in a sane state. 
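Before the split helpers below, it may help to see in isolation how a split point can be picked on a sorted time column so that rows sharing a timepoint are never separated. This is a hedged sketch under a made-up name (`naive_split_index`), not necessarily the crate's actual `find_split_index`, which appears further down in this patch.

```rust
/// One way to pick a split point in a sorted time column such that rows sharing a
/// timepoint never end up in different halves: start from the middle, then nudge the
/// cut to the nearest boundary between two distinct times.
fn naive_split_index(times: &[i64]) -> Option<usize> {
    debug_assert!(times.windows(2).all(|t| t[0] <= t[1]));

    if times.len() < 2 || times.first() == times.last() {
        return None; // unsplittable: too small, or a single timepoint
    }

    let mid = times.len() / 2;

    // Walk left until the cut sits right after the last row of the previous timepoint...
    let left = (1..=mid).rev().find(|&i| times[i - 1] != times[i]);
    // ...or walk right until it sits right before the first row of the next timepoint.
    let right = (mid..times.len()).find(|&i| times[i - 1] != times[i]);

    // Prefer whichever boundary keeps the two halves most balanced.
    match (left, right) {
        (Some(l), Some(r)) => Some(if mid - l <= r - mid { l } else { r }),
        (l, r) => l.or(r),
    }
}
```

For example, `[1, 1, 1, 2]` yields `Some(3)` (a 3/1 split), and `[7, 7, 7, 7]` yields `None`: the whole bucket shares one timepoint and must stay together, which is exactly the uneven/unsplittable behaviour documented above.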
+ let _total_rows = col_time1.len(); + + fn split_off_column( + column: &mut SmallVec<[T; N]>, + split_idx: usize, + ) -> SmallVec<[T; N]> { + if split_idx >= column.len() { + return SmallVec::default(); + } + + let second_half = SmallVec::from_slice(&column[split_idx..]); + column.truncate(split_idx); + second_half + } let (min2, bucket2) = { - let split_idx = find_split_index(times1).expect("must be splittable at this point"); + let split_idx = find_split_index(col_time1).expect("must be splittable at this point"); // this updates `time_range1` in-place! - let time_range2 = split_time_range_off(split_idx, times1, time_range1); + let time_range2 = split_time_range_off(split_idx, col_time1, time_range1); - // this updates `times1` in-place! - let times2 = times1.split_off(split_idx); + // this updates `col_time1` in-place! + let col_time2 = split_off_column(col_time1, split_idx); - // this updates `indices1` in-place! - let indices2: IntMap<_, _> = indices1 + // this updates `col_insert_id1` in-place! + let col_insert_id2 = split_off_column(col_insert_id1, split_idx); + + // this updates `col_row_id1` in-place! + let col_row_id2 = split_off_column(col_row_id1, split_idx); + + // this updates `col_num_instances1` in-place! + let col_num_instances2 = split_off_column(col_num_instances1, split_idx); + + // this updates `columns1` in-place! + let columns2: IntMap<_, _> = columns1 .iter_mut() - .map(|(name, index1)| { - // this updates `index1` in-place! - let index2 = index1.split_off(split_idx); - (*name, index2) + .map(|(name, column1)| { + if split_idx >= column1.len() { + return (*name, DataCellColumn(SmallVec::default())); + } + + // this updates `column1` in-place! + let column2 = DataCellColumn({ + let second_half = SmallVec::from(&column1.0[split_idx..]); + column1.0.truncate(split_idx); + second_half + }); + (*name, column2) }) .collect(); - ( - time_range2.min, - Self { - timeline, - indices: RwLock::new(IndexBucketIndices { - is_sorted: true, - time_range: time_range2, - times: times2, - indices: indices2, - }), - cluster_key: self.cluster_key, - }, - ) + + let mut bucket2 = Self { + timeline, + cluster_key: self.cluster_key, + inner: RwLock::new(IndexedBucketInner { + is_sorted: true, + time_range: time_range2, + col_time: col_time2, + col_insert_id: col_insert_id2, + col_row_id: col_row_id2, + col_num_instances: col_num_instances2, + columns: columns2, + total_size_bytes: 0, // TODO + }), + }; + bucket2.compute_total_size_bytes(); + + (time_range2.min, bucket2) }; + inner.compute_total_size_bytes(); + // sanity checks #[cfg(debug_assertions)] { - drop(indices); // sanity checking will grab the lock! + drop(inner); // sanity checking will grab the lock! self.sanity_check().unwrap(); bucket2.sanity_check().unwrap(); + // TODO: size_bytes too? let total_rows1 = self.total_rows() as i64; let total_rows2 = bucket2.total_rows() as i64; debug_assert!( @@ -766,7 +644,7 @@ impl IndexBucket { /// /// This function expects `times` to be sorted! /// In debug builds, it will panic if that's not the case. -fn find_split_index(times: &TimeIndex) -> Option { +fn find_split_index(times: &[i64]) -> Option { debug_assert!( times.windows(2).all(|t| t[0] <= t[1]), "time index must be sorted before splitting!" @@ -855,7 +733,7 @@ fn test_find_split_index() { /// The two resulting time range halves are guaranteed to never overlap. 
fn split_time_range_off( split_idx: usize, - times1: &TimeIndex, + times1: &[i64], time_range1: &mut TimeRange, ) -> TimeRange { let time_range2 = TimeRange::new(times1[split_idx].into(), time_range1.max); @@ -875,242 +753,87 @@ fn split_time_range_off( time_range2 } -// --- Persistent Components --- - -impl PersistentComponentTable { - /// Creates a new timeless component table for the specified component `datatype`. - /// - /// `datatype` must be the type of the component itself, devoid of any wrapping layers - /// (i.e. _not_ a `ListArray<...>`!). - fn new(name: ComponentName, datatype: &DataType) -> Self { - // TODO(#1619): the whole fake row thing needs to go - let chunks = vec![DataCell::from_arrow_empty(name, datatype.clone()).as_arrow_monolist()]; - let total_rows = chunks.iter().map(|values| values.len() as u64).sum(); - let total_size_bytes = chunks - .iter() - .map(|values| arrow2::compute::aggregate::estimated_bytes_size(&**values) as u64) - .sum(); +// --- Timeless --- +impl PersistentIndexedTable { + pub fn new(cluster_key: ComponentName, ent_path: EntityPath) -> Self { Self { - name, - datatype: datatype.clone(), - chunks, - total_rows, - total_size_bytes, - } - } - - /// Pushes `cell` to the end of the bucket, returning the _global_ `RowIndex` of the - /// freshly added row. - pub fn push_cell(&mut self, cell: &DataCell) -> RowIndex { - crate::profile_function!(); - - debug_assert!( - cell.datatype() == &self.datatype, - "trying to insert data of the wrong datatype in a component table, \ - expected {:?}, got {:?}", - &self.datatype, - cell.datatype(), - ); - - // TODO(#1619): don't use raw arrays - let values = cell.as_arrow_monolist(); - - self.total_rows += 1; - // Warning: this is surprisingly costly! - self.total_size_bytes += arrow2::compute::aggregate::estimated_bytes_size(&*values) as u64; - - // TODO(#589): support for non-unit-length chunks - self.chunks.push(values); - - RowIndex::from_u63(RowIndexKind::Timeless, self.chunks.len() as u64 - 1) - } -} - -// --- Components --- - -impl ComponentTable { - /// Creates a new component table for the specified component `datatype`. - /// - /// `datatype` must be the type of the component itself, devoid of any wrapping layers - /// (i.e. _not_ a `ListArray<...>`!). - fn new(name: ComponentName, datatype: &DataType) -> Self { - ComponentTable { - name, - datatype: datatype.clone(), - buckets: [ComponentBucket::new(name, datatype, 0u64)].into(), + cluster_key, + ent_path, + col_insert_id: Default::default(), + col_row_id: Default::default(), + col_num_instances: Default::default(), + columns: Default::default(), + total_size_bytes: 0, // TODO } } - /// Finds the appropriate bucket in this component table and pushes `cell` at the - /// end of it, returning the _global_ `RowIndex` for this new row. - pub fn push_cell( + fn insert_row( &mut self, - config: &DataStoreConfig, - time_point: &TimePoint, - cell: &DataCell, - ) -> RowIndex { + insert_id: Option, + generated_cluster_cell: Option, + row: &DataRow, + ) { crate::profile_function!(); - debug_assert!( - cell.datatype() == &self.datatype, - "trying to insert data of the wrong datatype in a component table, \ - expected {:?}, got {:?}", - &self.datatype, - cell.datatype() - ); - - // All component tables spawn with an initial bucket at row offset 0, thus this cannot - // fail. 
- let active_bucket = self.buckets.back_mut().unwrap(); - - let size = active_bucket.total_size_bytes(); - let size_overflow = active_bucket.total_size_bytes() > config.component_bucket_size_bytes; + let num_rows = self.total_rows() as usize; - let len = active_bucket.total_rows(); - let len_overflow = len > config.component_bucket_nb_rows; + let Self { + ent_path: _, + cluster_key: _, + col_insert_id, + col_row_id, + col_num_instances, + columns, + total_size_bytes: _, // TODO + } = self; - if size_overflow || len_overflow { - trace!( - kind = "insert", - component = self.name.as_str(), - size_limit = config.component_bucket_size_bytes, - len_limit = config.component_bucket_nb_rows, - size, - size_overflow, - len, - len_overflow, - "allocating new component bucket, previous one overflowed" - ); + // dbg!(&columns); - if config.enable_compaction { - active_bucket.archive(); - } + let components: IntSet<_> = row.component_names().collect(); - let row_offset = active_bucket.row_offset + len; - self.buckets - .push_back(ComponentBucket::new(self.name, &self.datatype, row_offset)); + // update all control columns + if let Some(insert_id) = insert_id { + col_insert_id.push(insert_id); } + col_row_id.push(row.row_id()); + col_num_instances.push(row.num_instances()); - // Two possible cases: - // - If the table has not just underwent an overflow, then this is panic-safe for the - // same reason as above: all component tables spawn with an initial bucket at row - // offset 0, thus this cannot fail. - // - If the table has just overflowed, then we've just pushed a bucket to the dequeue. - let active_bucket = self.buckets.back_mut().unwrap(); - let row_idx = RowIndex::from_u63( - RowIndexKind::Temporal, - active_bucket.push_cell(time_point, cell) + active_bucket.row_offset, - ); - - trace!( - kind = "insert", - timelines = ?time_point.into_iter() - .map(|(timeline, time)| (timeline.name(), timeline.typ().format(*time))) - .collect::>(), - component = self.name.as_str(), - %row_idx, - "pushed into component table" - ); - - row_idx - } -} - -impl ComponentBucket { - /// Creates a new component bucket for the specified component `datatype`. - /// - /// `datatype` must be the type of the component itself, devoid of any wrapping layers - /// (i.e. _not_ a `ListArray<...>`!). - pub fn new(name: ComponentName, datatype: &DataType, row_offset: u64) -> Self { - // If this is the first bucket of this table, we need to insert an empty list at - // row index #0! - // TODO(#1619): the whole fake row thing needs to go - let chunks = if row_offset == 0 { - vec![DataCell::from_arrow_empty(name, datatype.clone()).as_arrow_monolist()] - } else { - vec![] - }; - - let total_rows = chunks.iter().map(|values| values.len() as u64).sum(); - let total_size_bytes = chunks - .iter() - .map(|values| arrow2::compute::aggregate::estimated_bytes_size(&**values) as u64) - .sum(); + // append components to their respective columns (2-way merge) - Self { - name, - row_offset, - archived: false, - time_ranges: Default::default(), - chunks, - total_rows, - total_size_bytes, + // insert auto-generated cluster cell if present + if let Some(cluster_cell) = generated_cluster_cell { + let column = columns + .entry(cluster_cell.component_name()) + .or_insert_with(|| DataCellColumn::empty(num_rows)); + column.0.push(Some(cluster_cell)); } - } - - /// Pushes `cell` to the end of the bucket, returning the _local_ index of the - /// freshly added row. 
- pub fn push_cell(&mut self, timepoint: &TimePoint, cell: &DataCell) -> u64 { - crate::profile_function!(); - // Keep track of all affected time ranges, for garbage collection purposes. - for (timeline, &time) in timepoint { - self.time_ranges - .entry(*timeline) - .and_modify(|range| { - *range = TimeRange::new(range.min.min(time), range.max.max(time)); - }) - .or_insert_with(|| TimeRange::new(time, time)); + // 2-way merge, step 1: left-to-right + for cell in row.cells().iter() { + let column = columns + .entry(cell.component_name()) + .or_insert_with(|| DataCellColumn::empty(num_rows)); + column.0.push(Some(cell.clone() /* shallow */)); } - // TODO(cmc): don't use raw arrays - let values = cell.as_arrow_monolist(); - - self.total_rows += 1; - // Warning: this is surprisingly costly! - self.total_size_bytes += arrow2::compute::aggregate::estimated_bytes_size(&*values) as u64; - - // TODO(#589): support for non-unit-length chunks - self.chunks.push(values); - - self.chunks.len() as u64 - 1 - } - - /// Archives the bucket as a new one is about to take its place. - /// - /// This is a good opportunity to run compaction and other maintenance related tasks. - #[allow(dead_code)] - pub fn archive(&mut self) { - crate::profile_function!(); - - debug_assert!( - !self.archived, - "archiving an already archived bucket, something is likely wrong" - ); - - // Chunk compaction - // Compacts the bucket by concatenating all chunks of data into a single one. - { - use arrow2::compute::concatenate::concatenate; - - let chunks = self.chunks.iter().map(|chunk| &**chunk).collect::>(); - // Only two reasons this can ever fail: - // - // * `chunks` is empty: - // This can never happen, buckets always spawn with an initial chunk. - // - // * the various chunks contain data with different datatypes: - // This can never happen as that would first panic during insertion. - let values = concatenate(&chunks).unwrap(); - - // Recompute the size as we've just discarded a bunch of list headers. - self.total_size_bytes = - arrow2::compute::aggregate::estimated_bytes_size(&*values) as u64; + // 2-way merge, step 2: right-to-left + // + // fill unimpacted secondary indices with null values + for (component, column) in columns { + // The cluster key always gets added one way or another, don't try to force fill it! + if *component == self.cluster_key { + continue; + } - self.chunks = vec![values]; + if !components.contains(component) { + column.0.push(None); + } } - self.archived = true; + // dbg!(&columns); + + #[cfg(debug_assertions)] + self.sanity_check().unwrap(); } } diff --git a/crates/re_arrow_store/src/test_util.rs b/crates/re_arrow_store/src/test_util.rs index 870624782a2cf..00da9be207f9d 100644 --- a/crates/re_arrow_store/src/test_util.rs +++ b/crates/re_arrow_store/src/test_util.rs @@ -26,85 +26,28 @@ macro_rules! 
test_row { } pub fn all_configs() -> impl Iterator { - const COMPONENT_CONFIGS: &[DataStoreConfig] = &[ - DataStoreConfig::DEFAULT, - DataStoreConfig { - component_bucket_nb_rows: 0, - ..DataStoreConfig::DEFAULT - }, - DataStoreConfig { - component_bucket_nb_rows: 1, - ..DataStoreConfig::DEFAULT - }, - DataStoreConfig { - component_bucket_nb_rows: 2, - ..DataStoreConfig::DEFAULT - }, - DataStoreConfig { - component_bucket_nb_rows: 3, - ..DataStoreConfig::DEFAULT - }, - DataStoreConfig { - component_bucket_size_bytes: 0, - ..DataStoreConfig::DEFAULT - }, - DataStoreConfig { - component_bucket_size_bytes: 16, - ..DataStoreConfig::DEFAULT - }, - DataStoreConfig { - component_bucket_size_bytes: 32, - ..DataStoreConfig::DEFAULT - }, - DataStoreConfig { - component_bucket_size_bytes: 64, - ..DataStoreConfig::DEFAULT - }, - ]; - const INDEX_CONFIGS: &[DataStoreConfig] = &[ DataStoreConfig::DEFAULT, DataStoreConfig { - index_bucket_nb_rows: 0, - ..DataStoreConfig::DEFAULT - }, - DataStoreConfig { - index_bucket_nb_rows: 1, - ..DataStoreConfig::DEFAULT - }, - DataStoreConfig { - index_bucket_nb_rows: 2, - ..DataStoreConfig::DEFAULT - }, - DataStoreConfig { - index_bucket_nb_rows: 3, - ..DataStoreConfig::DEFAULT - }, - DataStoreConfig { - index_bucket_size_bytes: 0, + indexed_bucket_num_rows: 0, ..DataStoreConfig::DEFAULT }, DataStoreConfig { - index_bucket_size_bytes: 16, + indexed_bucket_num_rows: 1, ..DataStoreConfig::DEFAULT }, DataStoreConfig { - index_bucket_size_bytes: 32, + indexed_bucket_num_rows: 2, ..DataStoreConfig::DEFAULT }, DataStoreConfig { - index_bucket_size_bytes: 64, + indexed_bucket_num_rows: 3, ..DataStoreConfig::DEFAULT }, ]; - COMPONENT_CONFIGS.iter().flat_map(|comp| { - INDEX_CONFIGS.iter().map(|idx| DataStoreConfig { - component_bucket_size_bytes: comp.component_bucket_size_bytes, - component_bucket_nb_rows: comp.component_bucket_nb_rows, - index_bucket_size_bytes: idx.index_bucket_size_bytes, - index_bucket_nb_rows: idx.index_bucket_nb_rows, - store_insert_ids: comp.store_insert_ids || idx.store_insert_ids, - enable_compaction: comp.enable_compaction || idx.enable_compaction, - }) + INDEX_CONFIGS.iter().map(|idx| DataStoreConfig { + indexed_bucket_num_rows: idx.indexed_bucket_num_rows, + store_insert_ids: idx.store_insert_ids, + enable_typecheck: idx.enable_typecheck, }) } diff --git a/crates/re_arrow_store/tests/correctness.rs b/crates/re_arrow_store/tests/correctness.rs index 74ec6a8a76409..dd046109eef7e 100644 --- a/crates/re_arrow_store/tests/correctness.rs +++ b/crates/re_arrow_store/tests/correctness.rs @@ -6,16 +6,13 @@ use std::sync::atomic::{AtomicBool, Ordering::SeqCst}; use rand::Rng; -use re_arrow_store::{ - test_row, DataStore, DataStoreConfig, GarbageCollectionTarget, LatestAtQuery, WriteError, -}; +use re_arrow_store::{test_row, DataStore, DataStoreConfig, LatestAtQuery, WriteError}; use re_log_types::{ component_types::InstanceKey, datagen::{ build_frame_nr, build_log_time, build_some_colors, build_some_instances, build_some_point2d, }, - external::arrow2_convert::deserialize::arrow_array_deserialize_iterator, - Component as _, DataCell, Duration, EntityPath, MsgId, Time, TimeType, Timeline, + Component as _, DataCell, Duration, EntityPath, Time, TimeType, Timeline, }; // --- @@ -115,41 +112,41 @@ fn latest_at_emptiness_edge_cases_impl(store: &mut DataStore) { // empty frame_nr { - let row_indices = store.latest_at( + let cells = store.latest_at( &LatestAtQuery::new(timeline_frame_nr, frame39), &ent_path, InstanceKey::name(), &[InstanceKey::name()], ); - 
assert!(row_indices.is_none()); + assert!(cells.is_none()); } // empty log_time { - let row_indices = store.latest_at( + let cells = store.latest_at( &LatestAtQuery::new(timeline_log_time, now_minus_1s_nanos), &ent_path, InstanceKey::name(), &[InstanceKey::name()], ); - assert!(row_indices.is_none()); + assert!(cells.is_none()); } // wrong entity path { - let row_indices = store.latest_at( + let cells = store.latest_at( &LatestAtQuery::new(timeline_frame_nr, frame40), &EntityPath::from("does/not/exist"), InstanceKey::name(), &[InstanceKey::name()], ); - assert!(row_indices.is_none()); + assert!(cells.is_none()); } // bunch of non-existing components { let components = &["they".into(), "dont".into(), "exist".into()]; - let row_indices = store + let cells = store .latest_at( &LatestAtQuery::new(timeline_frame_nr, frame40), &ent_path, @@ -157,13 +154,12 @@ fn latest_at_emptiness_edge_cases_impl(store: &mut DataStore) { components, ) .unwrap(); - let rows = store.get(components, &row_indices); - rows.iter().all(|row| row.is_none()); + cells.iter().all(|cell| cell.is_none()); } // empty component list { - let row_indices = store + let cells = store .latest_at( &LatestAtQuery::new(timeline_frame_nr, frame40), &ent_path, @@ -171,29 +167,29 @@ fn latest_at_emptiness_edge_cases_impl(store: &mut DataStore) { &[], ) .unwrap(); - assert!(row_indices.is_empty()); + assert!(cells.is_empty()); } // wrong timeline name { - let row_indices = store.latest_at( + let cells = store.latest_at( &LatestAtQuery::new(timeline_wrong_name, frame40), &EntityPath::from("does/not/exist"), InstanceKey::name(), &[InstanceKey::name()], ); - assert!(row_indices.is_none()); + assert!(cells.is_none()); } // wrong timeline kind { - let row_indices = store.latest_at( + let cells = store.latest_at( &LatestAtQuery::new(timeline_wrong_kind, frame40), &EntityPath::from("does/not/exist"), InstanceKey::name(), &[InstanceKey::name()], ); - assert!(row_indices.is_none()); + assert!(cells.is_none()); } } @@ -282,7 +278,6 @@ fn gc_correct() { let mut store = DataStore::new( InstanceKey::name(), DataStoreConfig { - component_bucket_nb_rows: 0, ..Default::default() }, ); @@ -312,62 +307,64 @@ fn gc_correct() { } check_still_readable(&store); - let msg_id_chunks = store.gc( - GarbageCollectionTarget::DropAtLeastPercentage(1.0), - Timeline::new("frame_nr", TimeType::Sequence), - MsgId::name(), - ); - - let msg_ids = msg_id_chunks - .iter() - .flat_map(|chunk| arrow_array_deserialize_iterator::>(&**chunk).unwrap()) - .map(Option::unwrap) // MsgId is always present - .collect::>(); - assert!(!msg_ids.is_empty()); - - if let err @ Err(_) = store.sanity_check() { - store.sort_indices_if_needed(); - eprintln!("{store}"); - err.unwrap(); - } - check_still_readable(&store); - for msg_id in &msg_ids { - assert!(store.get_msg_metadata(msg_id).is_some()); - } - - store.clear_msg_metadata(&msg_ids); - - if let err @ Err(_) = store.sanity_check() { - store.sort_indices_if_needed(); - eprintln!("{store}"); - err.unwrap(); - } - check_still_readable(&store); - for msg_id in &msg_ids { - assert!(store.get_msg_metadata(msg_id).is_none()); - } - - let msg_id_chunks = store.gc( - GarbageCollectionTarget::DropAtLeastPercentage(1.0), - Timeline::new("frame_nr", TimeType::Sequence), - MsgId::name(), - ); - - let msg_ids = msg_id_chunks - .iter() - .flat_map(|chunk| arrow_array_deserialize_iterator::>(&**chunk).unwrap()) - .map(Option::unwrap) // MsgId is always present - .collect::>(); - assert!(msg_ids.is_empty()); - - if let err @ Err(_) = store.sanity_check() 
{ - store.sort_indices_if_needed(); - eprintln!("{store}"); - err.unwrap(); - } - check_still_readable(&store); - - assert_eq!(2, store.total_temporal_component_rows()); + // TODO + + // let msg_id_chunks = store.gc( + // GarbageCollectionTarget::DropAtLeastPercentage(1.0), + // Timeline::new("frame_nr", TimeType::Sequence), + // MsgId::name(), + // ); + + // let msg_ids = msg_id_chunks + // .iter() + // .flat_map(|chunk| arrow_array_deserialize_iterator::>(&**chunk).unwrap()) + // .map(Option::unwrap) // MsgId is always present + // .collect::>(); + // assert!(!msg_ids.is_empty()); + + // if let err @ Err(_) = store.sanity_check() { + // store.sort_indices_if_needed(); + // eprintln!("{store}"); + // err.unwrap(); + // } + // check_still_readable(&store); + // for msg_id in &msg_ids { + // assert!(store.get_msg_metadata(msg_id).is_some()); + // } + + // store.clear_msg_metadata(&msg_ids); + + // if let err @ Err(_) = store.sanity_check() { + // store.sort_indices_if_needed(); + // eprintln!("{store}"); + // err.unwrap(); + // } + // check_still_readable(&store); + // for msg_id in &msg_ids { + // assert!(store.get_msg_metadata(msg_id).is_none()); + // } + + // let msg_id_chunks = store.gc( + // GarbageCollectionTarget::DropAtLeastPercentage(1.0), + // Timeline::new("frame_nr", TimeType::Sequence), + // MsgId::name(), + // ); + + // let msg_ids = msg_id_chunks + // .iter() + // .flat_map(|chunk| arrow_array_deserialize_iterator::>(&**chunk).unwrap()) + // .map(Option::unwrap) // MsgId is always present + // .collect::>(); + // assert!(msg_ids.is_empty()); + + // if let err @ Err(_) = store.sanity_check() { + // store.sort_indices_if_needed(); + // eprintln!("{store}"); + // err.unwrap(); + // } + // check_still_readable(&store); + + // assert_eq!(2, store.total_temporal_component_rows()); } fn check_still_readable(_store: &DataStore) { diff --git a/crates/re_arrow_store/tests/data_store.rs b/crates/re_arrow_store/tests/data_store.rs index d6a0ac79ffea7..d449ebea12d03 100644 --- a/crates/re_arrow_store/tests/data_store.rs +++ b/crates/re_arrow_store/tests/data_store.rs @@ -11,8 +11,8 @@ use polars_core::{prelude::*, series::Series}; use polars_ops::prelude::DataFrameJoinOps; use rand::Rng; use re_arrow_store::{ - polars_util, test_row, DataStore, DataStoreConfig, GarbageCollectionTarget, LatestAtQuery, - RangeQuery, TimeInt, TimeRange, + polars_util, test_row, DataStore, DataStoreConfig, LatestAtQuery, RangeQuery, TimeInt, + TimeRange, }; use re_log_types::{ component_types::{ColorRGBA, InstanceKey, Point2D, Rect2D}, @@ -20,8 +20,7 @@ use re_log_types::{ build_frame_nr, build_some_colors, build_some_instances, build_some_instances_from, build_some_point2d, build_some_rects, }, - external::arrow2_convert::deserialize::arrow_array_deserialize_iterator, - Component as _, ComponentName, DataCell, DataRow, EntityPath, MsgId, TimeType, Timeline, + Component as _, ComponentName, DataCell, DataRow, EntityPath, TimeType, Timeline, }; // TODO(#1619): introduce batching in the testing matrix @@ -69,8 +68,7 @@ fn all_components() { let mut store = DataStore::new( InstanceKey::name(), DataStoreConfig { - component_bucket_nb_rows: u64::MAX, - index_bucket_nb_rows: u64::MAX, + indexed_bucket_num_rows: u64::MAX, ..Default::default() }, ); @@ -80,9 +78,6 @@ fn all_components() { ColorRGBA::name(), // added by us, timeless Rect2D::name(), // added by us cluster_key, // always here - MsgId::name(), // automatically appended by DataRow - #[cfg(debug_assertions)] - DataStore::insert_id_key(), // automatically 
added in debug ]; let components_b = &[ @@ -90,9 +85,6 @@ fn all_components() { Point2D::name(), // added by us Rect2D::name(), // added by us cluster_key, // always here - MsgId::name(), // automatically appended by DataRow - #[cfg(debug_assertions)] - DataStore::insert_id_key(), // automatically added in debug ]; let row = test_row!(ent_path @ [] => 2; [build_some_colors(2)]); @@ -122,8 +114,7 @@ fn all_components() { let mut store = DataStore::new( InstanceKey::name(), DataStoreConfig { - component_bucket_nb_rows: 0, - index_bucket_nb_rows: 0, + indexed_bucket_num_rows: 0, ..Default::default() }, ); @@ -146,9 +137,6 @@ fn all_components() { ColorRGBA::name(), // added by us, timeless Rect2D::name(), // added by us cluster_key, // always here - MsgId::name(), // automatically appended by DataRow - #[cfg(debug_assertions)] - DataStore::insert_id_key(), // automatically added in debug ]; let components_b = &[ @@ -156,9 +144,6 @@ fn all_components() { Rect2D::name(), // ⚠ inherited before the buckets got split apart! Point2D::name(), // added by us cluster_key, // always here - MsgId::name(), // automatically appended by DataRow - #[cfg(debug_assertions)] - DataStore::insert_id_key(), // automatically added in debug ]; let row = test_row!(ent_path @ [] => 2; [build_some_colors(2)]); @@ -192,8 +177,7 @@ fn all_components() { let mut store = DataStore::new( InstanceKey::name(), DataStoreConfig { - component_bucket_nb_rows: 0, - index_bucket_nb_rows: 0, + indexed_bucket_num_rows: 0, ..Default::default() }, ); @@ -218,9 +202,6 @@ fn all_components() { ColorRGBA::name(), // added by us, timeless Rect2D::name(), // added by us cluster_key, // always here - MsgId::name(), // automatically appended by DataRow - #[cfg(debug_assertions)] - DataStore::insert_id_key(), // automatically added in debug ]; let components_b = &[ @@ -228,9 +209,6 @@ fn all_components() { Point2D::name(), // added by us but not contained in the second bucket Rect2D::name(), // added by use cluster_key, // always here - MsgId::name(), // automatically appended by DataRow - #[cfg(debug_assertions)] - DataStore::insert_id_key(), // automatically added in debug ]; let row = test_row!(ent_path @ [] => 2; [build_some_colors(2)]); @@ -273,12 +251,12 @@ fn latest_at() { for config in re_arrow_store::test_util::all_configs() { let mut store = DataStore::new(InstanceKey::name(), config.clone()); latest_at_impl(&mut store); - store.gc( - GarbageCollectionTarget::DropAtLeastPercentage(1.0), - Timeline::new("frame_nr", TimeType::Sequence), - MsgId::name(), - ); - latest_at_impl(&mut store); + // store.gc( + // GarbageCollectionTarget::DropAtLeastPercentage(1.0), + // Timeline::new("frame_nr", TimeType::Sequence), + // MsgId::name(), + // ); + // latest_at_impl(&mut store); } } @@ -883,27 +861,29 @@ fn gc_impl(store: &mut DataStore) { } _ = store.to_dataframe(); // simple way of checking that everything is still readable - let msg_id_chunks = store.gc( - GarbageCollectionTarget::DropAtLeastPercentage(1.0 / 3.0), - Timeline::new("frame_nr", TimeType::Sequence), - MsgId::name(), - ); + // TODO - let msg_ids = msg_id_chunks - .iter() - .flat_map(|chunk| arrow_array_deserialize_iterator::>(&**chunk).unwrap()) - .map(Option::unwrap) // MsgId is always present - .collect::>(); + // let msg_id_chunks = store.gc( + // GarbageCollectionTarget::DropAtLeastPercentage(1.0 / 3.0), + // Timeline::new("frame_nr", TimeType::Sequence), + // MsgId::name(), + // ); - for msg_id in &msg_ids { - assert!(store.get_msg_metadata(msg_id).is_some()); - } + // let 
msg_ids = msg_id_chunks + // .iter() + // .flat_map(|chunk| arrow_array_deserialize_iterator::>(&**chunk).unwrap()) + // .map(Option::unwrap) // MsgId is always present + // .collect::>(); - store.clear_msg_metadata(&msg_ids); + // for msg_id in &msg_ids { + // assert!(store.get_msg_metadata(msg_id).is_some()); + // } - for msg_id in &msg_ids { - assert!(store.get_msg_metadata(msg_id).is_none()); - } + // store.clear_msg_metadata(&msg_ids); + + // for msg_id in &msg_ids { + // assert!(store.get_msg_metadata(msg_id).is_none()); + // } } } diff --git a/crates/re_arrow_store/tests/internals.rs b/crates/re_arrow_store/tests/internals.rs index d666e40b5675e..1b776d2619dcf 100644 --- a/crates/re_arrow_store/tests/internals.rs +++ b/crates/re_arrow_store/tests/internals.rs @@ -8,7 +8,7 @@ use re_arrow_store::{DataStore, DataStoreConfig}; use re_log_types::{ component_types::InstanceKey, datagen::{build_frame_nr, build_some_instances}, - Component as _, DataRow, EntityPath, MsgId, TimePoint, + Component as _, DataRow, EntityPath, RowId, TimePoint, }; // --- Internals --- @@ -29,14 +29,14 @@ fn pathological_bucket_topology() { let mut store_forward = DataStore::new( InstanceKey::name(), DataStoreConfig { - index_bucket_nb_rows: 10, + indexed_bucket_num_rows: 10, ..Default::default() }, ); let mut store_backward = DataStore::new( InstanceKey::name(), DataStoreConfig { - index_bucket_nb_rows: 10, + indexed_bucket_num_rows: 10, ..Default::default() }, ); @@ -53,7 +53,7 @@ fn pathological_bucket_topology() { let timepoint = TimePoint::from([build_frame_nr(frame_nr.into())]); for _ in 0..num { let row = DataRow::from_cells1( - MsgId::ZERO, + RowId::ZERO, ent_path.clone(), timepoint.clone(), num_instances, @@ -62,7 +62,7 @@ fn pathological_bucket_topology() { store_forward.insert_row(&row).unwrap(); let row = DataRow::from_cells1( - MsgId::ZERO, + RowId::ZERO, ent_path.clone(), timepoint.clone(), num_instances, @@ -84,7 +84,7 @@ fn pathological_bucket_topology() { .map(|frame_nr| { let timepoint = TimePoint::from([build_frame_nr(frame_nr.into())]); DataRow::from_cells1( - MsgId::ZERO, + RowId::ZERO, ent_path.clone(), timepoint, num_instances, diff --git a/crates/re_data_store/src/entity_properties.rs b/crates/re_data_store/src/entity_properties.rs index 0b69cd442893f..89a9235fdbb83 100644 --- a/crates/re_data_store/src/entity_properties.rs +++ b/crates/re_data_store/src/entity_properties.rs @@ -1,8 +1,5 @@ use re_arrow_store::LatestAtQuery; -use re_log_types::{ - external::arrow2_convert::deserialize::arrow_array_deserialize_iterator, - DeserializableComponent, EntityPath, -}; +use re_log_types::{DeserializableComponent, EntityPath}; use crate::log_db::EntityDb; @@ -211,14 +208,10 @@ where // single components this is easy enough. 
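The hunk below moves this read path onto the new cell-based API. As a rough usage sketch, assuming the signatures visible elsewhere in this patch (`latest_at` handing back `DataCell`s directly, and `DataCell::try_as_native` for deserialization), fetching the most recent value of a single component now looks roughly like this; `latest_color` and the choice of `ColorRGBA` are illustrative, and `ColorRGBA` is assumed to satisfy the deserialization bound like the other standard components.

```rust
use re_arrow_store::{DataStore, LatestAtQuery};
use re_log_types::{component_types::ColorRGBA, Component as _, EntityPath};

fn latest_color(
    store: &DataStore,
    query: &LatestAtQuery,
    ent_path: &EntityPath,
) -> Option<ColorRGBA> {
    // `latest_at` returns one optional `DataCell` per requested component...
    let cells = store.latest_at(query, ent_path, ColorRGBA::name(), &[ColorRGBA::name()])?;
    let cell = cells.get(0)?.as_ref()?;

    // ...and a cell deserializes straight into native component instances,
    // without ever touching a raw arrow array.
    cell.try_as_native::<ColorRGBA>().ok()?.next()
}
```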
let data_store = &entity_db.data_store; - let components = [C::name()]; + let cells = data_store.latest_at(query, entity_path, C::name(), &[C::name()])?; + let cell = cells.get(0)?.as_ref()?; - let row_indices = data_store.latest_at(query, entity_path, C::name(), &components)?; - - let results = data_store.get(&components, &row_indices); - let arr = results.get(0)?.as_ref()?.as_ref(); - - let mut iter = arrow_array_deserialize_iterator::(arr).ok()?; + let mut iter = cell.try_as_native::().ok()?; let component = iter.next(); diff --git a/crates/re_data_store/src/log_db.rs b/crates/re_data_store/src/log_db.rs index 50a5ce57a7036..0985bd961fe61 100644 --- a/crates/re_data_store/src/log_db.rs +++ b/crates/re_data_store/src/log_db.rs @@ -1,12 +1,10 @@ use nohash_hasher::IntMap; -use re_arrow_store::{DataStoreConfig, GarbageCollectionTarget, TimeInt}; +use re_arrow_store::{DataStoreConfig, TimeInt}; use re_log_types::{ - component_types::InstanceKey, - external::arrow2_convert::deserialize::arrow_array_deserialize_iterator, ArrowMsg, - BeginRecordingMsg, Component as _, ComponentPath, DataCell, DataRow, DataTable, EntityPath, - EntityPathHash, EntityPathOpMsg, LogMsg, MsgId, PathOp, RecordingId, RecordingInfo, TimePoint, - Timeline, + component_types::InstanceKey, ArrowMsg, BeginRecordingMsg, Component as _, ComponentPath, + DataCell, DataRow, DataTable, EntityPath, EntityPathHash, EntityPathOpMsg, LogMsg, MsgId, + PathOp, RecordingId, RecordingInfo, TimePoint, Timeline, }; use crate::{Error, TimesPerTimeline}; @@ -37,26 +35,8 @@ impl Default for EntityDb { data_store: re_arrow_store::DataStore::new( InstanceKey::name(), DataStoreConfig { - // Garbage collection of the datastore is currently driven by the `MsgId` - // component column, as a workaround for the `MsgId` mismatch issue. - // - // Since this component is only a few bytes large, trying to trigger a GC - // based on bucket size is a lost cause, so make sure to have a small enough - // row limit. - // - // TODO(cmc): Reasses once the whole `MsgId` mismatch issue is resolved - // (probably once batching is implemented). - component_bucket_nb_rows: 128, - component_bucket_size_bytes: 10 * 1024 * 1024, // 10 MiB - // We do not garbage collect index buckets at the moment, and so the size of - // individual index buckets is irrelevant, only their total number of rows - // matter. - // See https://github.com/rerun-io/rerun/pull/1558 for details. - // - // TODO(cmc): Bring back index GC once the whole `MsgId` mismatch issue is - // resolved (probably once batching is implemented). 
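The configuration hunk here replaces the old per-kind size/row knobs with a single row limit for indexed buckets. As a rough sketch of what constructing a store looks like after this change (the value and the helper name are illustrative):

```rust
use re_arrow_store::{DataStore, DataStoreConfig};
use re_log_types::{component_types::InstanceKey, Component as _};

fn store_with_small_buckets() -> DataStore {
    DataStore::new(
        InstanceKey::name(),
        DataStoreConfig {
            // Split an indexed bucket once it holds this many rows.
            indexed_bucket_num_rows: 512,
            // `store_insert_ids` and `enable_typecheck` keep their defaults.
            ..Default::default()
        },
    )
}
```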
- index_bucket_size_bytes: u64::MAX, - index_bucket_nb_rows: 2048, + // TODO + indexed_bucket_num_rows: 2048, ..Default::default() }, ), @@ -131,7 +111,7 @@ impl EntityDb { for component_path in cleared_paths { if let Some(data_type) = self .data_store - .lookup_data_type(&component_path.component_name) + .lookup_datatype(&component_path.component_name) { // Create and insert an empty component into the arrow store // TODO(jleibs): Faster empty-array creation @@ -280,21 +260,23 @@ impl LogDb { crate::profile_function!(); assert!((0.0..=1.0).contains(&fraction_to_purge)); - let drop_msg_ids = { - let msg_id_chunks = self.entity_db.data_store.gc( - GarbageCollectionTarget::DropAtLeastPercentage(fraction_to_purge as _), - Timeline::log_time(), - MsgId::name(), - ); - - msg_id_chunks - .iter() - .flat_map(|chunk| { - arrow_array_deserialize_iterator::>(&**chunk).unwrap() - }) - .map(Option::unwrap) // MsgId is always present - .collect::>() - }; + // TODO + let drop_msg_ids: ahash::HashSet<_> = Default::default(); + // let drop_msg_ids = { + // let msg_id_chunks = self.entity_db.data_store.gc( + // GarbageCollectionTarget::DropAtLeastPercentage(fraction_to_purge as _), + // Timeline::log_time(), + // MsgId::name(), + // ); + + // msg_id_chunks + // .iter() + // .flat_map(|chunk| { + // arrow_array_deserialize_iterator::>(&**chunk).unwrap() + // }) + // .map(Option::unwrap) // MsgId is always present + // .collect::>() + // }; let cutoff_times = self.entity_db.data_store.oldest_time_per_timeline(); diff --git a/crates/re_log_types/src/component_types/mod.rs b/crates/re_log_types/src/component_types/mod.rs index ba4d4f1125755..3a7f456d6b0a6 100644 --- a/crates/re_log_types/src/component_types/mod.rs +++ b/crates/re_log_types/src/component_types/mod.rs @@ -53,7 +53,7 @@ pub use label::Label; pub use linestrip::{LineStrip2D, LineStrip3D}; pub use mat::Mat3x3; pub use mesh3d::{EncodedMesh3D, Mesh3D, MeshFormat, MeshId, RawMesh3D}; -pub use msg_id::MsgId; +pub use msg_id::{MsgId, RowId, TableId}; pub use point::{Point2D, Point3D}; pub use quaternion::Quaternion; pub use radius::Radius; diff --git a/crates/re_log_types/src/component_types/msg_id.rs b/crates/re_log_types/src/component_types/msg_id.rs index 104f444cac099..dfc76c35382f9 100644 --- a/crates/re_log_types/src/component_types/msg_id.rs +++ b/crates/re_log_types/src/component_types/msg_id.rs @@ -25,6 +25,10 @@ use crate::{Component, ComponentName}; #[arrow_field(transparent)] pub struct MsgId(re_tuid::Tuid); +// TODO +pub type TableId = MsgId; +pub type RowId = MsgId; + impl std::fmt::Debug for MsgId { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{:032X}", self.0.as_u128()) diff --git a/crates/re_log_types/src/data_cell.rs b/crates/re_log_types/src/data_cell.rs index a0fdbdcbaf951..e9cee817e074f 100644 --- a/crates/re_log_types/src/data_cell.rs +++ b/crates/re_log_types/src/data_cell.rs @@ -253,6 +253,17 @@ impl DataCell { // --- + /// Returns the contents of the cell as an arrow array. + /// + /// Avoid using raw arrow arrays unless you absolutely have to: prefer working directly with + /// `DataCell`s, `DataRow`s & `DataTable`s instead. + /// If you do use them, try to keep the scope as short as possible: holding on to a raw array + /// might prevent the datastore from releasing memory from garbage collected data. + #[inline] + pub fn into_arrow(self) -> Box { + self.values + } + /// Returns the contents of the cell as an arrow array (shallow clone). 
/// /// Avoid using raw arrow arrays unless you absolutely have to: prefer working directly with @@ -443,3 +454,179 @@ impl std::fmt::Display for DataCell { .fmt(f) } } + +// --- + +impl DataCell { + /// Returns the total (heap) allocated size of the array in bytes. + /// + /// Beware: this is costly! Cache the returned value as much as possible. + pub fn size_bytes(&self) -> u64 { + let Self { name, values } = self; + + std::mem::size_of_val(name) as u64 + + // Warning: this is surprisingly costly! + arrow2::compute::aggregate::estimated_bytes_size(&**values) as u64 + } +} + +// This test exists because the documentation and online discussions revolving around +// arrow2's `estimated_bytes_size()` function indicate that there's a lot of limitations and +// edge cases to be aware of. +// +// Also, it's just plain hard to be sure that the answer you get is the answer you're looking +// for with these kinds of tools. When in doubt.. test everything we're going to need from it. +// +// In many ways, this is a specification of what we mean when we ask "what's the size of this +// Arrow array?". +#[test] +#[allow(clippy::from_iter_instead_of_collect)] +fn test_arrow_estimated_size_bytes() { + use arrow2::{ + array::{Array, Float64Array, ListArray, StructArray, UInt64Array, Utf8Array}, + compute::aggregate::estimated_bytes_size, + datatypes::{DataType, Field}, + offset::Offsets, + }; + + // simple primitive array + { + let data = vec![42u64; 100]; + let array = UInt64Array::from_vec(data.clone()).boxed(); + assert_eq!( + std::mem::size_of_val(data.as_slice()), + estimated_bytes_size(&*array) + ); + } + + // utf8 strings array + { + let data = vec![Some("some very, very, very long string indeed"); 100]; + let array = Utf8Array::::from(data.clone()).to_boxed(); + + let raw_size_bytes = data + .iter() + // headers + bodies! + .map(|s| std::mem::size_of_val(s) + std::mem::size_of_val(s.unwrap().as_bytes())) + .sum::(); + let arrow_size_bytes = estimated_bytes_size(&*array); + + assert_eq!(5600, raw_size_bytes); + assert_eq!(4404, arrow_size_bytes); // smaller because validity bitmaps instead of opts + } + + // simple primitive list array + { + let data = std::iter::repeat(vec![42u64; 100]) + .take(50) + .collect::>(); + let array = { + let array_flattened = + UInt64Array::from_vec(data.clone().into_iter().flatten().collect()).boxed(); + + ListArray::::new( + ListArray::::default_datatype(DataType::UInt64), + Offsets::try_from_lengths(std::iter::repeat(50).take(50)) + .unwrap() + .into(), + array_flattened, + None, + ) + .boxed() + }; + + let raw_size_bytes = data + .iter() + // headers + bodies! 
+ .map(|s| std::mem::size_of_val(s) + std::mem::size_of_val(s.as_slice())) + .sum::(); + let arrow_size_bytes = estimated_bytes_size(&*array); + + assert_eq!(41200, raw_size_bytes); + assert_eq!(40200, arrow_size_bytes); // smaller because smaller inner headers + } + + // compound type array + { + #[derive(Clone, Copy)] + struct Point { + x: f64, + y: f64, + } + + impl Default for Point { + fn default() -> Self { + Self { x: 42.0, y: 666.0 } + } + } + + let data = vec![Point::default(); 100]; + let array = { + let x = Float64Array::from_vec(data.iter().map(|p| p.x).collect()).boxed(); + let y = Float64Array::from_vec(data.iter().map(|p| p.y).collect()).boxed(); + let fields = vec![ + Field::new("x", DataType::Float64, false), + Field::new("y", DataType::Float64, false), + ]; + StructArray::new(DataType::Struct(fields), vec![x, y], None).boxed() + }; + + let raw_size_bytes = std::mem::size_of_val(data.as_slice()); + let arrow_size_bytes = estimated_bytes_size(&*array); + + assert_eq!(1600, raw_size_bytes); + assert_eq!(1600, arrow_size_bytes); + } + + // compound type list array + { + #[derive(Clone, Copy)] + struct Point { + x: f64, + y: f64, + } + + impl Default for Point { + fn default() -> Self { + Self { x: 42.0, y: 666.0 } + } + } + + let data = std::iter::repeat(vec![Point::default(); 100]) + .take(50) + .collect::>(); + let array: Box = { + let array = { + let x = + Float64Array::from_vec(data.iter().flatten().map(|p| p.x).collect()).boxed(); + let y = + Float64Array::from_vec(data.iter().flatten().map(|p| p.y).collect()).boxed(); + let fields = vec![ + Field::new("x", DataType::Float64, false), + Field::new("y", DataType::Float64, false), + ]; + StructArray::new(DataType::Struct(fields), vec![x, y], None) + }; + + ListArray::::new( + ListArray::::default_datatype(array.data_type().clone()), + Offsets::try_from_lengths(std::iter::repeat(50).take(50)) + .unwrap() + .into(), + array.boxed(), + None, + ) + .boxed() + }; + + let raw_size_bytes = data + .iter() + // headers + bodies! + .map(|s| std::mem::size_of_val(s) + std::mem::size_of_val(s.as_slice())) + .sum::(); + let arrow_size_bytes = estimated_bytes_size(&*array); + + assert_eq!(81200, raw_size_bytes); + assert_eq!(80200, arrow_size_bytes); // smaller because smaller inner headers + } +} diff --git a/crates/re_log_types/src/data_row.rs b/crates/re_log_types/src/data_row.rs index 96a9c438f8978..3122ad478a539 100644 --- a/crates/re_log_types/src/data_row.rs +++ b/crates/re_log_types/src/data_row.rs @@ -2,7 +2,7 @@ use ahash::HashSetExt; use nohash_hasher::IntSet; use smallvec::SmallVec; -use crate::{ComponentName, DataCell, DataCellError, DataTable, EntityPath, MsgId, TimePoint}; +use crate::{ComponentName, DataCell, DataCellError, DataTable, EntityPath, RowId, TimePoint}; // --- @@ -134,11 +134,11 @@ impl std::ops::IndexMut for DataCellRow { /// /// ```rust /// # use re_log_types::{ -/// # component_types::{ColorRGBA, Label, MsgId, Point2D}, +/// # component_types::{ColorRGBA, Label, RowId, Point2D}, /// # DataRow, Timeline, /// # }; /// # -/// # let row_id = MsgId::ZERO; +/// # let row_id = RowId::ZERO; /// # let timepoint = [ /// # (Timeline::new_sequence("frame_nr"), 42.into()), // /// # (Timeline::new_sequence("clock"), 666.into()), // @@ -163,7 +163,7 @@ pub struct DataRow { /// Auto-generated `TUID`, uniquely identifying this event and keeping track of the client's /// wall-clock. // TODO(#1619): introduce RowId & TableId - pub row_id: MsgId, + pub row_id: RowId, /// User-specified [`TimePoint`] for this event. 
pub timepoint: TimePoint, @@ -190,7 +190,7 @@ impl DataRow { /// - one or more cell isn't 0, 1 or `num_instances` long, /// - two or more cells share the same component type. pub fn try_from_cells( - row_id: MsgId, + row_id: RowId, timepoint: impl Into, entity_path: impl Into, num_instances: u32, @@ -238,7 +238,7 @@ impl DataRow { // we need to craft an array of `MsgId`s that matches the length of the other components. // TODO(#1619): This goes away once the store supports the new control columns use crate::Component as _; - if !components.contains(&MsgId::name()) { + if !components.contains(&RowId::name()) { let num_instances = this.num_instances(); this.cells.0.push(DataCell::from_native( vec![row_id; num_instances as _].iter(), @@ -256,7 +256,7 @@ impl DataRow { /// /// See [`Self::try_from_cells`] for the fallible alternative. pub fn from_cells( - row_id: MsgId, + row_id: RowId, timepoint: impl Into, entity_path: impl Into, num_instances: u32, @@ -277,7 +277,7 @@ impl DataRow { impl DataRow { #[inline] - pub fn row_id(&self) -> MsgId { + pub fn row_id(&self) -> RowId { self.row_id } @@ -297,7 +297,7 @@ impl DataRow { } #[inline] - pub fn components(&self) -> impl ExactSizeIterator + '_ { + pub fn component_names(&self) -> impl ExactSizeIterator + '_ { self.cells.iter().map(|cell| cell.component_name()) } @@ -332,7 +332,7 @@ impl DataRow { impl DataRow { pub fn from_cells1( - row_id: MsgId, + row_id: RowId, entity_path: impl Into, timepoint: impl Into, num_instances: u32, @@ -351,7 +351,7 @@ impl DataRow { } pub fn try_from_cells1( - row_id: MsgId, + row_id: RowId, entity_path: impl Into, timepoint: impl Into, num_instances: u32, @@ -371,7 +371,7 @@ impl DataRow { } pub fn from_cells2( - row_id: MsgId, + row_id: RowId, entity_path: impl Into, timepoint: impl Into, num_instances: u32, @@ -394,7 +394,7 @@ impl DataRow { } pub fn try_from_cells2( - row_id: MsgId, + row_id: RowId, entity_path: impl Into, timepoint: impl Into, num_instances: u32, @@ -419,7 +419,7 @@ impl DataRow { } pub fn from_cells3( - row_id: MsgId, + row_id: RowId, entity_path: impl Into, timepoint: impl Into, num_instances: u32, @@ -444,7 +444,7 @@ impl DataRow { } pub fn try_from_cells3( - row_id: MsgId, + row_id: RowId, entity_path: impl Into, timepoint: impl Into, num_instances: u32, @@ -502,7 +502,7 @@ mod tests { #[test] fn data_row_error_num_instances() { - let row_id = MsgId::ZERO; + let row_id = RowId::ZERO; let timepoint = TimePoint::timeless(); let num_instances = 2; @@ -549,7 +549,7 @@ mod tests { #[test] fn data_row_error_duped_components() { - let row_id = MsgId::ZERO; + let row_id = RowId::ZERO; let timepoint = TimePoint::timeless(); let points: &[Point2D] = &[[10.0, 10.0].into(), [20.0, 20.0].into()]; diff --git a/crates/re_log_types/src/data_table.rs b/crates/re_log_types/src/data_table.rs index 6183c31f1a869..07dd97d120446 100644 --- a/crates/re_log_types/src/data_table.rs +++ b/crates/re_log_types/src/data_table.rs @@ -4,8 +4,9 @@ use nohash_hasher::{IntMap, IntSet}; use smallvec::SmallVec; use crate::{ - ArrowMsg, ComponentName, DataCell, DataCellError, DataRow, DataRowError, EntityPath, MsgId, - TimePoint, + component_types::{ColorRGBA, Label, Point2D}, + ArrowMsg, ComponentName, DataCell, DataCellError, DataRow, DataRowError, EntityPath, RowId, + TableId, Time, TimePoint, Timeline, }; // --- @@ -34,13 +35,18 @@ pub enum DataTableError { pub type DataTableResult = ::std::result::Result; +// TODO: I'm really thinking we should just flatten the timepoint... 
as part of 1712 perhaps + +// TODO: fast paths for primitive types, both control & data + // --- -type RowIdVec = SmallVec<[MsgId; 4]>; -type TimePointVec = SmallVec<[TimePoint; 4]>; -type EntityPathVec = SmallVec<[EntityPath; 4]>; -type NumInstancesVec = SmallVec<[u32; 4]>; -type DataCellOptVec = SmallVec<[Option; 4]>; +pub type RowIdVec = SmallVec<[RowId; 4]>; +pub type TimePointVec = SmallVec<[TimePoint; 4]>; +pub type ErasedTimeVec = SmallVec<[i64; 4]>; +pub type EntityPathVec = SmallVec<[EntityPath; 4]>; +pub type NumInstancesVec = SmallVec<[u32; 4]>; +pub type DataCellOptVec = SmallVec<[Option; 4]>; /// A column's worth of [`DataCell`]s: a sparse collection of [`DataCell`]s that share the same /// underlying type and likely point to shared, contiguous memory. @@ -58,6 +64,8 @@ impl std::ops::Deref for DataCellColumn { } } +// TODO(cmc): Those Deref don't actually do their job most of the time for some reason... + impl std::ops::DerefMut for DataCellColumn { #[inline] fn deref_mut(&mut self) -> &mut Self::Target { @@ -81,6 +89,13 @@ impl std::ops::IndexMut for DataCellColumn { } } +impl DataCellColumn { + #[inline] + pub fn empty(num_rows: usize) -> Self { + Self(smallvec::smallvec![None; num_rows]) + } +} + // --- /// A sparse table's worth of data, i.e. a batch of events: a collection of [`DataRow`]s. @@ -121,18 +136,18 @@ impl std::ops::IndexMut for DataCellColumn { /// let points: &[Point2D] = &[[10.0, 10.0].into(), [20.0, 20.0].into()]; /// let colors: &[_] = &[ColorRGBA::from_rgb(128, 128, 128)]; /// let labels: &[Label] = &[]; -/// DataRow::from_cells3(MsgId::random(), "a", timepoint(1, 1), num_instances, (points, colors, labels)) +/// DataRow::from_cells3(RowId::random(), "a", timepoint(1, 1), num_instances, (points, colors, labels)) /// }; /// let row1 = { /// let num_instances = 0; /// let colors: &[ColorRGBA] = &[]; -/// DataRow::from_cells1(MsgId::random(), "b", timepoint(1, 2), num_instances, colors) +/// DataRow::from_cells1(RowId::random(), "b", timepoint(1, 2), num_instances, colors) /// }; /// let row2 = { /// let num_instances = 1; /// let colors: &[_] = &[ColorRGBA::from_rgb(255, 255, 255)]; /// let labels: &[_] = &[Label("hey".into())]; -/// DataRow::from_cells2(MsgId::random(), "c", timepoint(2, 1), num_instances, (colors, labels)) +/// DataRow::from_cells2(RowId::random(), "c", timepoint(2, 1), num_instances, (colors, labels)) /// }; /// let table = DataTable::from_rows(table_id, [row0, row1, row2]); /// ``` @@ -157,11 +172,11 @@ impl std::ops::IndexMut for DataCellColumn { /// /// ```rust /// # use re_log_types::{ -/// # component_types::{ColorRGBA, Label, MsgId, Point2D}, +/// # component_types::{ColorRGBA, Label, Point2D, RowId, TableId}, /// # DataRow, DataTable, Timeline, TimePoint, /// # }; /// # -/// # let table_id = MsgId::ZERO; // not used (yet) +/// # let table_id = TableId::ZERO; // not used (yet) /// # /// # let timepoint = |frame_nr: i64, clock: i64| { /// # TimePoint::from([ @@ -177,7 +192,7 @@ impl std::ops::IndexMut for DataCellColumn { /// let labels: &[Label] = &[]; /// /// DataRow::from_cells3( -/// MsgId::random(), +/// RowId::random(), /// "a", /// timepoint(1, 1), /// num_instances, @@ -189,7 +204,7 @@ impl std::ops::IndexMut for DataCellColumn { /// let num_instances = 0; /// let colors: &[ColorRGBA] = &[]; /// -/// DataRow::from_cells1(MsgId::random(), "b", timepoint(1, 2), num_instances, colors) +/// DataRow::from_cells1(RowId::random(), "b", timepoint(1, 2), num_instances, colors) /// }; /// /// let row2 = { @@ -198,7 +213,7 @@ impl 
std::ops::IndexMut for DataCellColumn { /// let labels: &[_] = &[Label("hey".into())]; /// /// DataRow::from_cells2( -/// MsgId::random(), +/// RowId::random(), /// "c", /// timepoint(2, 1), /// num_instances, @@ -224,19 +239,25 @@ pub struct DataTable { /// Auto-generated `TUID`, uniquely identifying this batch of data and keeping track of the /// client's wall-clock. // TODO(#1619): use once batching lands - pub table_id: MsgId, + pub table_id: TableId, /// The entire column of `RowId`s. - pub row_id: RowIdVec, + /// + /// Keeps track of the unique identifier for each row that was generated by the clients. + pub col_row_id: RowIdVec, /// The entire column of [`TimePoint`]s. - pub timepoint: TimePointVec, + pub col_timepoint: TimePointVec, /// The entire column of [`EntityPath`]s. - pub entity_path: EntityPathVec, + /// + /// The entity each row relates to, respectively. + pub col_entity_path: EntityPathVec, /// The entire column of `num_instances`. - pub num_instances: NumInstancesVec, + /// + /// Keeps track of the expected number of instances in each row. + pub col_num_instances: NumInstancesVec, /// All the rows for all the component columns. /// @@ -247,19 +268,19 @@ pub struct DataTable { impl DataTable { /// Creates a new empty table with the given ID. - pub fn new(table_id: MsgId) -> Self { + pub fn new(table_id: TableId) -> Self { Self { table_id, - row_id: Default::default(), - timepoint: Default::default(), - entity_path: Default::default(), - num_instances: Default::default(), + col_row_id: Default::default(), + col_timepoint: Default::default(), + col_entity_path: Default::default(), + col_num_instances: Default::default(), columns: Default::default(), } } /// Builds a new `DataTable` from an iterable of [`DataRow`]s. - pub fn from_rows(table_id: MsgId, rows: impl IntoIterator) -> Self { + pub fn from_rows(table_id: TableId, rows: impl IntoIterator) -> Self { crate::profile_function!(); let rows = rows.into_iter(); @@ -267,7 +288,7 @@ impl DataTable { // Explode all rows into columns, and keep track of which components are involved. let mut components = IntSet::default(); #[allow(clippy::type_complexity)] - let (row_id, timepoint, entity_path, num_instances, column): ( + let (col_row_id, col_timepoint, col_entity_path, col_num_instances, column): ( RowIdVec, TimePointVec, EntityPathVec, @@ -275,7 +296,7 @@ impl DataTable { Vec<_>, ) = rows .map(|row| { - components.extend(row.components()); + components.extend(row.component_names()); let DataRow { row_id, timepoint, @@ -305,7 +326,7 @@ impl DataTable { } } - if row_id.len() > 1 { + if col_row_id.len() > 1 { re_log::warn_once!( "batching features are not ready for use, use single-row data tables instead!" 
); @@ -313,10 +334,10 @@ impl DataTable { Self { table_id, - row_id, - timepoint, - entity_path, - num_instances, + col_row_id, + col_timepoint, + col_entity_path, + col_num_instances, columns, } } @@ -325,7 +346,7 @@ impl DataTable { impl DataTable { #[inline] pub fn num_rows(&self) -> u32 { - self.row_id.len() as _ + self.col_row_id.len() as _ } #[inline] @@ -334,10 +355,10 @@ impl DataTable { let Self { table_id: _, - row_id, - timepoint, - entity_path, - num_instances, + col_row_id, + col_timepoint, + col_entity_path, + col_num_instances, columns, } = self; @@ -347,20 +368,66 @@ impl DataTable { .filter_map(|rows| rows[i].clone() /* shallow */); DataRow::from_cells( - row_id[i], - timepoint[i].clone(), - entity_path[i].clone(), - num_instances[i], + col_row_id[i], + col_timepoint[i].clone(), + col_entity_path[i].clone(), + col_num_instances[i], cells, ) }) } + #[inline] + pub fn into_rows(self) -> impl Iterator { + let Self { + table_id: _, + col_row_id, + col_timepoint, + col_entity_path, + col_num_instances, + columns, + } = self; + + use itertools::Itertools as _; + + let col_row_id = col_row_id.into_iter(); + let col_timepoint = col_timepoint.into_iter(); + let col_entity_path = col_entity_path.into_iter(); + let col_num_instances = col_num_instances.into_iter(); + + let mut columns = columns + .into_values() + .map(|column| column.0.into_iter()) + .collect_vec(); + let rows = std::iter::from_fn(move || { + let mut next = Vec::with_capacity(columns.len()); + for column in &mut columns { + if let Some(cell) = column.next()? { + next.push(cell); + } + } + + Some(next) + }); + + let control = itertools::izip!( + col_row_id, + col_timepoint, + col_entity_path, + col_num_instances, + rows, + ); + + control.map(|(row_id, timepoint, entity_path, num_instances, cells)| { + DataRow::from_cells(row_id, timepoint, entity_path, num_instances, cells) + }) + } + /// Computes the maximum value for each and every timeline present across this entire table, /// and returns the corresponding [`TimePoint`]. #[inline] pub fn timepoint_max(&self) -> TimePoint { - self.timepoint + self.col_timepoint .iter() .fold(TimePoint::timeless(), |acc, tp| acc.union_max(tp)) } @@ -382,6 +449,8 @@ use arrow2_convert::{ // TODO(#1696): Those names should come from the datatypes themselves. +// TODO: We need a fast path for primitive data + pub const COLUMN_ROW_ID: &str = "rerun.row_id"; pub const COLUMN_TIMEPOINT: &str = "rerun.timepoint"; pub const COLUMN_ENTITY_PATH: &str = "rerun.entity_path"; @@ -434,74 +503,36 @@ impl DataTable { fn serialize_control_columns(&self) -> DataTableResult<(Schema, Vec>)> { crate::profile_function!(); - /// Serializes an iterable of dense arrow-like data. - fn serialize_dense_column + 'static>( - name: &str, - values: &[C], - ) -> DataTableResult<(Field, Box)> { - let data: Box = values.try_into_arrow()?; - // let data = unit_values_to_unit_lists(data); - - let mut field = Field::new(name, data.data_type().clone(), false).with_metadata( - [(METADATA_KIND.to_owned(), METADATA_KIND_CONTROL.to_owned())].into(), - ); - - // TODO(cmc): why do we have to do this manually on the way out, but it's done - // automatically on our behalf on the way in...? - if let DataType::Extension(name, _, _) = data.data_type() { - field - .metadata - .extend([("ARROW:extension:name".to_owned(), name.clone())]); - } - - Ok((field, data)) - } - - /// Transforms an array of unit values into a list of unit arrays. 
- /// - /// * Before: `[C, C, C, C, C, ...]` - /// * After: `ListArray[ [C], [C], [C], [C], [C], ... ]` - // NOTE: keeping that one around, just in case. - #[allow(dead_code)] - fn unit_values_to_unit_lists(array: Box) -> Box { - let datatype = array.data_type().clone(); - let datatype = ListArray::::default_datatype(datatype); - let offsets = Offsets::try_from_lengths(std::iter::repeat(1).take(array.len())) - .unwrap() - .into(); - let validity = None; - ListArray::::new(datatype, offsets, array, validity).boxed() - } - let Self { table_id, - row_id, - timepoint, - entity_path, - num_instances, + col_row_id, + col_timepoint, + col_entity_path, + col_num_instances, columns: _, } = self; let mut schema = Schema::default(); let mut columns = Vec::new(); - let (row_id_field, row_id_column) = serialize_dense_column(COLUMN_ROW_ID, row_id)?; + let (row_id_field, row_id_column) = + Self::serialize_control_column(COLUMN_ROW_ID, col_row_id)?; schema.fields.push(row_id_field); columns.push(row_id_column); let (timepoint_field, timepoint_column) = - serialize_dense_column(COLUMN_TIMEPOINT, timepoint)?; + Self::serialize_control_column(COLUMN_TIMEPOINT, col_timepoint)?; schema.fields.push(timepoint_field); columns.push(timepoint_column); let (entity_path_field, entity_path_column) = - serialize_dense_column(COLUMN_ENTITY_PATH, entity_path)?; + Self::serialize_control_column(COLUMN_ENTITY_PATH, col_entity_path)?; schema.fields.push(entity_path_field); columns.push(entity_path_column); // TODO(#1712): This is unnecessarily slow... let (num_instances_field, num_instances_column) = - serialize_dense_column(COLUMN_NUM_INSTANCES, num_instances)?; + Self::serialize_control_column(COLUMN_NUM_INSTANCES, col_num_instances)?; schema.fields.push(num_instances_field); columns.push(num_instances_column); @@ -510,6 +541,44 @@ impl DataTable { Ok((schema, columns)) } + /// Serializes a single control column: an iterable of dense arrow-like data. + pub fn serialize_control_column + 'static>( + name: &str, + values: &[C], + ) -> DataTableResult<(Field, Box)> { + /// Transforms an array of unit values into a list of unit arrays. + /// + /// * Before: `[C, C, C, C, C, ...]` + /// * After: `ListArray[ [C], [C], [C], [C], [C], ... ]` + // NOTE: keeping that one around, just in case. + #[allow(dead_code)] + fn unit_values_to_unit_lists(array: Box) -> Box { + let datatype = array.data_type().clone(); + let datatype = ListArray::::default_datatype(datatype); + let offsets = Offsets::try_from_lengths(std::iter::repeat(1).take(array.len())) + .unwrap() + .into(); + let validity = None; + ListArray::::new(datatype, offsets, array, validity).boxed() + } + + let data: Box = values.try_into_arrow()?; + // let data = unit_values_to_unit_lists(data); + + let mut field = Field::new(name, data.data_type().clone(), false) + .with_metadata([(METADATA_KIND.to_owned(), METADATA_KIND_CONTROL.to_owned())].into()); + + // TODO(cmc): why do we have to do this manually on the way out, but it's done + // automatically on our behalf on the way in...? + if let DataType::Extension(name, _, _) = data.data_type() { + field + .metadata + .extend([("ARROW:extension:name".to_owned(), name.clone())]); + } + + Ok((field, data)) + } + /// Serializes all data columns into an arrow payload and schema. 
/// /// They are optional, potentially sparse, and never deserialized on the server-side (not by @@ -519,49 +588,30 @@ impl DataTable { let Self { table_id: _, - row_id: _, - timepoint: _, - entity_path: _, - num_instances: _, + col_row_id: _, + col_timepoint: _, + col_entity_path: _, + col_num_instances: _, columns: table, } = self; let mut schema = Schema::default(); let mut columns = Vec::new(); - fn serialize_sparse_column( - name: &str, - column: &[Option], - ) -> DataTableResult<(Field, Box)> { - // TODO(cmc): All we're doing here is allocating and filling a nice contiguous array so - // our `ListArray`s can compute their indices and for the serializer to work with... - // In a far enough future, we could imagine having much finer grain control over the - // serializer and doing all of this at once, bypassing all the mem copies and - // allocations. - - let cell_refs = column - .iter() - .flatten() - .map(|cell| cell.as_arrow_ref()) - .collect_vec(); - - // NOTE: Avoid paying for the cost of the concatenation machinery if there's a single - // row in the column. - let data = if cell_refs.len() == 1 { - data_to_lists(column, cell_refs[0].to_boxed()) - } else { - // NOTE: This is a column of cells, it shouldn't ever fail to concatenate since - // they share the same underlying type. - let data = arrow2::compute::concatenate::concatenate(cell_refs.as_slice())?; - data_to_lists(column, data) - }; - - let field = Field::new(name, data.data_type().clone(), false) - .with_metadata([(METADATA_KIND.to_owned(), METADATA_KIND_DATA.to_owned())].into()); - - Ok((field, data)) + for (component, rows) in table { + let (field, column) = Self::serialize_data_column(component.as_str(), rows)?; + schema.fields.push(field); + columns.push(column); } + Ok((schema, columns)) + } + + /// Serializes a single data column. + pub fn serialize_data_column( + name: &str, + column: &[Option], + ) -> DataTableResult<(Field, Box)> { /// Create a list-array out of a flattened array of cell values. /// /// * Before: `[C, C, C, C, C, C, C, ...]` @@ -584,20 +634,40 @@ impl DataTable { ListArray::::new(datatype, offsets, data, validity.into()).boxed() } - for (component, rows) in table { - let (field, column) = serialize_sparse_column(component.as_str(), rows)?; - schema.fields.push(field); - columns.push(column); - } + // TODO(cmc): All we're doing here is allocating and filling a nice contiguous array so + // our `ListArray`s can compute their indices and for the serializer to work with... + // In a far enough future, we could imagine having much finer grain control over the + // serializer and doing all of this at once, bypassing all the mem copies and + // allocations. - Ok((schema, columns)) + let cell_refs = column + .iter() + .flatten() + .map(|cell| cell.as_arrow_ref()) + .collect_vec(); + + // NOTE: Avoid paying for the cost of the concatenation machinery if there's a single + // row in the column. + let data = if cell_refs.len() == 1 { + data_to_lists(column, cell_refs[0].to_boxed()) + } else { + // NOTE: This is a column of cells, it shouldn't ever fail to concatenate since + // they share the same underlying type. + let data = arrow2::compute::concatenate::concatenate(cell_refs.as_slice())?; + data_to_lists(column, data) + }; + + let field = Field::new(name, data.data_type().clone(), false) + .with_metadata([(METADATA_KIND.to_owned(), METADATA_KIND_DATA.to_owned())].into()); + + Ok((field, data)) } } impl DataTable { /// Deserializes an entire table from an arrow payload and schema. 
pub fn deserialize( - table_id: MsgId, + table_id: TableId, schema: &Schema, chunk: &Chunk>, ) -> DataTableResult { @@ -621,14 +691,14 @@ impl DataTable { }; // NOTE: the unwrappings cannot fail since control_index() makes sure the index is valid - let row_id = + let col_row_id = (&**chunk.get(control_index(COLUMN_ROW_ID)?).unwrap()).try_into_collection()?; - let timepoint = + let col_timepoint = (&**chunk.get(control_index(COLUMN_TIMEPOINT)?).unwrap()).try_into_collection()?; - let entity_path = + let col_entity_path = (&**chunk.get(control_index(COLUMN_ENTITY_PATH)?).unwrap()).try_into_collection()?; // TODO(#1712): This is unnecessarily slow... - let num_instances = + let col_num_instances = (&**chunk.get(control_index(COLUMN_NUM_INSTANCES)?).unwrap()).try_into_collection()?; let columns: DataTableResult<_> = schema @@ -655,10 +725,10 @@ impl DataTable { Ok(Self { table_id, - row_id, - timepoint, - entity_path, - num_instances, + col_row_id, + col_timepoint, + col_entity_path, + col_num_instances, columns, }) } @@ -731,3 +801,62 @@ impl std::fmt::Display for DataTable { .fmt(f) } } + +// --- + +#[cfg(not(target_arch = "wasm32"))] +impl DataTable { + /// Crafts a simple but interesting `DataTable`. + pub fn example(timeless: bool) -> Self { + let table_id = TableId::random(); + + let timepoint = |frame_nr: i64| { + if timeless { + TimePoint::timeless() + } else { + TimePoint::from([ + (Timeline::new_temporal("log_time"), Time::now().into()), + (Timeline::new_sequence("frame_nr"), frame_nr.into()), + ]) + } + }; + + let row0 = { + let num_instances = 2; + let points: &[Point2D] = &[[10.0, 10.0].into(), [20.0, 20.0].into()]; + let colors: &[_] = &[ColorRGBA::from_rgb(128, 128, 128)]; + let labels: &[Label] = &[]; + + DataRow::from_cells3( + RowId::random(), + "a", + timepoint(1), + num_instances, + (points, colors, labels), + ) + }; + + let row1 = { + let num_instances = 0; + let colors: &[ColorRGBA] = &[]; + + DataRow::from_cells1(RowId::random(), "b", timepoint(1), num_instances, colors) + }; + + let row2 = { + let num_instances = 1; + let colors: &[_] = &[ColorRGBA::from_rgb(255, 255, 255)]; + let labels: &[_] = &[Label("hey".into())]; + + DataRow::from_cells2( + RowId::random(), + "c", + timepoint(2), + num_instances, + (colors, labels), + ) + }; + + DataTable::from_rows(table_id, [row0, row1, row2]) + } +} diff --git a/crates/re_log_types/src/lib.rs b/crates/re_log_types/src/lib.rs index 61b0aa817f671..8d7bc6b2c8a27 100644 --- a/crates/re_log_types/src/lib.rs +++ b/crates/re_log_types/src/lib.rs @@ -42,13 +42,18 @@ pub use self::component_types::context; pub use self::component_types::coordinates; pub use self::component_types::AnnotationContext; pub use self::component_types::Arrow3D; -pub use self::component_types::MsgId; pub use self::component_types::ViewCoordinates; pub use self::component_types::{EncodedMesh3D, Mesh3D, MeshFormat, MeshId, RawMesh3D}; +pub use self::component_types::{MsgId, RowId, TableId}; pub use self::data::*; pub use self::data_cell::{DataCell, DataCellError, DataCellResult}; pub use self::data_row::{DataRow, DataRowError, DataRowResult}; -pub use self::data_table::{DataTable, DataTableError, DataTableResult}; +pub use self::data_table::{ + DataCellColumn, DataCellOptVec, DataTable, DataTableError, DataTableResult, EntityPathVec, + ErasedTimeVec, NumInstancesVec, RowIdVec, TimePointVec, COLUMN_ENTITY_PATH, + COLUMN_NUM_INSTANCES, COLUMN_ROW_ID, COLUMN_TIMEPOINT, METADATA_KIND, METADATA_KIND_CONTROL, + METADATA_KIND_DATA, +}; pub use self::index::*; pub use 
self::path::*; pub use self::time::{Duration, Time}; diff --git a/crates/re_query/src/query.rs b/crates/re_query/src/query.rs index f52130654907a..ba9a538a82006 100644 --- a/crates/re_query/src/query.rs +++ b/crates/re_query/src/query.rs @@ -49,16 +49,17 @@ pub fn get_component_with_instances( ) -> crate::Result { let components = [InstanceKey::name(), component]; - let row_indices = store + let mut cells = store .latest_at(query, ent_path, component, &components) .ok_or(QueryError::PrimaryNotFound)?; - let mut results = store.get(&components, &row_indices); - Ok(ComponentWithInstances { name: component, - instance_keys: results[0].take(), - values: results[1].take().ok_or(QueryError::PrimaryNotFound)?, + instance_keys: cells[0].take().map(|cell| cell.into_arrow()), + values: cells[1] + .take() + .map(|cell| cell.into_arrow()) + .ok_or(QueryError::PrimaryNotFound)?, }) } diff --git a/crates/re_query/src/range.rs b/crates/re_query/src/range.rs index 41d59a81c974f..8eb8496814b67 100644 --- a/crates/re_query/src/range.rs +++ b/crates/re_query/src/range.rs @@ -4,6 +4,8 @@ use re_log_types::{Component, ComponentName, EntityPath}; use crate::{get_component_with_instances, ComponentWithInstances, EntityView}; +// TODO: propagate datacells all the way through re_query + // --- /// Iterates over the rows of any number of components and their respective cluster keys, all from @@ -93,27 +95,21 @@ pub fn range_entity_with_primary<'a, Primary: Component + 'a, const N: usize>( .chain( store .range(query, ent_path, components) - .map(move |(time, _, row_indices)| { - let results = store.get(&components, &row_indices); - let instance_keys = results[cluster_col].clone(); // shallow - let cwis = results + .map(move |(time, _, mut cells)| { + let instance_keys = cells[cluster_col].take().map(|cell| cell.into_arrow()); + let is_primary = cells[primary_col].is_some(); + let cwis = cells .into_iter() .enumerate() - .map(|(i, res)| { - res.map(|res| { - ComponentWithInstances { - name: components[i], - instance_keys: instance_keys.clone(), // shallow - values: res.clone(), // shallow - } + .map(|(i, cell)| { + cell.map(|cell| ComponentWithInstances { + name: components[i], + instance_keys: instance_keys.clone(), /* shallow */ + values: cell.into_arrow(), }) }) .collect::>(); - ( - time, - row_indices[primary_col].is_some(), // is_primary - cwis, - ) + (time, is_primary, cwis) }), ) .filter_map(move |(time, is_primary, cwis)| { diff --git a/crates/re_viewer/src/ui/memory_panel.rs b/crates/re_viewer/src/ui/memory_panel.rs index 98bd19dd1c8bd..bff514d5f7b84 100644 --- a/crates/re_viewer/src/ui/memory_panel.rs +++ b/crates/re_viewer/src/ui/memory_panel.rs @@ -26,9 +26,7 @@ impl MemoryPanel { (gpu_resource_stats.total_buffer_size_in_bytes + gpu_resource_stats.total_texture_size_in_bytes) as _, ), - Some( - (store_stats.total_index_size_bytes + store_stats.total_component_size_bytes) as _, - ), + Some(store_stats.total_index_size_bytes as _), ); } @@ -198,22 +196,9 @@ impl MemoryPanel { ui.label(re_format::format_number(num_rows as _)) } }; - let label_size = |ui: &mut egui::Ui, size| { - if size == u64::MAX { - ui.label("+∞") - } else { - ui.label(re_format::format_bytes(size as _)) - } - }; ui.label("Indices:"); - label_rows(ui, config.index_bucket_nb_rows); - label_size(ui, config.index_bucket_size_bytes); - ui.end_row(); - - ui.label("Components:"); - label_rows(ui, config.component_bucket_nb_rows); - label_size(ui, config.component_bucket_size_bytes); + label_rows(ui, config.indexed_bucket_num_rows); 
ui.end_row(); }); @@ -225,18 +210,11 @@ impl MemoryPanel { let DataStoreStats { total_timeless_index_rows, total_timeless_index_size_bytes, - total_timeless_component_rows, - total_timeless_component_size_bytes, total_temporal_index_rows, total_temporal_index_size_bytes, total_temporal_index_buckets, - total_temporal_component_rows, - total_temporal_component_size_bytes, - total_temporal_component_buckets, total_index_rows, total_index_size_bytes, - total_component_rows, - total_component_size_bytes, config: _, } = *store_stats; @@ -271,24 +249,6 @@ impl MemoryPanel { label_rows(ui, total_index_rows); label_size(ui, total_index_size_bytes); ui.end_row(); - - ui.label("Components (timeless):"); - ui.label(""); - label_rows(ui, total_timeless_component_rows); - label_size(ui, total_timeless_component_size_bytes); - ui.end_row(); - - ui.label("Components (temporal):"); - label_buckets(ui, total_temporal_component_buckets); - label_rows(ui, total_temporal_component_rows); - label_size(ui, total_temporal_component_size_bytes); - ui.end_row(); - - ui.label("Components (total):"); - label_buckets(ui, total_temporal_component_buckets); - label_rows(ui, total_component_rows); - label_size(ui, total_component_size_bytes); - ui.end_row(); }); }
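
As a quick illustration of the column-oriented `DataTable` introduced in this patch, here is a minimal sketch of a row round-trip. It only leans on `DataTable::example`, `num_rows`, `into_rows` and `from_rows` as they appear in the diff above; the `round_trip_rows` name and the assertions are illustrative and not part of the patch itself.

    use re_log_types::{DataTable, TableId};

    fn round_trip_rows() {
        // Builds the three-row table crafted in `DataTable::example` above
        // (points/colors/labels logged under "a", "b" and "c").
        let table = DataTable::example(false /* timeless */);
        assert_eq!(3, table.num_rows());

        // Consuming the table yields one `DataRow` per entry of `col_row_id`;
        // `None` entries of the sparse data columns are simply skipped.
        let rows: Vec<_> = table.into_rows().collect();
        assert_eq!(3, rows.len());

        // A new table, with a fresh `TableId`, can then be rebuilt from those rows,
        // e.g. on the receiving end of a transport.
        let rebuilt = DataTable::from_rows(TableId::random(), rows);
        assert_eq!(3, rebuilt.num_rows());
    }

Because `into_rows` consumes the table and re-zips the control columns (`col_row_id`, `col_timepoint`, `col_entity_path`, `col_num_instances`) with the surviving cells of each data column, the rebuilt table ends up with the same three rows, only under a different `TableId`.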