
Commit

chore(cubestore): Upgrade DF: fix partial aggregate not pushed under ClusterSend
paveltiunov committed Nov 29, 2024
1 parent 3f8b2df commit 6316cfd
Showing 4 changed files with 13 additions and 28 deletions.
2 changes: 1 addition & 1 deletion rust/cubestore/cubestore-sql-tests/src/multiproc.rs
@@ -37,7 +37,7 @@ where
    for inputs in worker_inputs {
        let (send_done, recv_done) = ipc_channel::ipc::bytes_channel().unwrap();
        let args = (send_init.clone(), recv_done, inputs, timeout);
-       let handle = respawn(args, &[], &[]).unwrap();
+       let handle = respawn(args, &["--".to_string(), "--nocapture".to_string()], &[]).unwrap();
        // Ensure we signal completion to all started workers even if errors occur along the way.
        join_workers.push(scopeguard::guard(
            (send_done, handle),
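The extra arguments passed to `respawn` forward the usual test-harness flags to the re-spawned worker process so its output is not swallowed by the test runner. Below is a minimal standalone sketch of the same idea using only the standard library; the `IS_WORKER` marker variable is illustrative, not Cube Store's actual respawn mechanism.

```rust
use std::process::Command;

fn main() -> std::io::Result<()> {
    // A re-spawned child sees this marker and runs its worker body instead of
    // spawning again (cubestore's respawn module registers handlers for this;
    // the env-var marker here is only an illustration).
    if std::env::var("IS_WORKER").is_ok() {
        println!("worker process started, stdout stays visible");
        return Ok(());
    }

    // Re-run the current binary, forwarding the test-harness flags
    // "-- --nocapture" so the child's output is not captured.
    let exe = std::env::current_exe()?;
    let status = Command::new(exe)
        .args(["--", "--nocapture"])
        .env("IS_WORKER", "1")
        .status()?;
    println!("worker exited with {status}");
    Ok(())
}
```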
8 changes: 7 additions & 1 deletion rust/cubestore/cubestore-sql-tests/tests/cluster.rs
@@ -6,6 +6,7 @@ use serde_derive::{Deserialize, Serialize};

use cubestore::config::Config;
use cubestore::util::respawn;
+use cubestore::util::respawn::register_pushdownable_envs;
use cubestore_sql_tests::multiproc::{
    multiproc_child_main, run_multiproc_test, MultiProcTest, SignalInit, WaitCompletion, WorkerProc,
};
@@ -16,6 +17,7 @@ const WORKER_PORTS: [u16; 2] = [51337, 51338];

#[cfg(not(target_os = "windows"))]
fn main() {
+   register_pushdownable_envs(&["CUBESTORE_TEST_LOG_WORKER"]);
    respawn::register_handler(multiproc_child_main::<ClusterSqlTest>);
    respawn::init(); // TODO: logs in worker processes.

@@ -99,7 +101,11 @@ impl WorkerProc<WorkerArgs> for WorkerFn {
        }
        Config::test(&test_name)
            .update_config(|mut c| {
-               c.select_worker_pool_size = 2;
+               c.select_worker_pool_size = if std::env::var("CUBESTORE_TEST_LOG_WORKER").is_ok() {
+                   0
+               } else {
+                   2
+               };
                c.server_name = format!("localhost:{}", WORKER_PORTS[id]);
                c.worker_bind_address = Some(c.server_name.clone());
                c.metastore_remote_address = Some(format!("localhost:{}", METASTORE_PORT));
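`register_pushdownable_envs` presumably marks `CUBESTORE_TEST_LOG_WORKER` as an environment variable that gets propagated to re-spawned worker processes, and setting `select_worker_pool_size` to 0 when that variable is present appears to run selects in-process so worker logs show up in the test output. A trivial standalone sketch of the same switch (not Cube Store's config API):

```rust
/// Pick the select worker pool size the way the test above does:
/// 0 (no separate pool, so selects run in-process and logs stay visible)
/// when CUBESTORE_TEST_LOG_WORKER is set, otherwise 2.
fn select_worker_pool_size() -> u64 {
    if std::env::var("CUBESTORE_TEST_LOG_WORKER").is_ok() {
        0
    } else {
        2
    }
}

fn main() {
    println!("select_worker_pool_size = {}", select_worker_pool_size());
}
```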
19 changes: 2 additions & 17 deletions rust/cubestore/cubestore/src/queryplanner/planning.rs
@@ -1627,30 +1627,15 @@ impl CubeExtensionPlanner {
        }
        // Note that MergeExecs are added automatically when needed.
        if let Some(c) = self.cluster.as_ref() {
-           let mut send: Arc<dyn ExecutionPlan> = Arc::new(ClusterSendExec::new(
+           Ok(Arc::new(ClusterSendExec::new(
                schema,
                c.clone(),
                self.serialized_plan.clone(),
                snapshots,
                input,
                use_streaming,
-           )?);
-           // TODO upgrade DF
-           if send.properties().partitioning.partition_count() != 1 {
-               send = Arc::new(RepartitionExec::try_new(
-                   send,
-                   Partitioning::UnknownPartitioning(1),
-               )?);
-           }
-           Ok(send)
+           )?))
        } else {
-           // TODO upgrade DF
-           if input.output_partitioning().partition_count() != 1 {
-               input = Arc::new(RepartitionExec::try_new(
-                   input,
-                   Partitioning::UnknownPartitioning(1),
-               )?);
-           }
            Ok(Arc::new(WorkerExec {
                input,
                schema,
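Before this change, ClusterSendExec (and WorkerExec's input) was wrapped in a RepartitionExec forcing UnknownPartitioning(1); with an extra node sitting between the aggregate and ClusterSend, a rule that splits an aggregate and pushes the partial half below the send node no longer matches, which is presumably the bug named in the commit message. The sketch below is a toy plan model, not the DataFusion API or Cube Store's actual optimizer rule; it only illustrates how an interposed wrapper defeats a direct-child pattern match:

```rust
// A toy plan model: a rewrite that pushes a partial aggregate directly below
// a ClusterSend node only matches when ClusterSend is the aggregate's
// immediate input, so an extra Repartition wrapper in between defeats it.
#[derive(Debug, Clone)]
enum Plan {
    Scan,
    ClusterSend(Box<Plan>),
    Repartition(Box<Plan>),
    PartialAggregate(Box<Plan>),
}

fn push_partial_aggregate_under_cluster_send(plan: Plan) -> Plan {
    match plan {
        // PartialAggregate(ClusterSend(x)) => ClusterSend(PartialAggregate(x))
        Plan::PartialAggregate(inner) => match *inner {
            Plan::ClusterSend(input) => {
                Plan::ClusterSend(Box::new(Plan::PartialAggregate(input)))
            }
            other => Plan::PartialAggregate(Box::new(other)),
        },
        other => other,
    }
}

fn main() {
    let direct = Plan::PartialAggregate(Box::new(Plan::ClusterSend(Box::new(Plan::Scan))));
    let wrapped = Plan::PartialAggregate(Box::new(Plan::Repartition(Box::new(
        Plan::ClusterSend(Box::new(Plan::Scan)),
    ))));
    // Pushed below the send node:
    println!("{:?}", push_partial_aggregate_under_cluster_send(direct));
    // The Repartition wrapper blocks the rewrite, mirroring the bug fixed here:
    println!("{:?}", push_partial_aggregate_under_cluster_send(wrapped));
}
```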
12 changes: 3 additions & 9 deletions rust/cubestore/cubestore/src/queryplanner/query_executor.rs
@@ -748,12 +748,9 @@ impl CubeTable {
        }

        let schema = table_projected_schema;
-       let partition_num = partition_execs
-           .iter()
-           .map(|c| c.properties().partitioning.partition_count())
-           .sum();
+       let partition_num = partition_execs.len();

-       let read_data = Arc::new(CubeTableExec {
+       let read_data: Arc<dyn ExecutionPlan> = Arc::new(CubeTableExec {
            schema: schema.clone(),
            partition_execs,
            index_snapshot: self.index_snapshot.clone(),
@@ -856,10 +853,7 @@
                .collect::<Result<Vec<_>, _>>()?;
            Arc::new(SortPreservingMergeExec::new(join_columns, read_data))
        } else {
-           Arc::new(RepartitionExec::try_new(
-               read_data,
-               Partitioning::UnknownPartitioning(1),
-           )?)
+           read_data
        };

        Ok(plan)
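The two hunks above make `partition_num` count one partition per child exec instead of summing the children's own partition counts, and return `read_data` directly instead of repartitioning it down to a single partition. A toy comparison of the two counting strategies (plain structs, not the DataFusion ExecutionPlan trait):

```rust
// Toy stand-in for a child execution plan that exposes its partition count.
struct ChildExec {
    partitions: usize,
}

fn main() {
    let partition_execs = vec![
        ChildExec { partitions: 1 },
        ChildExec { partitions: 3 },
        ChildExec { partitions: 1 },
    ];

    // Old approach: total partitions across all children.
    let summed: usize = partition_execs.iter().map(|c| c.partitions).sum();
    // New approach: one partition per child exec.
    let per_child = partition_execs.len();

    assert_eq!(summed, 5);
    assert_eq!(per_child, 3);
    println!("summed = {summed}, per child = {per_child}");
}
```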
