Commit 783c645

chore(cubestore): Upgrade DF: Reimplement and use PreSerializedPlan::remove_unused_tables
1 parent 7956b2a · commit 783c645

File tree

3 files changed: +425 −392


rust/cubestore/cubestore/src/queryplanner/query_executor.rs

+5 −5
@@ -1504,21 +1504,21 @@ impl ClusterSendExec {
         }
     }

-    pub fn worker_plans(&self) -> Vec<(String, PreSerializedPlan)> {
+    pub fn worker_plans(&self) -> Result<Vec<(String, PreSerializedPlan)>, CubeError> {
         let mut res = Vec::new();
         for (node_name, partitions) in self.partitions.iter() {
             res.push((
                 node_name.clone(),
-                self.serialized_plan_for_partitions(partitions),
+                self.serialized_plan_for_partitions(partitions)?,
             ));
         }
-        res
+        Ok(res)
     }

     fn serialized_plan_for_partitions(
         &self,
         partitions: &(Vec<(u64, RowRange)>, Vec<InlineTableId>),
-    ) -> PreSerializedPlan {
+    ) -> Result<PreSerializedPlan, CubeError> {
         let (partitions, inline_table_ids) = partitions;
         let mut ps = HashMap::<_, RowFilter>::new();
         for (id, range) in partitions {
@@ -1577,7 +1577,7 @@ impl ExecutionPlan for ClusterSendExec {
     ) -> Result<SendableRecordBatchStream, DataFusionError> {
         let (node_name, partitions) = &self.partitions[partition];

-        let plan = self.serialized_plan_for_partitions(partitions);
+        let plan = self.serialized_plan_for_partitions(partitions)?;

         let cluster = self.cluster.clone();
         let schema = self.properties.eq_properties.schema().clone();
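With this change both helpers become fallible: serialized_plan_for_partitions and worker_plans now return Result<_, CubeError>, so call sites propagate plan-serialization failures with ? instead of assuming plan construction cannot fail. A minimal sketch of a call site after the change (the send_plans_to_workers function and its body are illustrative assumptions, not code from this commit):

// Hypothetical caller, sketching how the new Result-returning signature is
// consumed; ClusterSendExec, PreSerializedPlan and CubeError are the real
// types touched by this commit, everything else here is illustrative.
fn send_plans_to_workers(exec: &ClusterSendExec) -> Result<(), CubeError> {
    // worker_plans() now yields Result<Vec<(String, PreSerializedPlan)>, CubeError>,
    // so any error raised inside serialized_plan_for_partitions bubbles up via `?`.
    for (node_name, plan) in exec.worker_plans()? {
        // Each entry pairs a worker node name with the plan that node should run;
        // in real code the plan would be serialized and dispatched to that worker.
        println!("prepared plan for worker {}", node_name);
        let _ = plan;
    }
    Ok(())
}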
