3 changes: 3 additions & 0 deletions kernel/Cargo.toml
@@ -65,6 +65,8 @@ tokio = { version = "1.47", optional = true, features = ["rt-multi-thread"] }
# both arrow versions below are optional and require object_store
object_store = { version = "0.12.3", optional = true, features = ["aws", "azure", "gcp", "http"] }
comfy-table = { version = "7.1", optional = true }
# used for Float trait in stats computation
num-traits = { version = "0.2", optional = true }

# arrow 56
[dependencies.arrow_56]
@@ -118,6 +120,7 @@ default-engine-base = [
"arrow-expression",
"futures",
"need-arrow",
"num-traits",
"tokio",
]
# the default-engine-native-tls feature uses the reqwest crate with default features, which uses native-tls. if you want
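The `num-traits` dependency exists to provide the `Float` trait bound used by the new stats computation. The stats module itself is not expanded in this diff, so the snippet below is only a minimal sketch of why that bound matters: per-column min/max accumulation over floating-point data has to skip NaN values rather than let them become the recorded minimum. The `update_min` helper and its signature are hypothetical, not the module's actual API.

```rust
// Hypothetical helper (not the actual stats.rs API): NaN-aware minimum tracking,
// the kind of logic the `num_traits::Float` bound enables for stats collection.
use num_traits::Float;

/// Fold a value into an optional running minimum, ignoring NaNs.
fn update_min<T: Float>(current: Option<T>, value: T) -> Option<T> {
    if value.is_nan() {
        return current; // NaN must never become the column minimum
    }
    match current {
        Some(min) if min <= value => Some(min),
        _ => Some(value),
    }
}

fn main() {
    let min = [3.5f64, f64::NAN, 1.25, 2.0]
        .into_iter()
        .fold(None::<f64>, update_min);
    assert_eq!(min, Some(1.25));
}
```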
4 changes: 2 additions & 2 deletions kernel/examples/write-table/src/main.rs
@@ -94,9 +94,9 @@ async fn try_main() -> DeltaResult<()> {
.with_data_change(true);

// Write the data using the engine
let write_context = Arc::new(txn.get_write_context());
let write_context = txn.get_write_context()?;
let file_metadata = engine
.write_parquet(&sample_data, write_context.as_ref(), HashMap::new())
.write_parquet(&sample_data, &write_context, HashMap::new())
.await?;

// Add the file metadata to the transaction
8 changes: 7 additions & 1 deletion kernel/src/engine/default/mod.rs
@@ -33,6 +33,7 @@ pub mod file_stream;
pub mod filesystem;
pub mod json;
pub mod parquet;
pub mod stats;
pub mod storage;

/// Converts a Stream-producing future to a synchronous iterator.
@@ -216,7 +217,12 @@ impl<E: TaskExecutor> DefaultEngine<E> {
)?;
let physical_data = logical_to_physical_expr.evaluate(data)?;
self.parquet
.write_parquet_file(write_context.target_dir(), physical_data, partition_values)
.write_parquet_file(
write_context.target_dir(),
physical_data,
partition_values,
write_context.stats_columns(),
)
.await
}
}
103 changes: 82 additions & 21 deletions kernel/src/engine/default/parquet.rs
@@ -7,7 +7,7 @@
use delta_kernel_derive::internal_api;

use crate::arrow::array::builder::{MapBuilder, MapFieldNames, StringBuilder};
use crate::arrow::array::{Int64Array, RecordBatch, StringArray, StructArray};
use crate::arrow::array::{Array, Int64Array, RecordBatch, StringArray, StructArray};
use crate::arrow::datatypes::{DataType, Field};
use crate::parquet::arrow::arrow_reader::{
ArrowReaderMetadata, ArrowReaderOptions, ParquetRecordBatchReaderBuilder,
@@ -23,9 +23,10 @@
use uuid::Uuid;

use super::file_stream::{FileOpenFuture, FileOpener, FileStream};
use super::stats::StatisticsCollector;
use super::UrlExt;
use crate::engine::arrow_conversion::{TryFromArrow as _, TryIntoArrow as _};
use crate::engine::arrow_data::ArrowEngineData;
use crate::engine::arrow_data::{extract_record_batch, ArrowEngineData};
use crate::engine::arrow_utils::{
fixup_parquet_read, generate_mask, get_requested_indices, ordering_needs_row_indexes,
RowIndexBuilder,
@@ -54,16 +55,25 @@
file_meta: FileMeta,
// NB: We use usize instead of u64 since arrow uses usize for record batch sizes
num_records: usize,
/// Collected statistics for this file (optional).
stats: Option<StructArray>,
}

impl DataFileMetadata {
pub fn new(file_meta: FileMeta, num_records: usize) -> Self {
Self {
file_meta,
num_records,
stats: None,
}
}

/// Set the collected statistics for this file.
pub fn with_stats(mut self, stats: StructArray) -> Self {
self.stats = Some(stats);
self
}

/// Convert DataFileMetadata into a record batch which matches the schema returned by
/// [`add_files_schema`].
///
@@ -81,6 +91,7 @@
size,
},
num_records,
stats,
} = self;
// create the record batch of the write metadata
let path = Arc::new(StringArray::from(vec![location.to_string()]));
@@ -104,20 +115,53 @@
.map_err(|_| Error::generic("Failed to convert parquet metadata 'size' to i64"))?;
let size = Arc::new(Int64Array::from(vec![size]));
let modification_time = Arc::new(Int64Array::from(vec![*last_modified]));
let stats = Arc::new(StructArray::try_new_with_length(
vec![Field::new("numRecords", DataType::Int64, true)].into(),
vec![Arc::new(Int64Array::from(vec![*num_records as i64]))],
None,
1,
)?);

Ok(Box::new(ArrowEngineData::new(RecordBatch::try_new(
Arc::new(
crate::transaction::BASE_ADD_FILES_SCHEMA
.as_ref()
.try_into_arrow()?,
// Use full stats if available, otherwise just numRecords
let stats_array: Arc<StructArray> = if let Some(full_stats) = stats {
Arc::new(full_stats.clone())
} else {
Arc::new(StructArray::try_new_with_length(
vec![Field::new("numRecords", DataType::Int64, true)].into(),
vec![Arc::new(Int64Array::from(vec![*num_records as i64]))],
None,
1,
)?)
};

// Build schema dynamically based on stats
let stats_field = Field::new("stats", stats_array.data_type().clone(), true);
let schema = crate::arrow::datatypes::Schema::new(vec![
Field::new("path", crate::arrow::datatypes::DataType::Utf8, false),
Field::new(
"partitionValues",
crate::arrow::datatypes::DataType::Map(
Arc::new(Field::new(
"key_value",
crate::arrow::datatypes::DataType::Struct(
vec![
Field::new("key", crate::arrow::datatypes::DataType::Utf8, false),
Field::new("value", crate::arrow::datatypes::DataType::Utf8, true),
]
.into(),
),
false,
)),
false,
),
false,
),
Field::new("size", crate::arrow::datatypes::DataType::Int64, false),
Field::new(
"modificationTime",
crate::arrow::datatypes::DataType::Int64,
false,
),
vec![path, partitions, size, modification_time, stats],
stats_field,
]);

Ok(Box::new(ArrowEngineData::new(RecordBatch::try_new(
Arc::new(schema),
vec![path, partitions, size, modification_time, stats_array],
)?)))
}
}
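Taken together, `new`, `with_stats`, and `as_record_batch` form a small builder-style flow. The sketch below shows how a caller inside this crate might attach a minimal stats struct before producing the add-files batch; the `FileMeta` literal, the single `numRecords` field, and all values are placeholders rather than anything prescribed by this diff.

```rust
// Sketch only: relies on the imports already at the top of this file and on the
// crate-internal FileMeta/DataFileMetadata types; all literal values are placeholders.
fn example_add_files_batch() -> DeltaResult<Box<dyn EngineData>> {
    let file_meta = FileMeta {
        location: url::Url::parse("memory:///data/part-00000.parquet").unwrap(),
        last_modified: 1_700_000_000_000,
        size: 1024,
    };
    // A stats struct carrying just numRecords, mirroring the fallback path above.
    let stats = StructArray::try_new_with_length(
        vec![Field::new("numRecords", DataType::Int64, true)].into(),
        vec![Arc::new(Int64Array::from(vec![3i64]))],
        None,
        1,
    )?;
    DataFileMetadata::new(file_meta, 3)
        .with_stats(stats)
        .as_record_batch(&HashMap::new())
}
```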
@@ -201,8 +245,22 @@
path: &url::Url,
data: Box<dyn EngineData>,
partition_values: HashMap<String, String>,
stats_columns: &[String],
) -> DeltaResult<Box<dyn EngineData>> {
let parquet_metadata = self.write_parquet(path, data).await?;
// Collect statistics from the data during write
let record_batch = extract_record_batch(data.as_ref())?;

// Initialize stats collector and update with this batch
let mut stats_collector = StatisticsCollector::new(record_batch.schema(), stats_columns)?;
stats_collector.update(record_batch, None)?; // No mask for new file writes

let stats = stats_collector.finalize()?;

// Write the parquet file
let mut parquet_metadata = self.write_parquet(path, data).await?;

// Attach the collected statistics
parquet_metadata = parquet_metadata.with_stats(stats);

parquet_metadata.as_record_batch(&partition_values)
}
}
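At this level the handler both collects the statistics and attaches them before building the add-files batch. Below is a hedged sketch of calling it directly, outside the `DefaultEngine::write_parquet` wrapper shown in mod.rs above; it assumes a `DefaultParquetHandler` bound as `parquet_handler`, with `target_dir`, `physical_data`, and `partition_values` already in scope, and `"id"` is a placeholder column name.

```rust
// Sketch only: the surrounding bindings and the column name are placeholders.
let stats_columns = vec!["id".to_string()];
let add_file_metadata = parquet_handler
    .write_parquet_file(&target_dir, physical_data, partition_values, &stats_columns)
    .await?;
```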
@@ -294,6 +352,7 @@
/// - `location` - The full URL path where the Parquet file should be written
/// (e.g., `s3://bucket/path/file.parquet`, `file:///path/to/file.parquet`).
/// - `data` - An iterator of engine data to be written to the Parquet file.
/// - `stats_columns` - Column names for which statistics should be collected.
///
/// # Returns
///
@@ -302,6 +361,7 @@
&self,
location: url::Url,
mut data: Box<dyn Iterator<Item = DeltaResult<Box<dyn EngineData>>> + Send>,
_stats_columns: &[String],
) -> DeltaResult<()> {
let store = self.store.clone();

@@ -682,6 +742,7 @@
size,
},
num_records,
..
} = write_metadata;
let expected_location = Url::parse("memory:///data/").unwrap();

@@ -776,7 +837,7 @@
// Test writing through the trait method
let file_url = Url::parse("memory:///test/data.parquet").unwrap();
parquet_handler
.write_parquet_file(file_url.clone(), data_iter)
.write_parquet_file(file_url.clone(), data_iter, &[])
.unwrap();

// Verify we can read the file back
@@ -964,7 +1025,7 @@
// Write the data
let file_url = Url::parse("memory:///roundtrip/test.parquet").unwrap();
parquet_handler
.write_parquet_file(file_url.clone(), data_iter)
.write_parquet_file(file_url.clone(), data_iter, &[])
.unwrap();

// Read it back
@@ -1152,7 +1213,7 @@

// Write the first file
parquet_handler
.write_parquet_file(file_url.clone(), data_iter1)
.write_parquet_file(file_url.clone(), data_iter1, &[])
.unwrap();

// Create second data set with different data
@@ -1168,7 +1229,7 @@

// Overwrite with second file (overwrite=true)
parquet_handler
.write_parquet_file(file_url.clone(), data_iter2)
.write_parquet_file(file_url.clone(), data_iter2, &[])
.unwrap();

// Read back and verify it contains the second data set
@@ -1231,7 +1292,7 @@

// Write the first file
parquet_handler
.write_parquet_file(file_url.clone(), data_iter1)
.write_parquet_file(file_url.clone(), data_iter1, &[])
.unwrap();

// Create second data set
@@ -1247,7 +1308,7 @@

// Write again - should overwrite successfully (new behavior always overwrites)
parquet_handler
.write_parquet_file(file_url.clone(), data_iter2)
.write_parquet_file(file_url.clone(), data_iter2, &[])
.unwrap();

// Verify the file was overwritten with the new data
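All of the tests above pass an empty slice for the new `stats_columns` argument of the synchronous trait method. A caller that wants statistics for specific columns would pass their names instead, as in the hedged sketch below (handler and data iterator set up as in the tests; the column names are placeholders). Note that in this diff the synchronous method still ignores the argument, as the `_stats_columns` binding shows; only the async path above feeds it to the `StatisticsCollector`.

```rust
// Sketch only: `parquet_handler` and `data_iter` as constructed in the tests above;
// "id" and "value" are placeholder column names.
let file_url = url::Url::parse("memory:///stats/data.parquet").unwrap();
let stats_columns = vec!["id".to_string(), "value".to_string()];
parquet_handler
    .write_parquet_file(file_url, data_iter, &stats_columns)
    .unwrap();
```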