Skip to content
Merged
Show file tree
Hide file tree
Changes from 19 commits
Commits
Show all changes
127 commits
Select commit Hold shift + click to select a range
435302e
introduce visitors
sebastiantia Mar 12, 2025
e500a10
remove pub
sebastiantia Mar 12, 2025
19733cd
assert! instead of assert_eq with bool
sebastiantia Mar 12, 2025
87c9f31
log replay for checkpoints
sebastiantia Mar 13, 2025
db5ccd0
rename & some clean up
sebastiantia Mar 13, 2025
42c08c1
remove new path for now
sebastiantia Mar 13, 2025
f91baeb
merge non file action visitor tests
sebastiantia Mar 22, 2025
9fdfba7
mvp for refactor
sebastiantia Mar 24, 2025
d420fd1
these github action checks clog my screen
sebastiantia Mar 24, 2025
9e0e048
base file actions struct
sebastiantia Mar 25, 2025
303444b
combine visitors
sebastiantia Mar 25, 2025
5dbc924
fmt
sebastiantia Mar 26, 2025
b793961
remove old code
sebastiantia Mar 26, 2025
508976f
move FileActionKey
sebastiantia Mar 26, 2025
bccaa17
Merge branch 'main' into checkpoint-visitors
sebastiantia Mar 26, 2025
a23d7cb
merge
sebastiantia Mar 26, 2025
0160ef1
fix whitespace
sebastiantia Mar 26, 2025
aae7046
remove old code
sebastiantia Mar 26, 2025
f574370
refactor more
sebastiantia Mar 26, 2025
a618833
refactor
sebastiantia Mar 26, 2025
7da74b2
more docs
sebastiantia Mar 26, 2025
220a216
invert is_log_batch logic
sebastiantia Mar 26, 2025
9d86911
docs
sebastiantia Mar 26, 2025
e5b0e32
docs
sebastiantia Mar 26, 2025
a5393dc
docs and imports
sebastiantia Mar 26, 2025
a23c651
improve mod doc
sebastiantia Mar 27, 2025
d712d18
improve doc
sebastiantia Mar 27, 2025
e564ae1
docs'
sebastiantia Mar 27, 2025
b14ff19
docs
sebastiantia Mar 27, 2025
a52d484
update
sebastiantia Mar 27, 2025
a243a98
nits
sebastiantia Mar 27, 2025
9f06382
Revert "nits"
sebastiantia Mar 28, 2025
58f38c0
nits
sebastiantia Mar 28, 2025
628546c
refactor
sebastiantia Mar 27, 2025
88cf983
move
sebastiantia Mar 27, 2025
10bb7b5
fix rebase
sebastiantia Mar 28, 2025
4b5a3a9
introduce visitors
sebastiantia Mar 12, 2025
1cb9364
assert! instead of assert_eq with bool
sebastiantia Mar 12, 2025
797a05c
merge non file action visitor tests
sebastiantia Mar 22, 2025
45c698d
base file actions struct
sebastiantia Mar 25, 2025
b062125
combine visitors
sebastiantia Mar 25, 2025
90e46cd
fmt
sebastiantia Mar 26, 2025
3c25392
remove old code
sebastiantia Mar 26, 2025
cba8ed6
move FileActionKey
sebastiantia Mar 26, 2025
28f1fb4
fix merge
sebastiantia Mar 27, 2025
48f831a
doc
sebastiantia Mar 27, 2025
7c3d976
docs
sebastiantia Mar 28, 2025
b2bb0ce
fix rebase
sebastiantia Mar 28, 2025
0054c71
merge
sebastiantia Mar 28, 2025
abc7e1f
merge fixes
sebastiantia Mar 28, 2025
964f294
docs
sebastiantia Mar 30, 2025
c026258
clean up and docs
sebastiantia Mar 30, 2025
88ba96c
docs
sebastiantia Mar 30, 2025
4c98c84
docs
sebastiantia Mar 30, 2025
c7cd2d1
Merge branch 'extract-deduplication-logic-from-addRemoveDedupVisitor'…
sebastiantia Mar 30, 2025
542166c
merge
sebastiantia Apr 1, 2025
655ed1d
fix merge
sebastiantia Apr 1, 2025
6c222a3
crate mod
sebastiantia Apr 1, 2025
30bd7d6
dev vis
sebastiantia Apr 1, 2025
159b0dd
merge
sebastiantia Apr 1, 2025
5777e5a
improve docs
sebastiantia Apr 1, 2025
5e6695b
Merge branch 'extract-log-replay-processing-structure' into checkpoin…
sebastiantia Apr 1, 2025
bdbc3fb
docs
sebastiantia Apr 1, 2025
6491113
breaking merge
sebastiantia Apr 1, 2025
95d0164
accept metadata & protocol param
sebastiantia Apr 1, 2025
51104aa
Merge branch 'checkpoint-visitors' into checkpoint-replay
sebastiantia Apr 1, 2025
7a59eab
improve docs
sebastiantia Apr 1, 2025
e4bc34e
docs
sebastiantia Apr 1, 2025
d24a80c
refactor into checkpoint mod
sebastiantia Apr 1, 2025
1981ab4
refactor into test_utils
sebastiantia Apr 1, 2025
f084424
rebase on test-utils refactor
sebastiantia Apr 2, 2025
6a28d99
merge
sebastiantia Apr 2, 2025
3488318
merge
sebastiantia Apr 2, 2025
c4e5522
redundant docs
sebastiantia Apr 2, 2025
18d1a29
fix doc
sebastiantia Apr 2, 2025
92b7296
Merge branch 'main' into checkpoint-visitors
sebastiantia Apr 2, 2025
6167cf2
merge
sebastiantia Apr 2, 2025
0d8b3c0
hoist selection vector and data skipping filter
sebastiantia Apr 3, 2025
43760a5
docs
sebastiantia Apr 3, 2025
1137be6
refactorg
sebastiantia Apr 3, 2025
6e3d722
docs
sebastiantia Apr 3, 2025
2252cec
match simplification
sebastiantia Apr 3, 2025
09f3930
docs
sebastiantia Apr 3, 2025
3efeef6
docs and rename
sebastiantia Apr 4, 2025
63f0294
nits and renames
sebastiantia Apr 4, 2025
fab97ba
rename
sebastiantia Apr 4, 2025
f79d9a5
priv mod
sebastiantia Apr 4, 2025
568b59e
docs
sebastiantia Apr 5, 2025
bce9384
clean up docs
sebastiantia Apr 6, 2025
87b17d4
polish docs
sebastiantia Apr 6, 2025
d8df2ea
notes
sebastiantia Apr 6, 2025
7f49ccd
fix indentation
sebastiantia Apr 6, 2025
e809306
merge
sebastiantia Apr 6, 2025
c9f6edd
bool flags
sebastiantia Apr 6, 2025
3f8a69b
Merge branch 'extract-log-replay-processing-structure' into checkpoin…
sebastiantia Apr 6, 2025
e520d1f
remove atomic counters
sebastiantia Apr 6, 2025
f31e51d
box counters
sebastiantia Apr 6, 2025
79d6ff8
review
sebastiantia Apr 7, 2025
a3cf0f2
revert
sebastiantia Apr 7, 2025
4416968
rc<refcell>
sebastiantia Apr 7, 2025
20fe7fe
unignore
sebastiantia Apr 7, 2025
29489d7
fix docs
sebastiantia Apr 7, 2025
ae22a6b
merge
sebastiantia Apr 7, 2025
5ccde93
oops
sebastiantia Apr 7, 2025
00c834b
docs
sebastiantia Apr 7, 2025
3c11320
clean up doc & test
sebastiantia Apr 7, 2025
4f61757
clean up docs
sebastiantia Apr 7, 2025
fdd4f68
update docs
sebastiantia Apr 7, 2025
f3257b4
Merge branch 'main' into checkpoint-visitors
sebastiantia Apr 7, 2025
9c992fc
merge
sebastiantia Apr 7, 2025
2aec9c3
remove mod docs in this PR
sebastiantia Apr 8, 2025
2e2062f
update docs
sebastiantia Apr 8, 2025
c92ea56
Merge branch 'checkpoint-visitors' into checkpoint-replay
sebastiantia Apr 8, 2025
fcb289d
docs
sebastiantia Apr 8, 2025
4d2029e
docs
sebastiantia Apr 9, 2025
e0d81ab
docs
sebastiantia Apr 9, 2025
e9de5bc
arc
sebastiantia Apr 9, 2025
0b609d5
merge
sebastiantia Apr 10, 2025
ab0a373
docs
sebastiantia Apr 10, 2025
9a9697a
test coverage
sebastiantia Apr 10, 2025
c7630a3
doc
sebastiantia Apr 10, 2025
48a0153
review
sebastiantia Apr 11, 2025
4a1a1dd
schema spec
sebastiantia Apr 11, 2025
4d48a8a
pub crate
sebastiantia Apr 11, 2025
411b2c4
forgot to include this file
sebastiantia Apr 11, 2025
48d529d
review
sebastiantia Apr 14, 2025
6a672d8
Merge branch 'main' into checkpoint-replay
sebastiantia Apr 14, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
745 changes: 699 additions & 46 deletions kernel/src/actions/visitors.rs

Large diffs are not rendered by default.

248 changes: 248 additions & 0 deletions kernel/src/checkpoints/log_replay.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,248 @@
use std::collections::HashSet;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;

use crate::actions::visitors::CheckpointVisitor;
use crate::engine_data::RowVisitor;
use crate::log_replay::{
apply_processor_to_iterator, FileActionKey, HasSelectionVector, LogReplayProcessor,
};
use crate::{DeltaResult, EngineData};

/// A batch of engine data paired with the selection vector produced by
/// checkpoint log replay. Rows whose entry in `selection_vector` is `true`
/// must be written to the V1 checkpoint file; unselected rows must be ignored.
pub struct CheckpointData {
    // The underlying action batch. Not yet read by any code in this module;
    // retained so a future checkpoint-writing API can consume it.
    #[allow(unused)]
    data: Box<dyn EngineData>,
    // One flag per row of `data`; `true` selects the row for the checkpoint.
    selection_vector: Vec<bool>,
}

impl HasSelectionVector for CheckpointData {
    /// Returns `true` when at least one row of this batch survived filtering,
    /// i.e. the batch still contributes rows to the checkpoint.
    fn has_selected_rows(&self) -> bool {
        self.selection_vector.iter().any(|&row_selected| row_selected)
    }
}

/// `CheckpointLogReplayProcessor` is responsible for filtering actions during log
/// replay to include only those that should be included in a V1 checkpoint.
///
/// State accumulates across batches: the `seen_*` fields record which actions
/// have already been selected so later (older) duplicates can be skipped.
#[allow(unused)] // TODO: Remove once checkpoint_v1 API is implemented
struct CheckpointLogReplayProcessor {
    /// Tracks file actions that have been seen during log replay to avoid duplicates.
    /// Contains (data file path, dv_unique_id) pairs as `FileActionKey` instances.
    seen_file_keys: HashSet<FileActionKey>,

    /// Counter for the total number of actions processed during log replay.
    /// Wrapped in `Arc` so the caller that supplied it can observe the final
    /// value after the replay iterator has been consumed.
    total_actions: Arc<AtomicUsize>,

    /// Counter for the total number of add actions processed during log replay.
    /// Shared with the caller the same way as `total_actions`.
    total_add_actions: Arc<AtomicUsize>,

    /// Indicates whether a protocol action has been seen in the log.
    /// The visitor is seeded with the current value on each batch, so once
    /// `true` this is expected to stay `true` for the rest of the replay.
    seen_protocol: bool,

    /// Indicates whether a metadata action has been seen in the log.
    /// Same monotonic usage as `seen_protocol`.
    seen_metadata: bool,

    /// Set of transaction app IDs that have been processed to avoid duplicates.
    seen_txns: HashSet<String>,

    /// Minimum timestamp for file retention, used for filtering expired tombstones.
    minimum_file_retention_timestamp: i64,
}

impl LogReplayProcessor for CheckpointLogReplayProcessor {
// Define the processing result type as a tuple of the data and selection vector
type ProcessingResult = CheckpointData;

/// This function processes batches of actions in reverse chronological order
/// (from most recent to least recent) and performs the necessary filtering
/// to ensure the checkpoint contains only the actions needed to reconstruct
/// the complete state of the table.
///
/// # Filtering Rules
///
/// The following rules apply when filtering actions:
///
/// 1. Only the most recent protocol and metadata actions are included
/// 2. For each app ID, only the most recent transaction action is included
/// 3. File actions are deduplicated based on path and unique ID
/// 4. Tombstones older than `minimum_file_retention_timestamp` are excluded
fn process_batch(
&mut self,
batch: Box<dyn EngineData>,
is_log_batch: bool,
) -> DeltaResult<Self::ProcessingResult> {
// Initialize selection vector with all rows un-selected
let selection_vector = vec![false; batch.len()];
assert_eq!(
selection_vector.len(),
batch.len(),
"Initial selection vector length does not match actions length"
);

// Create the checkpoint visitor to process actions and update selection vector
let mut visitor = CheckpointVisitor::new(
&mut self.seen_file_keys,
selection_vector,
is_log_batch,
self.minimum_file_retention_timestamp,
self.seen_protocol,
self.seen_metadata,
&mut self.seen_txns,
);

// Process actions and let visitor update selection vector
visitor.visit_rows_of(batch.as_ref())?;

// Update shared counters with file action counts from this batch
self.total_actions.fetch_add(
visitor.total_file_actions + visitor.total_non_file_actions,
Ordering::SeqCst,
);
self.total_add_actions
.fetch_add(visitor.total_add_actions, Ordering::SeqCst);

// Update protocol and metadata seen flags
self.seen_protocol = visitor.seen_protocol;
self.seen_metadata = visitor.seen_metadata;
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Not atomic because it's a monotonic flag change? Once true it never reverts back to false?

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

But where is the race that requires atomicity in the first place?

If it's not safe to do a normal add at L81-87 above, why would it be safe to do a normal assignment here? If two visitors could both update these "seen" flags at the same time, it's quite likely only one of them actually saw P&M, and if the one who saw loses the race to update these flags, we could end up with "seen" as false instead of true.

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I wonder if there even is a thread race, tho? If there were, wouldn't the compiler freak out when we try to pass the visitor unprotected &mut HashSet for seen_file_keys and seen_txns?

Copy link
Collaborator Author

@sebastiantia sebastiantia Apr 7, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I don't think we need atomicity for the fields you mentioned since they are owned directly by the CheckpointLogReplayProcessor. They aren't shared across the iterator boundary the same way the counters are. We use `Arc<AtomicUsize>` for the action counters since the resulting iterator of `checkpoint_actions_iter` is marked with the trait bounds `Send` and `'static`, so mut refs and `Rc<RefCell>`s don't work. Let me know if I'm off the mark!

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

On a second look though, the resulting iterator for `checkpoint_actions_iter` does not need to be marked with those trait bounds in the first place. Will be replacing the counters with `Rc<RefCell>` for single-threaded shared mutability to allow both:
The iterator to update the counters during processing
The caller to observe the final values afterward


Ok(CheckpointData {
data: batch,
selection_vector: visitor.deduplicator.selection_vector(),
})
}

// Get a reference to the set of seen file keys
fn seen_file_keys(&mut self) -> &mut HashSet<FileActionKey> {
&mut self.seen_file_keys
}
}

#[allow(unused)] // TODO: Remove once checkpoint_v1 API is implemented
impl CheckpointLogReplayProcessor {
    /// Creates a new processor with empty deduplication state.
    ///
    /// # Parameters
    /// - `total_actions_counter`: shared counter incremented with the number of
    ///   actions (file and non-file) retained from each processed batch.
    /// - `total_add_actions_counter`: shared counter incremented with the number
    ///   of add actions retained from each processed batch.
    /// - `minimum_file_retention_timestamp`: tombstones (remove actions) older
    ///   than this timestamp are excluded from the checkpoint.
    pub(super) fn new(
        total_actions_counter: Arc<AtomicUsize>,
        total_add_actions_counter: Arc<AtomicUsize>,
        minimum_file_retention_timestamp: i64,
    ) -> Self {
        Self {
            // Nothing has been seen yet: empty dedup sets, flags false.
            seen_file_keys: Default::default(),
            total_actions: total_actions_counter,
            total_add_actions: total_add_actions_counter,
            seen_protocol: false,
            seen_metadata: false,
            seen_txns: Default::default(),
            minimum_file_retention_timestamp,
        }
    }
}

/// Given an iterator of (engine_data, bool) tuples, returns an iterator of
/// `(engine_data, selection_vec)`. Each row that is selected in the returned
/// `engine_data` _must_ be written to the V1 checkpoint file in order to capture
/// the table version's complete state; non-selected rows _must_ be ignored. The
/// boolean flag on each input tuple indicates whether the record batch is a log
/// batch (`true`) or a checkpoint batch.
///
/// Note: the input tuples must be ordered as the actions appear in the log,
/// from most recent to least recent.
#[allow(unused)] // TODO: Remove once checkpoint_v1 API is implemented
pub(crate) fn checkpoint_actions_iter(
    action_iter: impl Iterator<Item = DeltaResult<(Box<dyn EngineData>, bool)>> + Send + 'static,
    total_actions_counter: Arc<AtomicUsize>,
    total_add_actions_counter: Arc<AtomicUsize>,
    minimum_file_retention_timestamp: i64,
) -> impl Iterator<Item = DeltaResult<CheckpointData>> + Send + 'static {
    // Seed a processor with empty dedup state and the caller's shared counters,
    // then hand it to the generic log-replay driver to run over every batch.
    let mut processor = CheckpointLogReplayProcessor::new(
        total_actions_counter,
        total_add_actions_counter,
        minimum_file_retention_timestamp,
    );
    apply_processor_to_iterator(processor, action_iter)
}

#[cfg(test)]
mod tests {
    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::Arc;

    use crate::arrow::array::StringArray;
    use crate::checkpoints::log_replay::checkpoint_actions_iter;
    use crate::utils::test_utils::parse_json_batch;
    use crate::DeltaResult;

    /// Tests the end-to-end processing of multiple batches with various action types.
    /// This tests the integration of the visitors with the main iterator function.
    /// More granular testing is performed in the individual visitor tests.
    #[test]
    fn test_v1_checkpoint_actions_iter_multi_batch_integration() -> DeltaResult<()> {
        // Shared counters; final values are checked after the iterator is drained.
        let total_actions_counter = Arc::new(AtomicUsize::new(0));
        let total_add_actions_counter = Arc::new(AtomicUsize::new(0));

        // First (most recent) batch: protocol, metadata, and two add files —
        // all should be selected.
        let json_strings1: StringArray = vec![
            r#"{"protocol":{"minReaderVersion":1,"minWriterVersion":2}}"#,
            r#"{"metaData":{"id":"test2","format":{"provider":"parquet","options":{}},"schemaString":"{\"type\":\"struct\",\"fields\":[{\"name\":\"c1\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}},{\"name\":\"c2\",\"type\":\"string\",\"nullable\":true,\"metadata\":{}},{\"name\":\"c3\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}}]}","partitionColumns":["c1","c2"],"configuration":{},"createdTime":1670892997849}}"#,
            r#"{"add":{"path":"file1","partitionValues":{"c1":"4","c2":"c"},"size":452,"modificationTime":1670892998135,"dataChange":true,"stats":"{\"numRecords\":1,\"minValues\":{\"c3\":5},\"maxValues\":{\"c3\":5},\"nullCount\":{\"c3\":0}}"}}"#,
            r#"{"add":{"path":"file2","partitionValues":{"c1":"4","c2":"c"},"size":452,"modificationTime":1670892998135,"dataChange":true,"stats":"{\"numRecords\":1,\"minValues\":{\"c3\":5},\"maxValues\":{\"c3\":5},\"nullCount\":{\"c3\":0}}"}}"#,
        ].into();

        // Second batch: duplicates of already-seen actions plus new ones.
        let json_strings2: StringArray = vec![
            // Protocol and metadata should be skipped as duplicates
            r#"{"protocol":{"minReaderVersion":1,"minWriterVersion":2}}"#,
            r#"{"metaData":{"id":"test1","format":{"provider":"parquet","options":{}},"schemaString":"{\"type\":\"struct\",\"fields\":[{\"name\":\"c1\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}},{\"name\":\"c2\",\"type\":\"string\",\"nullable\":true,\"metadata\":{}},{\"name\":\"c3\",\"type\":\"integer\",\"nullable\":true,\"metadata\":{}}]}","partitionColumns":["c1","c2"],"configuration":{},"createdTime":1670892997849}}"#,
            // New file — should be selected
            r#"{"add":{"path":"file3","partitionValues":{},"size":800,"modificationTime":102,"dataChange":true}}"#,
            // Duplicate file should be skipped
            r#"{"add":{"path":"file1","partitionValues":{"c1":"4","c2":"c"},"size":452,"modificationTime":1670892998135,"dataChange":true,"stats":"{\"numRecords\":1,\"minValues\":{\"c3\":5},\"maxValues\":{\"c3\":5},\"nullCount\":{\"c3\":0}}"}}"#,
            // Transaction for app1 (first occurrence) — should be selected
            r#"{"txn":{"appId":"app1","version":1,"lastUpdated":123456789}}"#
        ].into();

        // Third batch: only duplicate actions, so it should be filtered out
        // completely (no result emitted for it).
        let json_strings3: StringArray = vec![
            r#"{"add":{"path":"file1","partitionValues":{"c1":"4","c2":"c"},"size":452,"modificationTime":1670892998135,"dataChange":true,"stats":"{\"numRecords\":1,\"minValues\":{\"c3\":5},\"maxValues\":{\"c3\":5},\"nullCount\":{\"c3\":0}}"}}"#,
            r#"{"add":{"path":"file2","partitionValues":{"c1":"4","c2":"c"},"size":452,"modificationTime":1670892998135,"dataChange":true,"stats":"{\"numRecords\":1,\"minValues\":{\"c3\":5},\"maxValues\":{\"c3\":5},\"nullCount\":{\"c3\":0}}"}}"#,
        ].into();

        // All batches are commit (log) batches, ordered newest to oldest.
        let input_batches = vec![
            Ok((parse_json_batch(json_strings1), true)),
            Ok((parse_json_batch(json_strings2), true)),
            Ok((parse_json_batch(json_strings3), true)),
        ];

        // Run the iterator
        let results: Vec<_> = checkpoint_actions_iter(
            input_batches.into_iter(),
            total_actions_counter.clone(),
            total_add_actions_counter.clone(),
            0,
        )
        .collect::<Result<Vec<_>, _>>()?;

        // Expect two batches in results (the third batch is filtered out entirely)
        assert_eq!(results.len(), 2);

        // First batch should have all rows selected
        let checkpoint_data = &results[0];
        assert_eq!(
            checkpoint_data.selection_vector,
            vec![true, true, true, true]
        );

        // Second batch should have only the new file and the transaction selected
        let checkpoint_data = &results[1];
        assert_eq!(
            checkpoint_data.selection_vector,
            vec![false, false, true, false, true]
        );

        // Verify counters
        // 6 total actions (4 from batch1 + 2 from batch2 + 0 from batch3)
        assert_eq!(total_actions_counter.load(Ordering::Relaxed), 6);

        // 3 add actions (2 from batch1 + 1 from batch2)
        assert_eq!(total_add_actions_counter.load(Ordering::Relaxed), 3);

        Ok(())
    }
}
1 change: 1 addition & 0 deletions kernel/src/checkpoints/mod.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
pub mod log_replay;
19 changes: 3 additions & 16 deletions kernel/src/engine/arrow_data.rs
Original file line number Diff line number Diff line change
Expand Up @@ -294,27 +294,14 @@ impl ArrowEngineData {

#[cfg(test)]
mod tests {
use std::sync::Arc;

use crate::arrow::array::{RecordBatch, StringArray};
use crate::arrow::datatypes::{DataType, Field, Schema as ArrowSchema};

use crate::arrow::array::StringArray;
use crate::utils::test_utils::string_array_to_engine_data;
use crate::{
actions::{get_log_schema, Metadata, Protocol},
engine::sync::SyncEngine,
DeltaResult, Engine, EngineData,
DeltaResult, Engine,
};

use super::ArrowEngineData;

fn string_array_to_engine_data(string_array: StringArray) -> Box<dyn EngineData> {
let string_field = Arc::new(Field::new("a", DataType::Utf8, true));
let schema = Arc::new(ArrowSchema::new(vec![string_field]));
let batch = RecordBatch::try_new(schema, vec![Arc::new(string_array)])
.expect("Can't convert to record batch");
Box::new(ArrowEngineData::new(batch))
}

#[test]
fn test_md_extract() -> DeltaResult<()> {
let engine = SyncEngine::new();
Expand Down
9 changes: 1 addition & 8 deletions kernel/src/engine/default/json.rs
Original file line number Diff line number Diff line change
Expand Up @@ -257,6 +257,7 @@ mod tests {
use crate::engine::default::executor::tokio::{
TokioBackgroundExecutor, TokioMultiThreadExecutor,
};
use crate::utils::test_utils::string_array_to_engine_data;
use futures::future;
use itertools::Itertools;
use object_store::local::LocalFileSystem;
Expand Down Expand Up @@ -471,14 +472,6 @@ mod tests {
}
}

fn string_array_to_engine_data(string_array: StringArray) -> Box<dyn EngineData> {
let string_field = Arc::new(Field::new("a", DataType::Utf8, true));
let schema = Arc::new(ArrowSchema::new(vec![string_field]));
let batch = RecordBatch::try_new(schema, vec![Arc::new(string_array)])
.expect("Can't convert to record batch");
Box::new(ArrowEngineData::new(batch))
}

#[test]
fn test_parse_json() {
let store = Arc::new(LocalFileSystem::new());
Expand Down
2 changes: 2 additions & 0 deletions kernel/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -74,9 +74,11 @@ use url::Url;
use self::schema::{DataType, SchemaRef};

pub mod actions;
pub mod checkpoints;
pub mod engine_data;
pub mod error;
pub mod expressions;
pub mod log_replay;
pub mod scan;
pub mod schema;
pub mod snapshot;
Expand Down
Loading
Loading