Skip to content

Commit 2f8d800

Browse files
liuchengxu and claude
committed
Refactor trie node handling to use enum for type safety
- Replace three separate fields (trie_node_writer, trie_nodes, trie_nodes_written) with TrieNodeImportMode enum that enforces mutual exclusivity at compile time

- Change import_state_from_key_values from associated function to method taking &self since it always uses self.executor

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <[email protected]>
1 parent e20e792 commit 2f8d800

File tree

2 files changed

+93
-78
lines changed

2 files changed

+93
-78
lines changed

substrate/client/network/sync/src/strategy/state_sync.rs

Lines changed: 47 additions & 41 deletions
Original file line number | Diff line number | Diff line change
@@ -139,20 +139,22 @@ impl<B: BlockT> StateSyncMetadata<B> {
139139
}
140140
}
141141

142+
/// Defines how trie nodes are imported during state sync.
143+
enum TrieNodeImportMode {
144+
/// Incremental mode: write nodes directly to storage as they arrive.
145+
Incremental { writer: Arc<dyn TrieNodeWriter>, nodes_written: u64 },
146+
/// Legacy mode: accumulate nodes in memory for later import.
147+
Accumulated { nodes: Vec<(Vec<u8>, Vec<u8>)> },
148+
}
149+
142150
/// State sync state machine.
143151
///
144152
/// Accumulates partial state data until it is ready to be imported.
145153
pub struct StateSync<B: BlockT, Client> {
146154
metadata: StateSyncMetadata<B>,
147155
state: HashMap<Vec<u8>, (Vec<(Vec<u8>, Vec<u8>)>, Vec<Vec<u8>>)>,
148-
/// Optional writer for incremental trie node import.
149-
/// When set, trie nodes are written directly to storage as each chunk arrives.
150-
/// When None, trie nodes are accumulated in memory (legacy behavior).
151-
trie_node_writer: Option<Arc<dyn TrieNodeWriter>>,
152-
/// Accumulated trie nodes when no writer is provided (legacy fallback).
153-
trie_nodes: Vec<(Vec<u8>, Vec<u8>)>,
154-
/// Count of trie nodes written incrementally.
155-
trie_nodes_written: u64,
156+
/// How trie nodes are imported during sync.
157+
trie_node_import_mode: TrieNodeImportMode,
156158
client: Arc<Client>,
157159
}
158160

@@ -174,6 +176,11 @@ where
174176
skip_proof: bool,
175177
trie_node_writer: Option<Arc<dyn TrieNodeWriter>>,
176178
) -> Self {
179+
let trie_node_import_mode = match trie_node_writer {
180+
Some(writer) => TrieNodeImportMode::Incremental { writer, nodes_written: 0 },
181+
None => TrieNodeImportMode::Accumulated { nodes: Vec::new() },
182+
};
183+
177184
Self {
178185
client,
179186
metadata: StateSyncMetadata {
@@ -186,30 +193,29 @@ where
186193
skip_proof,
187194
},
188195
state: HashMap::default(),
189-
trie_node_writer,
190-
trie_nodes: Vec::new(),
191-
trie_nodes_written: 0,
196+
trie_node_import_mode,
192197
}
193198
}
194199

195200
/// Process trie nodes from a chunk - either write incrementally or accumulate.
196201
fn process_trie_nodes(&mut self, chunk_nodes: Vec<(Vec<u8>, Vec<u8>)>) {
197202
let node_count = chunk_nodes.len() as u64;
198203

199-
if let Some(ref writer) = self.trie_node_writer {
200-
// Incremental mode: write directly to storage
201-
if let Err(e) = writer.write_trie_nodes(chunk_nodes) {
202-
// Log error but continue - the final state root check will catch issues
203-
debug!(
204-
target: LOG_TARGET,
205-
"Failed to write trie nodes incrementally: {}", e
206-
);
207-
} else {
208-
self.trie_nodes_written += node_count;
209-
}
210-
} else {
211-
// Legacy mode: accumulate in memory
212-
self.trie_nodes.extend(chunk_nodes);
204+
match &mut self.trie_node_import_mode {
205+
TrieNodeImportMode::Incremental { writer, nodes_written } => {
206+
if let Err(e) = writer.write_trie_nodes(chunk_nodes) {
207+
// Log error but continue - the final state root check will catch issues
208+
debug!(
209+
target: LOG_TARGET,
210+
"Failed to write trie nodes incrementally: {e}"
211+
);
212+
} else {
213+
*nodes_written += node_count;
214+
}
215+
},
216+
TrieNodeImportMode::Accumulated { nodes } => {
217+
nodes.extend(chunk_nodes);
218+
},
213219
}
214220
}
215221

@@ -356,22 +362,22 @@ where
356362
let target_hash = self.metadata.target_hash();
357363

358364
// Determine trie_nodes based on whether incremental writing was used
359-
let trie_nodes = if self.trie_node_writer.is_some() {
360-
// Incremental mode: nodes already written to storage
361-
info!(
362-
target: LOG_TARGET,
363-
"State sync complete: {} trie nodes written incrementally",
364-
self.trie_nodes_written
365-
);
366-
None
367-
} else {
368-
// Legacy mode: return accumulated nodes
369-
info!(
370-
target: LOG_TARGET,
371-
"State sync complete: {} trie nodes accumulated",
372-
self.trie_nodes.len()
373-
);
374-
Some(std::mem::take(&mut self.trie_nodes))
365+
let trie_nodes = match &mut self.trie_node_import_mode {
366+
TrieNodeImportMode::Incremental { nodes_written, .. } => {
367+
info!(
368+
target: LOG_TARGET,
369+
"State sync complete: {nodes_written} trie nodes written incrementally"
370+
);
371+
None
372+
},
373+
TrieNodeImportMode::Accumulated { ref mut nodes } => {
374+
info!(
375+
target: LOG_TARGET,
376+
"State sync complete: {} trie nodes accumulated",
377+
nodes.len()
378+
);
379+
Some(std::mem::take(nodes))
380+
},
375381
};
376382

377383
ImportResult::Import(

substrate/client/service/src/client/client.rs

Lines changed: 46 additions & 37 deletions
Original file line numberDiff line numberDiff line change
@@ -576,9 +576,9 @@ where
576576

577577
// the block is lower than our last finalized block so it must revert
578578
// finality, refusing import.
579-
if status == blockchain::BlockStatus::Unknown &&
580-
*import_headers.post().number() <= info.finalized_number &&
581-
!gap_block
579+
if status == blockchain::BlockStatus::Unknown
580+
&& *import_headers.post().number() <= info.finalized_number
581+
&& !gap_block
582582
{
583583
return Err(sp_blockchain::Error::NotInFinalizedChain);
584584
}
@@ -587,8 +587,9 @@ where
587587
// but the general goal is to only make notifications when we are already fully synced
588588
// and get a new chain head.
589589
let make_notifications = match origin {
590-
BlockOrigin::NetworkBroadcast | BlockOrigin::Own | BlockOrigin::ConsensusBroadcast =>
591-
true,
590+
BlockOrigin::NetworkBroadcast | BlockOrigin::Own | BlockOrigin::ConsensusBroadcast => {
591+
true
592+
},
592593
BlockOrigin::Genesis | BlockOrigin::NetworkInitialSync | BlockOrigin::File => false,
593594
};
594595

@@ -627,19 +628,17 @@ where
627628
None
628629
} else {
629630
// Fall back to key-value import if no trie nodes
630-
Self::import_state_from_key_values(
631+
self.import_state_from_key_values(
631632
changes.state,
632633
&mut operation.op,
633-
&self.executor,
634634
import_headers.post().state_root(),
635635
)?
636636
}
637637
} else {
638638
// Fall back to key-value import
639-
Self::import_state_from_key_values(
639+
self.import_state_from_key_values(
640640
changes.state,
641641
&mut operation.op,
642-
&self.executor,
643642
import_headers.post().state_root(),
644643
)?
645644
}
@@ -663,11 +662,12 @@ where
663662
)?;
664663
}
665664

666-
let is_new_best = !gap_block &&
667-
(finalized ||
668-
match fork_choice {
669-
ForkChoiceStrategy::LongestChain =>
670-
import_headers.post().number() > &info.best_number,
665+
let is_new_best = !gap_block
666+
&& (finalized
667+
|| match fork_choice {
668+
ForkChoiceStrategy::LongestChain => {
669+
import_headers.post().number() > &info.best_number
670+
},
671671
ForkChoiceStrategy::Custom(v) => v,
672672
});
673673

@@ -786,18 +786,21 @@ where
786786
let state_action = std::mem::replace(&mut import_block.state_action, StateAction::Skip);
787787
let (enact_state, storage_changes) = match (self.block_status(*parent_hash)?, state_action)
788788
{
789-
(BlockStatus::KnownBad, _) =>
790-
return Ok(PrepareStorageChangesResult::Discard(ImportResult::KnownBad)),
789+
(BlockStatus::KnownBad, _) => {
790+
return Ok(PrepareStorageChangesResult::Discard(ImportResult::KnownBad))
791+
},
791792
(
792793
BlockStatus::InChainPruned,
793794
StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes(_)),
794795
) => return Ok(PrepareStorageChangesResult::Discard(ImportResult::MissingState)),
795796
(_, StateAction::ApplyChanges(changes)) => (true, Some(changes)),
796-
(BlockStatus::Unknown, _) =>
797-
return Ok(PrepareStorageChangesResult::Discard(ImportResult::UnknownParent)),
797+
(BlockStatus::Unknown, _) => {
798+
return Ok(PrepareStorageChangesResult::Discard(ImportResult::UnknownParent))
799+
},
798800
(_, StateAction::Skip) => (false, None),
799-
(BlockStatus::InChainPruned, StateAction::Execute) =>
800-
return Ok(PrepareStorageChangesResult::Discard(ImportResult::MissingState)),
801+
(BlockStatus::InChainPruned, StateAction::Execute) => {
802+
return Ok(PrepareStorageChangesResult::Discard(ImportResult::MissingState))
803+
},
801804
(BlockStatus::InChainPruned, StateAction::ExecuteIfPossible) => (false, None),
802805
(_, StateAction::Execute) => (true, None),
803806
(_, StateAction::ExecuteIfPossible) => (true, None),
@@ -1096,12 +1099,13 @@ where
10961099

10971100
let hash_and_number = self.backend.blockchain().number(hash)?.map(|n| (hash, n));
10981101
match hash_and_number {
1099-
Some((hash, number)) =>
1102+
Some((hash, number)) => {
11001103
if self.backend.have_state_at(hash, number) {
11011104
Ok(BlockStatus::InChainWithState)
11021105
} else {
11031106
Ok(BlockStatus::InChainPruned)
1104-
},
1107+
}
1108+
},
11051109
None => Ok(BlockStatus::Unknown),
11061110
}
11071111
}
@@ -1167,9 +1171,9 @@ where
11671171

11681172
/// Helper to import state from key-values (original reset_storage path).
11691173
fn import_state_from_key_values(
1174+
&self,
11701175
state: KeyValueStates,
11711176
op: &mut B::BlockImportOperation,
1172-
executor: &E,
11731177
expected_state_root: &Block::Hash,
11741178
) -> sp_blockchain::Result<Option<(StorageCollection, ChildStorageCollection)>> {
11751179
let mut storage = sp_storage::Storage::default();
@@ -1183,8 +1187,9 @@ where
11831187
let storage_key = PrefixedStorageKey::new_ref(&parent_storage);
11841188
let storage_key = match ChildType::from_prefixed_key(storage_key) {
11851189
Some((ChildType::ParentKeyId, storage_key)) => storage_key,
1186-
None =>
1187-
return Err(Error::Backend("Invalid child storage key.".to_string())),
1190+
None => {
1191+
return Err(Error::Backend("Invalid child storage key.".to_string()))
1192+
},
11881193
};
11891194
let entry = storage
11901195
.children_default
@@ -1202,7 +1207,7 @@ where
12021207

12031208
// This is used by fast sync for runtime version to be resolvable from changes.
12041209
let state_version =
1205-
resolve_state_version_from_wasm::<_, HashingFor<Block>>(&storage, executor)?;
1210+
resolve_state_version_from_wasm::<_, HashingFor<Block>>(&storage, &self.executor)?;
12061211
let state_root = op.reset_storage(storage, state_version)?;
12071212
if state_root != *expected_state_root {
12081213
// State root mismatch when importing state. This should not happen in
@@ -1292,8 +1297,9 @@ where
12921297
let child_info = |storage_key: &Vec<u8>| -> sp_blockchain::Result<ChildInfo> {
12931298
let storage_key = PrefixedStorageKey::new_ref(storage_key);
12941299
match ChildType::from_prefixed_key(storage_key) {
1295-
Some((ChildType::ParentKeyId, storage_key)) =>
1296-
Ok(ChildInfo::new_default(storage_key)),
1300+
Some((ChildType::ParentKeyId, storage_key)) => {
1301+
Ok(ChildInfo::new_default(storage_key))
1302+
},
12971303
None => Err(Error::Backend("Invalid child storage key.".to_string())),
12981304
}
12991305
};
@@ -1353,9 +1359,9 @@ where
13531359
}
13541360
total_size += size;
13551361

1356-
if current_child.is_none() &&
1357-
sp_core::storage::well_known_keys::is_child_storage_key(next_key.as_slice()) &&
1358-
!child_roots.contains(value.as_slice())
1362+
if current_child.is_none()
1363+
&& sp_core::storage::well_known_keys::is_child_storage_key(next_key.as_slice())
1364+
&& !child_roots.contains(value.as_slice())
13591365
{
13601366
child_roots.insert(value.clone());
13611367
switch_child_key = Some((next_key.clone(), value.clone()));
@@ -1860,10 +1866,12 @@ where
18601866
.block_status(hash)
18611867
.map_err(|e| ConsensusError::ClientImport(e.to_string()))?
18621868
{
1863-
BlockStatus::InChainWithState | BlockStatus::Queued =>
1864-
return Ok(ImportResult::AlreadyInChain),
1865-
BlockStatus::InChainPruned if !import_existing =>
1866-
return Ok(ImportResult::AlreadyInChain),
1869+
BlockStatus::InChainWithState | BlockStatus::Queued => {
1870+
return Ok(ImportResult::AlreadyInChain)
1871+
},
1872+
BlockStatus::InChainPruned if !import_existing => {
1873+
return Ok(ImportResult::AlreadyInChain)
1874+
},
18671875
BlockStatus::InChainPruned => {},
18681876
BlockStatus::Unknown => {},
18691877
BlockStatus::KnownBad => return Ok(ImportResult::KnownBad),
@@ -2029,8 +2037,9 @@ where
20292037

20302038
fn block(&self, hash: Block::Hash) -> sp_blockchain::Result<Option<SignedBlock<Block>>> {
20312039
Ok(match (self.header(hash)?, self.body(hash)?, self.justifications(hash)?) {
2032-
(Some(header), Some(extrinsics), justifications) =>
2033-
Some(SignedBlock { block: Block::new(header, extrinsics), justifications }),
2040+
(Some(header), Some(extrinsics), justifications) => {
2041+
Some(SignedBlock { block: Block::new(header, extrinsics), justifications })
2042+
},
20342043
_ => None,
20352044
})
20362045
}

0 commit comments

Comments (0)