1 change: 1 addition & 0 deletions Cargo.lock

Some generated files are not rendered by default.

12 changes: 8 additions & 4 deletions polkadot/node/core/approval-voting/src/import.rs
@@ -320,6 +320,7 @@ pub struct BlockImportedCandidates {
pub block_hash: Hash,
pub block_number: BlockNumber,
pub block_tick: Tick,
pub session_index: SessionIndex,
pub imported_candidates: Vec<(CandidateHash, CandidateEntry)>,
}

@@ -341,7 +342,7 @@ pub(crate) async fn handle_new_head<
>(
sender: &mut Sender,
approval_voting_sender: &mut AVSender,
state: &State,
state: &mut State,
db: &mut OverlayedBackend<'_, B>,
session_info_provider: &mut RuntimeInfo,
head: Hash,
@@ -580,6 +581,7 @@ pub(crate) async fn handle_new_head<
block_hash,
block_number: block_header.number,
block_tick,
session_index,
imported_candidates: candidate_entries
.into_iter()
.map(|(h, e)| (h, e.into()))
@@ -653,7 +655,7 @@ pub(crate) mod tests {
}
}

fn blank_state() -> State {
pub fn blank_state() -> State {
State {
keystore: Arc::new(LocalKeystore::in_memory()),
slot_duration_millis: 6_000,
@@ -663,6 +665,8 @@ pub(crate) mod tests {
MAX_BLOCKS_WITH_ASSIGNMENT_TIMESTAMPS,
)),
no_show_stats: Default::default(),
last_session_index: None,
approvals_usage: Default::default(),
}
}

@@ -1356,7 +1360,7 @@ pub(crate) mod tests {
.map(|(r, c, g)| CandidateEvent::CandidateIncluded(r, Vec::new().into(), c, g))
.collect::<Vec<_>>();

let (state, mut session_info_provider) = single_session_state();
let (mut state, mut session_info_provider) = single_session_state();
overlay_db.write_block_entry(
v3::BlockEntry {
block_hash: parent_hash,
@@ -1385,7 +1389,7 @@ pub(crate) mod tests {
let result = handle_new_head(
ctx.sender(),
&mut approval_voting_sender,
&state,
&mut state,
&mut overlay_db,
&mut session_info_provider,
hash,
146 changes: 143 additions & 3 deletions polkadot/node/core/approval-voting/src/lib.rs
@@ -85,7 +85,6 @@ use std::{
sync::Arc,
time::Duration,
};

use schnellru::{ByLength, LruMap};

use approval_checking::RequiredTranches;
@@ -891,15 +890,23 @@ struct State {
keystore: Arc<LocalKeystore>,
slot_duration_millis: u64,
clock: Arc<dyn Clock + Send + Sync>,
last_session_index: Option<SessionIndex>,
assignment_criteria: Box<dyn AssignmentCriteria + Send + Sync>,
// Per block, candidate records about how long we take until we gather enough
// assignments, this is relevant because it gives us a good idea about how many
// tranches we trigger and why.
per_block_assignments_gathering_times:
LruMap<BlockNumber, HashMap<(Hash, CandidateHash), AssignmentGatheringRecord>>,
no_show_stats: NoShowStats,

candidates_per_session: HashMap<SessionIndex, Vec<CandidateHash>>,

// Amount of approvals usage per session, per validator index, where the
// i-th entry in the vector corresponds to the validator with index `i`.
approvals_usage: HashMap<SessionIndex, Vec<ApprovalTallyLine>>,
}


// Regularly dump the no-show stats at this block number frequency.
const NO_SHOW_DUMP_FREQUENCY: BlockNumber = 50;
// The maximum number of validators we record no-shows for, per candidate.
@@ -982,7 +989,7 @@ impl State {
block_entry.parent_hash(),
block_entry.session(),
)
.await
.await
{
Some(s) => s,
None => return None,
@@ -1252,11 +1259,14 @@ where
keystore: subsystem.keystore,
slot_duration_millis: subsystem.slot_duration_millis,
clock: subsystem.clock,
last_session_index: None,
assignment_criteria,
per_block_assignments_gathering_times: LruMap::new(ByLength::new(
MAX_BLOCKS_WITH_ASSIGNMENT_TIMESTAMPS,
)),
no_show_stats: NoShowStats::default(),
candidates_per_session: Default::default(),
approvals_usage: Default::default(),
};

let mut last_finalized_height: Option<BlockNumber> = {
@@ -2053,6 +2063,11 @@ async fn handle_from_overseer<
for (c_hash, c_entry) in block_batch.imported_candidates {
metrics.on_candidate_imported();

state.candidates_per_session
.entry(c_entry.session)
.or_default()
.push(c_hash.clone());

let our_tranche = c_entry
.approval_entry(&block_batch.block_hash)
.and_then(|a| a.our_assignment().map(|a| a.tranche()));
@@ -2088,8 +2103,37 @@ async fn handle_from_overseer<
},
FromOrchestra::Signal(OverseerSignal::BlockFinalized(block_hash, block_number)) => {
gum::debug!(target: LOG_TARGET, ?block_hash, ?block_number, "Block finalized");
*last_finalized_height = Some(block_number);
let finalized_tip = db.load_block_entry(&block_hash)?.unwrap();


let is_new_session = match state.last_session_index {
Some(last_session) if finalized_tip.session() > last_session => true,
Some(_) => false,
None => {
// In case of `None` we don't need to share the approvals usage,
// as we are still within an ongoing session; just record it.
state.last_session_index = Some(finalized_tip.session());
false
},
};

if is_new_session {
// A new session has started, so compute the
// previous session's approvals usage and share it.
compute_approvals_usage_and_share(
sender,
approval_voting_sender,
state,
db,
finalized_tip.session(),
(block_hash, block_number),
last_finalized_height,
).await?;

state.last_session_index = Some(finalized_tip.session());
};

*last_finalized_height = Some(block_number);
crate::ops::canonicalize(db, block_number, block_hash)
.map_err(|e| SubsystemError::with_origin("db", e))?;

@@ -2324,6 +2368,75 @@ async fn get_approval_signatures_for_candidate<
Ok(())
}

#[overseer::contextbounds(ApprovalVoting, prefix = self::overseer)]
async fn compute_approvals_usage_and_share<
Sender: SubsystemSender<ChainApiMessage>,
ADSender: SubsystemSender<ApprovalDistributionMessage>
>(
sender: &mut Sender,
approval_voting_sender: &mut ADSender,
state: &mut State,
db: &mut OverlayedBackend<'_, impl Backend>,
current_session: u32,
current_finalized: (Hash, u32),
last_finalized_height: &mut Option<BlockNumber>
) -> SubsystemResult<()> {

// Example: if the last finalized height we processed was 1 and block 11 is
// now finalized, we fetch the 10 ancestors of block 11, i.e. blocks 1..=10.

let retrieve_size: usize = last_finalized_height
.clone()
.map_or(current_finalized.1, |b| current_finalized.1 - (b as u32)) as usize;

// TODO: change fetch_ancestry to return not just this range but all the
// block hashes of the given session, so we can compute `on_finalized_block`
// correctly for all of the session's blocks
let finalized_hashes: HashSet<Hash> = fetch_ancestry(
sender,
current_finalized.0,
retrieve_size,
).await?.into_iter().collect();

let mut prev_session_approvals: HashMap<ValidatorIndex, u32> = HashMap::new();
let prev = current_session.saturating_sub(1) as SessionIndex;
let candidates = match state.candidates_per_session.remove(&prev) {
Some(candidates) => candidates,
_ => vec![],
};

for c_hash in candidates {
match db.load_candidate_entry(&c_hash)? {
Some(candidate) => {
// TODO: fix the candidate check, as it does not
// correctly check every finalized block of the session
let on_finalized_block = candidate.block_assignments
.keys()
.any(|b_hash| finalized_hashes.contains(b_hash));

if on_finalized_block {
for idx in candidate.approvals.iter_ones() {
Review comment:

This is a bit trickier than that: we can't count just the approvals, because in that case validators can game this by triggering all their tranches and approving candidates even though those approvals weren't used. So we need to count only the approvals that were actually used for approving the candidate.

See https://github.com/paritytech/polkadot-sdk/blob/c1a31e3505c0c4e01b9d2daad5f4d19b220345ec/polkadot/node/core/approval-voting/src/approval_checking.rs#L415 for understanding how.

Review comment from @burdges, Sep 17, 2025:

Appears this was not clear enough in the RFC. I've explained the notion of a "useful" approval check more clearly before, but maybe it was lost in the conversion to RFC format.

A validator v thinks an approval vote x on candidate c by validator u is "useful" if the ELVES approval gadget on v counted x during the run where it approved the candidate c. We only count "useful" approval checks here:

https://github.com/polkadot-fellows/RFCs/pull/119/files#diff-6df38c3dde24813f6d40274ca84be564308822d181edde1f40d2e787d741f8b3R205

The "useful" approval votes should be computed by returning some Vec<AuthId> from the ELVES approvals gadget, or maybe by rerunning the gadget in some "accumulate" mode that computes this Vec, if we're afraid that computing the Vec each time the loop runs causes too many allocations.

We consider this notion of "useful" approval check in the availability part too, because if the approval check is not useful then we do not worry about those chunks being paid.

Review comment:

Actually it was somewhat clear; the paragraph above spelled everything out, but I've edited the RFC to be much clearer.

prev_session_approvals
.entry(ValidatorIndex(idx as u32))
.and_modify(|e| *e += 1)
.or_insert(1);
}
}
},
_ => {},
}
};

if !prev_session_approvals.is_empty() {
// Share the previous session's approvals usage with approval-distribution.
approval_voting_sender
.send_message(ApprovalDistributionMessage::DistributeUsages(
prev,
prev_session_approvals,
))
.await;
}

Ok(())
}
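Following the review comments above, a minimal sketch of the suggested "accumulate" style computation of "useful" approvals might look roughly like the following. The helper name (useful_approvals), its inputs (assignments_by_tranche, has_approved, needed_approvals) and the simplified tranche walk are illustrative assumptions, not the actual ELVES / approval_checking API; a real implementation would also need to account for no-shows and empty tranches.

// Illustrative sketch only: walk tranches in ascending order and record the
// validators whose approvals were counted up to the point where the candidate
// became approved. Approvals from later tranches are ignored, so "extra"
// approvals that were not needed do not inflate anyone's tally.
fn useful_approvals(
    assignments_by_tranche: &[Vec<u32>], // validator indices assigned, per tranche
    has_approved: impl Fn(u32) -> bool,  // whether a given validator approved the candidate
    needed_approvals: usize,
) -> Vec<u32> {
    let mut useful = Vec::new();
    for tranche in assignments_by_tranche {
        for &validator in tranche {
            if has_approved(validator) {
                useful.push(validator);
            }
        }
        // Stop at the first tranche where enough approvals have been gathered;
        // approvals from later tranches were not needed to approve the candidate.
        if useful.len() >= needed_approvals {
            break;
        }
    }
    useful
}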

#[overseer::contextbounds(ApprovalVoting, prefix = self::overseer)]
async fn handle_approved_ancestor<Sender: SubsystemSender<ChainApiMessage>>(
sender: &mut Sender,
@@ -3971,6 +4084,33 @@ async fn maybe_create_signature<
Ok(None)
}

// Fetch ancestors in descending order, up to the amount requested.
#[overseer::contextbounds(ApprovalVoting, prefix = self::overseer)]
async fn fetch_ancestry<Sender: SubsystemSender<ChainApiMessage>>(
sender: &mut Sender,
relay_hash: Hash,
ancestors: usize,
) -> SubsystemResult<Vec<Hash>> {
if ancestors == 0 {
return Ok(Vec::new())
}

let (tx, rx) = oneshot::channel();
sender.send_message(ChainApiMessage::Ancestors {
hash: relay_hash,
k: ancestors,
response_channel: tx,
}).await;

let hashes = match rx.await {
Ok(Ok(hashes)) => hashes,
Ok(Err(e)) => return Err(SubsystemError::with_origin("chain-api", e)),
Err(e) => return Err(SubsystemError::with_origin("chain-api", e)),
};

Ok(hashes)
}

// Sign an approval vote. Fails if the key isn't present in the store.
fn sign_approval(
keystore: &LocalKeystore,
4 changes: 4 additions & 0 deletions polkadot/node/core/approval-voting/src/tests.rs
@@ -5638,6 +5638,8 @@ fn test_gathering_assignments_statements() {
MAX_BLOCKS_WITH_ASSIGNMENT_TIMESTAMPS,
)),
no_show_stats: NoShowStats::default(),
last_session_index: None,
approvals_usage: Default::default(),
};

for i in 0..200i32 {
Expand Down Expand Up @@ -5732,6 +5734,8 @@ fn test_observe_assignment_gathering_status() {
MAX_BLOCKS_WITH_ASSIGNMENT_TIMESTAMPS,
)),
no_show_stats: NoShowStats::default(),
last_session_index: None,
approvals_usage: Default::default(),
};

let metrics_inner = MetricsInner {
Expand Down
3 changes: 2 additions & 1 deletion polkadot/node/network/approval-distribution/Cargo.toml
@@ -19,8 +19,9 @@ polkadot-node-primitives = { workspace = true, default-features = true }
polkadot-node-subsystem = { workspace = true, default-features = true }
polkadot-node-subsystem-util = { workspace = true, default-features = true }
polkadot-primitives = { workspace = true, default-features = true }
polkadot-core-primitives = { workspace = true, default-features = true }
rand = { workspace = true, default-features = true }

sp-keystore = { workspace = true, default-features = true }
futures = { workspace = true }
futures-timer = { workspace = true }
gum = { workspace = true, default-features = true }