From ec4635c9beb0a01ce37b2348e08be00c1bcc3ead Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Thu, 14 Aug 2025 04:40:03 +0700 Subject: [PATCH 001/173] feat: add blocksAndData testing utils --- packages/beacon-node/src/util/sszBytes.ts | 2 +- .../beacon-node/test/utils/blocksAndData.ts | 323 ++++++++++++++++++ 2 files changed, 324 insertions(+), 1 deletion(-) create mode 100644 packages/beacon-node/test/utils/blocksAndData.ts diff --git a/packages/beacon-node/src/util/sszBytes.ts b/packages/beacon-node/src/util/sszBytes.ts index b0d641e8c774..9c131fec0d7c 100644 --- a/packages/beacon-node/src/util/sszBytes.ts +++ b/packages/beacon-node/src/util/sszBytes.ts @@ -44,7 +44,7 @@ export type CommitteeBitsBase64 = string; const VARIABLE_FIELD_OFFSET = 4; const ATTESTATION_BEACON_BLOCK_ROOT_OFFSET = VARIABLE_FIELD_OFFSET + 8 + 8; -const ROOT_SIZE = 32; +export const ROOT_SIZE = 32; const SLOT_SIZE = 8; const COMMITTEE_INDEX_SIZE = 8; const ATTESTATION_DATA_SIZE = 128; diff --git a/packages/beacon-node/test/utils/blocksAndData.ts b/packages/beacon-node/test/utils/blocksAndData.ts new file mode 100644 index 000000000000..438ae969c02a --- /dev/null +++ b/packages/beacon-node/test/utils/blocksAndData.ts @@ -0,0 +1,323 @@ +import {randomBytes} from "node:crypto"; +import {SIGNATURE_LENGTH_UNCOMPRESSED} from "@chainsafe/blst"; +import { + BYTES_PER_BLOB, + BYTES_PER_COMMITMENT, + BYTES_PER_FIELD_ELEMENT, + BYTES_PER_PROOF, +} from "@crate-crypto/node-eth-kzg"; +import {createChainForkConfig, defaultChainConfig} from "@lodestar/config"; +import { + ForkName, + ForkPostCapella, + ForkPostDeneb, + ForkPostFulu, + NUMBER_OF_COLUMNS, + isForkPostDeneb, + isForkPostFulu, +} from "@lodestar/params"; +import { + blindedOrFullBlockToHeader, + blockToHeader, + computeStartSlotAtEpoch, + signedBlockToSignedHeader, +} from "@lodestar/state-transition"; +import {BeaconBlock, SignedBeaconBlock, Slot, deneb, fulu, ssz} from "@lodestar/types"; +import {toRootHex} from "@lodestar/utils"; +import {VersionedHashes} from "../../src/execution/index.js"; +import { + computeInclusionProof, + computeKzgCommitmentsInclusionProof, + kzgCommitmentToVersionedHash, +} from "../../src/util/blobs.js"; +import {kzg} from "../../src/util/kzg.js"; +import {ROOT_SIZE} from "../../src/util/sszBytes.js"; + +export const CAPELLA_FORK_EPOCH = 0; +export const DENEB_FORK_EPOCH = 10; +export const ELECTRA_FORK_EPOCH = 20; +export const FULU_FORK_EPOCH = 30; +export const config = createChainForkConfig({ + ...defaultChainConfig, + CAPELLA_FORK_EPOCH, + DENEB_FORK_EPOCH, + ELECTRA_FORK_EPOCH, + FULU_FORK_EPOCH, +}); + +export const slots: Record = { + capella: computeStartSlotAtEpoch(CAPELLA_FORK_EPOCH), + deneb: computeStartSlotAtEpoch(DENEB_FORK_EPOCH), + electra: computeStartSlotAtEpoch(ELECTRA_FORK_EPOCH), + fulu: computeStartSlotAtEpoch(FULU_FORK_EPOCH), +}; + +/** + * Value used in c-kzg + * https://github.com/matthewkeil/c-kzg-4844/blob/cc7c4e90669efc777a92b375574036a64f8ae9ae/bindings/node.js/test/kzg.test.ts#L42 + */ +const MAX_TOP_BYTE = 114; + +/** + * Generates a random blob of the correct length for the KZG library + * https://github.com/matthewkeil/c-kzg-4844/blob/cc7c4e90669efc777a92b375574036a64f8ae9ae/bindings/node.js/test/kzg.test.ts#L87 + */ +export function generateRandomBlob(): Uint8Array { + return new Uint8Array( + randomBytes(BYTES_PER_BLOB).map((x, i) => { + // Set the top byte to be low enough that the field element doesn't overflow the BLS modulus + if (x > MAX_TOP_BYTE && i % BYTES_PER_FIELD_ELEMENT === 0) { + return 
Math.floor(Math.random() * MAX_TOP_BYTE);
+      }
+      return x;
+    })
+  );
+}
+
+/**
+ * Generate a random number between min and max (inclusive)
+ */
+function generateRandomInt(min: number, max: number): number {
+  return Math.floor(Math.random() * (max - min + 1)) + min;
+}
+function generateProposerIndex(min = 0, max = 100_000): number {
+  return generateRandomInt(min, max);
+}
+
+function generateBeaconBlock<F extends ForkPostCapella>({
+  forkName,
+  slot,
+  parentRoot,
+}: {forkName: F; slot?: Slot; parentRoot?: Uint8Array}): SignedBeaconBlock {
+  const block = ssz[forkName].SignedBeaconBlock.defaultValue();
+  block.message.slot = slot ? slot : slots[forkName];
+  block.message.parentRoot = parentRoot ? parentRoot : Uint8Array.from(randomBytes(ROOT_SIZE));
+  block.message.stateRoot = Uint8Array.from(randomBytes(ROOT_SIZE));
+  block.message.proposerIndex = generateProposerIndex();
+  block.signature = Uint8Array.from(randomBytes(SIGNATURE_LENGTH_UNCOMPRESSED));
+  return block;
+}
+
+function generateRoots<F extends ForkPostCapella>(
+  forkName: F,
+  block: SignedBeaconBlock
+): {
+  blockRoot: Uint8Array;
+  rootHex: string;
+} {
+  const blockRoot = ssz[forkName].BeaconBlock.hashTreeRoot(block.message);
+  const rootHex = toRootHex(blockRoot);
+  return {
+    blockRoot,
+    rootHex,
+  };
+}
+
+function generateBlobSidecars(
+  forkName: ForkPostDeneb,
+  block: SignedBeaconBlock,
+  count: number,
+  oomProtection = false
+): {
+  block: SignedBeaconBlock;
+  blobSidecars: deneb.BlobSidecars;
+  // versionedHashes: VersionedHashes
+} {
+  const blobKzgCommitments: Uint8Array[] = [];
+  const blobSidecars: deneb.BlobSidecars = [];
+  const signedBlockHeader = signedBlockToSignedHeader(config, block);
+
+  for (let index = 0; index < count; index++) {
+    const blobSidecar = ssz[forkName].BlobSidecar.defaultValue();
+    blobSidecar.index = index;
+    blobSidecar.signedBlockHeader = signedBlockHeader;
+    blobSidecar.blob = generateRandomBlob();
+    blobSidecar.kzgCommitment = kzg.blobToKzgCommitment(blobSidecar.blob);
+    blobSidecar.kzgCommitmentInclusionProof = computeInclusionProof(forkName, block.message.body, index);
+    blobSidecar.kzgProof = kzg.computeBlobKzgProof(blobSidecar.blob, blobSidecar.kzgCommitment);
+
+    if (oomProtection) {
+      blobSidecar.blob = new Uint8Array(1);
+    }
+
+    blobSidecars.push(blobSidecar);
+    blobKzgCommitments.push(blobSidecar.kzgCommitment);
+  }
+
+  block.message.body.blobKzgCommitments = blobKzgCommitments;
+  // const versionedHashes = blobKzgCommitments.map((commitment) => kzgCommitmentToVersionedHash(commitment));
+
+  return {
+    block,
+    blobSidecars,
+    // versionedHashes,
+  };
+}
+
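+/**
+ * Illustrative usage of the helpers above (a sketch only; the fork and blob
+ * count are arbitrary choices, not values these utils require):
+ *
+ * ```ts
+ * const {block, blobSidecars} = generateBlobSidecars(
+ *   ForkName.deneb,
+ *   generateBeaconBlock({forkName: ForkName.deneb}),
+ *   3
+ * );
+ * // block.message.body.blobKzgCommitments[i] matches blobSidecars[i].kzgCommitment
+ * ```
+ */
+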
+function generateColumnSidecars<F extends ForkPostFulu>(
+  forkName: F,
+  block: SignedBeaconBlock,
+  numberOfBlobs: number,
+  oomProtection = false
+): {
+  block: SignedBeaconBlock;
+  columnSidecars: fulu.DataColumnSidecars;
+} {
+  const blobs = Array.from({length: numberOfBlobs}, () => generateRandomBlob());
+  const kzgCommitments = blobs.map((blob) => kzg.blobToKzgCommitment(blob));
+  block.message.body.blobKzgCommitments = kzgCommitments;
+
+  const signedBlockHeader = signedBlockToSignedHeader(config, block);
+  const cellsAndProofs = blobs.map((blob) => kzg.computeCellsAndKzgProofs(blob));
+  const kzgCommitmentsInclusionProof = computeKzgCommitmentsInclusionProof(forkName, block.message.body);
+
+  const columnSidecars = Array.from({length: NUMBER_OF_COLUMNS}, (_, columnIndex) => {
+    const column = oomProtection
+      ? []
+      : Array.from({length: blobs.length}, (_, rowNumber) => cellsAndProofs[rowNumber].cells[columnIndex]);
+    const kzgProofs = Array.from(
+      {length: blobs.length},
+      (_, rowNumber) => cellsAndProofs[rowNumber].proofs[columnIndex]
+    );
+    return {
+      index: columnIndex,
+      column,
+      kzgCommitments,
+      kzgProofs,
+      signedBlockHeader,
+      kzgCommitmentsInclusionProof,
+    };
+  });
+
+  return {
+    block,
+    columnSidecars,
+  };
+}
+
+export type BlockTestSet = {
+  block: SignedBeaconBlock;
+  blockRoot: Uint8Array;
+  rootHex: string;
+};
+
+export function generateChainOfBlocks<F extends ForkPostCapella>({
+  forkName,
+  count,
+}: {forkName: F; count: number}): BlockTestSet[] {
+  let parentRoot = Uint8Array.from(randomBytes(ROOT_SIZE));
+  const startSlot = slots[forkName];
+  const blocks: BlockTestSet[] = [];
+  for (let slot = startSlot; slot < startSlot + count; slot++) {
+    const block = generateBeaconBlock({forkName, parentRoot, slot});
+    const {blockRoot, rootHex} = generateRoots(forkName, block);
+    parentRoot = blockRoot;
+    blocks.push({
+      block,
+      blockRoot,
+      rootHex,
+    });
+  }
+  return blocks;
+}
+
+export type BlockWithBlobsTestSet = BlockTestSet & {blobSidecars: deneb.BlobSidecars};
+
+export type BlockWithColumnsTestSet = BlockTestSet & {
+  columnSidecars: fulu.DataColumnSidecars;
+};
+
+export function generateBlockWithBlobSidecars<F extends ForkPostDeneb>({
+  forkName,
+  slot,
+  parentRoot,
+  oomProtection = false,
+}: {
+  forkName: F;
+  parentRoot?: Uint8Array;
+  slot?: Slot;
+  oomProtection?: boolean;
+}): BlockWithBlobsTestSet {
+  const {block, blobSidecars} = generateBlobSidecars(
+    forkName,
+    generateBeaconBlock({forkName, parentRoot, slot}),
+    generateRandomInt(1, 6),
+    oomProtection
+  );
+  const {blockRoot, rootHex} = generateRoots(forkName, block);
+  return {
+    block,
+    blobSidecars,
+    blockRoot,
+    rootHex,
+  };
+}
+
+export function generateBlockWithColumnSidecars<F extends ForkPostFulu>({
+  forkName,
+  slot,
+  parentRoot,
+  oomProtection = false,
+}: {
+  forkName: F;
+  parentRoot?: Uint8Array;
+  slot?: Slot;
+  oomProtection?: boolean;
+}): BlockWithColumnsTestSet {
+  const {block, columnSidecars} = generateColumnSidecars(
+    forkName,
+    generateBeaconBlock({forkName, parentRoot, slot}),
+    generateRandomInt(1, 6),
+    oomProtection
+  );
+  const {blockRoot, rootHex} = generateRoots(forkName, block);
+  return {
+    block,
+    columnSidecars,
+    blockRoot,
+    rootHex,
+  };
+}
+
+export type BlocksWithSidecars<F extends ForkPostDeneb> = F extends ForkPostFulu
+  ? BlockWithColumnsTestSet[]
+  : BlockWithBlobsTestSet[];
+export function generateChainOfBlocksWithBlobs<F extends ForkPostDeneb>({
+  forkName,
+  count,
+  oomProtection = false,
+}: {
+  forkName: F;
+  count: number;
+  oomProtection?: boolean;
+}): BlocksWithSidecars<F> {
+  let parentRoot = Uint8Array.from(randomBytes(ROOT_SIZE));
+  const startSlot = slots[forkName];
+  const blocks: BlocksWithSidecars<F> = [];
+  for (let slot = startSlot; slot < startSlot + count; slot++) {
+    const blockWithSidecars = isForkPostFulu(forkName)
+      ? generateBlockWithColumnSidecars({forkName, parentRoot, slot, oomProtection})
+      : generateBlockWithBlobSidecars({forkName, parentRoot, slot, oomProtection});
+    parentRoot = blockWithSidecars.blockRoot;
+    blocks.push(blockWithSidecars);
+  }
+  return blocks;
+}
+
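+/**
+ * Illustrative usage (a sketch; the count is arbitrary): each generated block is
+ * linked to the previous one, so the result can exercise range-sync style flows:
+ *
+ * ```ts
+ * const chain = generateChainOfBlocksWithBlobs({forkName: ForkName.electra, count: 8});
+ * // chain[1].block.message.parentRoot is chain[0].blockRoot by construction
+ * ```
+ */
+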
+export type ChainOfBlockMaybeSidecars<F extends ForkPostCapella> = F extends ForkPostFulu
+  ? BlockWithColumnsTestSet[]
+  : F extends ForkPostDeneb
+    ? BlockWithBlobsTestSet[]
+    : BlockTestSet[];
+export function generateChainOfBlockMaybeSidecars<F extends ForkPostCapella>(
+  forkName: F,
+  count: number,
+  oomProtection = false
+): ChainOfBlockMaybeSidecars<F> {
+  if (isForkPostDeneb(forkName)) {
+    return generateChainOfBlocksWithBlobs({forkName, count, oomProtection});
+  }
+  return generateChainOfBlocks({forkName, count});
+}

From 337d3486bb0adc2ca5c79121b19f264681c8bb55 Mon Sep 17 00:00:00 2001
From: matthewkeil
Date: Thu, 14 Aug 2025 04:56:06 +0700
Subject: [PATCH 002/173] feat: add SeenBlockInputCache.getByColumn

---
 packages/beacon-node/src/chain/chain.ts       |  1 +
 .../src/chain/seenCache/seenBlockInput.ts     | 76 ++++++++++++++++++-
 .../src/metrics/metrics/lodestar.ts           |  5 ++
 3 files changed, 79 insertions(+), 3 deletions(-)

diff --git a/packages/beacon-node/src/chain/chain.ts b/packages/beacon-node/src/chain/chain.ts
index 1a5da816c63f..85c08e74317c 100644
--- a/packages/beacon-node/src/chain/chain.ts
+++ b/packages/beacon-node/src/chain/chain.ts
@@ -302,6 +302,7 @@ export class BeaconChain implements IBeaconChain {
     this.checkpointBalancesCache = new CheckpointBalancesCache();
     this.seenBlockInputCache = new SeenBlockInputCache({
       config,
+      custodyConfig: this.custodyConfig,
       clock,
       chainEvents: emitter,
       signal,
diff --git a/packages/beacon-node/src/chain/seenCache/seenBlockInput.ts b/packages/beacon-node/src/chain/seenCache/seenBlockInput.ts
index eb0ba6d98114..66175c7ae641 100644
--- a/packages/beacon-node/src/chain/seenCache/seenBlockInput.ts
+++ b/packages/beacon-node/src/chain/seenCache/seenBlockInput.ts
@@ -2,20 +2,24 @@ import {ChainForkConfig} from "@lodestar/config";
 import {CheckpointWithHex} from "@lodestar/fork-choice";
 import {ForkName, isForkPostDeneb} from "@lodestar/params";
 import {computeStartSlotAtEpoch} from "@lodestar/state-transition";
-import {RootHex, SignedBeaconBlock, Slot, deneb} from "@lodestar/types";
+import {RootHex, SignedBeaconBlock, Slot, deneb, fulu} from "@lodestar/types";
 import {LodestarError, Logger, toRootHex} from "@lodestar/utils";
 import {Metrics} from "../../metrics/metrics.js";
 import {IClock} from "../../util/clock.js";
+import {CustodyConfig} from "../../util/dataColumns.js";
 import {
   BlockInputBlobs,
+  BlockInputColumns,
   BlockInputPreData,
   DAType,
   ForkBlobsDA,
   IBlockInput,
   LogMetaBasic,
   LogMetaBlobs,
+  LogMetaColumns,
   SourceMeta,
   isBlockInputBlobs,
+  isBlockInputColumns,
   isDaOutOfRange,
 } from "../blocks/blockInput/index.js";
 import {ChainEvent, ChainEventEmitter} from "../emitter.js";
@@ -27,7 +31,7 @@ export type SeenBlockInputCacheModules = {
   clock: IClock;
   chainEvents: ChainEventEmitter;
   signal: AbortSignal;
-  // custodyConfig: CustodyConfig;
+  custodyConfig: CustodyConfig;
   metrics: Metrics | null;
   logger?: Logger;
 };
@@ -70,6 +74,7 @@ export type GetByBlobOptions = {
 
 export class SeenBlockInputCache {
   private readonly config: ChainForkConfig;
+  private readonly custodyConfig: CustodyConfig;
   private readonly clock: IClock;
   private readonly chainEvents: ChainEventEmitter;
   private readonly signal: AbortSignal;
   private readonly metrics: Metrics | null;
   private readonly logger?: Logger;
   private blockInputs = new Map<RootHex, IBlockInput>();
 
-  constructor({config, clock, chainEvents, signal, metrics, logger}: SeenBlockInputCacheModules) {
+  constructor({config, custodyConfig, clock, chainEvents, signal, metrics, logger}: SeenBlockInputCacheModules) {
     this.config = config;
+    this.custodyConfig = custodyConfig;
     this.clock = clock;
     this.chainEvents = chainEvents;
     this.signal = signal;
@@ -257,6 +263,66 @@ export class SeenBlockInputCache 
{ return blockInput; } + getByColumn( + {columnSidecar, seenTimestampSec, source, peerIdStr}: SourceMeta & {columnSidecar: fulu.DataColumnSidecar}, + opts: GetByBlobOptions = {} + ): BlockInputColumns { + const blockRoot = this.config + .getForkTypes(columnSidecar.signedBlockHeader.message.slot) + .BeaconBlockHeader.hashTreeRoot(columnSidecar.signedBlockHeader.message); + const blockRootHex = toRootHex(blockRoot); + + let blockInput = this.blockInputs.get(blockRootHex); + let created = false; + if (!blockInput) { + created = true; + const {forkName, daOutOfRange} = this.buildCommonProps(columnSidecar.signedBlockHeader.message.slot); + blockInput = BlockInputColumns.createFromColumn({ + columnSidecar, + blockRootHex, + daOutOfRange, + forkName, + source, + seenTimestampSec, + peerIdStr, + custodyColumns: this.custodyConfig.custodyColumns, + sampledColumns: this.custodyConfig.sampledColumns, + }); + this.metrics?.seenCache.blockInput.createdByBlob.inc(); + this.blockInputs.set(blockRootHex, blockInput); + } + + if (!isBlockInputColumns(blockInput)) { + throw new SeenBlockInputCacheError( + { + code: SeenBlockInputCacheErrorCode.WRONG_BLOCK_INPUT_TYPE, + cachedType: blockInput.type, + requestedType: DAType.Columns, + ...blockInput.getLogMeta(), + }, + `BlockInputType mismatch adding columnIndex=${columnSidecar.index}` + ); + } + + if (!blockInput.hasColumn(columnSidecar.index)) { + blockInput.addColumn({columnSidecar, blockRootHex, source, seenTimestampSec, peerIdStr}); + } else if (!created) { + this.logger?.debug( + `Attempt to cache column index #${columnSidecar.index} but is already cached on BlockInput`, + blockInput.getLogMeta() + ); + this.metrics?.seenCache.blockInput.duplicateColumnCount.inc({source}); + if (opts.throwErrorIfAlreadyKnown) { + throw new SeenBlockInputCacheError({ + code: SeenBlockInputCacheErrorCode.GOSSIP_COLUMN_ALREADY_KNOWN, + ...blockInput.getLogMeta(), + }); + } + } + + return blockInput; + } + private buildCommonProps(slot: Slot): { daOutOfRange: boolean; forkName: ForkName; @@ -289,6 +355,7 @@ export class SeenBlockInputCache { enum SeenBlockInputCacheErrorCode { WRONG_BLOCK_INPUT_TYPE = "BLOCK_INPUT_CACHE_ERROR_WRONG_BLOCK_INPUT_TYPE", GOSSIP_BLOB_ALREADY_KNOWN = "BLOCK_INPUT_CACHE_ERROR_GOSSIP_BLOB_ALREADY_KNOWN", + GOSSIP_COLUMN_ALREADY_KNOWN = "BLOCK_INPUT_CACHE_ERROR_GOSSIP_COLUMN_ALREADY_KNOWN", } type SeenBlockInputCacheErrorType = @@ -299,6 +366,9 @@ type SeenBlockInputCacheErrorType = }) | (LogMetaBlobs & { code: SeenBlockInputCacheErrorCode.GOSSIP_BLOB_ALREADY_KNOWN; + }) + | (LogMetaColumns & { + code: SeenBlockInputCacheErrorCode.GOSSIP_COLUMN_ALREADY_KNOWN; }); class SeenBlockInputCacheError extends LodestarError {} diff --git a/packages/beacon-node/src/metrics/metrics/lodestar.ts b/packages/beacon-node/src/metrics/metrics/lodestar.ts index 1d4a331065ae..4bf65eb7d0f6 100644 --- a/packages/beacon-node/src/metrics/metrics/lodestar.ts +++ b/packages/beacon-node/src/metrics/metrics/lodestar.ts @@ -1314,6 +1314,11 @@ export function createLodestarMetrics( help: "Total number of duplicate blobs that pass validation and attempt to be cached but are known", labelNames: ["source"], }), + duplicateColumnCount: register.gauge<{source: BlockInputSource}>({ + name: "lodestar_seen_block_input_cache_duplicate_column_count", + help: "Total number of duplicate columns that pass validation and attempt to be cached but are known", + labelNames: ["source"], + }), createdByBlock: register.gauge({ name: "lodestar_seen_block_input_cache_items_created_by_block", help: 
"Number of BlockInputs created via a block being seen first", From 004860af669f629c4e1090c171f9110a722bfb55 Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Thu, 14 Aug 2025 04:58:16 +0700 Subject: [PATCH 003/173] feat: add downloadByRange.ts --- .../src/sync/utils/downloadByRange.ts | 846 ++++++++++++++++++ 1 file changed, 846 insertions(+) create mode 100644 packages/beacon-node/src/sync/utils/downloadByRange.ts diff --git a/packages/beacon-node/src/sync/utils/downloadByRange.ts b/packages/beacon-node/src/sync/utils/downloadByRange.ts new file mode 100644 index 000000000000..a8c584bed8c1 --- /dev/null +++ b/packages/beacon-node/src/sync/utils/downloadByRange.ts @@ -0,0 +1,846 @@ +import {ChainForkConfig} from "@lodestar/config"; +import {ForkPostDeneb, isForkPostDeneb, isForkPostFulu} from "@lodestar/params"; +import {DataAvailabilityStatus} from "@lodestar/state-transition"; +import {RootHex, SignedBeaconBlock, Slot, WithBytes, deneb, fulu, phase0} from "@lodestar/types"; +import {LodestarError, Logger, prettyBytes, prettyPrintIndices} from "@lodestar/utils"; +import {BlockInputSource, IBlockInput} from "../../chain/blocks/blockInput/index.js"; +import {SeenBlockInputCache} from "../../chain/seenCache/seenBlockInput.js"; +import {INetwork, prettyPrintPeerIdStr} from "../../network/index.js"; +import {linspace} from "../../util/numpy.js"; +import {PeerIdStr} from "../../util/peerId.js"; + +export type DownloadByRangeRequests = { + blocksRequest: phase0.BeaconBlocksByRangeRequest; + blobsRequest?: deneb.BlobSidecarsByRangeRequest; + columnsRequest?: fulu.DataColumnSidecarsByRangeRequest; +}; + +export type DownloadByRangeResponses = { + blocks?: SignedBeaconBlock[]; + blobSidecars?: deneb.BlobSidecars; + columnSidecars?: fulu.DataColumnSidecars; +}; + +export type DownloadAndCacheByRangeProps = DownloadByRangeRequests & { + config: ChainForkConfig; + cache: SeenBlockInputCache; + network: INetwork; + logger: Logger; + peerIdStr: string; + dataAvailabilityStatus: DataAvailabilityStatus; +}; + +export type DownloadAndCacheByRangeResults = { + blockInputs: IBlockInput[]; + numberOfBlocks: number; + numberOfBlobs: number; + numberOfColumns: number; +}; + +export async function downloadAndCacheByRange( + request: DownloadAndCacheByRangeProps +): Promise { + const {logger, cache, peerIdStr} = request; + const {blocks, blobSidecars, columnSidecars} = await downloadByRange(request); + const blockInputs = new Map(); + const seenTimestampSec = Date.now() / 1000; + + function uncache() { + for (const [rootHex] of blockInputs) { + try { + cache.remove(rootHex); + } catch (e) { + logger.error( + "Error removing blockInput from seenBlockInputCache", + {blockRoot: prettyBytes(rootHex)}, + e as Error + ); + } + } + } + + let numberOfBlocks = 0; + if (blocks) { + try { + for (const block of blocks) { + const blockInput = cache.getByBlock({ + block, + seenTimestampSec, + source: BlockInputSource.byRange, + peerIdStr, + }); + numberOfBlocks++; + blockInputs.set(blockInput.blockRootHex, blockInput); + } + } catch (err) { + uncache(); + throw new DownloadByRangeError( + { + code: DownloadByRangeErrorCode.CACHING_ERROR, + peerId: prettyPrintPeerIdStr(peerIdStr), + message: (err as Error).message, + }, + "Error caching ByRange fetched block" + ); + } + } + + const processedBlobs = new Map(); + let numberOfBlobs = 0; + if (blobSidecars) { + try { + for (const blobSidecar of blobSidecars) { + const blockInput = cache.getByBlob({ + peerIdStr, + blobSidecar, + seenTimestampSec, + source: BlockInputSource.byRange, + 
}); + numberOfBlobs++; + blockInputs.set(blockInput.blockRootHex, blockInput); + const indices = processedBlobs.get(blockInput.blockRootHex) ?? []; + indices.push(blobSidecar.index); + processedBlobs.set(blockInput.blockRootHex, indices); + } + } catch (err) { + uncache(); + throw new DownloadByRangeError( + { + code: DownloadByRangeErrorCode.CACHING_ERROR, + peerId: prettyPrintPeerIdStr(peerIdStr), + message: (err as Error).message, + }, + "Error caching ByRange fetched blob" + ); + } + } + + const processedColumns = new Map(); + let numberOfColumns = 0; + if (columnSidecars) { + try { + for (const columnSidecar of columnSidecars) { + const blockInput = cache.getByColumn({ + peerIdStr, + columnSidecar, + seenTimestampSec, + source: BlockInputSource.byRange, + }); + numberOfColumns++; + blockInputs.set(blockInput.blockRootHex, blockInput); + const indices = processedColumns.get(blockInput.blockRootHex) ?? []; + indices.push(columnSidecar.index); + processedColumns.set(blockInput.blockRootHex, indices); + } + } catch (err) { + uncache(); + throw new DownloadByRangeError( + { + code: DownloadByRangeErrorCode.CACHING_ERROR, + peerId: prettyPrintPeerIdStr(peerIdStr), + message: (err as Error).message, + }, + "Error caching ByRange fetched column" + ); + } + } + + return { + blockInputs: Array.from(blockInputs.values()), + numberOfBlocks, + numberOfBlobs, + numberOfColumns, + }; +} + +export async function downloadByRange({ + config, + network, + logger, + peerIdStr, + dataAvailabilityStatus, + blocksRequest, + blobsRequest, + columnsRequest, +}: Omit): Promise { + const slotRangeString = validateRequests({ + config, + dataAvailabilityStatus, + blocksRequest, + blobsRequest, + columnsRequest, + }); + + let response: DownloadByRangeResponses; + try { + response = await requestByRange({ + network, + peerIdStr, + blocksRequest, + blobsRequest, + columnsRequest, + }); + } catch (err) { + logger.verbose("RangeSync *ByRange error", {}, err as Error); + throw new DownloadByRangeError({ + code: DownloadByRangeErrorCode.REQ_RESP_ERROR, + peerId: peerIdStr, + slotRange: slotRangeString, + }); + } + + validateResponses({ + peerIdStr, + slotRangeString, + blocksRequest, + blobsRequest, + columnsRequest, + ...response, + }); + + return response; +} + +/** + * Should not be called directly. Only exported for unit testing purposes + */ +export function validateRequests({ + config, + dataAvailabilityStatus, + blocksRequest, + blobsRequest, + columnsRequest, +}: DownloadByRangeRequests & Pick): string { + const startSlot = (blocksRequest?.startSlot ?? blobsRequest?.startSlot ?? columnsRequest?.startSlot) as number; + const count = (blocksRequest?.count ?? blobsRequest?.count ?? columnsRequest?.count) as number; + const slotRange = `${startSlot} - ${startSlot + count}`; + const dataRequest = blobsRequest ?? 
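+  // post-Deneb exactly one of blobsRequest/columnsRequest should be set; the checks below enforce it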
columnsRequest; + + if (!blocksRequest) { + throw new DownloadByRangeError({ + code: DownloadByRangeErrorCode.MISSING_BLOCKS_REQUEST, + slotRange, + }); + } + + if (dataAvailabilityStatus !== DataAvailabilityStatus.Available) { + if (dataRequest) { + throw new DownloadByRangeError( + { + code: DownloadByRangeErrorCode.INVALID_DATA_REQUEST, + slotRange, + }, + "Cannot request data if it is not available" + ); + } + + return slotRange; + } + + if (!dataRequest) { + throw new DownloadByRangeError( + { + code: DownloadByRangeErrorCode.MISSING_DATA_REQUEST, + slotRange, + }, + "Must request data if it is available" + ); + } + + if (blobsRequest && columnsRequest) { + throw new DownloadByRangeError( + { + code: DownloadByRangeErrorCode.INVALID_DATA_REQUEST, + slotRange, + }, + "Cannot request both blob and column data in the same slot range" + ); + } + + const forkName = config.getForkName(startSlot); + if (!isForkPostDeneb(forkName)) { + throw new DownloadByRangeError( + { + code: DownloadByRangeErrorCode.INVALID_DATA_REQUEST, + slotRange, + }, + "Cannot request data pre-deneb" + ); + } + + if (isForkPostDeneb(forkName) && !isForkPostFulu(forkName) && !blobsRequest) { + throw new DownloadByRangeError( + { + code: DownloadByRangeErrorCode.MISSING_BLOBS_REQUEST, + slotRange, + }, + "Must request blobs for blob-only forks" + ); + } + + if (isForkPostFulu(forkName) && !columnsRequest) { + throw new DownloadByRangeError( + { + code: DownloadByRangeErrorCode.MISSING_COLUMNS_REQUEST, + slotRange, + }, + "Must request columns for forks with columns" + ); + } + + if (blocksRequest.startSlot !== dataRequest.startSlot) { + throw new DownloadByRangeError({ + code: DownloadByRangeErrorCode.START_SLOT_MISMATCH, + blockStartSlot: blocksRequest.startSlot, + dataStartSlot: dataRequest.startSlot, + }); + } + + if (blocksRequest.count !== dataRequest.count) { + throw new DownloadByRangeError({ + code: DownloadByRangeErrorCode.COUNT_MISMATCH, + blockCount: blocksRequest.count, + dataCount: dataRequest.count, + }); + } + + return slotRange; +} + +/** + * Should not be called directly. Only exported for unit testing purposes + */ +export async function requestByRange({ + network, + peerIdStr, + blocksRequest, + blobsRequest, + columnsRequest, +}: DownloadByRangeRequests & { + network: INetwork; + peerIdStr: PeerIdStr; +}): Promise { + let blocks: undefined | SignedBeaconBlock[]; + let blobSidecars: undefined | deneb.BlobSidecars; + let columnSidecars: undefined | fulu.DataColumnSidecars; + + const requests: Promise[] = []; + + if (blocksRequest) { + requests.push( + network.sendBeaconBlocksByRange(peerIdStr, blocksRequest).then((blockResponse) => { + blocks = blockResponse.map(({data}) => data); + }) + ); + } + + if (blobsRequest) { + requests.push( + network.sendBlobSidecarsByRange(peerIdStr, blobsRequest).then((blobResponse) => { + blobSidecars = blobResponse; + }) + ); + } + + if (columnsRequest) { + requests.push( + network.sendDataColumnSidecarsByRange(peerIdStr, columnsRequest).then((columnResponse) => { + columnSidecars = columnResponse; + }) + ); + } + + await Promise.all(requests); + + return { + blocks, + blobSidecars, + columnSidecars, + }; +} + +/** + * Should not be called directly. 
Only exported for unit testing purposes + */ +export function validateResponses({ + peerIdStr, + slotRangeString, + blocksRequest, + blobsRequest, + columnsRequest, + blocks, + blobSidecars, + columnSidecars, +}: DownloadByRangeRequests & DownloadByRangeResponses & {peerIdStr: string; slotRangeString: string}): void { + if (!blocks) { + throw new DownloadByRangeError( + { + code: DownloadByRangeErrorCode.MISSING_BLOCKS_RESPONSE, + slotRange: slotRangeString, + }, + "No blocks to validate requests against" + ); + } + + const {missingSlots, extraSlots} = compareBlockByRangeRequestAndResponse(blocksRequest, blocks); + if (missingSlots) { + throw new DownloadByRangeError( + { + code: DownloadByRangeErrorCode.MISSING_BLOCKS, + peerId: prettyPrintPeerIdStr(peerIdStr), + missingSlots: prettyPrintIndices(missingSlots), + }, + "Not all blocks included in BeaconBlocksByRange response" + ); + } + if (extraSlots) { + // extra slots array is allocated when checking requested length against returned array length. If there are no + // extras found that means there are duplicates + if (extraSlots.length === 0) { + throw new DownloadByRangeError( + { + code: DownloadByRangeErrorCode.DUPLICATE_BLOCKS, + peerId: prettyPrintPeerIdStr(peerIdStr), + }, + "Duplicate blocks in BeaconBlocksByRange response" + ); + } + + throw new DownloadByRangeError( + { + code: DownloadByRangeErrorCode.EXTRA_BLOCKS, + peerId: prettyPrintPeerIdStr(peerIdStr), + extraSlots: prettyPrintIndices(extraSlots), + }, + "Extra blocks outside of requested range in BeaconBlocksByRange response" + ); + } + + if (blobsRequest) { + if (!blobSidecars) { + throw new DownloadByRangeError( + { + code: DownloadByRangeErrorCode.MISSING_BLOBS_RESPONSE, + slotRange: slotRangeString, + }, + "No blobSidecars to validate against blobsRequest" + ); + } + const { + expectedBlobCount, + missingBlobCount, + missingBlobsDescription, + extraBlobCount, + extraBlobsDescription, + duplicateBlobCount, + duplicateBlobsDescription, + } = compareBlobsByRangeRequestAndResponse(blocks, blobSidecars); + + if (duplicateBlobCount > 0) { + throw new DownloadByRangeError({ + code: DownloadByRangeErrorCode.DUPLICATE_BLOBS, + peerId: prettyPrintPeerIdStr(peerIdStr), + expectedBlobCount, + duplicateBlobCount, + slotsWithIndices: duplicateBlobsDescription.join(","), + }); + } + + if (extraBlobCount > 0) { + throw new DownloadByRangeError({ + code: DownloadByRangeErrorCode.EXTRA_BLOBS, + peerId: prettyPrintPeerIdStr(peerIdStr), + expectedBlobCount, + extraBlobCount, + slotsWithIndices: extraBlobsDescription.join(","), + }); + } + + if (missingBlobCount > 0) { + throw new DownloadByRangeError({ + code: DownloadByRangeErrorCode.MISSING_BLOBS, + peerId: prettyPrintPeerIdStr(peerIdStr), + expectedBlobCount, + missingBlobCount, + slotsWithIndices: missingBlobsDescription.join(","), + }); + } + } + + if (columnsRequest) { + if (!columnSidecars) { + throw new DownloadByRangeError( + { + code: DownloadByRangeErrorCode.MISSING_COLUMNS_RESPONSE, + slotRange: slotRangeString, + }, + "No columnSidecars to check columnRequest against" + ); + } + + const {missingByIndex, extraByIndex} = compareColumnsByRangeRequestAndResponse(columnsRequest, columnSidecars); + + if (extraByIndex.size > 0) { + const fullExtraColumns: number[] = []; + let extraColumnCount = 0; + const partialExtraColumns: string[] = []; + for (const [index, extraSlots] of extraByIndex) { + if (extraSlots.length === columnsRequest.count) { + fullExtraColumns.push(index); + } else { + extraColumnCount += extraSlots.length; + 
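+          // render as "columnIndex[slot, ...]" so the EXTRA_COLUMNS_SOME_SLOTS error pinpoints the offending slots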
partialExtraColumns.push(`${index}${prettyPrintIndices(extraSlots)}`); + } + } + + if (fullExtraColumns.length) { + // this should be severe peer infraction + throw new DownloadByRangeError({ + code: DownloadByRangeErrorCode.EXTRA_COLUMNS_ALL_SLOTS, + peerId: prettyPrintPeerIdStr(peerIdStr), + extraColumns: prettyPrintIndices(fullExtraColumns), + }); + } + + // this should be a minor peer infraction? What do you think @twoeths @g11tech? + throw new DownloadByRangeError({ + code: DownloadByRangeErrorCode.EXTRA_COLUMNS_SOME_SLOTS, + peerId: prettyPrintPeerIdStr(peerIdStr), + extraColumnCount, + indicesWithSlots: partialExtraColumns.join(", "), + }); + } + + if (missingByIndex.size > 0) { + const missingPeerCustody = []; + let missingColumnCount = 0; + const indicesWithSlots = []; + for (const [index, missingSlots] of missingByIndex) { + if (missingSlots.length === columnsRequest.count) { + missingPeerCustody.push(index); + } else { + missingColumnCount += missingSlots.length; + indicesWithSlots.push(`${index}${prettyPrintIndices(missingSlots)}`); + } + } + + if (missingPeerCustody.length) { + // this should be a severe peer infraction + throw new DownloadByRangeError({ + code: DownloadByRangeErrorCode.PEER_CUSTODY_FAILURE, + peerId: prettyPrintPeerIdStr(peerIdStr), + missingColumns: prettyPrintIndices(missingPeerCustody), + }); + } + + // this should be a minor peer infraction? What do you think @twoeths @g11tech? + throw new DownloadByRangeError({ + code: DownloadByRangeErrorCode.MISSING_COLUMNS, + peerId: prettyPrintPeerIdStr(peerIdStr), + missingColumnCount, + indicesWithSlots: indicesWithSlots.join(", "), + }); + } + } +} + +/** + * Should not be called directly. Only exported for unit testing purposes + */ +export function compareBlockByRangeRequestAndResponse( + blocksRequest: phase0.BeaconBlocksByRangeRequest, + blocks: SignedBeaconBlock[] +): {missingSlots?: number[]; extraSlots?: number[]} { + const {startSlot, count} = blocksRequest; + const slotsReceived = blocks.map((block) => block.message.slot); + + const extraSlots: number[] = []; + if (slotsReceived.length > count) { + for (const slot of slotsReceived) { + if (slot < startSlot || slot >= startSlot + count) { + extraSlots.push(slot); + } + } + + return { + extraSlots, + }; + } + + const missingSlots: number[] = []; + for (let slot = startSlot; slot < startSlot + count; slot++) { + if (!slotsReceived.includes(slot)) { + missingSlots.push(slot); + } + } + + if (missingSlots.length) { + return { + missingSlots, + }; + } + + return {}; +} + +type BlobComparisonResponse = { + expectedBlobCount: number; + missingBlobCount: number; + extraBlobCount: number; + duplicateBlobCount: number; + missingBlobsDescription: string[]; + extraBlobsDescription: string[]; + duplicateBlobsDescription: string[]; +}; +/** + * Should not be called directly. 
Only exported for unit testing purposes + */ +export function compareBlobsByRangeRequestAndResponse( + blocks: SignedBeaconBlock[], + blobSidecars: deneb.BlobSidecars +): BlobComparisonResponse { + let expectedBlobCount = 0; + let missingBlobCount = 0; + let extraBlobCount = 0; + let duplicateBlobCount = 0; + const missingBlobsDescription: string[] = []; + const extraBlobsDescription: string[] = []; + const duplicateBlobsDescription: string[] = []; + for (const block of blocks) { + const slot = block.message.slot; + const expectedBlobs = (block as SignedBeaconBlock).message.body.blobKzgCommitments.length; + const expectedIndices = linspace(0, expectedBlobs - 1); + expectedBlobCount += expectedBlobs; + const receivedBlobs = blobSidecars + .filter((blobSidecar) => { + return blobSidecar.signedBlockHeader.message.slot === slot; + }) + .map((blobSidecar) => blobSidecar.index); + + const missingIndices: number[] = []; + const duplicateIndices: number[] = []; + for (const index of expectedIndices) { + if (!receivedBlobs.includes(index)) { + missingIndices.push(index); + } + if (receivedBlobs.filter((blobIndex) => blobIndex === index).length > 1) { + duplicateIndices.push(index); + } + } + if (missingIndices.length > 0) { + missingBlobCount += missingIndices.length; + missingBlobsDescription.push(`${slot}${prettyPrintIndices(missingIndices)}`); + } + if (duplicateIndices.length > 0) { + duplicateBlobCount += duplicateIndices.length; + duplicateBlobsDescription.push(`${slot}${prettyPrintIndices(duplicateIndices)}`); + } + + const extraIndices: number[] = []; + for (const index of receivedBlobs) { + if (!expectedIndices.includes(index)) { + extraIndices.push(index); + } + } + if (extraIndices.length > 0) { + extraBlobCount += extraIndices.length; + extraBlobsDescription.push(`${slot}${prettyPrintIndices(extraIndices)}`); + } + } + + if (expectedBlobCount !== blobSidecars.length) { + const expectedSlots = blocks.map((block) => block.message.slot); + const extraBlocks = new Map(); + for (const blobSidecar of blobSidecars) { + const blobSlot = blobSidecar.signedBlockHeader.message.slot; + if (!expectedSlots.includes(blobSlot)) { + const extra = extraBlocks.get(blobSlot) ?? []; + extra.push(blobSidecar.index); + extraBlocks.set(blobSlot, extra); + extraBlobCount++; + } + } + if (extraBlocks.size) { + for (const [slot, extraIndices] of extraBlocks) { + extraBlobsDescription.push(`${slot}${prettyPrintIndices(extraIndices)}`); + } + } + } + + return { + expectedBlobCount, + missingBlobCount, + extraBlobCount, + duplicateBlobCount, + missingBlobsDescription, + extraBlobsDescription, + duplicateBlobsDescription, + }; +} + +type ColumnComparisonResponse = { + missingByIndex: Map; + extraByIndex: Map; +}; +/** + * Should not be called directly. Only exported for unit testing purposes + */ +export function compareColumnsByRangeRequestAndResponse( + columnRequest: fulu.DataColumnSidecarsByRangeRequest, + columnSidecars: fulu.DataColumnSidecars +): ColumnComparisonResponse { + const {startSlot, count, columns: expectedColumns} = columnRequest; + + const missingByIndex = new Map(); + const extraByIndex = new Map(); + + for (let slot = startSlot; slot < startSlot + count; slot++) { + const receivedIndices = columnSidecars + .filter((columnSidecar) => columnSidecar.signedBlockHeader.message.slot === slot) + .map((columnSidecar) => columnSidecar.index); + + for (const index of receivedIndices) { + if (!expectedColumns.includes(index)) { + const extraSlots = extraByIndex.get(index) ?? 
[];
+        extraSlots.push(slot);
+        extraByIndex.set(index, extraSlots);
+      }
+    }
+
+    for (const index of expectedColumns) {
+      if (!receivedIndices.includes(index)) {
+        const missingSlots = missingByIndex.get(index) ?? [];
+        missingSlots.push(slot);
+        missingByIndex.set(index, missingSlots);
+      }
+    }
+  }
+
+  return {
+    missingByIndex,
+    extraByIndex,
+  };
+}
+
+export enum DownloadByRangeErrorCode {
+  MISSING_BLOCKS_REQUEST = "DOWNLOAD_BY_RANGE_ERROR_MISSING_BLOCKS_REQUEST",
+  MISSING_BLOCKS_RESPONSE = "DOWNLOAD_BY_RANGE_ERROR_MISSING_BLOCKS_RESPONSE",
+  MISSING_BLOBS_REQUEST = "DOWNLOAD_BY_RANGE_ERROR_MISSING_BLOBS_REQUEST",
+  MISSING_COLUMNS_REQUEST = "DOWNLOAD_BY_RANGE_ERROR_MISSING_COLUMNS_REQUEST",
+  MISSING_BLOBS_RESPONSE = "DOWNLOAD_BY_RANGE_ERROR_MISSING_BLOBS_RESPONSE",
+  MISSING_COLUMNS_RESPONSE = "DOWNLOAD_BY_RANGE_ERROR_MISSING_COLUMNS_RESPONSE",
+  INVALID_DATA_REQUEST = "DOWNLOAD_BY_RANGE_ERROR_INVALID_DATA_REQUEST",
+  MISSING_DATA_REQUEST = "DOWNLOAD_BY_RANGE_ERROR_MISSING_DATA_REQUEST",
+  START_SLOT_MISMATCH = "DOWNLOAD_BY_RANGE_ERROR_START_SLOT_MISMATCH",
+  COUNT_MISMATCH = "DOWNLOAD_BY_RANGE_ERROR_COUNT_MISMATCH",
+  REQ_RESP_ERROR = "DOWNLOAD_BY_RANGE_ERROR_REQ_RESP_ERROR",
+  MISSING_BLOCKS = "DOWNLOAD_BY_RANGE_ERROR_MISSING_BLOCKS",
+  EXTRA_BLOCKS = "DOWNLOAD_BY_RANGE_ERROR_EXTRA_BLOCKS",
+  DUPLICATE_BLOCKS = "DOWNLOAD_BY_RANGE_ERROR_DUPLICATE_BLOCKS",
+  MISSING_BLOBS = "DOWNLOAD_BY_RANGE_ERROR_MISSING_BLOBS",
+  EXTRA_BLOBS = "DOWNLOAD_BY_RANGE_ERROR_EXTRA_BLOBS",
+  DUPLICATE_BLOBS = "DOWNLOAD_BY_RANGE_ERROR_DUPLICATE_BLOBS",
+  MISSING_COLUMNS = "DOWNLOAD_BY_RANGE_ERROR_MISSING_COLUMNS",
+  EXTRA_COLUMNS_ALL_SLOTS = "DOWNLOAD_BY_RANGE_ERROR_EXTRA_COLUMNS_ALL_SLOTS",
+  EXTRA_COLUMNS_SOME_SLOTS = "DOWNLOAD_BY_RANGE_ERROR_EXTRA_COLUMNS_SOME_SLOTS",
+  PEER_CUSTODY_FAILURE = "DOWNLOAD_BY_RANGE_ERROR_PEER_CUSTODY_FAILURE",
+  CACHING_ERROR = "DOWNLOAD_BY_RANGE_CACHING_ERROR",
+}
+
+export type DownloadByRangeErrorType =
+  | {
+      code:
+        | DownloadByRangeErrorCode.MISSING_BLOCKS_REQUEST
+        | DownloadByRangeErrorCode.MISSING_BLOCKS_RESPONSE
+        | DownloadByRangeErrorCode.MISSING_BLOBS_REQUEST
+        | DownloadByRangeErrorCode.MISSING_BLOBS_RESPONSE
+        | DownloadByRangeErrorCode.MISSING_COLUMNS_REQUEST
+        | DownloadByRangeErrorCode.MISSING_COLUMNS_RESPONSE
+        | DownloadByRangeErrorCode.INVALID_DATA_REQUEST
+        | DownloadByRangeErrorCode.MISSING_DATA_REQUEST;
+      slotRange: string;
+    }
+  | {
+      code: DownloadByRangeErrorCode.START_SLOT_MISMATCH;
+      blockStartSlot: number;
+      dataStartSlot: number;
+    }
+  | {
+      code: DownloadByRangeErrorCode.COUNT_MISMATCH;
+      blockCount: number;
+      dataCount: number;
+    }
+  | {
+      code: DownloadByRangeErrorCode.REQ_RESP_ERROR;
+      peerId: string;
+      slotRange: string;
+    }
+  | {
+      code: DownloadByRangeErrorCode.CACHING_ERROR;
+      peerId: string;
+      message: string;
+    }
+  | {
+      code: DownloadByRangeErrorCode.MISSING_BLOCKS;
+      peerId: string;
+      missingSlots: string;
+    }
+  | {
+      code: DownloadByRangeErrorCode.EXTRA_BLOCKS;
+      peerId: string;
+      extraSlots: string;
+    }
+  | {
+      code: DownloadByRangeErrorCode.DUPLICATE_BLOCKS;
+      peerId: string;
+    }
+  | {
+      code: DownloadByRangeErrorCode.MISSING_BLOBS;
+      peerId: string;
+      expectedBlobCount: number;
+      missingBlobCount: number;
+      slotsWithIndices: string;
+    }
+  | {
+      code: DownloadByRangeErrorCode.EXTRA_BLOBS;
+      peerId: string;
+      expectedBlobCount: number;
+      extraBlobCount: number;
+      slotsWithIndices: string;
+    }
+  | {
+      code: 
DownloadByRangeErrorCode.DUPLICATE_BLOBS; + peerId: string; + expectedBlobCount: number; + duplicateBlobCount: number; + slotsWithIndices: string; + } + | { + code: DownloadByRangeErrorCode.MISSING_COLUMNS; + peerId: string; + missingColumnCount: number; + indicesWithSlots: string; + } + | { + code: DownloadByRangeErrorCode.EXTRA_COLUMNS_ALL_SLOTS; + peerId: string; + extraColumns: string; + } + | { + code: DownloadByRangeErrorCode.EXTRA_COLUMNS_SOME_SLOTS; + peerId: string; + extraColumnCount: number; + indicesWithSlots: string; + } + | { + code: DownloadByRangeErrorCode.PEER_CUSTODY_FAILURE; + peerId: string; + missingColumns: string; + }; + +export class DownloadByRangeError extends LodestarError {} From b74691bc5ad5e1e6a2a0d19f09b4ef6472d1a699 Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Thu, 14 Aug 2025 05:09:31 +0700 Subject: [PATCH 004/173] test: add tests for getByColumn and downloadByRange. need to get them fully working --- .../chain/seenCache/seenBlockInput.test.ts | 144 +++++- .../unit/sync/utils/downloadByRange.test.ts | 448 ++++++++++++++++++ 2 files changed, 582 insertions(+), 10 deletions(-) create mode 100644 packages/beacon-node/test/unit/sync/utils/downloadByRange.test.ts diff --git a/packages/beacon-node/test/unit/chain/seenCache/seenBlockInput.test.ts b/packages/beacon-node/test/unit/chain/seenCache/seenBlockInput.test.ts index 5c0cc42361c0..1016859c49cf 100644 --- a/packages/beacon-node/test/unit/chain/seenCache/seenBlockInput.test.ts +++ b/packages/beacon-node/test/unit/chain/seenCache/seenBlockInput.test.ts @@ -8,6 +8,7 @@ import { BlockInputSource, IBlockInput, isBlockInputBlobs, + isBlockInputColumns, isBlockInputPreDeneb, } from "../../../../src/chain/blocks/blockInput/index.js"; import {ChainEvent, ChainEventEmitter} from "../../../../src/chain/emitter.js"; @@ -306,16 +307,15 @@ describe("SeenBlockInputCache", () => { }); expect(isBlockInputBlobs(blockInput)).toBeTruthy(); }); - // TODO(fulu): need to turn this on once we have custodyConfig available with peerDAS branch - // it("should return a BlockInputColumns", () => { - // const {block} = buildBlockTestSet(ForkName.fulu); - // const blockInput = cache.getByBlock({ - // block, - // source: BlockInputSource.gossip, - // seenTimestampSec: Date.now(), - // }); - // expect(isBlockInputColumns(blockInput)).toBeTruthy(); - // }); + it("should return a BlockInputColumns", () => { + const {block} = buildBlockTestSet(ForkName.fulu); + const blockInput = cache.getByBlock({ + block, + source: BlockInputSource.gossip, + seenTimestampSec: Date.now(), + }); + expect(isBlockInputColumns(blockInput)).toBeTruthy(); + }); }); it("should return the same BlockInput for an existing block root", () => { const {block, rootHex} = buildBlockTestSet(ForkName.capella); @@ -370,6 +370,20 @@ describe("SeenBlockInputCache", () => { expect(blockInput1).toBe(blockInput2); }); + it("should return the correct BlockInput for a BlockInput created by column", () => { + // const {block, columnSidecar} = buildBlockAndBlobTestSet(ForkName.fulu); + // const blockInput1 = cache.getByColumn({ + // columnSidecar, + // source: BlockInputSource.gossip, + // seenTimestampSec: Date.now(), + // }); + // const blockInput2 = cache.getByBlock({ + // block, + // source: BlockInputSource.gossip, + // seenTimestampSec: Date.now(), + // }); + // expect(blockInput1).toBe(blockInput2); + }); }); describe("getByBlob()", () => { it("should return a new BlockInput for a new block root", () => { @@ -481,4 +495,114 @@ describe("SeenBlockInputCache", () => { 
).toThrow(); }); }); + // describe("getByColumn()", () => { + // it("should return a new BlockInput for a new block root", () => { + // const {rootHex, blobSidecar} = buildBlockAndBlobTestSet(ForkName.electra); + // expect(cache.get(rootHex)).toBeUndefined(); + // const blockInput = cache.getByBlob({ + // blobSidecar, + // source: BlockInputSource.gossip, + // seenTimestampSec: Date.now(), + // }); + // expect(cache.get(rootHex)).toBe(blockInput); + // }); + // it("should return the same BlockInput for an existing block root", () => { + // const {rootHex, blobSidecar} = buildBlockAndBlobTestSet(ForkName.electra); + + // const blockInput1 = cache.getByBlob({ + // blobSidecar, + // source: BlockInputSource.gossip, + // seenTimestampSec: Date.now(), + // }); + // expect(cache.get(rootHex)).toBe(blockInput1); + // const blockInput2 = cache.getByBlob({ + // blobSidecar, + // source: BlockInputSource.gossip, + // seenTimestampSec: Date.now(), + // }); + // expect(blockInput1).toBe(blockInput2); + // }); + // it("should throw if attempting to add a blob to wrong type of BlockInput", () => { + // const {block} = buildBlockTestSet(ForkName.capella); + // const blockInput = cache.getByBlock({ + // block, + // source: BlockInputSource.gossip, + // seenTimestampSec: Date.now(), + // }); + // expect(isBlockInputPreDeneb(blockInput)).toBeTruthy(); + + // const {blobSidecar} = buildBlockAndBlobTestSet(ForkName.electra); + // blobSidecar.signedBlockHeader = signedBlockToSignedHeader(config, block); + // expect(() => + // cache.getByBlob({blobSidecar, source: BlockInputSource.gossip, seenTimestampSec: Date.now()}) + // ).toThrow(); + // }); + // it("should add blob to an existing BlockInput", () => { + // const {block, blobSidecar} = buildBlockAndBlobTestSet(ForkName.electra); + + // const blockInput1 = cache.getByBlock({ + // block, + // source: BlockInputSource.gossip, + // seenTimestampSec: Date.now(), + // }); + // const blockInput2 = cache.getByBlob({ + // blobSidecar, + // source: BlockInputSource.gossip, + // seenTimestampSec: Date.now(), + // }); + + // expect(blockInput1).toBe(blockInput2); + // expect(blockInput2.getBlobs()[0]).toBe(blobSidecar); + // }); + // it("should not throw for a BlockInput with an existing blob", () => { + // const {rootHex, blobSidecar} = buildBlockAndBlobTestSet(ForkName.electra); + + // expect(cache.get(rootHex)).toBeUndefined(); + // const blockInput = cache.getByBlob({ + // blobSidecar, + // source: BlockInputSource.gossip, + // seenTimestampSec: Date.now(), + // }); + // expect(cache.get(rootHex)).toBe(blockInput); + // expect(() => + // blockInput.addBlob({ + // blobSidecar, + // source: BlockInputSource.gossip, + // seenTimestampSec: Date.now(), + // blockRootHex: rootHex, + // }) + // ).toThrow(); + // expect(() => + // cache.getByBlob({ + // blobSidecar, + // source: BlockInputSource.gossip, + // seenTimestampSec: Date.now(), + // }) + // ).not.toThrow(); + // }); + // it("should throw for an existing blob with opts.throwGossipErrorIfAlreadyKnown", () => { + // const {rootHex, blobSidecar} = buildBlockAndBlobTestSet(ForkName.electra); + + // expect(cache.get(rootHex)).toBeUndefined(); + // const blockInput = cache.getByBlob( + // { + // blobSidecar, + // source: BlockInputSource.gossip, + // seenTimestampSec: Date.now(), + // }, + // {throwErrorIfAlreadyKnown: true} + // ); + // expect(cache.get(rootHex)).toBe(blockInput); + // expect(() => + // cache.getByBlob( + // { + // blobSidecar, + // source: BlockInputSource.gossip, + // seenTimestampSec: Date.now(), + 
// },
+  //         {throwErrorIfAlreadyKnown: true}
+  //       )
+  //     ).toThrow();
+  //   });
+  // });
 });
diff --git a/packages/beacon-node/test/unit/sync/utils/downloadByRange.test.ts b/packages/beacon-node/test/unit/sync/utils/downloadByRange.test.ts
new file mode 100644
index 000000000000..381e942dc9f2
--- /dev/null
+++ b/packages/beacon-node/test/unit/sync/utils/downloadByRange.test.ts
@@ -0,0 +1,448 @@
+import {ForkName} from "@lodestar/params";
+import {DataAvailabilityStatus} from "@lodestar/state-transition";
+import {SignedBeaconBlock, WithBytes, deneb, ssz} from "@lodestar/types";
+import {Mock, beforeAll, beforeEach, describe, expect, it, vi} from "vitest";
+import {ChainEventEmitter} from "../../../../src/chain/index.js";
+import {SeenBlockInputCache} from "../../../../src/chain/seenCache/seenBlockInput.js";
+import {INetwork} from "../../../../src/network/index.js";
+import {
+  DownloadByRangeRequests,
+  DownloadByRangeResponses,
+  compareBlobsByRangeRequestAndResponse,
+  compareBlockByRangeRequestAndResponse,
+  requestByRange,
+  validateRequests,
+} from "../../../../src/sync/utils/downloadByRange.js";
+import {Clock} from "../../../../src/util/clock.js";
+import {getMockedLogger} from "../../../mocks/loggerMock.js";
+import {buildBatchOfBlockWithBlobs, config, slots} from "../../../utils/blocksAndData.js";
+
+describe("downloadByRange", () => {
+  const peerIdStr = "0x1234567890abcdef";
+  let cache: SeenBlockInputCache;
+  let network: INetwork;
+  const logger = getMockedLogger();
+
+  const startSlot = slots.deneb;
+  const count = 32;
+  const minBlobs = 2;
+  const maxBlobs = 2;
+  let requests!: DownloadByRangeRequests;
+  let networkResponse!: {
+    blocks: WithBytes<SignedBeaconBlock>[];
+    blobSidecars: deneb.BlobSidecars;
+  };
+  let expected!: DownloadByRangeResponses;
+  beforeAll(() => {
+    // expectedBlobCount = count * minBlobs;
+    requests = {
+      blocksRequest: {startSlot, count, step: 1},
+      blobsRequest: {startSlot, count},
+    };
+    const blockAndBlobs = buildBatchOfBlockWithBlobs(ForkName.deneb, startSlot, count, minBlobs, maxBlobs);
+    const blobSidecars = blockAndBlobs.flatMap(({blobSidecars}) => blobSidecars);
+    networkResponse = {
+      blocks: blockAndBlobs.map(({block}) => ({bytes: new Uint8Array(), data: block})),
+      blobSidecars,
+    };
+    expected = {
+      blocks: blockAndBlobs.map(({block}) => block),
+      blobSidecars,
+    };
+  });
+
+  beforeEach(() => {
+    const abortController = new AbortController();
+    const signal = abortController.signal;
+    cache = new SeenBlockInputCache({
+      config,
+      clock: new Clock({config, signal, genesisTime: Math.floor(Date.now() / 1000)}),
+      chainEvents: new ChainEventEmitter(),
+      signal,
+      metrics: null,
+      logger,
+    });
+    network = {
+      sendBeaconBlocksByRange: vi.fn(),
+      sendBlobSidecarsByRange: vi.fn(),
+      // sendDataColumnSidecarsByRange: vi.fn(),
+    } as unknown as INetwork;
+  });
+
+  // describe("downloadAndCacheByRange", () => {});
+  // describe("downloadByRange", () => {});
+  describe("validateRequests", () => {
+    it("should return a slot-range string for unavailable data", () => {
+      expect(
+        typeof validateRequests({
+          config,
+          dataAvailabilityStatus: DataAvailabilityStatus.PreData,
+          blocksRequest: {startSlot: slots.capella, count: 1},
+        }) === "string"
+      ).toBeTruthy();
+      expect(
+        typeof validateRequests({
+          config,
+          dataAvailabilityStatus: DataAvailabilityStatus.OutOfRange,
+          blocksRequest: {startSlot: slots.deneb, count: 1},
+        }) === "string" 
+ ).toBeTruthy(); + }); + it("should throw for data requests outside of the data availability window", () => { + expect(() => + validateRequests({ + config, + dataAvailabilityStatus: DataAvailabilityStatus.OutOfRange, + blocksRequest: {startSlot: slots.deneb, count: 1}, + blobsRequest: {startSlot: slots.deneb, count: 1}, + }) + ).toThrow("Cannot request data if it is not available"); + }); + it("should throw for missing data request within data availability window", () => { + expect(() => + validateRequests({ + config, + dataAvailabilityStatus: DataAvailabilityStatus.Available, + blocksRequest: {startSlot: slots.deneb, count: 1}, + }) + ).toThrow("Must request data if it is available"); + }); + it("should throw if requesting blobs and columns", () => { + expect(() => + validateRequests({ + config, + dataAvailabilityStatus: DataAvailabilityStatus.Available, + blocksRequest: {startSlot: slots.deneb, count: 1}, + blobsRequest: {startSlot: slots.deneb, count: 1}, + columnsRequest: {startSlot: slots.fulu, count: 1}, + }) + ).toThrow(); + }); + it("should throw for data request pre-deneb", () => { + expect(() => + validateRequests({ + config, + dataAvailabilityStatus: DataAvailabilityStatus.Available, + blocksRequest: {startSlot: slots.capella, count: 1}, + columnsRequest: {startSlot: slots.capella, count: 1}, + }) + ).toThrow("Cannot request data pre-deneb"); + }); + it("should throw for missing blobsRequest on blob-fork when data is available", () => { + expect(() => + validateRequests({ + config, + dataAvailabilityStatus: DataAvailabilityStatus.Available, + blocksRequest: {startSlot: slots.deneb, count: 1}, + columnsRequest: {startSlot: slots.deneb, count: 1}, + }) + ).toThrow("Must request blobs for blob-only forks"); + }); + it("should throw for missing columnsRequest on column-fork when data is available", () => { + expect(() => + validateRequests({ + config, + dataAvailabilityStatus: DataAvailabilityStatus.Available, + blocksRequest: {startSlot: slots.fulu, count: 1}, + blobsRequest: {startSlot: slots.fulu, count: 1}, + }) + ).toThrow("Must request columns for forks with columns"); + }); + it("should throw for mismatch block/data startSlot", () => { + expect(() => + validateRequests({ + config, + dataAvailabilityStatus: DataAvailabilityStatus.Available, + blocksRequest: {startSlot: slots.deneb, count: 1}, + blobsRequest: {startSlot: slots.deneb + 1, count: 1}, + }) + ).toThrow(); + }); + it("should throw for mismatch block/data count", () => { + expect(() => + validateRequests({ + config, + dataAvailabilityStatus: DataAvailabilityStatus.Available, + blocksRequest: {startSlot: slots.deneb, count: 1}, + blobsRequest: {startSlot: slots.deneb, count: 2}, + }) + ).toThrow(); + }); + it("should return a slot-range string for properly formatted blob-fork requests", () => { + expect( + typeof validateRequests({ + config, + dataAvailabilityStatus: DataAvailabilityStatus.Available, + blocksRequest: {startSlot: slots.deneb, count: 1}, + blobsRequest: {startSlot: slots.deneb, count: 1}, + }) === "string" + ).toBeTruthy(); + }); + it("should return a slot-range string for properly formatted column-fork requests", () => { + expect( + typeof validateRequests({ + config, + dataAvailabilityStatus: DataAvailabilityStatus.Available, + blocksRequest: {startSlot: slots.fulu, count: 1}, + columnsRequest: {startSlot: slots.fulu, count: 1}, + }) === "string" + ).toBeTruthy(); + }); + }); + describe("requestByRange", () => { + it("should make block requests", async () => { + (network.sendBeaconBlocksByRange as 
Mock).mockResolvedValueOnce(networkResponse.blocks); + const response = await requestByRange({ + network, + peerIdStr, + blocksRequest: requests.blocksRequest, + }); + expect(network.sendBeaconBlocksByRange).toHaveBeenCalledWith(peerIdStr, requests.blocksRequest); + expect(response.blocks).toEqual(expected.blocks); + }); + it("should make blob requests", async () => { + (network.sendBlobSidecarsByRange as Mock).mockResolvedValueOnce(networkResponse.blobSidecars); + const response = await requestByRange({ + network, + peerIdStr, + blobsRequest: requests.blobsRequest, + }); + expect(network.sendBlobSidecarsByRange).toHaveBeenCalledWith(peerIdStr, requests.blobsRequest); + expect(response.blobSidecars).toEqual(expected.blobSidecars); + }); + // it("should make column requests", async () => { + // const response = await requestByRange({ + // network, + // peerIdStr, + // columnsRequest: requests.columnsRequest, + // }); + // expect(network.sendColumnSidecarsByRange).toHaveBeenCalledWith(peerIdStr, requests.columnsRequest); + // expect(response.columnSidecars).toBe(expected.columnSidecars); + // }); + it("should make concurrent block/blob/column requests from the same peer", async () => { + (network.sendBeaconBlocksByRange as Mock).mockResolvedValueOnce(networkResponse.blocks); + (network.sendBlobSidecarsByRange as Mock).mockResolvedValueOnce(networkResponse.blobSidecars); + const response = await requestByRange({ + network, + peerIdStr, + blocksRequest: requests.blocksRequest, + blobsRequest: requests.blobsRequest, + // columnsRequest: requests.columnsRequest, + }); + expect(network.sendBeaconBlocksByRange).toHaveBeenCalledWith(peerIdStr, requests.blocksRequest); + expect(network.sendBlobSidecarsByRange).toHaveBeenCalledWith(peerIdStr, requests.blobsRequest); + // expect(network.sendColumnSidecarsByRange).toHaveBeenCalledWith(peerIdStr, requests.columnsRequest); + expect(response.blocks).toEqual(expected.blocks); + expect(response.blobSidecars).toEqual(expected.blobSidecars); + // expect(response.columnSidecars).toBe(expected.columnSidecars); + }); + it("should throw if one of the calls fails", async () => { + (network.sendBeaconBlocksByRange as Mock).mockResolvedValueOnce(networkResponse.blocks); + const rejectionError = new Error("TEST_ERROR_MESSAGE"); + (network.sendBlobSidecarsByRange as Mock).mockRejectedValueOnce(rejectionError); + try { + await requestByRange({ + network, + peerIdStr, + blocksRequest: requests.blocksRequest, + blobsRequest: requests.blobsRequest, + // columnsRequest: requests.columnsRequest, + }); + expect.fail("Did not fail as expected"); + } catch (e) { + expect(e).toBe(rejectionError); + } finally { + expect(network.sendBeaconBlocksByRange).toHaveBeenCalledWith(peerIdStr, requests.blocksRequest); + expect(network.sendBlobSidecarsByRange).toHaveBeenCalledWith(peerIdStr, requests.blobsRequest); + } + }); + }); + describe("compareBlockByRangeRequestAndResponse", () => { + const block1 = ssz.capella.SignedBeaconBlock.defaultValue(); + block1.message.slot = slots.capella; + const block2 = ssz.capella.SignedBeaconBlock.defaultValue(); + block2.message.slot = slots.capella + 1; + const block3 = ssz.capella.SignedBeaconBlock.defaultValue(); + block3.message.slot = slots.capella + 2; + const block4 = ssz.capella.SignedBeaconBlock.defaultValue(); + block4.message.slot = slots.capella + 3; + const block5 = ssz.capella.SignedBeaconBlock.defaultValue(); + block5.message.slot = slots.capella + 4; + it("should always return an object", () => { + const response = 
compareBlockByRangeRequestAndResponse(
+        {
+          startSlot: slots.capella,
+          count: 0,
+        },
+        []
+      );
+      expect(response).toBeInstanceOf(Object);
+      expect(Object.keys(response).length).toEqual(0);
+    });
+    it("should correctly match request with response", () => {
+      const response = compareBlockByRangeRequestAndResponse(
+        {
+          startSlot: slots.capella,
+          count: 5,
+        },
+        [block1, block2, block3, block4, block5]
+      );
+      expect(response).toBeInstanceOf(Object);
+      expect(Object.keys(response).length).toEqual(0);
+    });
+    it("should return an empty extraSlots array for duplicates within the given range", () => {
+      const response = compareBlockByRangeRequestAndResponse(
+        {
+          startSlot: slots.capella,
+          count: 4,
+        },
+        [block1, block2, block3, block4, block4]
+      );
+      expect(response).toBeInstanceOf(Object);
+      expect(Object.keys(response).length).toEqual(1);
+      expect("extraSlots" in response).toBeTruthy();
+      expect(response.extraSlots).toBeInstanceOf(Array);
+      expect(response.extraSlots.length).toEqual(0);
+    });
+    it("should return the extra slots if more blocks than were requested", () => {
+      const response = compareBlockByRangeRequestAndResponse(
+        {
+          startSlot: slots.capella,
+          count: 4,
+        },
+        [block1, block2, block3, block4, block5]
+      );
+      expect(response).toBeInstanceOf(Object);
+      expect(Object.keys(response).length).toEqual(1);
+      expect("extraSlots" in response).toBeTruthy();
+      expect(response.extraSlots).toBeInstanceOf(Array);
+      expect(response.extraSlots.length).toEqual(1);
+      expect(response.extraSlots[0]).toEqual(block5.message.slot);
+    });
+    describe("should return the missing slots if fewer blocks than were requested", () => {
+      it("beginning of range", () => {
+        const response = compareBlockByRangeRequestAndResponse(
+          {
+            startSlot: slots.capella,
+            count: 5,
+          },
+          [block2, block3, block4, block5]
+        );
+        expect(response).toBeInstanceOf(Object);
+        expect(Object.keys(response).length).toEqual(1);
+        expect("missingSlots" in response).toBeTruthy();
+        expect(response.missingSlots).toBeInstanceOf(Array);
+        expect(response.missingSlots.length).toEqual(1);
+        expect(response.missingSlots[0]).toEqual(block1.message.slot);
+      });
+      it("middle of range", () => {
+        const response = compareBlockByRangeRequestAndResponse(
+          {
+            startSlot: slots.capella,
+            count: 5,
+          },
+          [block1, block2, block4, block5]
+        );
+        expect(response).toBeInstanceOf(Object);
+        expect(Object.keys(response).length).toEqual(1);
+        expect("missingSlots" in response).toBeTruthy();
+        expect(response.missingSlots).toBeInstanceOf(Array);
+        expect(response.missingSlots.length).toEqual(1);
+        expect(response.missingSlots[0]).toEqual(block3.message.slot);
+      });
+      it("end of range", () => {
+        const response = compareBlockByRangeRequestAndResponse(
+          {
+            startSlot: slots.capella,
+            count: 5,
+          },
+          [block1, block2, block3, block4]
+        );
+        expect(response).toBeInstanceOf(Object);
+        expect(Object.keys(response).length).toEqual(1);
+        expect("missingSlots" in response).toBeTruthy();
+        expect(response.missingSlots).toBeInstanceOf(Array);
+        expect(response.missingSlots.length).toEqual(1);
+        expect(response.missingSlots[0]).toEqual(block5.message.slot);
+      });
+    });
+  });
+  describe("compareBlobsByRangeRequestAndResponse", () => {
+    it("should return a properly formatted object", () => {
+      const response = compareBlobsByRangeRequestAndResponse([], []);
+      expect(response).instanceOf(Object);
+      expect(Object.keys(response).length).toEqual(7);
+      expect(response.expectedBlobCount).toEqual(0);
+      expect(response.missingBlobCount).toEqual(0);
+      
expect(response.extraBlobCount).toEqual(0);
+      expect(response.duplicateBlobCount).toEqual(0);
+      expect(response.missingBlobsDescription).toBeInstanceOf(Array);
+      expect(response.missingBlobsDescription.length).toEqual(0);
+      expect(response.extraBlobsDescription).toBeInstanceOf(Array);
+      expect(response.extraBlobsDescription.length).toEqual(0);
+      expect(response.duplicateBlobsDescription).toBeInstanceOf(Array);
+      expect(response.duplicateBlobsDescription.length).toEqual(0);
+    });
+    it("should identify requested blobs missing from response", () => {
+      const response = compareBlobsByRangeRequestAndResponse(expected.blocks, expected.blobSidecars?.slice(0, -4));
+      expect(response.missingBlobCount).toEqual(4);
+      expect(response.missingBlobsDescription.length).toEqual(2);
+      const lastSlot = startSlot + count - 1;
+      expect(response.missingBlobsDescription[0]).toEqual(`${lastSlot - 1}[2]`);
+      expect(response.missingBlobsDescription[1]).toEqual(`${lastSlot}[0,1,2]`);
+    });
+    it("should identify extra blobs from blocks that were requested", () => {
+      // biome-ignore lint/style/noNonNullAssertion:
+      const [blob0, blob1, blob2, blob3] = expected.blobSidecars!;
+      const badBlob = ssz.deneb.BlobSidecar.clone(blob3);
+      badBlob.signedBlockHeader.message.slot = blob2.signedBlockHeader.message.slot;
+      badBlob.index = 3;
+      const response = compareBlobsByRangeRequestAndResponse(expected.blocks?.slice(0, 1), [
+        blob0,
+        blob1,
+        blob2,
+        badBlob,
+      ]);
+      expect(response.extraBlobCount).toEqual(1);
+      expect(response.extraBlobsDescription.length).toEqual(1);
+      expect(response.extraBlobsDescription[0]).toEqual(`${expected.blocks[0].message.slot}[3]`);
+    });
+    it("should identify duplicate blobs from blocks that were requested", () => {
+      // biome-ignore lint/style/noNonNullAssertion:
+      const [blob0, blob1, blob2] = expected.blobSidecars!;
+      const badBlob = ssz.deneb.BlobSidecar.clone(blob2);
+      const response = compareBlobsByRangeRequestAndResponse(expected.blocks?.slice(0, 1), [
+        blob0,
+        blob1,
+        blob2,
+        badBlob,
+      ]);
+      expect(response.duplicateBlobCount).toEqual(1);
+      expect(response.duplicateBlobsDescription.length).toEqual(1);
+      expect(response.duplicateBlobsDescription[0]).toEqual(`${expected.blocks[0].message.slot}[2]`);
+    });
+    it("should identify extra blobs from blocks that were not requested", () => {
+      const response = compareBlobsByRangeRequestAndResponse(
+        expected.blocks?.slice(0, 1),
+        expected.blobSidecars?.slice(0, 6)
+      );
+      expect(response.extraBlobCount).toEqual(3);
+      expect(response.extraBlobsDescription.length).toEqual(1);
+      expect(response.extraBlobsDescription[0]).toEqual(`${expected.blocks[1].message.slot}[0,1,2]`);
+    });
+  });
+  describe("validateResponse", () => {
+    it("should throw if there are no blocks to validate", () => {});
+    it("should throw for responses missing block from requested slots", () => {});
+    it("should throw for extra block from slots that were not requested", () => {});
+    it("should throw for duplicate blocks from requested slots", () => {});
+
+    it("should throw if there are no blobs but there was a blobsRequest", () => {});
+    it("should throw for missing blobs in slots that were requested", () => {});
+
+    it("should throw if there are no columns but there was a columnsRequest", () => {});
+  });
+  // describe("compareColumnsByRangeRequestAndResponse", () => {});
+  // describe("compareByRangeRequestsToResponse", () => {});
+});

From 8ef928b6d21804a66664e50dad5ab4aea00e38c8 Mon Sep 17 00:00:00 2001
From: matthewkeil
Date: Thu, 14 Aug 2025 05:10:04 +0700
Subject: [PATCH 
005/173] wip: add roughed out downloadByRoot --- .../src/sync/utils/downloadByRoot.ts | 182 ++++++++++++++++++ 1 file changed, 182 insertions(+) create mode 100644 packages/beacon-node/src/sync/utils/downloadByRoot.ts diff --git a/packages/beacon-node/src/sync/utils/downloadByRoot.ts b/packages/beacon-node/src/sync/utils/downloadByRoot.ts new file mode 100644 index 000000000000..1aa07b35c6ec --- /dev/null +++ b/packages/beacon-node/src/sync/utils/downloadByRoot.ts @@ -0,0 +1,182 @@ +import {ChainForkConfig} from "@lodestar/config"; +import {signedBlockToSignedHeader} from "@lodestar/state-transition"; +import {deneb} from "@lodestar/types"; +import {LodestarError, fromHex, prettyBytes, toHex} from "@lodestar/utils"; +import {BlockInputSource, DAType, IBlockInput, isBlockInputBlobs} from "../../chain/blocks/blockInput/index.js"; +import {SeenBlockInputCache} from "../../chain/seenCache/seenBlockInput.js"; +import {IExecutionEngine} from "../../execution/index.js"; +import {INetwork} from "../../network/index.js"; +import {computeInclusionProof} from "../../util/blobs.js"; +import {PeerIdStr} from "../../util/peerId.js"; +import { + BlockInputSyncCacheItem, + PendingBlockInput, + getBlockInputSyncCacheItemRootHex, + isPendingBlockInput, +} from "../types.js"; + +export type DownloadBlockInputByRootProps = { + config: ChainForkConfig; + network: INetwork; + cache: SeenBlockInputCache; + executionEngine?: IExecutionEngine; + pending: BlockInputSyncCacheItem; + peerIdStr: PeerIdStr; +}; + +export async function downloadBlockInputByRoot({ + config, + network, + cache, + executionEngine, + pending, + peerIdStr, +}: DownloadBlockInputByRootProps): Promise { + if (!isPendingBlockInput(pending) || !pending.blockInput.hasBlock()) { + pending = await downloadAndCacheBlock({ + network, + cache, + pending, + peerIdStr, + }); + } + + if (!pending.blockInput.hasAllData()) { + await downloadAndCacheData({ + config, + network, + executionEngine, + peerIdStr, + blockInput: pending.blockInput, + }); + } + + return pending; +} + +export async function downloadAndCacheBlock({ + network, + cache, + pending, + peerIdStr, +}: Omit): Promise { + const blockRootHex = getBlockInputSyncCacheItemRootHex(pending); + const blockRoot = fromHex(blockRootHex); + const [response] = await network.sendBeaconBlocksByRoot(peerIdStr, [blockRoot]); + if (isPendingBlockInput(pending)) { + pending.blockInput.addBlock({ + blockRootHex, + block: response.data, + source: { + seenTimestampSec: Date.now() / 1000, + source: BlockInputSource.byRoot, + peerIdStr, + }, + }); + return pending; + } + + const blockInput = cache.getByBlock({ + block: response.data, + source: BlockInputSource.byRoot, + seenTimestampSec: Date.now() / 1000, + peerIdStr, + }); + return { + status: pending.status, + blockInput, + timeAddedSec: pending.timeAddedSec, + peerIdStrings: pending.peerIdStrings, + timeSyncedSec: pending.timeSyncedSec, + }; +} + +export async function downloadAndCacheData({ + config, + network, + executionEngine, + blockInput, + peerIdStr, +}: Omit & {blockInput: IBlockInput}): Promise { + if (isBlockInputBlobs(blockInput)) { + const missingBlobsMeta = blockInput.getMissingBlobMeta(); + if (executionEngine) { + const forkName = blockInput.forkName; + const response = await executionEngine.getBlobs( + forkName, + missingBlobsMeta.map(({versionHash}) => versionHash) + ); + const signedBeaconBlock = blockInput.getBlock(); + const blockBody = signedBeaconBlock.message.body; + for (const [requestIndex, blobAndProof] of response.entries()) { + if 
(blobAndProof) {
+          const {blob, proof} = blobAndProof;
+          const {index} = missingBlobsMeta[requestIndex];
+          const kzgCommitmentInclusionProof = computeInclusionProof(forkName, blockBody, index);
+          const blobSidecar: deneb.BlobSidecar = {
+            blob,
+            index,
+            kzgProof: proof,
+            kzgCommitment: blockBody.blobKzgCommitments[index],
+            kzgCommitmentInclusionProof,
+            signedBlockHeader: signedBlockToSignedHeader(config, signedBeaconBlock),
+          };
+          blockInput.addBlob({
+            blobSidecar,
+            blockRootHex: blockInput.blockRootHex,
+            seenTimestampSec: Date.now() / 1000,
+            source: BlockInputSource.engine,
+          });
+        }
+      }
+
+      if (blockInput.hasAllData()) {
+        return;
+      }
+    }
+
+    const response = await network.sendBlobSidecarsByRoot(
+      peerIdStr,
+      missingBlobsMeta.map(({blockRoot, index}) => ({blockRoot, index}))
+    );
+    const seenTimestampSec = Date.now() / 1000;
+
+    for (const blobSidecar of response) {
+      const blockRoot = config
+        .getForkTypes(blobSidecar.signedBlockHeader.message.slot)
+        .BeaconBlockHeader.hashTreeRoot(blobSidecar.signedBlockHeader.message);
+      blockInput.addBlob({
+        blobSidecar,
+        peerIdStr,
+        seenTimestampSec,
+        blockRootHex: toHex(blockRoot),
+        source: BlockInputSource.byRoot,
+      });
+    }
+
+    return;
+  }
+
+  throw new DownloadByRootError({
+    code: DownloadByRootErrorCode.INVALID_BLOCK_INPUT_TYPE,
+    blockRoot: prettyBytes(blockInput.blockRootHex),
+    type: blockInput.type,
+  });
+}
+
+export enum DownloadByRootErrorCode {
+  INVALID_BLOCK_INPUT_TYPE = "DOWNLOAD_BY_ROOT_ERROR_INVALID_BLOCK_INPUT_TYPE",
+  BLOCK_NOT_DOWNLOADED = "DOWNLOAD_BY_ROOT_ERROR_BLOCK_NOT_DOWNLOADED",
+}
+export type DownloadByRootErrorType =
+  | {
+      code: DownloadByRootErrorCode.INVALID_BLOCK_INPUT_TYPE;
+      blockRoot: string;
+      type: DAType;
+    }
+  | {
+      code: DownloadByRootErrorCode.BLOCK_NOT_DOWNLOADED;
+      blockRoot: string;
+    };
+
+export class DownloadByRootError extends LodestarError {}

From 1b6046d57b1645062bf54b9403161fb06b5570ce Mon Sep 17 00:00:00 2001
From: Cayman
Date: Thu, 14 Aug 2025 08:08:17 -0400
Subject: [PATCH 006/173] chore: get src building

---
 packages/beacon-node/src/sync/types.ts        | 35 +++++++++++++++++++
 .../src/sync/utils/downloadByRange.ts         |  2 +-
 .../src/sync/utils/downloadByRoot.ts          |  3 +-
 3 files changed, 38 insertions(+), 2 deletions(-)
 create mode 100644 packages/beacon-node/src/sync/types.ts

diff --git a/packages/beacon-node/src/sync/types.ts b/packages/beacon-node/src/sync/types.ts
new file mode 100644
index 000000000000..c8698fb0dfe5
--- /dev/null
+++ b/packages/beacon-node/src/sync/types.ts
@@ -0,0 +1,35 @@
+import {IBlockInput} from "../chain/blocks/blockInput/index.js";
+import {RootHex} from "@lodestar/types";
+
+export enum PendingBlockInputStatus {
+  pending = "pending",
+  fetching = "fetching",
+  downloaded = "downloaded",
+  processing = "processing",
+}
+
+export type PendingBlockInput = {
+  status: PendingBlockInputStatus;
+  blockInput: IBlockInput;
+  timeAddedSec: number;
+  timeSyncedSec?: number;
+  peerIdStrings: Set;
+};
+
+export type PendingRootHex = {
+  status: PendingBlockInputStatus;
+  rootHex: RootHex;
+  timeAddedSec: number;
+  timeSyncedSec?: number;
+  peerIdStrings: Set;
+};
+
+export type BlockInputSyncCacheItem = PendingBlockInput | PendingRootHex;
+
+export function isPendingBlockInput(pending: BlockInputSyncCacheItem): pending is PendingBlockInput {
+  return "blockInput" in pending;
+}
+
+export function getBlockInputSyncCacheItemRootHex(block: BlockInputSyncCacheItem): RootHex {
+  return isPendingBlockInput(block) ? 
block.blockInput.blockRootHex : block.rootHex; +} diff --git a/packages/beacon-node/src/sync/utils/downloadByRange.ts b/packages/beacon-node/src/sync/utils/downloadByRange.ts index a8c584bed8c1..f75d08d6a6b2 100644 --- a/packages/beacon-node/src/sync/utils/downloadByRange.ts +++ b/packages/beacon-node/src/sync/utils/downloadByRange.ts @@ -1,7 +1,7 @@ import {ChainForkConfig} from "@lodestar/config"; import {ForkPostDeneb, isForkPostDeneb, isForkPostFulu} from "@lodestar/params"; import {DataAvailabilityStatus} from "@lodestar/state-transition"; -import {RootHex, SignedBeaconBlock, Slot, WithBytes, deneb, fulu, phase0} from "@lodestar/types"; +import {RootHex, SignedBeaconBlock, Slot, deneb, fulu, phase0} from "@lodestar/types"; import {LodestarError, Logger, prettyBytes, prettyPrintIndices} from "@lodestar/utils"; import {BlockInputSource, IBlockInput} from "../../chain/blocks/blockInput/index.js"; import {SeenBlockInputCache} from "../../chain/seenCache/seenBlockInput.js"; diff --git a/packages/beacon-node/src/sync/utils/downloadByRoot.ts b/packages/beacon-node/src/sync/utils/downloadByRoot.ts index 1aa07b35c6ec..b86429f48d0f 100644 --- a/packages/beacon-node/src/sync/utils/downloadByRoot.ts +++ b/packages/beacon-node/src/sync/utils/downloadByRoot.ts @@ -1,4 +1,5 @@ import {ChainForkConfig} from "@lodestar/config"; +import {ForkPreFulu} from "@lodestar/params"; import {signedBlockToSignedHeader} from "@lodestar/state-transition"; import {deneb} from "@lodestar/types"; import {LodestarError, fromHex, prettyBytes, toHex} from "@lodestar/utils"; @@ -101,7 +102,7 @@ export async function downloadAndCacheData({ if (isBlockInputBlobs(blockInput)) { const missingBlobsMeta = blockInput.getMissingBlobMeta(); if (executionEngine) { - const forkName = blockInput.forkName; + const forkName = blockInput.forkName as ForkPreFulu; const response = await executionEngine.getBlobs( forkName, missingBlobsMeta.map(({versionHash}) => versionHash) From 0be6543c0d79d170a5f092cce3d133c2755e6120 Mon Sep 17 00:00:00 2001 From: Cayman Date: Thu, 14 Aug 2025 08:12:53 -0400 Subject: [PATCH 007/173] chore: fix some test build issues --- .../chain/seenCache/seenBlockInput.test.ts | 11 +- .../unit/sync/utils/downloadByRange.test.ts | 18 +- .../beacon-node/test/utils/blocksAndData.ts | 169 +++++++----------- 3 files changed, 85 insertions(+), 113 deletions(-) diff --git a/packages/beacon-node/test/unit/chain/seenCache/seenBlockInput.test.ts b/packages/beacon-node/test/unit/chain/seenCache/seenBlockInput.test.ts index 1016859c49cf..ed0bcd181674 100644 --- a/packages/beacon-node/test/unit/chain/seenCache/seenBlockInput.test.ts +++ b/packages/beacon-node/test/unit/chain/seenCache/seenBlockInput.test.ts @@ -1,3 +1,4 @@ +import {generateKeyPair} from "@libp2p/crypto/keys"; import {createChainForkConfig, defaultChainConfig} from "@lodestar/config"; import {ForkName, ForkPostCapella, ForkPostDeneb} from "@lodestar/params"; import {computeStartSlotAtEpoch, signedBlockToSignedHeader} from "@lodestar/state-transition"; @@ -13,10 +14,12 @@ import { } from "../../../../src/chain/blocks/blockInput/index.js"; import {ChainEvent, ChainEventEmitter} from "../../../../src/chain/emitter.js"; import {SeenBlockInputCache} from "../../../../src/chain/seenCache/seenBlockInput.js"; +import {computeNodeIdFromPrivateKey} from "../../../../src/network/subnets/index.js"; import {Clock} from "../../../../src/util/clock.js"; +import {CustodyConfig} from "../../../../src/util/dataColumns.js"; import {testLogger} from "../../../utils/logger.js"; 
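The BlockInputSyncCacheItem union introduced in sync/types.ts above is consumed through the isPendingBlockInput type guard. A minimal sketch of how a caller narrows the union; describeCacheItem is an illustrative name only, while the imported helpers and the hasBlock() call are the ones defined in this patch series:

import {
  BlockInputSyncCacheItem,
  getBlockInputSyncCacheItemRootHex,
  isPendingBlockInput,
} from "../types.js";

// Illustrative consumer: narrow the union before touching PendingBlockInput-only fields.
function describeCacheItem(item: BlockInputSyncCacheItem): string {
  // Safe on both variants: the helper reads rootHex or blockInput.blockRootHex as needed.
  const rootHex = getBlockInputSyncCacheItemRootHex(item);
  if (isPendingBlockInput(item)) {
    // PendingBlockInput: a BlockInput exists, though its block may still be un-downloaded.
    return `${rootHex} status=${item.status} hasBlock=${item.blockInput.hasBlock()}`;
  }
  // PendingRootHex: only the root is known so far.
  return `${rootHex} status=${item.status} block=unseen`;
}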
-describe("SeenBlockInputCache", () => { +describe("SeenBlockInputCache", async () => { let cache: SeenBlockInputCache; let abortController: AbortController; let chainEvents: ChainEventEmitter; @@ -32,6 +35,9 @@ describe("SeenBlockInputCache", () => { ELECTRA_FORK_EPOCH, FULU_FORK_EPOCH, }); + const privateKey = await generateKeyPair("secp256k1"); + const nodeId = computeNodeIdFromPrivateKey(privateKey); + const custodyConfig = new CustodyConfig({config, nodeId}); const slots: Record = { capella: computeStartSlotAtEpoch(CAPELLA_FORK_EPOCH), @@ -104,13 +110,14 @@ describe("SeenBlockInputCache", () => { } const logger = testLogger(); - beforeEach(() => { + beforeEach(async () => { chainEvents = new ChainEventEmitter(); abortController = new AbortController(); const signal = abortController.signal; const genesisTime = Math.floor(Date.now() / 1000); cache = new SeenBlockInputCache({ config, + custodyConfig, clock: new Clock({config, genesisTime, signal}), chainEvents, signal, diff --git a/packages/beacon-node/test/unit/sync/utils/downloadByRange.test.ts b/packages/beacon-node/test/unit/sync/utils/downloadByRange.test.ts index 381e942dc9f2..ef9034b86ad7 100644 --- a/packages/beacon-node/test/unit/sync/utils/downloadByRange.test.ts +++ b/packages/beacon-node/test/unit/sync/utils/downloadByRange.test.ts @@ -1,11 +1,10 @@ -import {ChainForkConfig, createChainForkConfig, defaultChainConfig} from "@lodestar/config"; import {ForkName} from "@lodestar/params"; import {DataAvailabilityStatus} from "@lodestar/state-transition"; import {SignedBeaconBlock, WithBytes, deneb, ssz} from "@lodestar/types"; import {Mock, beforeAll, beforeEach, describe, expect, it, vi} from "vitest"; -import {ChainEventEmitter} from "../../../../../src/chain/index.js"; -import {SeenBlockInputCache} from "../../../../../src/chain/seenCache/seenBlockInput.js"; -import {INetwork} from "../../../../../src/network/index.js"; +import {ChainEventEmitter} from "../../../../src/chain/index.js"; +import {SeenBlockInputCache} from "../../../../src/chain/seenCache/seenBlockInput.js"; +import {INetwork} from "../../../../src/network/index.js"; import { DownloadByRangeRequests, DownloadByRangeResponses, @@ -13,10 +12,10 @@ import { compareBlockByRangeRequestAndResponse, requestByRange, validateRequests, -} from "../../../../../src/sync/range/utils/downloadByRange.js"; -import {Clock} from "../../../../../src/util/clock.js"; -import {getMockedLogger} from "../../../../../test/mocks/loggerMock.js"; -import {buildBatchOfBlockWithBlobs, config, slots} from "../../../../utils/blocksAndData.js"; +} from "../../../../src/sync/utils/downloadByRange.js"; +import {Clock} from "../../../../src/util/clock.js"; +import {getMockedLogger} from "../../../../test/mocks/loggerMock.js"; +import {config, custodyConfig, generateChainOfBlockMaybeSidecars, slots} from "../../../utils/blocksAndData.js"; describe("downloadByRange", () => { const peerIdStr = "0x1234567890abcdef"; @@ -40,7 +39,7 @@ describe("downloadByRange", () => { blocksRequest: [{startSlot, count, step: 1}], blobsRequest: [{count, startSlot}], }; - const blockAndBlobs = buildBatchOfBlockWithBlobs(ForkName.deneb, startSlot, count, minBlobs, maxBlobs); + const blockAndBlobs = generateChainOfBlockMaybeSidecars(ForkName.deneb, startSlot, count, minBlobs, maxBlobs); const blobSidecars = blockAndBlobs.flatMap(({blobSidecars}) => blobSidecars); networkResponse = { blocks: blockAndBlobs.map(({block}) => ({bytes: new Uint8Array(), data: block})), @@ -57,6 +56,7 @@ describe("downloadByRange", () => { const 
signal = abortController.signal; cache = new SeenBlockInputCache({ config, + custodyConfig, clock: new Clock({config, signal, genesisTime: Math.floor(Date.now() / 1000)}), chainEvents: new ChainEventEmitter(), signal, diff --git a/packages/beacon-node/test/utils/blocksAndData.ts b/packages/beacon-node/test/utils/blocksAndData.ts index 438ae969c02a..997cd2c8ede1 100644 --- a/packages/beacon-node/test/utils/blocksAndData.ts +++ b/packages/beacon-node/test/utils/blocksAndData.ts @@ -1,35 +1,15 @@ import {randomBytes} from "node:crypto"; import {SIGNATURE_LENGTH_UNCOMPRESSED} from "@chainsafe/blst"; -import { - BYTES_PER_BLOB, - BYTES_PER_COMMITMENT, - BYTES_PER_FIELD_ELEMENT, - BYTES_PER_PROOF, -} from "@crate-crypto/node-eth-kzg"; -import {createChainForkConfig, defaultChainConfig} from "@lodestar/config"; -import { - ForkName, - ForkPostCapella, - ForkPostDeneb, - ForkPostFulu, - NUMBER_OF_COLUMNS, - isForkPostDeneb, - isForkPostFulu, -} from "@lodestar/params"; -import { - blindedOrFullBlockToHeader, - blockToHeader, - computeStartSlotAtEpoch, - signedBlockToSignedHeader, -} from "@lodestar/state-transition"; -import {BeaconBlock, SignedBeaconBlock, Slot, deneb, fulu, ssz} from "@lodestar/types"; +import {BYTES_PER_BLOB, BYTES_PER_FIELD_ELEMENT} from "@crate-crypto/node-eth-kzg"; +import {generateKeyPair} from "@libp2p/crypto/keys"; +import {ChainForkConfig, createChainForkConfig, defaultChainConfig} from "@lodestar/config"; +import {ForkPostCapella, ForkPostDeneb, ForkPostFulu, isForkPostDeneb, isForkPostFulu} from "@lodestar/params"; +import {computeStartSlotAtEpoch, signedBlockToSignedHeader} from "@lodestar/state-transition"; +import {SignedBeaconBlock, Slot, deneb, fulu, ssz} from "@lodestar/types"; import {toRootHex} from "@lodestar/utils"; -import {VersionedHashes} from "../../src/execution/index.js"; -import { - computeInclusionProof, - computeKzgCommitmentsInclusionProof, - kzgCommitmentToVersionedHash, -} from "../../src/util/blobs.js"; +import {computeNodeIdFromPrivateKey} from "../../src/network/subnets/index.js"; +import {computeInclusionProof} from "../../src/util/blobs.js"; +import {CustodyConfig, getDataColumnSidecarsFromBlock} from "../../src/util/dataColumns.js"; import {kzg} from "../../src/util/kzg.js"; import {ROOT_SIZE} from "../../src/util/sszBytes.js"; @@ -44,6 +24,9 @@ export const config = createChainForkConfig({ ELECTRA_FORK_EPOCH, FULU_FORK_EPOCH, }); +export const privateKey = await generateKeyPair("secp256k1"); +export const nodeId = computeNodeIdFromPrivateKey(privateKey); +export const custodyConfig = new CustodyConfig({config, nodeId}); export const slots: Record = { capella: computeStartSlotAtEpoch(CAPELLA_FORK_EPOCH), @@ -84,13 +67,13 @@ function generateProposerIndex(min = 0, max = 100_000): number { return generateRandomInt(max, min); } -function generateBeaconBlock({ - forkName, +function generateBeaconBlock({ + config, slot, parentRoot, -}: {forkName: F; slot?: Slot; parentRoot?: Uint8Array}): SignedBeaconBlock { - const block = ssz[forkName].SignedBeaconBlock.defaultValue(); - block.message.slot = slot ? slot : slots[forkName]; +}: {config: ChainForkConfig; slot?: Slot; parentRoot?: Uint8Array}): SignedBeaconBlock { + const block = config.getForkTypes(slot ?? 0).SignedBeaconBlock.defaultValue(); + block.message.slot = slot ? slot : 0; block.message.parentRoot = parentRoot ? 
parentRoot : Uint8Array.from(randomBytes(ROOT_SIZE)); block.message.stateRoot = Uint8Array.from(randomBytes(ROOT_SIZE)); block.message.proposerIndex = generateProposerIndex(); @@ -98,25 +81,23 @@ function generateBeaconBlock({ return block; } -function generateRoots( - forkName: F, - block: SignedBeaconBlock +function generateRoots( + config: ChainForkConfig, + block: SignedBeaconBlock ): { blockRoot: Uint8Array; rootHex: string; } { - const blockRoot = ssz[forkName].BeaconBlock.hashTreeRoot(block.message); + const blockRoot = config.getForkTypes(block.message.slot).BeaconBlock.hashTreeRoot(block.message); const rootHex = toRootHex(blockRoot); - const signed = ssz[forkName].SignedBeaconBlock.defaultValue(); return { - block: signed, blockRoot, rootHex, }; } function generateBlobSidecars( - forkName: ForkPostDeneb, + config: ChainForkConfig, block: SignedBeaconBlock, count: number, oomProtection = false @@ -130,12 +111,16 @@ function generateBlobSidecars( const signedBlockHeader = signedBlockToSignedHeader(config, block); for (let index = 0; index < count; index++) { - const blobSidecar = ssz[forkName].BlobSidecar.defaultValue(); + const blobSidecar = ssz.deneb.BlobSidecar.defaultValue(); blobSidecar.index = index; blobSidecar.signedBlockHeader = signedBlockHeader; - blobSidecar.blob = generateRandomBlob(index); + blobSidecar.blob = generateRandomBlob(); blobSidecar.kzgCommitment = kzg.blobToKzgCommitment(blobSidecar.blob); - blobSidecar.kzgCommitmentInclusionProof = computeInclusionProof(forkName, block.message.body, index); + blobSidecar.kzgCommitmentInclusionProof = computeInclusionProof( + config.getForkName(block.message.slot), + block.message.body, + index + ); blobSidecar.kzgProof = kzg.computeBlobKzgProof(blobSidecar.blob, blobSidecar.kzgCommitment); if (oomProtection) { @@ -157,41 +142,19 @@ function generateBlobSidecars( } function generateColumnSidecars( - forkName: F, + config: ChainForkConfig, block: SignedBeaconBlock, - numberOfBlobs: number, - oomProtection = false + numberOfBlobs: number ): { block: SignedBeaconBlock; columnSidecars: fulu.DataColumnSidecars; } { const blobs = Array.from({length: numberOfBlobs}, () => generateRandomBlob()); const kzgCommitments = blobs.map((blob) => kzg.blobToKzgCommitment(blob)); - block.body.blobKzgCommitments = kzgCommitments; + block.message.body.blobKzgCommitments = kzgCommitments; - const signedBlockHeader = signedBlockToSignedHeader(config, block); const cellsAndProofs = blobs.map((blob) => kzg.computeCellsAndKzgProofs(blob)); - const kzgCommitmentsInclusionProof = Array.from({length: blobs.length}, () => - computeKzgCommitmentsInclusionProof(forkName, block.body) - ); - - const columnSidecars = Array.from({length: NUMBER_OF_COLUMNS}, (_, columnIndex) => { - const column = oomProtection - ? 
[] - : Array.from({length: blobs.length}, (_, rowNumber) => cellsAndProofs[rowNumber].cells[columnIndex]); - const kzgProofs = Array.from( - {length: blobs.length}, - (_, rowNumber) => cellsAndProofs[rowNumber].proofs[columnIndex] - ); - return { - index: columnIndex, - column, - kzgCommitments, - kzgProofs, - signedBlockHeader, - kzgCommitmentsInclusionProof, - }; - }); + const columnSidecars = getDataColumnSidecarsFromBlock(config, block, cellsAndProofs); return { block, @@ -205,19 +168,19 @@ export type BlockTestSet = { rootHex: string; }; -export function generateChainOfBlocks({ - forkName, +export function generateChainOfBlocks({ + config, count, -}: {forkName: F; count: number}): BlockTestSet[] { +}: {config: ChainForkConfig; count: number}): BlockTestSet[] { let parentRoot = Uint8Array.from(randomBytes(ROOT_SIZE)); - let slot = slots[forkName]; - const blocks: BlockTestSet[] = []; - for (; slot < slot + count; slot++) { - const block = generateBeaconBlock({forkName, parentRoot, slot}); - const {blockRoot, rootHex} = generateRoots(forkName, block); + + const blocks: BlockTestSet[] = []; + for (let slot = 0; slot < count; slot++) { + const block = generateBeaconBlock({config, parentRoot, slot}); + const {blockRoot, rootHex} = generateRoots(config, block); parentRoot = block.message.parentRoot; blocks.push({ - block, + block: block as SignedBeaconBlock, blockRoot, rootHex, }); @@ -231,23 +194,22 @@ export type BlockWithColumnsTestSet = BlockTestSet & columnSidecars: fulu.DataColumnSidecars; }; -export function generateBlockWithBlobSidecars({ - forkName, +export function generateBlockWithBlobSidecars({ + config, slot, parentRoot, - oomProtection = false, }: { - forkName: F; + config: ChainForkConfig; parentRoot?: Uint8Array; slot?: Slot; oomProtection?: boolean; -}): BlockWithBlobsTestSet { +}): BlockWithBlobsTestSet { const {block, blobSidecars} = generateBlobSidecars( - forkName, - generateBeaconBlock({forkName, parentRoot, slot}), + config, + generateBeaconBlock({config, parentRoot, slot}) as SignedBeaconBlock, generateRandomInt(1, 6) ); - const {blockRoot, rootHex} = generateRoots(forkName, block); + const {blockRoot, rootHex} = generateRoots(config, block); return { block, blobSidecars, @@ -256,23 +218,22 @@ export function generateBlockWithBlobSidecars({ }; } -export function generateBlockWithColumnSidecars({ - forkName, +export function generateBlockWithColumnSidecars({ + config, slot, parentRoot, - oomProtection = false, }: { - forkName: F; + config: ChainForkConfig; parentRoot?: Uint8Array; slot?: Slot; oomProtection?: boolean; -}): BlockWithColumnsTestSet { +}): BlockWithColumnsTestSet { const {block, columnSidecars} = generateColumnSidecars( - forkName, - generateBeaconBlock({forkName, parentRoot, slot}), + config, + generateBeaconBlock({config, parentRoot, slot}) as SignedBeaconBlock, generateRandomInt(1, 6) ); - const {blockRoot, rootHex} = generateRoots(forkName, block); + const {blockRoot, rootHex} = generateRoots(config, block); return { block, columnSidecars, @@ -284,26 +245,29 @@ export function generateBlockWithColumnSidecars({ export type BlocksWithSidecars = F extends ForkPostFulu ? 
BlockWithColumnsTestSet[] : BlockWithBlobsTestSet[];
+
 export function generateChainOfBlocksWithBlobs({
+  config,
   forkName,
   count,
   oomProtection = false,
 }: {
+  config: ChainForkConfig;
   forkName: F;
   count: number;
   oomProtection?: boolean;
 }): BlocksWithSidecars {
   let parentRoot = Uint8Array.from(randomBytes(ROOT_SIZE));
   let slot = slots[forkName];
-  const blocks: BlocksWithSidecars = [];
+  const blocks: BlocksWithSidecars = [];
-  for (; slot < slot + count; slot++) {
+  for (const endSlot = slot + count; slot < endSlot; slot++) {
     const blockWithSidecars = isForkPostFulu(forkName)
-      ? generateBlockWithColumnSidecars({forkName, parentRoot, slot, oomProtection})
-      : generateBlockWithBlobSidecars({forkName, parentRoot, slot, oomProtection});
+      ? generateBlockWithColumnSidecars({config, parentRoot, slot, oomProtection})
+      : generateBlockWithBlobSidecars({config, parentRoot, slot, oomProtection});
     parentRoot = blockWithSidecars.blockRoot;
-    blocks.push(blockWithSidecars);
+    blocks.push(blockWithSidecars as any);
   }
-  return blocks;
+  return blocks as BlocksWithSidecars;
 }
 
 export type ChainOfBlockMaybeSidecars = F extends ForkPostFulu
@@ -311,13 +275,14 @@ export type ChainOfBlockMaybeSidecars = F extends For
   : F extends ForkPostDeneb
     ? BlockWithBlobsTestSet[]
     : BlockTestSet[];
+
 export function generateChainOfBlockMaybeSidecars(
   forkName: F,
   count: number,
   oomProtection = false
 ): ChainOfBlockMaybeSidecars {
   if (isForkPostDeneb(forkName)) {
-    return generateChainOfBlocksWithBlobs({forkName, count, oomProtection});
+    return generateChainOfBlocksWithBlobs({config, forkName, count, oomProtection}) as ChainOfBlockMaybeSidecars;
   }
-  return generateChainOfBlocks({forkName, count});
+  return generateChainOfBlocks({config, count}) as ChainOfBlockMaybeSidecars;
 }

From d7b5b668797214210b9afd5cca5b6763e7f7fb31 Mon Sep 17 00:00:00 2001
From: Cayman
Date: Thu, 14 Aug 2025 12:00:19 -0400
Subject: [PATCH 008/173] chore: update chain processing

---
 .../src/api/impl/beacon/blocks/index.ts       |  14 +-
 .../src/api/impl/lodestar/index.ts            |   2 +-
 .../src/chain/blocks/blockInput/utils.ts      |  28 -
 .../src/chain/blocks/importBlock.ts           |  25 +-
 .../beacon-node/src/chain/blocks/index.ts     |  11 +-
 .../beacon-node/src/chain/blocks/types.ts     | 211 +----
 .../src/chain/blocks/utils/chainSegment.ts    |   7 +-
 .../src/chain/blocks/verifyBlock.ts           |  19 +-
 .../blocks/verifyBlocksDataAvailability.ts    | 177 +---
 .../chain/blocks/verifyBlocksSanityChecks.ts  |   7 +-
 .../blocks/verifyBlocksStateTransitionOnly.ts |   7 +-
 .../src/chain/blocks/writeBlockInputToDb.ts   | 200 ++---
 packages/beacon-node/src/chain/chain.ts       |  27 +-
 packages/beacon-node/src/chain/interface.ts   |  10 +-
 .../beacon-node/src/chain/seenCache/index.ts  |   2 +-
 .../src/chain/seenCache/seenBlockInput.ts     | 374 ---------
 .../chain/seenCache/seenGossipBlockInput.ts   | 794 +++++++-----------
 .../src/metrics/metrics/lodestar.ts           |  12 +-
 .../src/sync/utils/downloadByRange.ts         |   4 +-
 .../src/sync/utils/downloadByRoot.ts          |   4 +-
 packages/beacon-node/src/util/dataColumns.ts  | 215 +----
 21 files changed, 506 insertions(+), 1644 deletions(-)
 delete mode 100644 packages/beacon-node/src/chain/seenCache/seenBlockInput.ts

diff --git a/packages/beacon-node/src/api/impl/beacon/blocks/index.ts b/packages/beacon-node/src/api/impl/beacon/blocks/index.ts
index 127562e6e102..8d0419e274b1 100644
--- a/packages/beacon-node/src/api/impl/beacon/blocks/index.ts
+++ b/packages/beacon-node/src/api/impl/beacon/blocks/index.ts
@@ -27,18 +27,8 @@ import {
   sszTypesFor,
 } from "@lodestar/types";
 import {fromHex, sleep, toHex, toRootHex} from "@lodestar/utils";
-import {
-  BlobsSource,
-  
BlockInput, - BlockInputAvailableData, - BlockInputBlobs, - BlockInputDataColumns, - BlockInputType, - BlockSource, - DataColumnsSource, - ImportBlockOpts, - getBlockInput, -} from "../../../../chain/blocks/types.js"; +import {BlockInput} from "../../../../chain/blocks/blockInput/index.js"; +import {ImportBlockOpts} from "../../../../chain/blocks/types.js"; import {verifyBlocksInEpoch} from "../../../../chain/blocks/verifyBlock.js"; import {BeaconChain} from "../../../../chain/chain.js"; import {BlockError, BlockErrorCode, BlockGossipError} from "../../../../chain/errors/index.js"; diff --git a/packages/beacon-node/src/api/impl/lodestar/index.ts b/packages/beacon-node/src/api/impl/lodestar/index.ts index 8393a68e7347..95175fcdd20e 100644 --- a/packages/beacon-node/src/api/impl/lodestar/index.ts +++ b/packages/beacon-node/src/api/impl/lodestar/index.ts @@ -115,7 +115,7 @@ export function getLodestarApi({ data: (chain as BeaconChain)["blockProcessor"].jobQueue.getItems().map((item) => { const [blockInputs, opts] = item.args; return { - blockSlots: blockInputs.map((blockInput) => blockInput.block.message.slot), + blockSlots: blockInputs.map((blockInput) => blockInput.slot), jobOpts: opts, addedTimeMs: item.addedTimeMs, }; diff --git a/packages/beacon-node/src/chain/blocks/blockInput/utils.ts b/packages/beacon-node/src/chain/blocks/blockInput/utils.ts index 26a9b0dc0957..c02911ad0b83 100644 --- a/packages/beacon-node/src/chain/blocks/blockInput/utils.ts +++ b/packages/beacon-node/src/chain/blocks/blockInput/utils.ts @@ -2,8 +2,6 @@ import {ChainForkConfig} from "@lodestar/config"; import {ForkName, isForkPostDeneb} from "@lodestar/params"; import {computeEpochAtSlot} from "@lodestar/state-transition"; import {Epoch, Slot} from "@lodestar/types"; -import {BlobsSource, BlockSource as BlockSourceOld} from "../types.js"; -import {BlockInputSource as BlockSource} from "./types.js"; export function isDaOutOfRange( config: ChainForkConfig, @@ -16,29 +14,3 @@ export function isDaOutOfRange( } return computeEpochAtSlot(blockSlot) < currentEpoch - config.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS; } - -export function convertNewToOldBlockSource(source: BlockSource): BlockSourceOld { - switch (source) { - case BlockSource.api: - return BlockSourceOld.api; - case BlockSource.byRoot: - return BlockSourceOld.byRoot; - case BlockSource.byRange: - return BlockSourceOld.byRange; - default: - return BlockSourceOld.gossip; - } -} - -export function convertNewToOldBlobSource(source: BlockSource): BlobsSource { - switch (source) { - case BlockSource.api: - return BlobsSource.api; - case BlockSource.byRoot: - return BlobsSource.byRoot; - case BlockSource.byRange: - return BlobsSource.byRange; - default: - return BlobsSource.gossip; - } -} diff --git a/packages/beacon-node/src/chain/blocks/importBlock.ts b/packages/beacon-node/src/chain/blocks/importBlock.ts index ca4d0a63b03b..fd7a318e4d3a 100644 --- a/packages/beacon-node/src/chain/blocks/importBlock.ts +++ b/packages/beacon-node/src/chain/blocks/importBlock.ts @@ -8,7 +8,6 @@ import { NotReorgedReason, } from "@lodestar/fork-choice"; import { - ForkName, ForkPostAltair, ForkPostElectra, ForkSeq, @@ -37,7 +36,8 @@ import {ChainEvent, ReorgEventData} from "../emitter.js"; import {ForkchoiceCaller} from "../forkChoice/index.js"; import {REPROCESS_MIN_TIME_TO_NEXT_SLOT_SEC} from "../reprocess.js"; import {toCheckpointHex} from "../stateCache/index.js"; -import {AttestationImportOpt, BlockInputType, FullyVerifiedBlock, ImportBlockOpts} from "./types.js"; +import 
{isBlockInputBlobs} from "./blockInput/blockInput.js";
+import {AttestationImportOpt, FullyVerifiedBlock, ImportBlockOpts} from "./types.js";
 import {getCheckpointFromState} from "./utils/checkpoint.js";
 import {writeBlockInputToDb} from "./writeBlockInputToDb.js";
@@ -76,7 +76,8 @@ export async function importBlock(
   opts: ImportBlockOpts
 ): Promise {
   const {blockInput, postState, parentBlockSlot, executionStatus, dataAvailabilityStatus} = fullyVerifiedBlock;
-  const {block, source} = blockInput;
+  const block = blockInput.getBlock();
+  const source = blockInput.getBlockSource();
   const {slot: blockSlot} = block.message;
   const blockRoot = this.config.getForkTypes(blockSlot).BeaconBlock.hashTreeRoot(block.message);
   const blockRootHex = toRootHex(blockRoot);
@@ -88,7 +89,7 @@ export async function importBlock(
   const fork = this.config.getForkSeq(blockSlot);
 
   // this is just a type assertion since blockinput with dataPromise type will not end up here
-  if (blockInput.type === BlockInputType.dataPromise) {
+  if (!blockInput.hasAllData()) {
     throw Error("Unavailable block can not be imported in forkchoice");
   }
@@ -115,7 +116,7 @@ export async function importBlock(
   // Some block event handlers require state being in state cache so need to do this before emitting EventType.block
   this.regen.processState(blockRootHex, postState);
 
-  this.metrics?.importBlock.bySource.inc({source});
+  this.metrics?.importBlock.bySource.inc({source: source.source});
   this.logger.verbose("Added block to forkchoice and state cache", {slot: blockSlot, root: blockRootHex});
 
   // 3. Import attestations to fork choice
@@ -510,15 +511,11 @@ export async function importBlock(
       fullyVerifiedBlock.postState.epochCtx.currentSyncCommitteeIndexed.validatorIndices
     );
   }
-  // dataPromise will not end up here, but preDeneb could. 
In future we might also allow syncing - // out of data range blocks and import then in forkchoice although one would not be able to - // attest and propose with such head similar to optimistic sync - if ( - blockInput.type === BlockInputType.availableData && - (blockInput.blockData.fork === ForkName.deneb || blockInput.blockData.fork === ForkName.electra) - ) { - const {blobsSource} = blockInput.blockData; - this.metrics?.importBlock.blobsBySource.inc({blobsSource}); + + if (isBlockInputBlobs(blockInput)) { + for (const {source} of blockInput.getAllBlobsWithSource()) { + this.metrics?.importBlock.blobsBySource.inc({blobsSource: source}); + } } const advancedSlot = this.clock.slotWithFutureTolerance(REPROCESS_MIN_TIME_TO_NEXT_SLOT_SEC); diff --git a/packages/beacon-node/src/chain/blocks/index.ts b/packages/beacon-node/src/chain/blocks/index.ts index 0ed2cfbecb88..455a534ed54e 100644 --- a/packages/beacon-node/src/chain/blocks/index.ts +++ b/packages/beacon-node/src/chain/blocks/index.ts @@ -5,8 +5,9 @@ import {JobItemQueue, isQueueErrorAborted} from "../../util/queue/index.js"; import type {BeaconChain} from "../chain.js"; import {BlockError, BlockErrorCode, isBlockErrorAborted} from "../errors/index.js"; import {BlockProcessOpts} from "../options.js"; +import {BlockInput} from "./blockInput/index.js"; import {importBlock} from "./importBlock.js"; -import {BlockInput, FullyVerifiedBlock, ImportBlockOpts} from "./types.js"; +import {FullyVerifiedBlock, ImportBlockOpts} from "./types.js"; import {assertLinearChainSegment} from "./utils/chainSegment.js"; import {verifyBlocksInEpoch} from "./verifyBlock.js"; import {verifyBlocksSanityChecks} from "./verifyBlocksSanityChecks.js"; @@ -70,7 +71,7 @@ export async function processBlocks( // Fully verify a block to be imported immediately after. Does not produce any side-effects besides adding intermediate // states in the state cache through regen. 
- const {postStates, dataAvailabilityStatuses, proposerBalanceDeltas, segmentExecStatus, availableBlockInputs} = + const {postStates, dataAvailabilityStatuses, proposerBalanceDeltas, segmentExecStatus} = await verifyBlocksInEpoch.call(this, parentBlock, relevantBlocks, opts); // If segmentExecStatus has lvhForkchoice then, the entire segment should be invalid @@ -83,7 +84,7 @@ export async function processBlocks( } const {executionStatuses} = segmentExecStatus; - const fullyVerifiedBlocks = availableBlockInputs.map( + const fullyVerifiedBlocks = relevantBlocks.map( (block, i): FullyVerifiedBlock => ({ blockInput: block, postState: postStates[i], @@ -108,7 +109,7 @@ export async function processBlocks( } // above functions should only throw BlockError - const err = getBlockError(e, blocks[0].block); + const err = getBlockError(e, blocks[0].getBlock()); // TODO: De-duplicate with logic above // ChainEvent.errorBlock @@ -152,7 +153,7 @@ export async function processBlocks( await removeEagerlyPersistedBlockInputs.call(this, blocks).catch((e) => { this.logger.warn( "Error pruning eagerly imported block inputs, DB may grow in size if this error happens frequently", - {slot: blocks.map((block) => block.block.message.slot).join(",")}, + {slot: blocks.map((block) => block.getBlock().message.slot).join(",")}, e ); }); diff --git a/packages/beacon-node/src/chain/blocks/types.ts b/packages/beacon-node/src/chain/blocks/types.ts index 87131429dbda..c6ff8545af78 100644 --- a/packages/beacon-node/src/chain/blocks/types.ts +++ b/packages/beacon-node/src/chain/blocks/types.ts @@ -1,25 +1,9 @@ import type {ChainForkConfig} from "@lodestar/config"; import {MaybeValidExecutionStatus} from "@lodestar/fork-choice"; -import {type ForkPostDeneb, ForkPostFulu, ForkPreFulu, ForkSeq} from "@lodestar/params"; +import {ForkSeq} from "@lodestar/params"; import {CachedBeaconStateAllForks, DataAvailabilityStatus, computeEpochAtSlot} from "@lodestar/state-transition"; -import type {ColumnIndex, RootHex, SignedBeaconBlock, Slot, deneb, fulu} from "@lodestar/types"; - -export enum BlockInputType { - // preData is preDeneb - preData = "preData", - // data is out of available window, can be used to sync forward and keep adding to forkchoice - outOfRangeData = "outOfRangeData", - availableData = "availableData", - dataPromise = "dataPromise", -} - -/** Enum to represent where blocks come from */ -export enum BlockSource { - gossip = "gossip", - api = "api", - byRange = "req_resp_by_range", - byRoot = "req_resp_by_root", -} +import type {Slot, fulu} from "@lodestar/types"; +import {BlockInput} from "./blockInput/index.js"; export enum GossipedInputType { block = "block", @@ -27,103 +11,11 @@ export enum GossipedInputType { dataColumn = "data_column", } -interface CachedDataItem { - cacheId: number; -} -type Availability = { - availabilityPromise: Promise; - resolveAvailability: (data: T) => void; -}; - -/** - * - * Deneb Blob Format Types - * - */ -/** Enum to represent where blobs come from */ -export enum BlobsSource { - gossip = "gossip", - api = "api", - byRange = "req_resp_by_range", - byRoot = "req_resp_by_root", -} -type ForkBlobsInfo = { - fork: ForkPostDeneb & ForkPreFulu; -}; -export type BlockInputBlobs = ForkBlobsInfo & { - blobs: deneb.BlobSidecars; - blobsSource: BlobsSource; -}; -export type BlobsCacheMap = Map; -export type CachedBlobs = CachedDataItem & - ForkBlobsInfo & - Availability & { - blobsCache: BlobsCacheMap; - }; - -/** - * - * PeerDAS Column Format Types - * - */ - -export enum DataColumnsSource { - 
gossip = "gossip", - api = "api", - engine = "engine", - byRange = "req_resp_by_range", - byRoot = "req_resp_by_root", -} -type ForkDataColumnsInfo = { - fork: ForkPostFulu; -}; type DataColumnData = { dataColumn: fulu.DataColumnSidecar; dataColumnBytes: Uint8Array | null; }; export type DataColumnsCacheMap = Map; -export type BlockInputDataColumns = ForkDataColumnsInfo & { - // marker of that columns are to be custodied - dataColumns: fulu.DataColumnSidecars; - dataColumnsBytes: (Uint8Array | null)[]; - dataColumnsSource: DataColumnsSource; -}; -export type CachedDataColumns = CachedDataItem & - ForkDataColumnsInfo & - Availability & { - dataColumnsCache: DataColumnsCacheMap; - calledRecover: boolean; - }; - -/** - * - * Cross-Fork Data Types - * - */ - -export type BlockInputAvailableData = BlockInputBlobs | BlockInputDataColumns; -export type CachedData = CachedBlobs | CachedDataColumns; - -export type BlockInput = { - block: SignedBeaconBlock; - source: BlockSource; -} & ( - | {type: BlockInputType.preData | BlockInputType.outOfRangeData} - | ({type: BlockInputType.availableData} & { - blockData: BlockInputAvailableData; - }) - // the blobsSource here is added to BlockInputBlobs when availability is resolved - | ({type: BlockInputType.dataPromise} & { - cachedData: CachedData; - }) -); -export type NullBlockInput = { - block: null; - blockRootHex: RootHex; - blockInputPromise: Promise; -} & { - cachedData: CachedData; -}; export function blockRequiresBlobs(config: ChainForkConfig, blockSlot: Slot, clockSlot: Slot): boolean { return ( @@ -133,103 +25,6 @@ export function blockRequiresBlobs(config: ChainForkConfig, blockSlot: Slot, clo ); } -export const getBlockInput = { - preData(config: ChainForkConfig, block: SignedBeaconBlock, source: BlockSource): BlockInput { - if (config.getForkSeq(block.message.slot) >= ForkSeq.deneb) { - throw Error(`Post Deneb block slot ${block.message.slot}`); - } - return { - type: BlockInputType.preData, - block, - source, - }; - }, - - // This isn't used right now but we might enable importing blobs into forkchoice from a point - // where data is not guaranteed to be available to hopefully reach a point where we have - // available data. Hence the validator duties can't be performed on outOfRangeData - // - // This can help with some of the requests of syncing without data for some use cases for e.g. 
- // building states or where importing data isn't important if valid child exists like ILs - outOfRangeData(config: ChainForkConfig, block: SignedBeaconBlock, source: BlockSource): BlockInput { - if (config.getForkSeq(block.message.slot) < ForkSeq.deneb) { - throw Error(`Pre Deneb block slot ${block.message.slot}`); - } - return { - type: BlockInputType.outOfRangeData, - block, - source, - }; - }, - - availableData( - config: ChainForkConfig, - block: SignedBeaconBlock, - source: BlockSource, - blockData: BlockInputAvailableData - ): BlockInput { - if (config.getForkSeq(block.message.slot) < ForkSeq.deneb) { - throw Error(`Pre Deneb block slot ${block.message.slot}`); - } - return { - type: BlockInputType.availableData, - block, - source, - blockData, - }; - }, - - dataPromise( - config: ChainForkConfig, - block: SignedBeaconBlock, - source: BlockSource, - cachedData: CachedData - ): BlockInput { - if (config.getForkSeq(block.message.slot) < ForkSeq.deneb) { - throw Error(`Pre Deneb block slot ${block.message.slot}`); - } - return { - type: BlockInputType.dataPromise, - block, - source, - cachedData, - }; - }, -}; - -export function getBlockInputBlobs(blobsCache: BlobsCacheMap): Omit { - const blobs = []; - - for (let index = 0; index < blobsCache.size; index++) { - const blobSidecar = blobsCache.get(index); - if (blobSidecar === undefined) { - throw Error(`Missing blobSidecar at index=${index}`); - } - blobs.push(blobSidecar); - } - return {blobs}; -} - -export function getBlockInputDataColumns( - dataColumnsCache: DataColumnsCacheMap, - columnIndexes: ColumnIndex[] -): Omit { - const dataColumns = []; - const dataColumnsBytes = []; - - for (const index of columnIndexes) { - const dataColumnCache = dataColumnsCache.get(index); - if (dataColumnCache === undefined) { - // check if the index is correct as per the custody columns - throw Error(`Missing dataColumnCache at index=${index}`); - } - const {dataColumn: dataColumnSidecar, dataColumnBytes} = dataColumnCache; - dataColumns.push(dataColumnSidecar); - dataColumnsBytes.push(dataColumnBytes); - } - return {dataColumns, dataColumnsBytes}; -} - export enum AttestationImportOpt { Skip, Force, diff --git a/packages/beacon-node/src/chain/blocks/utils/chainSegment.ts b/packages/beacon-node/src/chain/blocks/utils/chainSegment.ts index eb6a5f622dcd..b205f25c2845 100644 --- a/packages/beacon-node/src/chain/blocks/utils/chainSegment.ts +++ b/packages/beacon-node/src/chain/blocks/utils/chainSegment.ts @@ -1,15 +1,16 @@ import {ChainForkConfig} from "@lodestar/config"; import {ssz} from "@lodestar/types"; import {BlockError, BlockErrorCode} from "../../errors/index.js"; -import {BlockInput} from "../types.js"; +import {BlockInput} from "../blockInput/index.js"; /** * Assert this chain segment of blocks is linear with slot numbers and hashes */ + export function assertLinearChainSegment(config: ChainForkConfig, blocks: BlockInput[]): void { for (let i = 0; i < blocks.length - 1; i++) { - const block = blocks[i].block; - const child = blocks[i + 1].block; + const block = blocks[i].getBlock(); + const child = blocks[i + 1].getBlock(); // If this block has a child in this chain segment, ensure that its parent root matches // the root of this block. 
if ( diff --git a/packages/beacon-node/src/chain/blocks/verifyBlock.ts b/packages/beacon-node/src/chain/blocks/verifyBlock.ts index 4324cbdab44a..b0b6414815f4 100644 --- a/packages/beacon-node/src/chain/blocks/verifyBlock.ts +++ b/packages/beacon-node/src/chain/blocks/verifyBlock.ts @@ -13,7 +13,8 @@ import type {BeaconChain} from "../chain.js"; import {BlockError, BlockErrorCode} from "../errors/index.js"; import {BlockProcessOpts} from "../options.js"; import {RegenCaller} from "../regen/index.js"; -import {BlockInput, BlockInputType, ImportBlockOpts} from "./types.js"; +import {BlockInput, DAType} from "./blockInput/index.js"; +import {ImportBlockOpts} from "./types.js"; import {DENEB_BLOWFISH_BANNER} from "./utils/blowfishBanner.js"; import {ELECTRA_GIRAFFE_BANNER} from "./utils/giraffeBanner.js"; import {CAPELLA_OWL_BANNER} from "./utils/ownBanner.js"; @@ -45,9 +46,8 @@ export async function verifyBlocksInEpoch( proposerBalanceDeltas: number[]; segmentExecStatus: SegmentExecStatus; dataAvailabilityStatuses: DataAvailabilityStatus[]; - availableBlockInputs: BlockInput[]; }> { - const blocks = blocksInput.map(({block}) => block); + const blocks = blocksInput.map((blockInput) => blockInput.getBlock()); const lastBlock = blocks.at(-1); if (!lastBlock) { throw Error("Empty partiallyVerifiedBlocks"); @@ -94,7 +94,7 @@ export async function verifyBlocksInEpoch( // batch all I/O operations to reduce overhead const [ segmentExecStatus, - {dataAvailabilityStatuses, availableTime, availableBlockInputs}, + {dataAvailabilityStatuses, availableTime}, {postStates, proposerBalanceDeltas, verifyStateTime}, {verifySignaturesTime}, ] = await Promise.all([ @@ -108,7 +108,7 @@ export async function verifyBlocksInEpoch( } as SegmentExecStatus), // data availability for the blobs - verifyBlocksDataAvailability(this, blocksInput, abortController.signal, opts), + verifyBlocksDataAvailability(blocksInput, abortController.signal), // Run state transition only // TODO: Ensure it yields to allow flushing to workers and engine API @@ -176,7 +176,7 @@ export async function verifyBlocksInEpoch( blocksInput.length === 1 && // gossip blocks have seenTimestampSec opts.seenTimestampSec !== undefined && - blocksInput[0].type !== BlockInputType.preData && + blocksInput[0].type !== DAType.PreData && executionStatuses[0] === ExecutionStatus.Valid ) { // Find the max time when the block was actually verified @@ -185,11 +185,12 @@ export async function verifyBlocksInEpoch( this.metrics?.gossipBlock.receivedToFullyVerifiedTime.observe(recvTofullyVerifedTime); const verifiedToBlobsAvailabiltyTime = Math.max(availableTime - fullyVerifiedTime, 0) / 1000; - const numBlobs = (blocksInput[0].block as deneb.SignedBeaconBlock).message.body.blobKzgCommitments.length; + const block = blocksInput[0].getBlock() as deneb.SignedBeaconBlock; + const numBlobs = block.message.body.blobKzgCommitments.length; this.metrics?.gossipBlock.verifiedToBlobsAvailabiltyTime.observe({numBlobs}, verifiedToBlobsAvailabiltyTime); this.logger.verbose("Verified blockInput fully with blobs availability", { - slot: blocksInput[0].block.message.slot, + slot: block.message.slot, recvTofullyVerifedTime, verifiedToBlobsAvailabiltyTime, type: blocksInput[0].type, @@ -204,7 +205,7 @@ export async function verifyBlocksInEpoch( ); } - return {postStates, dataAvailabilityStatuses, proposerBalanceDeltas, segmentExecStatus, availableBlockInputs}; + return {postStates, dataAvailabilityStatuses, proposerBalanceDeltas, segmentExecStatus}; } finally { abortController.abort(); 
 }
diff --git a/packages/beacon-node/src/chain/blocks/verifyBlocksDataAvailability.ts b/packages/beacon-node/src/chain/blocks/verifyBlocksDataAvailability.ts
index da754d1f1245..a983ccf3b2b1 100644
--- a/packages/beacon-node/src/chain/blocks/verifyBlocksDataAvailability.ts
+++ b/packages/beacon-node/src/chain/blocks/verifyBlocksDataAvailability.ts
@@ -1,177 +1,34 @@
-import {ChainForkConfig} from "@lodestar/config";
-import {isForkPostDeneb, isForkPostFulu} from "@lodestar/params";
-import {DataAvailabilityStatus, computeTimeAtSlot} from "@lodestar/state-transition";
-import {UintNum64, deneb} from "@lodestar/types";
-import {ErrorAborted, Logger} from "@lodestar/utils";
-import {Metrics} from "../../metrics/metrics.js";
-import {BlockError, BlockErrorCode} from "../errors/index.js";
-import {validateBlobSidecars} from "../validation/blobSidecar.js";
-import {validateDataColumnsSidecars} from "../validation/dataColumnSidecar.js";
-import {
-  BlobSidecarValidation,
-  BlockInput,
-  BlockInputAvailableData,
-  BlockInputBlobs,
-  BlockInputDataColumns,
-  BlockInputType,
-  ImportBlockOpts,
-  getBlockInput,
-} from "./types.js";
+import {DataAvailabilityStatus} from "@lodestar/state-transition";
+import {BlockInput, DAType} from "./blockInput/index.js";
 
 // we can now wait for full 12 seconds because unavailable block sync will try pulling
 // the blobs from the network anyway after 500ms of seeing the block
 const BLOB_AVAILABILITY_TIMEOUT = 12_000;
 
 /**
- * Verifies some early cheap sanity checks on the block before running the full state transition.
- *
- * - Parent is known to the fork-choice
- * - Check skipped slots limit
- * - check_block_relevancy()
- *   - Block not in the future
- *   - Not genesis block
- *   - Block's slot is < Infinity
- *   - Not finalized slot
- *   - Not already known
+ * Verifies that all block inputs have data available.
+ * - Waits at most BLOB_AVAILABILITY_TIMEOUT ms for all data to become available
+ * - Returns the time at which all data became available
+ * - Returns the data availability status for each block input
  */
 export async function verifyBlocksDataAvailability(
-  chain: {config: ChainForkConfig; genesisTime: UintNum64; logger: Logger; metrics: Metrics | null},
   blocks: BlockInput[],
-  signal: AbortSignal,
-  opts: ImportBlockOpts
+  signal: AbortSignal
 ): Promise<{
   dataAvailabilityStatuses: DataAvailabilityStatus[];
   availableTime: number;
-  availableBlockInputs: BlockInput[];
 }> {
-  const lastBlock = blocks.at(-1);
-  if (!lastBlock) {
-    throw Error("Empty partiallyVerifiedBlocks");
-  }
-
-  const dataAvailabilityStatuses: DataAvailabilityStatus[] = [];
-  const seenTime = opts.seenTimestampSec !== undefined ? opts.seenTimestampSec * 1000 : Date.now();
-
-  const availableBlockInputs: BlockInput[] = [];
-
-  for (const blockInput of blocks) {
-    if (signal.aborted) {
-      throw new ErrorAborted("verifyBlocksDataAvailability");
+  await Promise.all(blocks.map((blockInput) => blockInput.waitForAllData(BLOB_AVAILABILITY_TIMEOUT, signal)));
+  const availableTime = Math.max(0, Math.max(...blocks.map((blockInput) => blockInput.getTimeComplete())));
+  const dataAvailabilityStatuses: DataAvailabilityStatus[] = blocks.map((blockInput) => {
+    if (blockInput.type === DAType.PreData) {
+      return DataAvailabilityStatus.PreData;
     }
-    // Validate status of only not yet finalized blocks, we don't need yet to propogate the status
-    // as it is not used upstream anywhere
-    const {dataAvailabilityStatus, availableBlockInput} = await maybeValidateBlobs(chain, blockInput, signal, opts);
-    dataAvailabilityStatuses.push(dataAvailabilityStatus);
-    availableBlockInputs.push(availableBlockInput);
-  }
-
-  const availableTime = lastBlock.type === BlockInputType.dataPromise ? Date.now() : seenTime;
-  if (blocks.length === 1 && opts.seenTimestampSec !== undefined && blocks[0].type !== BlockInputType.preData) {
-    const recvToAvailableTime = availableTime / 1000 - opts.seenTimestampSec;
-    const numBlobs = (blocks[0].block as deneb.SignedBeaconBlock).message.body.blobKzgCommitments.length;
-
-    chain.metrics?.gossipBlock.receivedToBlobsAvailabilityTime.observe({numBlobs}, recvToAvailableTime);
-    chain.logger.verbose("Verified blobs availability", {
-      slot: blocks[0].block.message.slot,
-      recvToAvailableTime,
-      type: blocks[0].type,
-    });
-  }
-
-  return {dataAvailabilityStatuses, availableTime, availableBlockInputs};
-}
-
-async function maybeValidateBlobs(
-  chain: {config: ChainForkConfig; genesisTime: UintNum64; metrics: Metrics | null; logger: Logger},
-  blockInput: BlockInput,
-  signal: AbortSignal,
-  opts: ImportBlockOpts
-): Promise<{dataAvailabilityStatus: DataAvailabilityStatus; availableBlockInput: BlockInput}> {
-  switch (blockInput.type) {
-    case BlockInputType.preData:
-      return {dataAvailabilityStatus: DataAvailabilityStatus.PreData, availableBlockInput: blockInput};
-
-    case BlockInputType.outOfRangeData:
-      return {dataAvailabilityStatus: DataAvailabilityStatus.OutOfRange, availableBlockInput: blockInput};
-
-    // biome-ignore lint/suspicious/noFallthroughSwitchClause: We need fall-through behavior here
-    case BlockInputType.availableData:
-      if (opts.validBlobSidecars === BlobSidecarValidation.Full) {
-        return {dataAvailabilityStatus: DataAvailabilityStatus.Available, availableBlockInput: blockInput};
-      }
-
-    case BlockInputType.dataPromise: {
-      // run full validation
-      const {block} = blockInput;
-      const blockSlot = block.message.slot;
-      const {blobKzgCommitments} = (block as deneb.SignedBeaconBlock).message.body;
-      const beaconBlockRoot = chain.config.getForkTypes(blockSlot).BeaconBlock.hashTreeRoot(block.message);
-      const blockData =
-        blockInput.type === BlockInputType.availableData
-          ? blockInput.blockData
-          : await raceWithCutoff(
-              chain,
-              blockInput,
-              blockInput.cachedData.availabilityPromise as Promise<BlockInputAvailableData>,
-              signal
-            );
-
-      if (isForkPostFulu(blockData.fork)) {
-        const {dataColumns} = blockData as BlockInputDataColumns;
-        const skipProofsCheck = opts.validBlobSidecars === BlobSidecarValidation.Individual;
-        await validateDataColumnsSidecars(blockSlot, beaconBlockRoot, blobKzgCommitments, dataColumns, chain.metrics, {
-          skipProofsCheck,
-        });
-      } else if (isForkPostDeneb(blockData.fork)) {
-        const {blobs} = blockData as BlockInputBlobs;
-
-        // if the blob sidecars have been individually verified then we can skip kzg proof check
-        // but other checks to match blobs with block data still need to be performed
-        const skipProofsCheck = opts.validBlobSidecars === BlobSidecarValidation.Individual;
-        await validateBlobSidecars(blockSlot, beaconBlockRoot, blobKzgCommitments, blobs, {skipProofsCheck});
-      }
-
-      const availableBlockInput = getBlockInput.availableData(
-        chain.config,
-        blockInput.block,
-        blockInput.source,
-        blockData
-      );
-      return {dataAvailabilityStatus: DataAvailabilityStatus.Available, availableBlockInput: availableBlockInput};
+    if (blockInput.daOutOfRange) {
+      return DataAvailabilityStatus.OutOfRange;
     }
-  }
-}
-
-/**
- * Wait for blobs to become available with a cutoff time. If fails then throw DATA_UNAVAILABLE error
- * which may try unknownblock/blobs fill (by root).
- */
-async function raceWithCutoff(
-  chain: {config: ChainForkConfig; genesisTime: UintNum64; logger: Logger},
-  blockInput: BlockInput,
-  availabilityPromise: Promise<BlockInputAvailableData>,
-  signal: AbortSignal
-): Promise<BlockInputAvailableData> {
-  const {block} = blockInput;
-  const blockSlot = block.message.slot;
-
-  const cutoffTime =
-    computeTimeAtSlot(chain.config, blockSlot, chain.genesisTime) * 1000 + BLOB_AVAILABILITY_TIMEOUT - Date.now();
-  const cutoffTimeout =
-    cutoffTime > 0
-      ? new Promise((_resolve, reject) => {
-          setTimeout(() => reject(new Error("Timeout exceeded")), cutoffTime);
-          signal.addEventListener("abort", () => reject(signal.reason));
-        })
-      : Promise.reject(new Error("Cutoff time must be greater than 0"));
-  chain.logger.debug("Racing for blob availabilityPromise", {blockSlot, cutoffTime});
+    return DataAvailabilityStatus.Available;
+  });
 
-  try {
-    await Promise.race([availabilityPromise, cutoffTimeout]);
-  } catch (_e) {
-    // throw unavailable so that the unknownblock/blobs can be triggered to pull the block
-    throw new BlockError(block, {code: BlockErrorCode.DATA_UNAVAILABLE});
-  }
-  // we can only be here if availabilityPromise has resolved else an error will be thrown
-  return availabilityPromise;
+  return {dataAvailabilityStatuses, availableTime};
 }
diff --git a/packages/beacon-node/src/chain/blocks/verifyBlocksSanityChecks.ts b/packages/beacon-node/src/chain/blocks/verifyBlocksSanityChecks.ts
index 7d52b506bfda..9280b1f13e86 100644
--- a/packages/beacon-node/src/chain/blocks/verifyBlocksSanityChecks.ts
+++ b/packages/beacon-node/src/chain/blocks/verifyBlocksSanityChecks.ts
@@ -6,7 +6,8 @@ import {toRootHex} from "@lodestar/utils";
 import {IClock} from "../../util/clock.js";
 import {BlockError, BlockErrorCode} from "../errors/index.js";
 import {IChainOptions} from "../options.js";
-import {BlockInput, ImportBlockOpts} from "./types.js";
+import {BlockInput} from "./blockInput/index.js";
+import {ImportBlockOpts} from "./types.js";
 
 /**
  * Verifies some early cheap sanity checks on the block before running the full state transition.
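
The rewritten availability check above boils down to the new BlockInput API. A minimal usage sketch, assuming only the signatures this patch introduces (waitForAllData, getTimeComplete, the DAType/daOutOfRange discriminators); importBlocksWithData is a hypothetical caller name used for illustration:

import {DataAvailabilityStatus} from "@lodestar/state-transition";
import {BlockInput} from "./blockInput/index.js";
import {verifyBlocksDataAvailability} from "./verifyBlocksDataAvailability.js";

// Hypothetical caller: wait for DA on a batch of blocks, then act per status
async function importBlocksWithData(blocks: BlockInput[], signal: AbortSignal): Promise<void> {
  // Rejects if any input is still missing blobs/columns after BLOB_AVAILABILITY_TIMEOUT,
  // or if the signal aborts first
  const {dataAvailabilityStatuses, availableTime} = await verifyBlocksDataAvailability(blocks, signal);

  for (let i = 0; i < blocks.length; i++) {
    // PreData (pre-deneb) and OutOfRange inputs carry no DA obligation;
    // Available means every expected blob/column was seen
    if (dataAvailabilityStatuses[i] === DataAvailabilityStatus.Available) {
      // proceed with state transition / import for blocks[i]
    }
  }

  // availableTime is the latest getTimeComplete() across the inputs, clamped to >= 0
  void availableTime;
}
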
@@ -44,7 +45,7 @@ export function verifyBlocksSanityChecks(
   let parentBlock: ProtoBlock | null = null;
 
   for (const blockInput of blocks) {
-    const {block} = blockInput;
+    const block = blockInput.getBlock();
     const blockSlot = block.message.slot;
     const blockHash = toRootHex(chain.config.getForkTypes(block.message.slot).BeaconBlock.hashTreeRoot(block.message));
 
@@ -85,7 +86,7 @@ export function verifyBlocksSanityChecks(
     let parentBlockSlot: Slot;
 
     if (relevantLastBlock) {
-      parentBlockSlot = relevantLastBlock.block.message.slot;
+      parentBlockSlot = relevantLastBlock.getBlock().message.slot;
     } else {
       // When importing a block segment, only the first NON-IGNORED block must be known to the fork-choice.
       const parentRoot = toRootHex(block.message.parentRoot);
diff --git a/packages/beacon-node/src/chain/blocks/verifyBlocksStateTransitionOnly.ts b/packages/beacon-node/src/chain/blocks/verifyBlocksStateTransitionOnly.ts
index 6e526f4bcde7..16b96af9bbed 100644
--- a/packages/beacon-node/src/chain/blocks/verifyBlocksStateTransitionOnly.ts
+++ b/packages/beacon-node/src/chain/blocks/verifyBlocksStateTransitionOnly.ts
@@ -12,7 +12,8 @@ import {nextEventLoop} from "../../util/eventLoop.js";
 import {BlockError, BlockErrorCode} from "../errors/index.js";
 import {BlockProcessOpts} from "../options.js";
 import {ValidatorMonitor} from "../validatorMonitor.js";
-import {BlockInput, ImportBlockOpts} from "./types.js";
+import {BlockInput} from "./blockInput/index.js";
+import {ImportBlockOpts} from "./types.js";
 
 /**
  * Verifies 1 or more blocks are fully valid running the full state transition; from a linear sequence of blocks.
@@ -38,7 +39,7 @@ export async function verifyBlocksStateTransitionOnly(
   for (let i = 0; i < blocks.length; i++) {
     const {validProposerSignature, validSignatures} = opts;
-    const {block} = blocks[i];
+    const block = blocks[i].getBlock();
     const preState = i === 0 ? preState0 : postStates[i - 1];
     const dataAvailabilityStatus = dataAvailabilityStatuses[i];
 
@@ -99,7 +100,7 @@ export async function verifyBlocksStateTransitionOnly(
     const verifyStateTime = Date.now();
     if (blocks.length === 1 && opts.seenTimestampSec !== undefined) {
-      const slot = blocks[0].block.message.slot;
+      const slot = blocks[0].getBlock().message.slot;
       const recvToValidation = verifyStateTime / 1000 - opts.seenTimestampSec;
       const validationTime = recvToValidation - recvToValLatency;
 
diff --git a/packages/beacon-node/src/chain/blocks/writeBlockInputToDb.ts b/packages/beacon-node/src/chain/blocks/writeBlockInputToDb.ts
index 68b7e8dbf7cd..79b984a4ea3c 100644
--- a/packages/beacon-node/src/chain/blocks/writeBlockInputToDb.ts
+++ b/packages/beacon-node/src/chain/blocks/writeBlockInputToDb.ts
@@ -1,9 +1,12 @@
-import {ForkName, NUMBER_OF_COLUMNS, isForkPostDeneb, isForkPostFulu} from "@lodestar/params";
-import {fulu, ssz} from "@lodestar/types";
-import {toRootHex} from "@lodestar/utils";
+import {KeyValue} from "@lodestar/db";
+import {NUMBER_OF_COLUMNS} from "@lodestar/params";
+import {SignedBeaconBlock, fulu, ssz} from "@lodestar/types";
+import {prettyPrintIndices, toRootHex} from "@lodestar/utils";
 import {toHex} from "@lodestar/utils";
+import {BlobSidecarsWrapper} from "../../db/repositories/blobSidecars.js";
+import {DataColumnSidecarsWrapper} from "../../db/repositories/dataColumnSidecars.js";
 import {BeaconChain} from "../chain.js";
-import {BlockInput, BlockInputBlobs, BlockInputDataColumns, BlockInputType} from "./types.js";
+import {BlockInput, isBlockInputBlobs, isBlockInputColumns} from "./blockInput/index.js";
 
 /**
  * Persists block input data to DB. This operation must be eventually completed if a block is imported to the fork-choice.
@@ -12,96 +15,100 @@ import {BlockInput, BlockInputBlobs, BlockInputDataColumns, BlockInputType} from
  * This operation may be performed before, during or after importing to the fork-choice. As long as errors
  * are handled properly for eventual consistency.
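 *
 * A rough sketch of the batching pattern introduced below, assuming only the repository
 * helpers referenced in this patch (getId, batchPut) and the KeyValue shape from
 * @lodestar/db; writes are collected per repository and flushed together:
 * @example
 * const putBlocks: KeyValue<Uint8Array, SignedBeaconBlock>[] = [];
 * for (const blockInput of blocksInputs) {
 *   const block = blockInput.getBlock();
 *   putBlocks.push({key: this.db.block.getId(block), value: block});
 * }
 * await Promise.all([this.db.block.batchPut(putBlocks)]);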
 */
-export async function writeBlockInputToDb(this: BeaconChain, blocksInput: BlockInput[]): Promise<void> {
-  const fnPromises: Promise<void>[] = [];
+export async function writeBlockInputToDb(this: BeaconChain, blocksInputs: BlockInput[]): Promise<void> {
+  // track all these objects for a few batch db operations
+  const putBlocks: KeyValue<Uint8Array, SignedBeaconBlock>[] = [];
+  const putSerializedBlocks: KeyValue<Uint8Array, Uint8Array>[] = [];
+  const putBlobSidecars: KeyValue<Uint8Array, BlobSidecarsWrapper>[] = [];
+  const putDataColumnSidecars: KeyValue<Uint8Array, DataColumnSidecarsWrapper>[] = [];
+  // track slots for logging
+  const slots: number[] = [];
 
-  for (const blockInput of blocksInput) {
-    const {block} = blockInput;
+  for (const blockInput of blocksInputs) {
+    const block = blockInput.getBlock();
+    const slot = block.message.slot;
+    slots.push(slot);
     const blockRoot = this.config.getForkTypes(block.message.slot).BeaconBlock.hashTreeRoot(block.message);
     const blockRootHex = toRootHex(blockRoot);
     const blockBytes = this.serializedCache.get(block);
     if (blockBytes) {
       // skip serializing data if we already have it
       this.metrics?.importBlock.persistBlockWithSerializedDataCount.inc();
-      fnPromises.push(this.db.block.putBinary(this.db.block.getId(block), blockBytes));
+      putSerializedBlocks.push({key: this.db.block.getId(block), value: blockBytes});
     } else {
       this.metrics?.importBlock.persistBlockNoSerializedDataCount.inc();
-      fnPromises.push(this.db.block.add(block));
+      putBlocks.push({key: this.db.block.getId(block), value: block});
     }
+
     this.logger.debug("Persist block to hot DB", {
-      slot: block.message.slot,
+      slot,
       root: blockRootHex,
       inputType: blockInput.type,
     });
 
-    if (blockInput.type === BlockInputType.availableData || blockInput.type === BlockInputType.dataPromise) {
-      const blockData =
-        blockInput.type === BlockInputType.availableData
-          ? blockInput.blockData
-          : await blockInput.cachedData.availabilityPromise;
-
-      // NOTE: Old data is pruned on archive
-      if (isForkPostFulu(blockData.fork)) {
-        const {custodyConfig} = this;
-        const {custodyColumnsIndex, custodyColumns} = custodyConfig;
-        const blobsLen = (block.message as fulu.BeaconBlock).body.blobKzgCommitments.length;
-        let dataColumnsLen: number;
-        let dataColumnsIndex: Uint8Array;
-        if (blobsLen === 0) {
-          dataColumnsLen = 0;
-          dataColumnsIndex = new Uint8Array(NUMBER_OF_COLUMNS);
-        } else {
-          dataColumnsLen = custodyColumns.length;
-          dataColumnsIndex = custodyColumnsIndex;
-        }
+    // NOTE: Old data is pruned on archive
+    if (isBlockInputColumns(blockInput)) {
+      const {custodyConfig} = this;
+      const {custodyColumnsIndex, custodyColumns} = custodyConfig;
+      const blobsLen = (block.message as fulu.BeaconBlock).body.blobKzgCommitments.length;
+      let dataColumnsLen: number;
+      let dataColumnsIndex: Uint8Array;
+      if (blobsLen === 0) {
+        dataColumnsLen = 0;
+        dataColumnsIndex = new Uint8Array(NUMBER_OF_COLUMNS);
+      } else {
+        dataColumnsLen = custodyColumns.length;
+        dataColumnsIndex = custodyColumnsIndex;
      }
 
-        const blockDataColumns = (blockData as BlockInputDataColumns).dataColumns;
-        const dataColumnSidecars = blockDataColumns.filter((dataColumnSidecar) =>
-          custodyColumns.includes(dataColumnSidecar.index)
+      const dataColumnSidecars = blockInput.getCustodyColumns();
+      if (dataColumnSidecars.length !== dataColumnsLen) {
+        throw Error(
+          `Invalid dataColumnSidecars=${dataColumnSidecars.length} for custody, expected custodyColumnsLen=${dataColumnsLen}`
         );
-        if (dataColumnSidecars.length !== dataColumnsLen) {
-          throw Error(
-            `Invalid dataColumnSidecars=${dataColumnSidecars.length} for custody expected custodyColumnsLen=${dataColumnsLen}`
-          );
-        }
+      }
 
-        const dataColumnsSize =
-          ssz.fulu.DataColumnSidecar.minSize +
-          blobsLen * (ssz.fulu.Cell.fixedSize + ssz.deneb.KZGCommitment.fixedSize + ssz.deneb.KZGProof.fixedSize);
-        const slot = block.message.slot;
-        const writeData = {
-          blockRoot,
-          slot,
-          dataColumnsLen,
-          dataColumnsSize,
-          dataColumnsIndex,
-          dataColumnSidecars,
-        };
-        fnPromises.push(this.db.dataColumnSidecars.add(writeData));
+      const dataColumnsSize =
+        ssz.fulu.DataColumnSidecar.minSize +
+        blobsLen * (ssz.fulu.Cell.fixedSize + ssz.deneb.KZGCommitment.fixedSize + ssz.deneb.KZGProof.fixedSize);
+      const writeData = {
+        blockRoot,
+        slot,
+        dataColumnsLen,
+        dataColumnsSize,
+        dataColumnsIndex,
+        dataColumnSidecars,
+      };
+      putDataColumnSidecars.push({key: this.db.dataColumnSidecars.getId(writeData), value: writeData});
 
-        this.logger.debug("Persisted dataColumnSidecars to hot DB", {
-          dataColumnsSize,
-          dataColumnsLen,
-          dataColumnSidecars: dataColumnSidecars.length,
-          slot: block.message.slot,
-          root: blockRootHex,
-        });
-      } else if (isForkPostDeneb(blockData.fork)) {
-        const blobSidecars = (blockData as BlockInputBlobs).blobs;
-        fnPromises.push(this.db.blobSidecars.add({blockRoot, slot: block.message.slot, blobSidecars}));
-        this.logger.debug("Persisted blobSidecars to hot DB", {
-          blobsLen: blobSidecars.length,
-          slot: block.message.slot,
-          root: blockRootHex,
-        });
-      }
+      this.logger.debug("Persisted dataColumnSidecars to hot DB", {
+        dataColumnsSize,
+        dataColumnsLen,
+        dataColumnSidecars: dataColumnSidecars.length,
+        slot,
+        root: blockRootHex,
+      });
+    } else if (isBlockInputBlobs(blockInput)) {
+      const blobSidecars = blockInput.getBlobs();
+      const wrapper = {blockRoot, slot, blobSidecars};
+      putBlobSidecars.push({key: this.db.blobSidecars.getId(wrapper), value: wrapper});
+      this.logger.debug("Persisted blobSidecars to hot DB", {
+        blobsLen: blobSidecars.length,
+        slot,
+        root: blockRootHex,
+      });
     }
   }
 
-  await Promise.all(fnPromises);
+  await Promise.all([
+    this.db.block.batchPut(putBlocks),
+    this.db.block.batchPutBinary(putSerializedBlocks),
+    this.db.blobSidecars.batchPut(putBlobSidecars),
+    this.db.dataColumnSidecars.batchPut(putDataColumnSidecars),
+  ]);
 
   this.logger.debug("Persisted blocksInput to db", {
-    blocksInput: blocksInput.length,
-    slots: blocksInput.map((blockInput) => blockInput.block.message.slot).join(" "),
+    blocksInput: blocksInputs.length,
+    slots: prettyPrintIndices(slots),
   });
 }
 
@@ -114,43 +121,38 @@ export async function removeEagerlyPersistedBlockInputs(this: BeaconChain, block
   const dataColumnsToRemove = [];
 
   for (const blockInput of blockInputs) {
-    const {block, type} = blockInput;
+    const block = blockInput.getBlock();
     const slot = block.message.slot;
     const blockRoot = this.config.getForkTypes(slot).BeaconBlock.hashTreeRoot(block.message);
     const blockRootHex = toHex(blockRoot);
     if (!this.forkChoice.hasBlockHex(blockRootHex)) {
       blockToRemove.push(block);
 
-      if (type === BlockInputType.availableData) {
-        const {blockData} = blockInput;
-        if (blockData.fork === ForkName.deneb || blockData.fork === ForkName.electra) {
-          const blobSidecars = blockData.blobs;
-          blobsToRemove.push({blockRoot, slot, blobSidecars});
-        } else {
-          const {custodyConfig} = this;
-          const {custodyColumnsIndex: dataColumnsIndex, custodyColumns} = custodyConfig;
-          const dataColumnsLen = custodyColumns.length;
-          const dataColumnSidecars = (blockData as BlockInputDataColumns).dataColumns.filter((dataColumnSidecar) =>
-            custodyColumns.includes(dataColumnSidecar.index)
+      if (isBlockInputColumns(blockInput)) {
+        const {custodyConfig} = this;
+        const {custodyColumnsIndex: dataColumnsIndex, custodyColumns} = custodyConfig;
+        const dataColumnsLen = custodyColumns.length;
+        const dataColumnSidecars = blockInput.getCustodyColumns();
+        if (dataColumnSidecars.length !== dataColumnsLen) {
+          throw Error(
+            `Invalid dataColumnSidecars=${dataColumnSidecars.length} for custody, expected custodyColumnsLen=${dataColumnsLen}`
           );
-          if (dataColumnSidecars.length !== dataColumnsLen) {
-            throw Error(
-              `Invalid dataColumnSidecars=${dataColumnSidecars.length} for custody expected custodyColumnsLen=${dataColumnsLen}`
-            );
-          }
+        }
 
-          const blobsLen = (block.message as fulu.BeaconBlock).body.blobKzgCommitments.length;
-          const dataColumnsSize = ssz.fulu.Cell.fixedSize * blobsLen;
+        const blobsLen = (block.message as fulu.BeaconBlock).body.blobKzgCommitments.length;
+        const dataColumnsSize = ssz.fulu.Cell.fixedSize * blobsLen;
 
-          dataColumnsToRemove.push({
-            blockRoot,
-            slot,
-            dataColumnsLen,
-            dataColumnsSize,
-            dataColumnsIndex,
-            dataColumnSidecars,
-          });
-        }
+        dataColumnsToRemove.push({
+          blockRoot,
+          slot,
+          dataColumnsLen,
+          dataColumnsSize,
+          dataColumnsIndex,
+          dataColumnSidecars,
+        });
+      } else if (isBlockInputBlobs(blockInput)) {
+        const blobSidecars = blockInput.getBlobs();
+        blobsToRemove.push({blockRoot, slot, blobSidecars});
      }
    }
  }
diff --git a/packages/beacon-node/src/chain/chain.ts b/packages/beacon-node/src/chain/chain.ts
index 85c08e74317c..5950f8972d78 100644
--- a/packages/beacon-node/src/chain/chain.ts
+++ b/packages/beacon-node/src/chain/chain.ts
@@ -60,8 +60,8 @@ import {SerializedCache} from "../util/serializedCache.js";
 import {ArchiveStore} from "./archiveStore/archiveStore.js";
 import {CheckpointBalancesCache} from "./balancesCache.js";
 import {BeaconProposerCache} from "./beaconProposerCache.js";
+import {BlockInput} from "./blocks/blockInput/index.js";
 import {BlockProcessor, ImportBlockOpts} from "./blocks/index.js";
-import {BlockInput} from "./blocks/types.js";
 import {BlsMultiThreadWorkerPool, BlsSingleThreadVerifier, IBlsVerifier} from "./bls/index.js";
 import {ChainEvent, ChainEventEmitter} from "./emitter.js";
 import {ForkchoiceCaller, initializeForkChoice} from "./forkChoice/index.js";
@@ -98,11 +98,10 @@ import {
   SeenContributionAndProof,
   SeenSyncCommitteeMessages,
 } from "./seenCache/index.js";
-import {SeenGossipBlockInput} from "./seenCache/index.js";
 import {SeenAggregatedAttestations} from "./seenCache/seenAggregateAndProof.js";
 import {SeenAttestationDatas} from "./seenCache/seenAttestationData.js";
 import {SeenBlockAttesters} from "./seenCache/seenBlockAttesters.js";
-import {SeenBlockInputCache} from "./seenCache/seenBlockInput.js";
+import {SeenBlockInput} from "./seenCache/seenGossipBlockInput.js";
 import {ShufflingCache} from "./shufflingCache.js";
 import {BlockStateCacheImpl} from "./stateCache/blockStateCacheImpl.js";
 import {DbCPStateDatastore} from "./stateCache/datastore/db.js";
@@ -159,8 +158,8 @@ export class BeaconChain implements IBeaconChain {
   readonly seenSyncCommitteeMessages = new SeenSyncCommitteeMessages();
   readonly seenContributionAndProof: SeenContributionAndProof;
   readonly seenAttestationDatas: SeenAttestationDatas;
-  readonly seenGossipBlockInput: SeenGossipBlockInput;
-  readonly seenBlockInputCache: SeenBlockInputCache;
+  readonly seenGossipBlockInput: SeenBlockInput;
+  readonly seenBlockInputCache: SeenBlockInput;
 
   // Seen cache for liveness checks
   readonly seenBlockAttesters = new SeenBlockAttesters();
@@ -290,17 +289,19 @@ export class BeaconChain implements IBeaconChain {
initialCustodyGroupCount, }); - this.seenGossipBlockInput = new SeenGossipBlockInput( - this.custodyConfig, - this.executionEngine, - emitter, + this.seenGossipBlockInput = new SeenBlockInput({ + config: this.config, + custodyConfig: this.custodyConfig, + chainEvents: emitter, clock, - logger - ); + logger, + metrics, + signal, + }); this.beaconProposerCache = new BeaconProposerCache(opts); this.checkpointBalancesCache = new CheckpointBalancesCache(); - this.seenBlockInputCache = new SeenBlockInputCache({ + this.seenBlockInputCache = new SeenBlockInput({ config, custodyConfig: this.custodyConfig, clock, @@ -454,7 +455,7 @@ export class BeaconChain implements IBeaconChain { } seenBlock(blockRoot: RootHex): boolean { - return this.seenGossipBlockInput.hasBlock(blockRoot) || this.forkChoice.hasBlockHex(blockRoot); + return this.seenGossipBlockInput.has(blockRoot) || this.forkChoice.hasBlockHex(blockRoot); } regenCanAcceptWork(): boolean { diff --git a/packages/beacon-node/src/chain/interface.ts b/packages/beacon-node/src/chain/interface.ts index db539304bc1f..b7d52aef59eb 100644 --- a/packages/beacon-node/src/chain/interface.ts +++ b/packages/beacon-node/src/chain/interface.ts @@ -38,7 +38,8 @@ import {SerializedCache} from "../util/serializedCache.js"; import {IArchiveStore} from "./archiveStore/interface.js"; import {CheckpointBalancesCache} from "./balancesCache.js"; import {BeaconProposerCache, ProposerPreparationData} from "./beaconProposerCache.js"; -import {BlockInput, ImportBlockOpts} from "./blocks/types.js"; +import {BlockInput} from "./blocks/blockInput/index.js"; +import {ImportBlockOpts} from "./blocks/types.js"; import {IBlsVerifier} from "./bls/index.js"; import {ChainEventEmitter} from "./emitter.js"; import {ForkchoiceCaller} from "./forkChoice/index.js"; @@ -59,11 +60,10 @@ import { SeenContributionAndProof, SeenSyncCommitteeMessages, } from "./seenCache/index.js"; -import {SeenGossipBlockInput} from "./seenCache/index.js"; import {SeenAggregatedAttestations} from "./seenCache/seenAggregateAndProof.js"; import {SeenAttestationDatas} from "./seenCache/seenAttestationData.js"; import {SeenBlockAttesters} from "./seenCache/seenBlockAttesters.js"; -import {SeenBlockInputCache} from "./seenCache/seenBlockInput.js"; +import {SeenBlockInput} from "./seenCache/seenGossipBlockInput.js"; import {ShufflingCache} from "./shufflingCache.js"; import {ValidatorMonitor} from "./validatorMonitor.js"; @@ -128,8 +128,8 @@ export interface IBeaconChain { readonly seenSyncCommitteeMessages: SeenSyncCommitteeMessages; readonly seenContributionAndProof: SeenContributionAndProof; readonly seenAttestationDatas: SeenAttestationDatas; - readonly seenBlockInputCache: SeenBlockInputCache; - readonly seenGossipBlockInput: SeenGossipBlockInput; + readonly seenBlockInputCache: SeenBlockInput; + readonly seenGossipBlockInput: SeenBlockInput; // Seen cache for liveness checks readonly seenBlockAttesters: SeenBlockAttesters; diff --git a/packages/beacon-node/src/chain/seenCache/index.ts b/packages/beacon-node/src/chain/seenCache/index.ts index 250e6581c312..2aa218fc20fb 100644 --- a/packages/beacon-node/src/chain/seenCache/index.ts +++ b/packages/beacon-node/src/chain/seenCache/index.ts @@ -2,4 +2,4 @@ export {SeenAggregators, SeenAttesters} from "./seenAttesters.js"; export {SeenBlockProposers} from "./seenBlockProposers.js"; export {SeenSyncCommitteeMessages} from "./seenCommittee.js"; export {SeenContributionAndProof} from "./seenCommitteeContribution.js"; -export {SeenGossipBlockInput} from 
"./seenGossipBlockInput.js"; +export {SeenBlockInput} from "./seenGossipBlockInput.js"; diff --git a/packages/beacon-node/src/chain/seenCache/seenBlockInput.ts b/packages/beacon-node/src/chain/seenCache/seenBlockInput.ts deleted file mode 100644 index 66175c7ae641..000000000000 --- a/packages/beacon-node/src/chain/seenCache/seenBlockInput.ts +++ /dev/null @@ -1,374 +0,0 @@ -import {ChainForkConfig} from "@lodestar/config"; -import {CheckpointWithHex} from "@lodestar/fork-choice"; -import {ForkName, isForkPostDeneb} from "@lodestar/params"; -import {computeStartSlotAtEpoch} from "@lodestar/state-transition"; -import {RootHex, SignedBeaconBlock, Slot, deneb, fulu} from "@lodestar/types"; -import {LodestarError, Logger, toRootHex} from "@lodestar/utils"; -import {Metrics} from "../../metrics/metrics.js"; -import {IClock} from "../../util/clock.js"; -import {CustodyConfig} from "../../util/dataColumns.js"; -import { - BlockInputBlobs, - BlockInputColumns, - BlockInputPreData, - DAType, - ForkBlobsDA, - IBlockInput, - LogMetaBasic, - LogMetaBlobs, - LogMetaColumns, - SourceMeta, - isBlockInputBlobs, - isBlockInputColumns, - isDaOutOfRange, -} from "../blocks/blockInput/index.js"; -import {ChainEvent, ChainEventEmitter} from "../emitter.js"; - -const MAX_BLOCK_INPUT_CACHE_SIZE = 5; - -export type SeenBlockInputCacheModules = { - config: ChainForkConfig; - clock: IClock; - chainEvents: ChainEventEmitter; - signal: AbortSignal; - custodyConfig: CustodyConfig; - metrics: Metrics | null; - logger?: Logger; -}; - -export type GetByBlobOptions = { - throwErrorIfAlreadyKnown?: boolean; -}; - -/** - * Consumers that create BlockInputs or change types of old BlockInputs - * - * - gossipHandlers (block and blob) - * - beaconBlocksMaybeBlobsByRange - * - unavailableBeaconBlobsByRoot (beaconBlocksMaybeBlobsByRoot) - * - publishBlock in the beacon/blocks/index.ts API - * https://github.com/ChainSafe/lodestar/blob/unstable/packages/beacon-node/src/api/impl/beacon/blocks/index.ts#L62 - * - maybeValidateBlobs in verifyBlocksDataAvailability (is_data_available spec function) - * https://github.com/ChainSafe/lodestar/blob/unstable/packages/beacon-node/src/chain/blocks/verifyBlocksDataAvailability.ts#L111 - * - * - * Pruning management for SeenBlockInputCache - * ------------------------------------------ - * There are four cases for how pruning needs to be handled - * - Normal operation following head via gossip (and/or reqresp). For this situation the consumer (process pipeline or - * caller of processBlock) will call the `prune` method to remove any processed BlockInputs from the cache. This will - * also remove any ancestors of the processed BlockInput as that will also need to have been successfully processed - * for import to work correctly - * - onFinalized event handler will help to prune any non-canonical forks once the chain finalizes. Any block-slots that - * are before the finalized checkpoint will be pruned. - * - Range-sync periods. The range process uses this cache to store and sync blocks with DA data as the chain is pulled - * from peers. We pull batches, by epoch, so 32 slots are pulled at a time and several batches are pulled concurrently. - * It is important to set the MAX_BLOCK_INPUT_CACHE_SIZE high enough to support range sync activities. Currently the - * value is set for 5 batches of 32 slots. As process block is called (similar to following head) the BlockInput and - * its ancestors will be pruned. - * - Non-Finality times. This is a bit more tricky. 
There can be long periods of non-finality and storing everything - * will cause OOM. The pruneToMax will help ensure a hard limit on the number of stored blocks (with DA) that are held - * in memory at any one time. The value for MAX_BLOCK_INPUT_CACHE_SIZE is set to accommodate range-sync but in - * practice this value may need to be massaged in the future if we find issues when debugging non-finality - */ - -export class SeenBlockInputCache { - private readonly config: ChainForkConfig; - private readonly custodyConfig: CustodyConfig; - private readonly clock: IClock; - private readonly chainEvents: ChainEventEmitter; - private readonly signal: AbortSignal; - private readonly metrics: Metrics | null; - private readonly logger?: Logger; - private blockInputs = new Map(); - - constructor({config, custodyConfig, clock, chainEvents, signal, metrics, logger}: SeenBlockInputCacheModules) { - this.config = config; - this.custodyConfig = custodyConfig; - this.clock = clock; - this.chainEvents = chainEvents; - this.signal = signal; - this.metrics = metrics; - this.logger = logger; - - if (metrics) { - metrics.seenCache.blockInput.blockInputCount.addCollect(() => - metrics.seenCache.blockInput.blockInputCount.set(this.blockInputs.size) - ); - } - - this.chainEvents.on(ChainEvent.forkChoiceFinalized, this.onFinalized); - this.signal.addEventListener("abort", () => { - this.chainEvents.off(ChainEvent.forkChoiceFinalized, this.onFinalized); - }); - } - - has(rootHex: RootHex): boolean { - return this.blockInputs.has(rootHex); - } - - get(rootHex: RootHex): IBlockInput | undefined { - return this.blockInputs.get(rootHex); - } - - /** - * Removes the single BlockInput from the cache - */ - remove(rootHex: RootHex): void { - this.blockInputs.delete(rootHex); - } - - /** - * Removes a processed BlockInput from the cache and also removes any ancestors of processed blocks - */ - prune(rootHex: RootHex): void { - let blockInput = this.blockInputs.get(rootHex); - let parentRootHex = blockInput?.parentRootHex; - while (blockInput) { - this.blockInputs.delete(blockInput.blockRootHex); - blockInput = this.blockInputs.get(parentRootHex ?? ""); - parentRootHex = blockInput?.parentRootHex; - } - this.pruneToMaxSize(); - } - - onFinalized = (checkpoint: CheckpointWithHex) => { - const cutoffSlot = computeStartSlotAtEpoch(checkpoint.epoch); - for (const [rootHex, blockInput] of this.blockInputs) { - if (blockInput.slot < cutoffSlot) { - this.blockInputs.delete(rootHex); - } - } - this.pruneToMaxSize(); - }; - - getByBlock({block, source, seenTimestampSec, peerIdStr}: SourceMeta & {block: SignedBeaconBlock}): IBlockInput { - const blockRoot = this.config.getForkTypes(block.message.slot).BeaconBlock.hashTreeRoot(block.message); - const blockRootHex = toRootHex(blockRoot); - - // TODO(peerDAS): Why is it necessary to static cast this here. 
All conditional paths result in a valid value so should be defined correctly below - let blockInput = this.blockInputs.get(blockRootHex) as IBlockInput; - if (!blockInput) { - const {forkName, daOutOfRange} = this.buildCommonProps(block.message.slot); - if (!isForkPostDeneb(forkName)) { - blockInput = BlockInputPreData.createFromBlock({ - block, - blockRootHex, - daOutOfRange, - forkName, - source: { - source, - seenTimestampSec, - peerIdStr, - }, - }); - } - // else if (isForkPostFulu(forkName)) { - // blockInput = new BlockInputColumns.createFromBlock({ - // block, - // blockRootHex, - // daOutOfRange, - // forkName, - // custodyColumns: this.custodyConfig.custodyColumns, - // sampledColumns: this.custodyConfig.sampledColumns, - // source: { - // source, - // seenTimestampSec, - // peerIdStr - // } - // }) - // } - else { - blockInput = BlockInputBlobs.createFromBlock({ - block: block as SignedBeaconBlock, - blockRootHex, - daOutOfRange, - forkName, - source: { - source, - seenTimestampSec, - peerIdStr, - }, - }); - } - this.blockInputs.set(blockInput.blockRootHex, blockInput); - } - - if (!blockInput.hasBlock()) { - blockInput.addBlock({block, blockRootHex, source: {source, seenTimestampSec, peerIdStr}}); - } else { - this.logger?.debug("Attempt to cache block but is already cached on BlockInput", blockInput.getLogMeta()); - this.metrics?.seenCache.blockInput.duplicateBlockCount.inc({source}); - } - - return blockInput; - } - - getByBlob( - {blobSidecar, source, seenTimestampSec, peerIdStr}: SourceMeta & {blobSidecar: deneb.BlobSidecar}, - opts: GetByBlobOptions = {} - ): BlockInputBlobs { - const blockRoot = this.config - .getForkTypes(blobSidecar.signedBlockHeader.message.slot) - .BeaconBlockHeader.hashTreeRoot(blobSidecar.signedBlockHeader.message); - const blockRootHex = toRootHex(blockRoot); - - // TODO(peerDAS): Why is it necessary to static cast this here. 
All conditional paths result in a valid value so should be defined correctly below - let blockInput = this.blockInputs.get(blockRootHex) as IBlockInput; - let created = false; - if (!blockInput) { - created = true; - const {forkName, daOutOfRange} = this.buildCommonProps(blobSidecar.signedBlockHeader.message.slot); - blockInput = BlockInputBlobs.createFromBlob({ - blobSidecar, - blockRootHex, - daOutOfRange, - forkName, - source, - seenTimestampSec, - peerIdStr, - }); - this.metrics?.seenCache.blockInput.createdByBlob.inc(); - this.blockInputs.set(blockRootHex, blockInput); - } - - if (!isBlockInputBlobs(blockInput)) { - throw new SeenBlockInputCacheError( - { - code: SeenBlockInputCacheErrorCode.WRONG_BLOCK_INPUT_TYPE, - cachedType: blockInput.type, - requestedType: DAType.Blobs, - ...blockInput.getLogMeta(), - }, - `BlockInputType mismatch adding blobIndex=${blobSidecar.index}` - ); - } - - if (!blockInput.hasBlob(blobSidecar.index)) { - blockInput.addBlob({blobSidecar, blockRootHex, source, seenTimestampSec, peerIdStr}); - } else if (!created) { - this.logger?.debug( - `Attempt to cache blob index #${blobSidecar.index} but is already cached on BlockInput`, - blockInput.getLogMeta() - ); - this.metrics?.seenCache.blockInput.duplicateBlobCount.inc({source}); - if (opts.throwErrorIfAlreadyKnown) { - throw new SeenBlockInputCacheError({ - code: SeenBlockInputCacheErrorCode.GOSSIP_BLOB_ALREADY_KNOWN, - ...blockInput.getLogMeta(), - }); - } - } - - return blockInput; - } - - getByColumn( - {columnSidecar, seenTimestampSec, source, peerIdStr}: SourceMeta & {columnSidecar: fulu.DataColumnSidecar}, - opts: GetByBlobOptions = {} - ): BlockInputColumns { - const blockRoot = this.config - .getForkTypes(columnSidecar.signedBlockHeader.message.slot) - .BeaconBlockHeader.hashTreeRoot(columnSidecar.signedBlockHeader.message); - const blockRootHex = toRootHex(blockRoot); - - let blockInput = this.blockInputs.get(blockRootHex); - let created = false; - if (!blockInput) { - created = true; - const {forkName, daOutOfRange} = this.buildCommonProps(columnSidecar.signedBlockHeader.message.slot); - blockInput = BlockInputColumns.createFromColumn({ - columnSidecar, - blockRootHex, - daOutOfRange, - forkName, - source, - seenTimestampSec, - peerIdStr, - custodyColumns: this.custodyConfig.custodyColumns, - sampledColumns: this.custodyConfig.sampledColumns, - }); - this.metrics?.seenCache.blockInput.createdByBlob.inc(); - this.blockInputs.set(blockRootHex, blockInput); - } - - if (!isBlockInputColumns(blockInput)) { - throw new SeenBlockInputCacheError( - { - code: SeenBlockInputCacheErrorCode.WRONG_BLOCK_INPUT_TYPE, - cachedType: blockInput.type, - requestedType: DAType.Columns, - ...blockInput.getLogMeta(), - }, - `BlockInputType mismatch adding columnIndex=${columnSidecar.index}` - ); - } - - if (!blockInput.hasColumn(columnSidecar.index)) { - blockInput.addColumn({columnSidecar, blockRootHex, source, seenTimestampSec, peerIdStr}); - } else if (!created) { - this.logger?.debug( - `Attempt to cache column index #${columnSidecar.index} but is already cached on BlockInput`, - blockInput.getLogMeta() - ); - this.metrics?.seenCache.blockInput.duplicateColumnCount.inc({source}); - if (opts.throwErrorIfAlreadyKnown) { - throw new SeenBlockInputCacheError({ - code: SeenBlockInputCacheErrorCode.GOSSIP_COLUMN_ALREADY_KNOWN, - ...blockInput.getLogMeta(), - }); - } - } - - return blockInput; - } - - private buildCommonProps(slot: Slot): { - daOutOfRange: boolean; - forkName: ForkName; - } { - const forkName = 
this.config.getForkName(slot); - return { - forkName, - daOutOfRange: isDaOutOfRange(this.config, forkName, slot, this.clock.currentEpoch), - }; - } - - /** - * Use custom implementation of pruneSetToMax to allow for sorting by slot - * and deleting via key/rootHex - */ - private pruneToMaxSize() { - let itemsToDelete = this.blockInputs.size - MAX_BLOCK_INPUT_CACHE_SIZE; - - if (itemsToDelete > 0) { - const sorted = [...this.blockInputs.entries()].sort((a, b) => b[1].slot - a[1].slot); - for (const [rootHex] of sorted) { - this.blockInputs.delete(rootHex); - itemsToDelete--; - if (itemsToDelete <= 0) return; - } - } - } -} - -enum SeenBlockInputCacheErrorCode { - WRONG_BLOCK_INPUT_TYPE = "BLOCK_INPUT_CACHE_ERROR_WRONG_BLOCK_INPUT_TYPE", - GOSSIP_BLOB_ALREADY_KNOWN = "BLOCK_INPUT_CACHE_ERROR_GOSSIP_BLOB_ALREADY_KNOWN", - GOSSIP_COLUMN_ALREADY_KNOWN = "BLOCK_INPUT_CACHE_ERROR_GOSSIP_COLUMN_ALREADY_KNOWN", -} - -type SeenBlockInputCacheErrorType = - | (LogMetaBasic & { - code: SeenBlockInputCacheErrorCode.WRONG_BLOCK_INPUT_TYPE; - cachedType: DAType; - requestedType: DAType; - }) - | (LogMetaBlobs & { - code: SeenBlockInputCacheErrorCode.GOSSIP_BLOB_ALREADY_KNOWN; - }) - | (LogMetaColumns & { - code: SeenBlockInputCacheErrorCode.GOSSIP_COLUMN_ALREADY_KNOWN; - }); - -class SeenBlockInputCacheError extends LodestarError {} diff --git a/packages/beacon-node/src/chain/seenCache/seenGossipBlockInput.ts b/packages/beacon-node/src/chain/seenCache/seenGossipBlockInput.ts index d1e60d887cbe..b62c8db8a770 100644 --- a/packages/beacon-node/src/chain/seenCache/seenGossipBlockInput.ts +++ b/packages/beacon-node/src/chain/seenCache/seenGossipBlockInput.ts @@ -1,544 +1,374 @@ -import {toHexString} from "@chainsafe/ssz"; import {ChainForkConfig} from "@lodestar/config"; -import {ForkName, NUMBER_OF_COLUMNS, isForkPostDeneb} from "@lodestar/params"; -import {RootHex, SignedBeaconBlock, deneb, fulu, ssz} from "@lodestar/types"; -import {Logger, pruneSetToMax} from "@lodestar/utils"; - -import {IExecutionEngine} from "../../execution/index.js"; -import {Metrics} from "../../metrics/index.js"; +import {CheckpointWithHex} from "@lodestar/fork-choice"; +import {ForkName, isForkPostDeneb} from "@lodestar/params"; +import {computeStartSlotAtEpoch} from "@lodestar/state-transition"; +import {RootHex, SignedBeaconBlock, Slot, deneb, fulu} from "@lodestar/types"; +import {LodestarError, Logger, toRootHex} from "@lodestar/utils"; +import {Metrics} from "../../metrics/metrics.js"; import {IClock} from "../../util/clock.js"; +import {CustodyConfig} from "../../util/dataColumns.js"; import { - CustodyConfig, - RecoverResult, - getDataColumnsFromExecution, - hasSampledDataColumns, - recoverDataColumnSidecars, -} from "../../util/dataColumns.js"; -import {callInNextEventLoop} from "../../util/eventLoop.js"; -import { - BlobsSource, - BlockInput, BlockInputBlobs, - BlockInputDataColumns, - BlockSource, - CachedData, - CachedDataColumns, - DataColumnsSource, - GossipedInputType, - NullBlockInput, - getBlockInput, - getBlockInputBlobs, - getBlockInputDataColumns, -} from "../blocks/types.js"; + BlockInputColumns, + BlockInputPreData, + DAType, + ForkBlobsDA, + IBlockInput, + LogMetaBasic, + LogMetaBlobs, + LogMetaColumns, + SourceMeta, + isBlockInputBlobs, + isBlockInputColumns, + isDaOutOfRange, +} from "../blocks/blockInput/index.js"; import {ChainEvent, ChainEventEmitter} from "../emitter.js"; -import {DataColumnSidecarErrorCode, DataColumnSidecarGossipError} from "../errors/dataColumnSidecarError.js"; -import {GossipAction} 
from "../errors/gossipValidation.js"; - -export enum BlockInputAvailabilitySource { - GOSSIP = "gossip", - RECOVERED = "recovered", - UNKNOWN_SYNC = "unknown_sync", -} - -type GossipedBlockInput = - | {type: GossipedInputType.block; signedBlock: SignedBeaconBlock} - | {type: GossipedInputType.blob; blobSidecar: deneb.BlobSidecar} - | { - type: GossipedInputType.dataColumn; - dataColumnSidecar: fulu.DataColumnSidecar; - dataColumnBytes: Uint8Array | null; - }; -// TODO(fulu): dedup with gossipHandlers.ts -const BLOCK_AVAILABILITY_CUTOFF_MS = 3_000; +const MAX_BLOCK_INPUT_CACHE_SIZE = 5; -export type BlockInputCacheType = { - fork: ForkName; - block?: SignedBeaconBlock; - cachedData?: CachedData; - // block promise and its callback cached for delayed resolution - blockInputPromise: Promise; - resolveBlockInput: (blockInput: BlockInput) => void; +export type SeenBlockInputCacheModules = { + config: ChainForkConfig; + clock: IClock; + chainEvents: ChainEventEmitter; + signal: AbortSignal; + custodyConfig: CustodyConfig; + metrics: Metrics | null; + logger?: Logger; }; -type GossipBlockInputResponseWithBlock = { - blockInput: BlockInput; - blockInputMeta: - | {pending: GossipedInputType.blob | null; haveBlobs: number; expectedBlobs: number} - | {pending: GossipedInputType.dataColumn | null; haveColumns: number; expectedColumns: number}; +export type GetByBlobOptions = { + throwErrorIfAlreadyKnown?: boolean; }; -type BlockInputPendingBlock = {pending: GossipedInputType.block}; -export type BlockInputMetaPendingBlockWithBlobs = BlockInputPendingBlock & {haveBlobs: number; expectedBlobs: null}; -type BlockInputMetaPendingBlockWithColumns = BlockInputPendingBlock & {haveColumns: number; expectedColumns: null}; - -type GossipBlockInputResponseWithNullBlock = { - blockInput: NullBlockInput; - blockInputMeta: BlockInputMetaPendingBlockWithBlobs | BlockInputMetaPendingBlockWithColumns; -}; - -type GossipBlockInputResponse = GossipBlockInputResponseWithBlock | GossipBlockInputResponseWithNullBlock; - -const MAX_GOSSIPINPUT_CACHE = 5; - /** - * For predeneb, SeenGossipBlockInput only tracks and caches block so that we don't need to download known block - * roots. From deneb, it serves same purpose plus tracks and caches the live blobs and blocks on the network to - * solve data availability for the blockInput. If no block has been seen yet for some already seen blobs, it - * responds will null, but on the first block or the consequent blobs it responds with blobs promise till all blobs - * become available. + * Consumers that create BlockInputs or change types of old BlockInputs + * + * - gossipHandlers (block and blob) + * - beaconBlocksMaybeBlobsByRange + * - unavailableBeaconBlobsByRoot (beaconBlocksMaybeBlobsByRoot) + * - publishBlock in the beacon/blocks/index.ts API + * https://github.com/ChainSafe/lodestar/blob/unstable/packages/beacon-node/src/api/impl/beacon/blocks/index.ts#L62 + * - maybeValidateBlobs in verifyBlocksDataAvailability (is_data_available spec function) + * https://github.com/ChainSafe/lodestar/blob/unstable/packages/beacon-node/src/chain/blocks/verifyBlocksDataAvailability.ts#L111 + * * - * One can start processing block on blobs promise blockInput response and can await on the promise before - * fully importing the block. 
The blobs promise is gets resolved as soon as all blobs corresponding to that - * block are seen by SeenGossipBlockInput + * Pruning management for SeenBlockInputCache + * ------------------------------------------ + * There are four cases for how pruning needs to be handled + * - Normal operation following head via gossip (and/or reqresp). For this situation the consumer (process pipeline or + * caller of processBlock) will call the `prune` method to remove any processed BlockInputs from the cache. This will + * also remove any ancestors of the processed BlockInput as that will also need to have been successfully processed + * for import to work correctly + * - onFinalized event handler will help to prune any non-canonical forks once the chain finalizes. Any block-slots that + * are before the finalized checkpoint will be pruned. + * - Range-sync periods. The range process uses this cache to store and sync blocks with DA data as the chain is pulled + * from peers. We pull batches, by epoch, so 32 slots are pulled at a time and several batches are pulled concurrently. + * It is important to set the MAX_BLOCK_INPUT_CACHE_SIZE high enough to support range sync activities. Currently the + * value is set for 5 batches of 32 slots. As process block is called (similar to following head) the BlockInput and + * its ancestors will be pruned. + * - Non-Finality times. This is a bit more tricky. There can be long periods of non-finality and storing everything + * will cause OOM. The pruneToMax will help ensure a hard limit on the number of stored blocks (with DA) that are held + * in memory at any one time. The value for MAX_BLOCK_INPUT_CACHE_SIZE is set to accommodate range-sync but in + * practice this value may need to be massaged in the future if we find issues when debugging non-finality */ -export class SeenGossipBlockInput { - private readonly blockInputCache = new Map(); + +export class SeenBlockInput { + private readonly config: ChainForkConfig; private readonly custodyConfig: CustodyConfig; - private readonly executionEngine: IExecutionEngine; private readonly clock: IClock; - private readonly emitter: ChainEventEmitter; - private readonly logger: Logger; - - constructor( - custodyConfig: CustodyConfig, - executionEngine: IExecutionEngine, - emitter: ChainEventEmitter, - clock: IClock, - logger: Logger - ) { + private readonly chainEvents: ChainEventEmitter; + private readonly signal: AbortSignal; + private readonly metrics: Metrics | null; + private readonly logger?: Logger; + private blockInputs = new Map(); + + constructor({config, custodyConfig, clock, chainEvents, signal, metrics, logger}: SeenBlockInputCacheModules) { + this.config = config; this.custodyConfig = custodyConfig; - this.executionEngine = executionEngine; this.clock = clock; - this.emitter = emitter; + this.chainEvents = chainEvents; + this.signal = signal; + this.metrics = metrics; this.logger = logger; + + if (metrics) { + metrics.seenCache.blockInput.blockInputCount.addCollect(() => + metrics.seenCache.blockInput.blockInputCount.set(this.blockInputs.size) + ); + } + + this.chainEvents.on(ChainEvent.forkChoiceFinalized, this.onFinalized); + this.signal.addEventListener("abort", () => { + this.chainEvents.off(ChainEvent.forkChoiceFinalized, this.onFinalized); + }); } - globalCacheId = 0; - prune(): void { - pruneSetToMax(this.blockInputCache, MAX_GOSSIPINPUT_CACHE); + has(rootHex: RootHex): boolean { + return this.blockInputs.has(rootHex); } - hasBlock(blockRoot: RootHex): boolean { - return 
this.blockInputCache.has(blockRoot); + get(rootHex: RootHex): IBlockInput | undefined { + return this.blockInputs.get(rootHex); } /** - * Intended to be used for gossip validation, specifically this check: - * [IGNORE] The sidecar is the first sidecar for the tuple (block_header.slot, block_header.proposer_index, - * sidecar.index) with valid header signature, sidecar inclusion proof, and kzg proof + * Removes the single BlockInput from the cache */ - hasDataColumnSidecar(sidecar: fulu.DataColumnSidecar) { - const blockRoot = ssz.phase0.BeaconBlockHeader.hashTreeRoot(sidecar.signedBlockHeader.message); - const blockRootHex = toHexString(blockRoot); + remove(rootHex: RootHex): void { + this.blockInputs.delete(rootHex); + } - const blockCache = this.blockInputCache.get(blockRootHex); - if (blockCache === undefined) { - return false; - } - if (blockCache.cachedData === undefined || blockCache.cachedData.fork !== ForkName.fulu) { - return false; - } - const existingSidecar = blockCache.cachedData.dataColumnsCache.get(sidecar.index); - if (!existingSidecar) { - return false; + /** + * Removes a processed BlockInput from the cache and also removes any ancestors of processed blocks + */ + prune(rootHex: RootHex): void { + let blockInput = this.blockInputs.get(rootHex); + let parentRootHex = blockInput?.parentRootHex; + while (blockInput) { + this.blockInputs.delete(blockInput.blockRootHex); + blockInput = this.blockInputs.get(parentRootHex ?? ""); + parentRootHex = blockInput?.parentRootHex; } - return ( - sidecar.signedBlockHeader.message.slot === existingSidecar.dataColumn.signedBlockHeader.message.slot && - sidecar.index === existingSidecar.dataColumn.index && - sidecar.signedBlockHeader.message.proposerIndex === - existingSidecar.dataColumn.signedBlockHeader.message.proposerIndex - ); + this.pruneToMaxSize(); } - getGossipBlockInput( - config: ChainForkConfig, - gossipedInput: GossipedBlockInput, - metrics: Metrics | null - ): GossipBlockInputResponse { - let blockHex: RootHex; - let blockCache: BlockInputCacheType; - let fork: ForkName; - - if (gossipedInput.type === GossipedInputType.block) { - const {signedBlock} = gossipedInput; - fork = config.getForkName(signedBlock.message.slot); - - blockHex = toHexString( - config.getForkTypes(signedBlock.message.slot).BeaconBlock.hashTreeRoot(signedBlock.message) - ); - blockCache = this.blockInputCache.get(blockHex) ?? getEmptyBlockInputCacheEntry(fork, ++this.globalCacheId); - - blockCache.block = signedBlock; - } else if (gossipedInput.type === GossipedInputType.blob) { - const {blobSidecar} = gossipedInput; - const blockRoot = ssz.phase0.BeaconBlockHeader.hashTreeRoot(blobSidecar.signedBlockHeader.message); - fork = config.getForkName(blobSidecar.signedBlockHeader.message.slot); - - blockHex = toHexString(blockRoot); - blockCache = this.blockInputCache.get(blockHex) ?? 
getEmptyBlockInputCacheEntry(fork, ++this.globalCacheId); - if (blockCache.cachedData?.fork !== ForkName.deneb && blockCache.cachedData?.fork !== ForkName.electra) { - throw Error(`blob data at non deneb/electra fork=${blockCache.fork}`); + onFinalized = (checkpoint: CheckpointWithHex) => { + const cutoffSlot = computeStartSlotAtEpoch(checkpoint.epoch); + for (const [rootHex, blockInput] of this.blockInputs) { + if (blockInput.slot < cutoffSlot) { + this.blockInputs.delete(rootHex); } - - // TODO: freetheblobs check if its the same blob or a duplicate and throw/take actions - blockCache.cachedData?.blobsCache.set(blobSidecar.index, blobSidecar); - } else if (gossipedInput.type === GossipedInputType.dataColumn) { - const {dataColumnSidecar, dataColumnBytes} = gossipedInput; - const blockRoot = ssz.phase0.BeaconBlockHeader.hashTreeRoot(dataColumnSidecar.signedBlockHeader.message); - fork = config.getForkName(dataColumnSidecar.signedBlockHeader.message.slot); - - blockHex = toHexString(blockRoot); - blockCache = this.blockInputCache.get(blockHex) ?? getEmptyBlockInputCacheEntry(fork, ++this.globalCacheId); - if (blockCache.cachedData?.fork !== ForkName.fulu) { - throw Error(`data column data at non fulu fork=${blockCache.fork}`); + } + this.pruneToMaxSize(); + }; + + getByBlock({block, source, seenTimestampSec, peerIdStr}: SourceMeta & {block: SignedBeaconBlock}): IBlockInput { + const blockRoot = this.config.getForkTypes(block.message.slot).BeaconBlock.hashTreeRoot(block.message); + const blockRootHex = toRootHex(blockRoot); + + // TODO(peerDAS): Why is it necessary to static cast this here. All conditional paths result in a valid value so should be defined correctly below + let blockInput = this.blockInputs.get(blockRootHex) as IBlockInput; + if (!blockInput) { + const {forkName, daOutOfRange} = this.buildCommonProps(block.message.slot); + if (!isForkPostDeneb(forkName)) { + blockInput = BlockInputPreData.createFromBlock({ + block, + blockRootHex, + daOutOfRange, + forkName, + source: { + source, + seenTimestampSec, + peerIdStr, + }, + }); } - - if (this.hasDataColumnSidecar(dataColumnSidecar)) { - throw new DataColumnSidecarGossipError(GossipAction.IGNORE, { - code: DataColumnSidecarErrorCode.ALREADY_KNOWN, - slot: dataColumnSidecar.signedBlockHeader.message.slot, - columnIdx: dataColumnSidecar.index, + // else if (isForkPostFulu(forkName)) { + // blockInput = new BlockInputColumns.createFromBlock({ + // block, + // blockRootHex, + // daOutOfRange, + // forkName, + // custodyColumns: this.custodyConfig.custodyColumns, + // sampledColumns: this.custodyConfig.sampledColumns, + // source: { + // source, + // seenTimestampSec, + // peerIdStr + // } + // }) + // } + else { + blockInput = BlockInputBlobs.createFromBlock({ + block: block as SignedBeaconBlock, + blockRootHex, + daOutOfRange, + forkName, + source: { + source, + seenTimestampSec, + peerIdStr, + }, }); } + this.blockInputs.set(blockInput.blockRootHex, blockInput); + } - blockCache.cachedData?.dataColumnsCache.set(dataColumnSidecar.index, { - dataColumn: dataColumnSidecar, - // easily splice out the unsigned message as blob is a fixed length type - dataColumnBytes: dataColumnBytes?.slice(0, dataColumnBytes.length) ?? 
null, - }); + if (!blockInput.hasBlock()) { + blockInput.addBlock({block, blockRootHex, source: {source, seenTimestampSec, peerIdStr}}); } else { - // somehow helps resolve typescript that all types have been exausted - throw Error("Invalid gossipedInput type"); + this.logger?.debug("Attempt to cache block but is already cached on BlockInput", blockInput.getLogMeta()); + this.metrics?.seenCache.blockInput.duplicateBlockCount.inc({source}); } - if (!this.blockInputCache.has(blockHex)) { - this.blockInputCache.set(blockHex, blockCache); - callInNextEventLoop(() => { - getDataColumnsFromExecution(config, this.custodyConfig, this.executionEngine, this.emitter, blockCache, metrics) - .then((_success) => { - // TODO: (@matthewkeil) add metrics collection point here - }) - .catch((error) => { - this.logger.warn("Error getting data columns from execution", {blockHex}, error); - }); + return blockInput; + } + + getByBlob( + {blobSidecar, source, seenTimestampSec, peerIdStr}: SourceMeta & {blobSidecar: deneb.BlobSidecar}, + opts: GetByBlobOptions = {} + ): BlockInputBlobs { + const blockRoot = this.config + .getForkTypes(blobSidecar.signedBlockHeader.message.slot) + .BeaconBlockHeader.hashTreeRoot(blobSidecar.signedBlockHeader.message); + const blockRootHex = toRootHex(blockRoot); + + // TODO(peerDAS): Why is it necessary to static cast this here. All conditional paths result in a valid value so should be defined correctly below + let blockInput = this.blockInputs.get(blockRootHex) as IBlockInput; + let created = false; + if (!blockInput) { + created = true; + const {forkName, daOutOfRange} = this.buildCommonProps(blobSidecar.signedBlockHeader.message.slot); + blockInput = BlockInputBlobs.createFromBlob({ + blobSidecar, + blockRootHex, + daOutOfRange, + forkName, + source, + seenTimestampSec, + peerIdStr, }); + this.metrics?.seenCache.blockInput.createdByBlob.inc(); + this.blockInputs.set(blockRootHex, blockInput); } - const {block: signedBlock, blockInputPromise, resolveBlockInput, cachedData} = blockCache; - - if (signedBlock !== undefined) { - if (!isForkPostDeneb(fork)) { - return { - blockInput: getBlockInput.preData(config, signedBlock, BlockSource.gossip), - blockInputMeta: {pending: null, haveBlobs: 0, expectedBlobs: 0}, - }; - } - - if (cachedData === undefined || !isForkPostDeneb(cachedData.fork)) { - throw Error("Missing or Invalid fork cached Data for post-deneb block"); - } - - if (cachedData.fork === ForkName.deneb || cachedData.fork === ForkName.electra) { - const {blobsCache, resolveAvailability} = cachedData; - - // block is available, check if all blobs have shown up - const {slot, body} = signedBlock.message; - const {blobKzgCommitments} = body as deneb.BeaconBlockBody; - const blockInfo = `blockHex=${blockHex}, slot=${slot}`; - - if (blobKzgCommitments.length < blobsCache.size) { - throw Error( - `Received more blobs=${blobsCache.size} than commitments=${blobKzgCommitments.length} for ${blockInfo}` - ); - } - - if (blobKzgCommitments.length === blobsCache.size) { - const allBlobs = getBlockInputBlobs(blobsCache); - const {blobs} = allBlobs; - const blockData = { - fork: cachedData.fork, - ...allBlobs, - blobsSource: BlobsSource.gossip, - }; - resolveAvailability(blockData); - metrics?.syncUnknownBlock.resolveAvailabilitySource.inc({source: BlockInputAvailabilitySource.GOSSIP}); - - const blockInput = getBlockInput.availableData(config, signedBlock, BlockSource.gossip, blockData); - - resolveBlockInput(blockInput); - return { - blockInput, - blockInputMeta: {pending: null, 
haveBlobs: blobs.length, expectedBlobs: blobKzgCommitments.length}, - }; - } - - const blockInput = getBlockInput.dataPromise(config, signedBlock, BlockSource.gossip, cachedData); - - resolveBlockInput(blockInput); - return { - blockInput, - blockInputMeta: { - pending: GossipedInputType.blob, - haveBlobs: blobsCache.size, - expectedBlobs: blobKzgCommitments.length, - }, - }; - } - - if (cachedData.fork === ForkName.fulu) { - const {dataColumnsCache, resolveAvailability, calledRecover} = cachedData as CachedDataColumns; - - // block is available, check if all blobs have shown up - const {slot} = signedBlock.message; - const blockInfo = `blockHex=${blockHex}, slot=${slot}`; - - if (NUMBER_OF_COLUMNS < dataColumnsCache.size) { - throw Error( - `Received more dataColumns=${dataColumnsCache.size} than columns=${NUMBER_OF_COLUMNS} for ${blockInfo}` - ); - } - - // get the custody columns and see if we have got all the requisite columns - const blobKzgCommitmentsLen = (signedBlock.message.body as deneb.BeaconBlockBody).blobKzgCommitments.length; - if (blobKzgCommitmentsLen === 0) { - const blockData: BlockInputDataColumns = { - fork: cachedData.fork, - dataColumns: [], - dataColumnsBytes: [], - dataColumnsSource: DataColumnsSource.gossip, - }; - resolveAvailability(blockData); - metrics?.syncUnknownBlock.resolveAvailabilitySource.inc({source: BlockInputAvailabilitySource.GOSSIP}); - - const blockInput = getBlockInput.availableData(config, signedBlock, BlockSource.gossip, blockData); - - resolveBlockInput(blockInput); - return { - blockInput, - blockInputMeta: {pending: null, haveColumns: 0, expectedColumns: 0}, - }; - } - - const resolveAvailabilityAndBlockInput = (source: BlockInputAvailabilitySource) => { - const allDataColumns = getBlockInputDataColumns(dataColumnsCache, this.custodyConfig.sampledColumns); - const blockData: BlockInputDataColumns = { - fork: cachedData.fork, - ...allDataColumns, - dataColumnsSource: DataColumnsSource.gossip, - }; - resolveAvailability(blockData); - // TODO(das): should not use syncUnknownBlock metrics here - metrics?.syncUnknownBlock.resolveAvailabilitySource.inc({source}); - metrics?.dataColumns.bySource.inc({source: DataColumnsSource.gossip}); - - const blockInput = getBlockInput.availableData(config, signedBlock, BlockSource.gossip, blockData); - resolveBlockInput(blockInput); - return blockInput; - }; - - const columnCount = dataColumnsCache.size; - if ( - // only try to recover all columns with "--supernode" - this.custodyConfig.sampledColumns.length === NUMBER_OF_COLUMNS && - columnCount >= NUMBER_OF_COLUMNS / 2 && - columnCount < NUMBER_OF_COLUMNS && - !calledRecover && - // doing recover right away is not efficient because it may delay data_column_sidecar validation - this.clock.secFromSlot(slot) * 1000 >= BLOCK_AVAILABILITY_CUTOFF_MS - ) { - // should call once per slot - cachedData.calledRecover = true; - callInNextEventLoop(async () => { - const logCtx = { - blockHex, - slot, - dataColumns: dataColumnsCache.size, - }; - const recoverResult = await recoverDataColumnSidecars(dataColumnsCache, this.clock, metrics).catch((e) => { - this.logger.error("Error recovering data column sidecars", logCtx, e); - return RecoverResult.Failed; - }); - metrics?.recoverDataColumnSidecars.reconstructionResult.inc({result: recoverResult}); - switch (recoverResult) { - case RecoverResult.SuccessResolved: { - resolveAvailabilityAndBlockInput(BlockInputAvailabilitySource.RECOVERED); - // Publish columns if and only if subscribed to them - const sampledColumns = 
this.custodyConfig.sampledColumns.map((columnIndex) => { - const dataColumn = dataColumnsCache.get(columnIndex)?.dataColumn; - if (!dataColumn) { - throw Error(`After recover, missing data column for index=${columnIndex} in cache`); - } - return dataColumn; - }); - - // for columns that we already seen, it will be ignored through `ignoreDuplicatePublishError` gossip option - this.emitter.emit(ChainEvent.publishDataColumns, sampledColumns); - this.logger.verbose("Recovered data column sidecars and resolved availability", logCtx); - break; - } - case RecoverResult.SuccessLate: - this.logger.verbose("Recovered data column sidecars but it's late to resolve availability", logCtx); - break; - case RecoverResult.Failed: - this.logger.verbose("Failed to recover data column sidecars", logCtx); - break; - case RecoverResult.NotAttemptedFull: - this.logger.verbose("Did not attempt because we have full column sidecars", logCtx); - break; - case RecoverResult.NotAttemptedLessThanHalf: - this.logger.verbose("Did not attempt because we have too few column sidecars", logCtx); - break; - default: - break; - } - }); - } - if (hasSampledDataColumns(this.custodyConfig, dataColumnsCache)) { - const blockInput = resolveAvailabilityAndBlockInput(BlockInputAvailabilitySource.GOSSIP); - const allDataColumns = getBlockInputDataColumns(dataColumnsCache, this.custodyConfig.sampledColumns); - const {dataColumns} = allDataColumns; - return { - blockInput, - blockInputMeta: { - pending: null, - haveColumns: dataColumns.length, - expectedColumns: this.custodyConfig.sampledColumns.length, - }, - }; - } - - const blockInput = getBlockInput.dataPromise(config, signedBlock, BlockSource.gossip, cachedData); - - resolveBlockInput(blockInput); - return { - blockInput, - blockInputMeta: { - pending: GossipedInputType.dataColumn, - haveColumns: dataColumnsCache.size, - expectedColumns: this.custodyConfig.sampledColumns.length, - }, - }; - } - - throw Error(`Invalid fork=${fork}`); + if (!isBlockInputBlobs(blockInput)) { + throw new SeenBlockInputCacheError( + { + code: SeenBlockInputCacheErrorCode.WRONG_BLOCK_INPUT_TYPE, + cachedType: blockInput.type, + requestedType: DAType.Blobs, + ...blockInput.getLogMeta(), + }, + `BlockInputType mismatch adding blobIndex=${blobSidecar.index}` + ); } - // will need to wait for the block to showup - if (cachedData === undefined) { - throw Error("Missing cachedData for deneb+ blobs"); + if (!blockInput.hasBlob(blobSidecar.index)) { + blockInput.addBlob({blobSidecar, blockRootHex, source, seenTimestampSec, peerIdStr}); + } else if (!created) { + this.logger?.debug( + `Attempt to cache blob index #${blobSidecar.index} but is already cached on BlockInput`, + blockInput.getLogMeta() + ); + this.metrics?.seenCache.blockInput.duplicateBlobCount.inc({source}); + if (opts.throwErrorIfAlreadyKnown) { + throw new SeenBlockInputCacheError({ + code: SeenBlockInputCacheErrorCode.GOSSIP_BLOB_ALREADY_KNOWN, + ...blockInput.getLogMeta(), + }); + } } - if (cachedData.fork === ForkName.deneb || cachedData.fork === ForkName.electra) { - const {blobsCache} = cachedData; + return blockInput; + } - return { - blockInput: { - block: null, - blockRootHex: blockHex, - cachedData, - blockInputPromise, - }, - blockInputMeta: {pending: GossipedInputType.block, haveBlobs: blobsCache.size, expectedBlobs: null}, - }; + getByColumn( + {columnSidecar, seenTimestampSec, source, peerIdStr}: SourceMeta & {columnSidecar: fulu.DataColumnSidecar}, + opts: GetByBlobOptions = {} + ): BlockInputColumns { + const blockRoot = this.config 
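+      // note: hash the sidecar's signed block header to derive the block root; it is the key into the shared blockInputs cache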
+ .getForkTypes(columnSidecar.signedBlockHeader.message.slot) + .BeaconBlockHeader.hashTreeRoot(columnSidecar.signedBlockHeader.message); + const blockRootHex = toRootHex(blockRoot); + + let blockInput = this.blockInputs.get(blockRootHex); + let created = false; + if (!blockInput) { + created = true; + const {forkName, daOutOfRange} = this.buildCommonProps(columnSidecar.signedBlockHeader.message.slot); + blockInput = BlockInputColumns.createFromColumn({ + columnSidecar, + blockRootHex, + daOutOfRange, + forkName, + source, + seenTimestampSec, + peerIdStr, + custodyColumns: this.custodyConfig.custodyColumns, + sampledColumns: this.custodyConfig.sampledColumns, + }); + this.metrics?.seenCache.blockInput.createdByBlob.inc(); + this.blockInputs.set(blockRootHex, blockInput); } - if (fork === ForkName.fulu) { - const {dataColumnsCache} = cachedData as CachedDataColumns; - - return { - blockInput: { - block: null, - blockRootHex: blockHex, - cachedData, - blockInputPromise, + if (!isBlockInputColumns(blockInput)) { + throw new SeenBlockInputCacheError( + { + code: SeenBlockInputCacheErrorCode.WRONG_BLOCK_INPUT_TYPE, + cachedType: blockInput.type, + requestedType: DAType.Columns, + ...blockInput.getLogMeta(), }, - blockInputMeta: {pending: GossipedInputType.block, haveColumns: dataColumnsCache.size, expectedColumns: null}, - }; + `BlockInputType mismatch adding columnIndex=${columnSidecar.index}` + ); } - throw Error(`invalid fork=${fork} data not implemented`); - - /** - * TODO: @matthewkeil this code was unreachable. Commented to remove lint error but need to verify the condition - * again to make sure this is not necessary before deleting it - * - * DO NOT DELETE until verified can be removed - */ - // will need to wait for the block to showup - // if (cachedData === undefined) { - // throw Error("Missing cachedData for deneb+ blobs"); - // } - // const {blobsCache} = cachedData as CachedBlobs; - - // return { - // blockInput: { - // block: null, - // blockRootHex: blockHex, - // cachedData: cachedData as CachedData, - // blockInputPromise, - // }, - // blockInputMeta: {pending: GossipedInputType.block, haveBlobs: blobsCache.size, expectedBlobs: null}, - // }; - } -} + if (!blockInput.hasColumn(columnSidecar.index)) { + blockInput.addColumn({columnSidecar, blockRootHex, source, seenTimestampSec, peerIdStr}); + } else if (!created) { + this.logger?.debug( + `Attempt to cache column index #${columnSidecar.index} but is already cached on BlockInput`, + blockInput.getLogMeta() + ); + this.metrics?.seenCache.blockInput.duplicateColumnCount.inc({source}); + if (opts.throwErrorIfAlreadyKnown) { + throw new SeenBlockInputCacheError({ + code: SeenBlockInputCacheErrorCode.GOSSIP_COLUMN_ALREADY_KNOWN, + ...blockInput.getLogMeta(), + }); + } + } -export function getEmptyBlockInputCacheEntry(fork: ForkName, globalCacheId: number): BlockInputCacheType { - // Capture both the promise and its callbacks for blockInput and final availability - // It is not spec'ed but in tests in Firefox and NodeJS the promise constructor is run immediately - let resolveBlockInput: ((block: BlockInput) => void) | null = null; - const blockInputPromise = new Promise((resolveCB) => { - resolveBlockInput = resolveCB; - }); - if (resolveBlockInput === null) { - throw Error("Promise Constructor was not executed immediately"); + return blockInput; } - if (!isForkPostDeneb(fork)) { - return {fork, blockInputPromise, resolveBlockInput}; - } - - if (fork === ForkName.deneb || fork === ForkName.electra) { - let resolveAvailability: 
((blobs: BlockInputBlobs) => void) | null = null; - const availabilityPromise = new Promise((resolveCB) => { - resolveAvailability = resolveCB; - }); - - if (resolveAvailability === null) { - throw Error("Promise Constructor was not executed immediately"); - } - const blobsCache = new Map(); - const cachedData: CachedData = { - fork, - blobsCache, - availabilityPromise, - resolveAvailability, - cacheId: ++globalCacheId, + private buildCommonProps(slot: Slot): { + daOutOfRange: boolean; + forkName: ForkName; + } { + const forkName = this.config.getForkName(slot); + return { + forkName, + daOutOfRange: isDaOutOfRange(this.config, forkName, slot, this.clock.currentEpoch), }; - return {fork, blockInputPromise, resolveBlockInput, cachedData}; } - if (fork === ForkName.fulu) { - let resolveAvailability: ((blobs: BlockInputDataColumns) => void) | null = null; - const availabilityPromise = new Promise((resolveCB) => { - resolveAvailability = resolveCB; - }); - - if (resolveAvailability === null) { - throw Error("Promise Constructor was not executed immediately"); + /** + * Use a custom implementation of pruneSetToMax so entries can be sorted by slot + * and deleted via key/rootHex + */ + private pruneToMaxSize() { + let itemsToDelete = this.blockInputs.size - MAX_BLOCK_INPUT_CACHE_SIZE; + + if (itemsToDelete > 0) { + // sort ascending by slot so the oldest entries are evicted first + const sorted = [...this.blockInputs.entries()].sort((a, b) => a[1].slot - b[1].slot); + for (const [rootHex] of sorted) { + this.blockInputs.delete(rootHex); + itemsToDelete--; + if (itemsToDelete <= 0) return; + } } - - const dataColumnsCache = new Map(); - const cachedData: CachedData = { - fork, - dataColumnsCache, - availabilityPromise, - resolveAvailability, - cacheId: ++globalCacheId, - calledRecover: false, - }; - return {fork, blockInputPromise, resolveBlockInput, cachedData}; } +} - throw Error(`Invalid fork=${fork} for getEmptyBlockInputCacheEntry`); +enum SeenBlockInputCacheErrorCode { + WRONG_BLOCK_INPUT_TYPE = "BLOCK_INPUT_CACHE_ERROR_WRONG_BLOCK_INPUT_TYPE", + GOSSIP_BLOB_ALREADY_KNOWN = "BLOCK_INPUT_CACHE_ERROR_GOSSIP_BLOB_ALREADY_KNOWN", + GOSSIP_COLUMN_ALREADY_KNOWN = "BLOCK_INPUT_CACHE_ERROR_GOSSIP_COLUMN_ALREADY_KNOWN", } + +type SeenBlockInputCacheErrorType = + | (LogMetaBasic & { + code: SeenBlockInputCacheErrorCode.WRONG_BLOCK_INPUT_TYPE; + cachedType: DAType; + requestedType: DAType; + }) + | (LogMetaBlobs & { + code: SeenBlockInputCacheErrorCode.GOSSIP_BLOB_ALREADY_KNOWN; + }) + | (LogMetaColumns & { + code: SeenBlockInputCacheErrorCode.GOSSIP_COLUMN_ALREADY_KNOWN; + }); + +class SeenBlockInputCacheError extends LodestarError {} diff --git a/packages/beacon-node/src/metrics/metrics/lodestar.ts b/packages/beacon-node/src/metrics/metrics/lodestar.ts index 4bf65eb7d0f6..d741f68f57ce 100644 --- a/packages/beacon-node/src/metrics/metrics/lodestar.ts +++ b/packages/beacon-node/src/metrics/metrics/lodestar.ts @@ -1,6 +1,5 @@ import {NotReorgedReason} from "@lodestar/fork-choice"; import {BlockInputSource} from "../../chain/blocks/blockInput/index.js"; -import {BlobsSource, BlockSource, DataColumnsSource} from "../../chain/blocks/types.js"; import {JobQueueItemType} from "../../chain/bls/index.js"; import {AttestationErrorCode, BlockErrorCode} from "../../chain/errors/index.js"; import { @@ -11,7 +10,6 @@ import {InsertOutcome} from "../../chain/opPools/types.js"; import {RegenCaller, RegenFnName} from "../../chain/regen/interface.js"; import {ReprocessStatus} from "../../chain/reprocess.js"; import {RejectReason} from "../../chain/seenCache/seenAttestationData.js"; -import
{BlockInputAvailabilitySource} from "../../chain/seenCache/seenGossipBlockInput.js"; import {CacheItemType} from "../../chain/stateCache/types.js"; import {OpSource} from "../../chain/validatorMonitor.js"; import {ExecutionPayloadStatus} from "../../execution/index.js"; @@ -543,7 +541,7 @@ export function createLodestarMetrics( help: "Time elapsed between block slot time and the time block received via unknown block sync", buckets: [0.5, 1, 2, 4, 6, 12], }), - resolveAvailabilitySource: register.gauge<{source: BlockInputAvailabilitySource}>({ + resolveAvailabilitySource: register.gauge<{source: BlockInputSource}>({ name: "lodestar_sync_blockinput_availability_source", help: "Total number of blocks whose data availability was resolved", labelNames: ["source"], @@ -745,12 +743,12 @@ export function createLodestarMetrics( }), }, dataColumns: { - bySource: register.gauge<{source: DataColumnsSource}>({ + bySource: register.gauge<{source: BlockInputSource}>({ name: "lodestar_data_columns_by_source", help: "Number of received data columns by source", labelNames: ["source"], }), - elapsedTimeTillReceived: register.histogram<{source: DataColumnsSource}>({ + elapsedTimeTillReceived: register.histogram<{source: BlockInputSource}>({ name: "lodestar_data_column_elapsed_time_till_received_seconds", help: "Time elapsed between block slot time and the time data column received", labelNames: ["source"], @@ -781,12 +779,12 @@ export function createLodestarMetrics( name: "lodestar_import_block_set_head_after_first_interval_total", help: "Total times an imported block is set as head after the first slot interval", }), - bySource: register.gauge<{source: BlockSource}>({ + bySource: register.gauge<{source: BlockInputSource}>({ name: "lodestar_import_block_by_source_total", help: "Total number of imported blocks by source", labelNames: ["source"], }), - blobsBySource: register.gauge<{blobsSource: BlobsSource}>({ + blobsBySource: register.gauge<{blobsSource: BlockInputSource}>({ name: "lodestar_import_blobs_by_source_total", help: "Total number of imported blobs by source", labelNames: ["blobsSource"], diff --git a/packages/beacon-node/src/sync/utils/downloadByRange.ts b/packages/beacon-node/src/sync/utils/downloadByRange.ts index f75d08d6a6b2..757976af8d18 100644 --- a/packages/beacon-node/src/sync/utils/downloadByRange.ts +++ b/packages/beacon-node/src/sync/utils/downloadByRange.ts @@ -4,7 +4,7 @@ import {DataAvailabilityStatus} from "@lodestar/state-transition"; import {RootHex, SignedBeaconBlock, Slot, deneb, fulu, phase0} from "@lodestar/types"; import {LodestarError, Logger, prettyBytes, prettyPrintIndices} from "@lodestar/utils"; import {BlockInputSource, IBlockInput} from "../../chain/blocks/blockInput/index.js"; -import {SeenBlockInputCache} from "../../chain/seenCache/seenBlockInput.js"; +import {SeenBlockInput} from "../../chain/seenCache/seenGossipBlockInput.js"; import {INetwork, prettyPrintPeerIdStr} from "../../network/index.js"; import {linspace} from "../../util/numpy.js"; import {PeerIdStr} from "../../util/peerId.js"; @@ -23,7 +23,7 @@ export type DownloadByRangeResponses = { export type DownloadAndCacheByRangeProps = DownloadByRangeRequests & { config: ChainForkConfig; - cache: SeenBlockInputCache; + cache: SeenBlockInput; network: INetwork; logger: Logger; peerIdStr: string; diff --git a/packages/beacon-node/src/sync/utils/downloadByRoot.ts b/packages/beacon-node/src/sync/utils/downloadByRoot.ts index b86429f48d0f..df8c6d475316 100644 --- 
a/packages/beacon-node/src/sync/utils/downloadByRoot.ts +++ b/packages/beacon-node/src/sync/utils/downloadByRoot.ts @@ -4,7 +4,7 @@ import {signedBlockToSignedHeader} from "@lodestar/state-transition"; import {deneb} from "@lodestar/types"; import {LodestarError, fromHex, prettyBytes, toHex} from "@lodestar/utils"; import {BlockInputSource, DAType, IBlockInput, isBlockInputBlobs} from "../../chain/blocks/blockInput/index.js"; -import {SeenBlockInputCache} from "../../chain/seenCache/seenBlockInput.js"; +import {SeenBlockInput} from "../../chain/seenCache/seenGossipBlockInput.js"; import {IExecutionEngine} from "../../execution/index.js"; import {INetwork} from "../../network/index.js"; import {computeInclusionProof} from "../../util/blobs.js"; @@ -19,7 +19,7 @@ import { export type DownloadBlockInputByRootProps = { config: ChainForkConfig; network: INetwork; - cache: SeenBlockInputCache; + cache: SeenBlockInput; executionEngine?: IExecutionEngine; pending: BlockInputSyncCacheItem; peerIdStr: PeerIdStr; diff --git a/packages/beacon-node/src/util/dataColumns.ts b/packages/beacon-node/src/util/dataColumns.ts index de8852bedfec..553962e53a2e 100644 --- a/packages/beacon-node/src/util/dataColumns.ts +++ b/packages/beacon-node/src/util/dataColumns.ts @@ -1,34 +1,12 @@ import {digest} from "@chainsafe/as-sha256"; import {ChainForkConfig} from "@lodestar/config"; -import { - DATA_COLUMN_SIDECAR_SUBNET_COUNT, - ForkName, - NUMBER_OF_COLUMNS, - NUMBER_OF_CUSTODY_GROUPS, -} from "@lodestar/params"; +import {DATA_COLUMN_SIDECAR_SUBNET_COUNT, NUMBER_OF_COLUMNS, NUMBER_OF_CUSTODY_GROUPS} from "@lodestar/params"; import {signedBlockToSignedHeader} from "@lodestar/state-transition"; import {ColumnIndex, CustodyIndex, SignedBeaconBlockHeader, deneb, fulu} from "@lodestar/types"; import {ssz} from "@lodestar/types"; import {bytesToBigInt} from "@lodestar/utils"; -import { - BlockInputDataColumns, - BlockSource, - DataColumnsCacheMap, - DataColumnsSource, - getBlockInput, - getBlockInputDataColumns, -} from "../chain/blocks/types.js"; -import {ChainEvent, ChainEventEmitter} from "../chain/emitter.js"; -import {BlockInputCacheType} from "../chain/seenCache/seenGossipBlockInput.js"; -import {IExecutionEngine} from "../execution/engine/interface.js"; -import {Metrics} from "../metrics/metrics.js"; import {NodeId} from "../network/subnets/index.js"; -import { - computeKzgCommitmentsInclusionProof, - kzgCommitmentToVersionedHash, - recoverDataColumnSidecars as recover, -} from "./blobs.js"; -import {IClock} from "./clock.js"; +import {computeKzgCommitmentsInclusionProof} from "./blobs.js"; import {kzg} from "./kzg.js"; export enum RecoverResult { @@ -322,192 +300,3 @@ export function getDataColumnSidecarsFromColumnSidecar( cellsAndKzgProofs ); } - -/** - * If we receive more than half of NUMBER_OF_COLUMNS (64) we should recover all remaining columns - */ -export async function recoverDataColumnSidecars( - dataColumnCache: DataColumnsCacheMap, - clock: IClock, - metrics: Metrics | null -): Promise { - const columnCount = dataColumnCache.size; - if (columnCount >= NUMBER_OF_COLUMNS) { - // We have all columns - return RecoverResult.NotAttemptedFull; - } - - if (columnCount < NUMBER_OF_COLUMNS / 2) { - // We don't have enough columns to recover - return RecoverResult.NotAttemptedLessThanHalf; - } - - const partialColumns = dataColumnCache.size; - metrics?.recoverDataColumnSidecars.custodyBeforeReconstruction.set(partialColumns); - const partialSidecars = new Map(); - for (const [columnIndex, {dataColumn}] of 
dataColumnCache.entries()) { - // the more columns we put, the slower the recover - if (partialSidecars.size >= NUMBER_OF_COLUMNS / 2) { - break; - } - partialSidecars.set(columnIndex, dataColumn); - } - - const timer = metrics?.peerDas.dataColumnsReconstructionTime.startTimer(); - // if this function throws, we catch at the consumer side - const fullSidecars = await recover(partialSidecars); - timer?.(); - if (fullSidecars == null) { - return RecoverResult.Failed; - } - - const firstDataColumn = dataColumnCache.values().next().value?.dataColumn; - if (firstDataColumn == null) { - // should not happen because we checked the size of the cache before this - throw new Error("No data column found in cache to recover from"); - } - - const slot = firstDataColumn.signedBlockHeader.message.slot; - const secFromSlot = clock.secFromSlot(slot); - metrics?.recoverDataColumnSidecars.elapsedTimeTillReconstructed.observe(secFromSlot); - - if (dataColumnCache.size === NUMBER_OF_COLUMNS) { - // either gossip or getBlobsV2 resolved availability while we were recovering - return RecoverResult.SuccessLate; - } - - // We successfully recovered the data columns, update the cache - for (let columnIndex = 0; columnIndex < NUMBER_OF_COLUMNS; columnIndex++) { - if (dataColumnCache.has(columnIndex)) { - // We already have this column - continue; - } - - const sidecar = fullSidecars[columnIndex]; - if (sidecar === undefined) { - throw new Error(`full sidecars is undefined at index ${columnIndex}`); - } - dataColumnCache.set(columnIndex, {dataColumn: sidecar, dataColumnBytes: null}); - metrics?.peerDas.reconstructedColumns.inc(NUMBER_OF_COLUMNS - partialColumns); - } - - return RecoverResult.SuccessResolved; -} - -export function hasSampledDataColumns(custodyConfig: CustodyConfig, dataColumnCache: DataColumnsCacheMap): boolean { - return ( - dataColumnCache.size >= custodyConfig.sampledColumns.length && - custodyConfig.sampledColumns.reduce((acc, columnIndex) => acc && dataColumnCache.has(columnIndex), true) - ); -} - -export async function getDataColumnsFromExecution( - config: ChainForkConfig, - custodyConfig: CustodyConfig, - executionEngine: IExecutionEngine, - emitter: ChainEventEmitter, - blockCache: BlockInputCacheType, - metrics: Metrics | null -): Promise { - if (blockCache.fork !== ForkName.fulu) { - return false; - } - - if (!blockCache.cachedData) { - // this condition should never get hit... 
just a sanity check - throw new Error("invalid blockCache"); - } - - if (blockCache.cachedData.fork !== ForkName.fulu) { - return false; - } - - // If already have all columns, exit - if (hasSampledDataColumns(custodyConfig, blockCache.cachedData.dataColumnsCache)) { - return true; - } - - let commitments: undefined | Uint8Array[]; - if (blockCache.block) { - const block = blockCache.block as fulu.SignedBeaconBlock; - commitments = block.message.body.blobKzgCommitments; - } else { - const firstSidecar = blockCache.cachedData.dataColumnsCache.values().next().value; - commitments = firstSidecar?.dataColumn.kzgCommitments; - } - - if (!commitments) { - throw new Error("blockInputCache missing both block and cachedData"); - } - - // Return if block has no blobs - if (commitments.length === 0) { - return true; - } - - // Process KZG commitments into versioned hashes - const versionedHashes: Uint8Array[] = commitments.map(kzgCommitmentToVersionedHash); - - // Get blobs from execution engine - metrics?.peerDas.getBlobsV2Requests.inc(); - const timer = metrics?.peerDas.getBlobsV2RequestDuration.startTimer(); - const blobs = await executionEngine.getBlobs(blockCache.fork, versionedHashes); - timer?.(); - - // Execution engine was unable to find one or more blobs - if (blobs === null) { - return false; - } - metrics?.peerDas.getBlobsV2Responses.inc(); - - // Return if we received all data columns while waiting for getBlobs - if (hasSampledDataColumns(custodyConfig, blockCache.cachedData.dataColumnsCache)) { - return true; - } - - let dataColumnSidecars: fulu.DataColumnSidecars; - const cellsAndProofs = getCellsAndProofs(blobs); - if (blockCache.block) { - dataColumnSidecars = getDataColumnSidecarsFromBlock( - config, - blockCache.block as fulu.SignedBeaconBlock, - cellsAndProofs - ); - } else { - const firstSidecar = blockCache.cachedData.dataColumnsCache.values().next().value; - if (!firstSidecar) { - throw new Error("blockInputCache missing both block and data column sidecar"); - } - dataColumnSidecars = getDataColumnSidecarsFromColumnSidecar(firstSidecar.dataColumn, cellsAndProofs); - } - - // Publish columns if and only if subscribed to them - const sampledColumns = custodyConfig.sampledColumns.map((columnIndex) => dataColumnSidecars[columnIndex]); - - // for columns that we already seen, it will be ignored through `ignoreDuplicatePublishError` gossip option - emitter.emit(ChainEvent.publishDataColumns, sampledColumns); - - for (const column of sampledColumns) { - blockCache.cachedData.dataColumnsCache.set(column.index, {dataColumn: column, dataColumnBytes: null}); - } - - const allDataColumns = getBlockInputDataColumns(blockCache.cachedData.dataColumnsCache, custodyConfig.sampledColumns); - // TODO: Add metrics - // metrics?.syncUnknownBlock.resolveAvailabilitySource.inc({source: BlockInputAvailabilitySource.GOSSIP}); - const blockData: BlockInputDataColumns = { - fork: blockCache.cachedData.fork, - ...allDataColumns, - dataColumnsSource: DataColumnsSource.engine, - }; - const partialColumns = blockCache.cachedData.dataColumnsCache.size; - blockCache.cachedData.resolveAvailability(blockData); - metrics?.dataColumns.bySource.inc({source: DataColumnsSource.engine}, NUMBER_OF_COLUMNS - partialColumns); - - if (blockCache.block !== undefined) { - const blockInput = getBlockInput.availableData(config, blockCache.block, BlockSource.gossip, blockData); - - blockCache.resolveBlockInput(blockInput); - } - - return true; -} From ec8727f5dda3861ed92aea007b014c22a819babc Mon Sep 17 00:00:00 2001 From: 
matthewkeil Date: Fri, 15 Aug 2025 00:41:38 +0700 Subject: [PATCH 009/173] Revert "chore: fix some test build issues" This reverts commit 0be6543c0d79d170a5f092cce3d133c2755e6120. --- .../chain/seenCache/seenBlockInput.test.ts | 11 +- .../unit/sync/utils/downloadByRange.test.ts | 18 +- .../beacon-node/test/utils/blocksAndData.ts | 169 +++++++++++------- 3 files changed, 113 insertions(+), 85 deletions(-) diff --git a/packages/beacon-node/test/unit/chain/seenCache/seenBlockInput.test.ts b/packages/beacon-node/test/unit/chain/seenCache/seenBlockInput.test.ts index ed0bcd181674..1016859c49cf 100644 --- a/packages/beacon-node/test/unit/chain/seenCache/seenBlockInput.test.ts +++ b/packages/beacon-node/test/unit/chain/seenCache/seenBlockInput.test.ts @@ -1,4 +1,3 @@ -import {generateKeyPair} from "@libp2p/crypto/keys"; import {createChainForkConfig, defaultChainConfig} from "@lodestar/config"; import {ForkName, ForkPostCapella, ForkPostDeneb} from "@lodestar/params"; import {computeStartSlotAtEpoch, signedBlockToSignedHeader} from "@lodestar/state-transition"; @@ -14,12 +13,10 @@ import { } from "../../../../src/chain/blocks/blockInput/index.js"; import {ChainEvent, ChainEventEmitter} from "../../../../src/chain/emitter.js"; import {SeenBlockInputCache} from "../../../../src/chain/seenCache/seenBlockInput.js"; -import {computeNodeIdFromPrivateKey} from "../../../../src/network/subnets/index.js"; import {Clock} from "../../../../src/util/clock.js"; -import {CustodyConfig} from "../../../../src/util/dataColumns.js"; import {testLogger} from "../../../utils/logger.js"; -describe("SeenBlockInputCache", async () => { +describe("SeenBlockInputCache", () => { let cache: SeenBlockInputCache; let abortController: AbortController; let chainEvents: ChainEventEmitter; @@ -35,9 +32,6 @@ describe("SeenBlockInputCache", async () => { ELECTRA_FORK_EPOCH, FULU_FORK_EPOCH, }); - const privateKey = await generateKeyPair("secp256k1"); - const nodeId = computeNodeIdFromPrivateKey(privateKey); - const custodyConfig = new CustodyConfig({config, nodeId}); const slots: Record = { capella: computeStartSlotAtEpoch(CAPELLA_FORK_EPOCH), @@ -110,14 +104,13 @@ describe("SeenBlockInputCache", async () => { } const logger = testLogger(); - beforeEach(async () => { + beforeEach(() => { chainEvents = new ChainEventEmitter(); abortController = new AbortController(); const signal = abortController.signal; const genesisTime = Math.floor(Date.now() / 1000); cache = new SeenBlockInputCache({ config, - custodyConfig, clock: new Clock({config, genesisTime, signal}), chainEvents, signal, diff --git a/packages/beacon-node/test/unit/sync/utils/downloadByRange.test.ts b/packages/beacon-node/test/unit/sync/utils/downloadByRange.test.ts index ef9034b86ad7..381e942dc9f2 100644 --- a/packages/beacon-node/test/unit/sync/utils/downloadByRange.test.ts +++ b/packages/beacon-node/test/unit/sync/utils/downloadByRange.test.ts @@ -1,10 +1,11 @@ +import {ChainForkConfig, createChainForkConfig, defaultChainConfig} from "@lodestar/config"; import {ForkName} from "@lodestar/params"; import {DataAvailabilityStatus} from "@lodestar/state-transition"; import {SignedBeaconBlock, WithBytes, deneb, ssz} from "@lodestar/types"; import {Mock, beforeAll, beforeEach, describe, expect, it, vi} from "vitest"; -import {ChainEventEmitter} from "../../../../src/chain/index.js"; -import {SeenBlockInputCache} from "../../../../src/chain/seenCache/seenBlockInput.js"; -import {INetwork} from "../../../../src/network/index.js"; +import {ChainEventEmitter} from 
"../../../../../src/chain/index.js"; +import {SeenBlockInputCache} from "../../../../../src/chain/seenCache/seenBlockInput.js"; +import {INetwork} from "../../../../../src/network/index.js"; import { DownloadByRangeRequests, DownloadByRangeResponses, @@ -12,10 +13,10 @@ import { compareBlockByRangeRequestAndResponse, requestByRange, validateRequests, -} from "../../../../src/sync/utils/downloadByRange.js"; -import {Clock} from "../../../../src/util/clock.js"; -import {getMockedLogger} from "../../../../test/mocks/loggerMock.js"; -import {config, custodyConfig, generateChainOfBlockMaybeSidecars, slots} from "../../../utils/blocksAndData.js"; +} from "../../../../../src/sync/range/utils/downloadByRange.js"; +import {Clock} from "../../../../../src/util/clock.js"; +import {getMockedLogger} from "../../../../../test/mocks/loggerMock.js"; +import {buildBatchOfBlockWithBlobs, config, slots} from "../../../../utils/blocksAndData.js"; describe("downloadByRange", () => { const peerIdStr = "0x1234567890abcdef"; @@ -39,7 +40,7 @@ describe("downloadByRange", () => { blocksRequest: [{startSlot, count, step: 1}], blobsRequest: [{count, startSlot}], }; - const blockAndBlobs = generateChainOfBlockMaybeSidecars(ForkName.deneb, startSlot, count, minBlobs, maxBlobs); + const blockAndBlobs = buildBatchOfBlockWithBlobs(ForkName.deneb, startSlot, count, minBlobs, maxBlobs); const blobSidecars = blockAndBlobs.flatMap(({blobSidecars}) => blobSidecars); networkResponse = { blocks: blockAndBlobs.map(({block}) => ({bytes: new Uint8Array(), data: block})), @@ -56,7 +57,6 @@ describe("downloadByRange", () => { const signal = abortController.signal; cache = new SeenBlockInputCache({ config, - custodyConfig, clock: new Clock({config, signal, genesisTime: Math.floor(Date.now() / 1000)}), chainEvents: new ChainEventEmitter(), signal, diff --git a/packages/beacon-node/test/utils/blocksAndData.ts b/packages/beacon-node/test/utils/blocksAndData.ts index 997cd2c8ede1..438ae969c02a 100644 --- a/packages/beacon-node/test/utils/blocksAndData.ts +++ b/packages/beacon-node/test/utils/blocksAndData.ts @@ -1,15 +1,35 @@ import {randomBytes} from "node:crypto"; import {SIGNATURE_LENGTH_UNCOMPRESSED} from "@chainsafe/blst"; -import {BYTES_PER_BLOB, BYTES_PER_FIELD_ELEMENT} from "@crate-crypto/node-eth-kzg"; -import {generateKeyPair} from "@libp2p/crypto/keys"; -import {ChainForkConfig, createChainForkConfig, defaultChainConfig} from "@lodestar/config"; -import {ForkPostCapella, ForkPostDeneb, ForkPostFulu, isForkPostDeneb, isForkPostFulu} from "@lodestar/params"; -import {computeStartSlotAtEpoch, signedBlockToSignedHeader} from "@lodestar/state-transition"; -import {SignedBeaconBlock, Slot, deneb, fulu, ssz} from "@lodestar/types"; +import { + BYTES_PER_BLOB, + BYTES_PER_COMMITMENT, + BYTES_PER_FIELD_ELEMENT, + BYTES_PER_PROOF, +} from "@crate-crypto/node-eth-kzg"; +import {createChainForkConfig, defaultChainConfig} from "@lodestar/config"; +import { + ForkName, + ForkPostCapella, + ForkPostDeneb, + ForkPostFulu, + NUMBER_OF_COLUMNS, + isForkPostDeneb, + isForkPostFulu, +} from "@lodestar/params"; +import { + blindedOrFullBlockToHeader, + blockToHeader, + computeStartSlotAtEpoch, + signedBlockToSignedHeader, +} from "@lodestar/state-transition"; +import {BeaconBlock, SignedBeaconBlock, Slot, deneb, fulu, ssz} from "@lodestar/types"; import {toRootHex} from "@lodestar/utils"; -import {computeNodeIdFromPrivateKey} from "../../src/network/subnets/index.js"; -import {computeInclusionProof} from "../../src/util/blobs.js"; -import 
{CustodyConfig, getDataColumnSidecarsFromBlock} from "../../src/util/dataColumns.js"; +import {VersionedHashes} from "../../src/execution/index.js"; +import { + computeInclusionProof, + computeKzgCommitmentsInclusionProof, + kzgCommitmentToVersionedHash, +} from "../../src/util/blobs.js"; import {kzg} from "../../src/util/kzg.js"; import {ROOT_SIZE} from "../../src/util/sszBytes.js"; @@ -24,9 +44,6 @@ export const config = createChainForkConfig({ ELECTRA_FORK_EPOCH, FULU_FORK_EPOCH, }); -export const privateKey = await generateKeyPair("secp256k1"); -export const nodeId = computeNodeIdFromPrivateKey(privateKey); -export const custodyConfig = new CustodyConfig({config, nodeId}); export const slots: Record = { capella: computeStartSlotAtEpoch(CAPELLA_FORK_EPOCH), @@ -67,13 +84,13 @@ function generateProposerIndex(min = 0, max = 100_000): number { return generateRandomInt(max, min); } -function generateBeaconBlock({ - config, +function generateBeaconBlock({ + forkName, slot, parentRoot, -}: {config: ChainForkConfig; slot?: Slot; parentRoot?: Uint8Array}): SignedBeaconBlock { - const block = config.getForkTypes(slot ?? 0).SignedBeaconBlock.defaultValue(); - block.message.slot = slot ? slot : 0; +}: {forkName: F; slot?: Slot; parentRoot?: Uint8Array}): SignedBeaconBlock { + const block = ssz[forkName].SignedBeaconBlock.defaultValue(); + block.message.slot = slot ? slot : slots[forkName]; block.message.parentRoot = parentRoot ? parentRoot : Uint8Array.from(randomBytes(ROOT_SIZE)); block.message.stateRoot = Uint8Array.from(randomBytes(ROOT_SIZE)); block.message.proposerIndex = generateProposerIndex(); @@ -81,23 +98,25 @@ function generateBeaconBlock({ return block; } -function generateRoots( - config: ChainForkConfig, - block: SignedBeaconBlock +function generateRoots( + forkName: F, + block: SignedBeaconBlock ): { blockRoot: Uint8Array; rootHex: string; } { - const blockRoot = config.getForkTypes(block.message.slot).BeaconBlock.hashTreeRoot(block.message); + const blockRoot = ssz[forkName].BeaconBlock.hashTreeRoot(block.message); const rootHex = toRootHex(blockRoot); + const signed = ssz[forkName].SignedBeaconBlock.defaultValue(); return { + block: signed, blockRoot, rootHex, }; } function generateBlobSidecars( - config: ChainForkConfig, + forkName: ForkPostDeneb, block: SignedBeaconBlock, count: number, oomProtection = false @@ -111,16 +130,12 @@ function generateBlobSidecars( const signedBlockHeader = signedBlockToSignedHeader(config, block); for (let index = 0; index < count; index++) { - const blobSidecar = ssz.deneb.BlobSidecar.defaultValue(); + const blobSidecar = ssz[forkName].BlobSidecar.defaultValue(); blobSidecar.index = index; blobSidecar.signedBlockHeader = signedBlockHeader; - blobSidecar.blob = generateRandomBlob(); + blobSidecar.blob = generateRandomBlob(index); blobSidecar.kzgCommitment = kzg.blobToKzgCommitment(blobSidecar.blob); - blobSidecar.kzgCommitmentInclusionProof = computeInclusionProof( - config.getForkName(block.message.slot), - block.message.body, - index - ); + blobSidecar.kzgCommitmentInclusionProof = computeInclusionProof(forkName, block.message.body, index); blobSidecar.kzgProof = kzg.computeBlobKzgProof(blobSidecar.blob, blobSidecar.kzgCommitment); if (oomProtection) { @@ -142,19 +157,41 @@ function generateBlobSidecars( } function generateColumnSidecars( - config: ChainForkConfig, + forkName: F, block: SignedBeaconBlock, - numberOfBlobs: number + numberOfBlobs: number, + oomProtection = false ): { block: SignedBeaconBlock; columnSidecars: fulu.DataColumnSidecars; 
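+  // when oomProtection is set, the cell data below is left empty so long fixture chains don't exhaust memory during tests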
} { const blobs = Array.from({length: numberOfBlobs}, () => generateRandomBlob()); const kzgCommitments = blobs.map((blob) => kzg.blobToKzgCommitment(blob)); - block.message.body.blobKzgCommitments = kzgCommitments; + block.body.blobKzgCommitments = kzgCommitments; + const signedBlockHeader = signedBlockToSignedHeader(config, block); const cellsAndProofs = blobs.map((blob) => kzg.computeCellsAndKzgProofs(blob)); - const columnSidecars = getDataColumnSidecarsFromBlock(config, block, cellsAndProofs); + const kzgCommitmentsInclusionProof = Array.from({length: blobs.length}, () => + computeKzgCommitmentsInclusionProof(forkName, block.body) + ); + + const columnSidecars = Array.from({length: NUMBER_OF_COLUMNS}, (_, columnIndex) => { + const column = oomProtection + ? [] + : Array.from({length: blobs.length}, (_, rowNumber) => cellsAndProofs[rowNumber].cells[columnIndex]); + const kzgProofs = Array.from( + {length: blobs.length}, + (_, rowNumber) => cellsAndProofs[rowNumber].proofs[columnIndex] + ); + return { + index: columnIndex, + column, + kzgCommitments, + kzgProofs, + signedBlockHeader, + kzgCommitmentsInclusionProof, + }; + }); return { block, @@ -168,19 +205,19 @@ export type BlockTestSet = { rootHex: string; }; -export function generateChainOfBlocks({ - config, +export function generateChainOfBlocks({ + forkName, count, -}: {config: ChainForkConfig; count: number}): BlockTestSet[] { +}: {forkName: F; count: number}): BlockTestSet[] { let parentRoot = Uint8Array.from(randomBytes(ROOT_SIZE)); - - const blocks: BlockTestSet[] = []; - for (let slot = 0; slot < count; slot++) { - const block = generateBeaconBlock({config, parentRoot, slot}); - const {blockRoot, rootHex} = generateRoots(config, block); + let slot = slots[forkName]; + const blocks: BlockTestSet[] = []; + for (; slot < slot + count; slot++) { + const block = generateBeaconBlock({forkName, parentRoot, slot}); + const {blockRoot, rootHex} = generateRoots(forkName, block); parentRoot = block.message.parentRoot; blocks.push({ - block: block as SignedBeaconBlock, + block, blockRoot, rootHex, }); @@ -194,22 +231,23 @@ export type BlockWithColumnsTestSet = BlockTestSet & columnSidecars: fulu.DataColumnSidecars; }; -export function generateBlockWithBlobSidecars({ - config, +export function generateBlockWithBlobSidecars({ + forkName, slot, parentRoot, + oomProtection = false, }: { - config: ChainForkConfig; + forkName: F; parentRoot?: Uint8Array; slot?: Slot; oomProtection?: boolean; -}): BlockWithBlobsTestSet { +}): BlockWithBlobsTestSet { const {block, blobSidecars} = generateBlobSidecars( - config, - generateBeaconBlock({config, parentRoot, slot}) as SignedBeaconBlock, + forkName, + generateBeaconBlock({forkName, parentRoot, slot}), generateRandomInt(1, 6) ); - const {blockRoot, rootHex} = generateRoots(config, block); + const {blockRoot, rootHex} = generateRoots(forkName, block); return { block, blobSidecars, @@ -218,22 +256,23 @@ export function generateBlockWithBlobSidecars({ }; } -export function generateBlockWithColumnSidecars({ - config, +export function generateBlockWithColumnSidecars({ + forkName, slot, parentRoot, + oomProtection = false, }: { - config: ChainForkConfig; + forkName: F; parentRoot?: Uint8Array; slot?: Slot; oomProtection?: boolean; -}): BlockWithColumnsTestSet { +}): BlockWithColumnsTestSet { const {block, columnSidecars} = generateColumnSidecars( - config, - generateBeaconBlock({config, parentRoot, slot}) as SignedBeaconBlock, + forkName, + generateBeaconBlock({forkName, parentRoot, slot}), 
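+    // presumably 1-6 blobs per block: enough to exercise multi-blob paths while keeping fixtures small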
generateRandomInt(1, 6) ); - const {blockRoot, rootHex} = generateRoots(config, block); + const {blockRoot, rootHex} = generateRoots(forkName, block); return { block, columnSidecars, @@ -245,29 +284,26 @@ export function generateBlockWithColumnSidecars({ export type BlocksWithSidecars = F extends ForkPostFulu ? BlockWithColumnsTestSet[] : BlockWithBlobsTestSet[]; - export function generateChainOfBlocksWithBlobs({ - config, forkName, count, oomProtection = false, }: { - config: ChainForkConfig; forkName: F; count: number; oomProtection?: boolean; }): BlocksWithSidecars { let parentRoot = Uint8Array.from(randomBytes(ROOT_SIZE)); let slot = slots[forkName]; - const blocks: BlocksWithSidecars = []; + const blocks: BlocksWithSidecars = []; for (; slot < slot + count; slot++) { const blockWithSidecars = isForkPostFulu(forkName) - ? generateBlockWithColumnSidecars({config, parentRoot, slot, oomProtection}) - : generateBlockWithBlobSidecars({config, parentRoot, slot, oomProtection}); + ? generateBlockWithColumnSidecars({forkName, parentRoot, slot, oomProtection}) + : generateBlockWithBlobSidecars({forkName, parentRoot, slot, oomProtection}); parentRoot = blockWithSidecars.blockRoot; - blocks.push(blockWithSidecars as any); + blocks.push(blockWithSidecars); } - return blocks as BlocksWithSidecars; + return blocks; } export type ChainOfBlockMaybeSidecars = F extends ForkPostFulu @@ -275,14 +311,13 @@ export type ChainOfBlockMaybeSidecars = F extends For : F extends ForkPostDeneb ? BlockWithBlobsTestSet[] : BlockTestSet[]; - export function generateChainOfBlockMaybeSidecars( forkName: F, count: number, oomProtection = false ): ChainOfBlockMaybeSidecars { if (isForkPostDeneb(forkName)) { - return generateChainOfBlocksWithBlobs({config, forkName, count, oomProtection}) as ChainOfBlockMaybeSidecars; + return generateChainOfBlocksWithBlobs({forkName, count, oomProtection}); } - return generateChainOfBlocks({config, count}) as ChainOfBlockMaybeSidecars; + return generateChainOfBlocks({forkName, count}); } From 3b045989a411dedc2fbcb0b1f710b25b2d0b017c Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Fri, 15 Aug 2025 01:08:21 +0700 Subject: [PATCH 010/173] chore: fix some build issues (reverts some of the details from previous attempt) --- .../chain/seenCache/seenBlockInput.test.ts | 9 +++- .../unit/sync/utils/downloadByRange.test.ts | 18 ++++---- .../beacon-node/test/utils/blocksAndData.ts | 45 ++++++++----------- 3 files changed, 36 insertions(+), 36 deletions(-) diff --git a/packages/beacon-node/test/unit/chain/seenCache/seenBlockInput.test.ts b/packages/beacon-node/test/unit/chain/seenCache/seenBlockInput.test.ts index 1016859c49cf..5f4011bac9e6 100644 --- a/packages/beacon-node/test/unit/chain/seenCache/seenBlockInput.test.ts +++ b/packages/beacon-node/test/unit/chain/seenCache/seenBlockInput.test.ts @@ -1,3 +1,4 @@ +import {generateKeyPair} from "@libp2p/crypto/keys"; import {createChainForkConfig, defaultChainConfig} from "@lodestar/config"; import {ForkName, ForkPostCapella, ForkPostDeneb} from "@lodestar/params"; import {computeStartSlotAtEpoch, signedBlockToSignedHeader} from "@lodestar/state-transition"; @@ -13,10 +14,12 @@ import { } from "../../../../src/chain/blocks/blockInput/index.js"; import {ChainEvent, ChainEventEmitter} from "../../../../src/chain/emitter.js"; import {SeenBlockInputCache} from "../../../../src/chain/seenCache/seenBlockInput.js"; +import {computeNodeIdFromPrivateKey} from "../../../../src/network/subnets/index.js"; import {Clock} from "../../../../src/util/clock.js"; 
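+// CustodyConfig is re-introduced so the cache knows which column indices this node must custody and sample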
+import {CustodyConfig} from "../../../../src/util/dataColumns.js"; import {testLogger} from "../../../utils/logger.js"; -describe("SeenBlockInputCache", () => { +describe("SeenBlockInputCache", async () => { let cache: SeenBlockInputCache; let abortController: AbortController; let chainEvents: ChainEventEmitter; @@ -32,6 +35,9 @@ describe("SeenBlockInputCache", () => { ELECTRA_FORK_EPOCH, FULU_FORK_EPOCH, }); + const privateKey = await generateKeyPair("secp256k1"); + const nodeId = computeNodeIdFromPrivateKey(privateKey); + const custodyConfig = new CustodyConfig({config, nodeId}); const slots: Record = { capella: computeStartSlotAtEpoch(CAPELLA_FORK_EPOCH), @@ -111,6 +117,7 @@ describe("SeenBlockInputCache", () => { const genesisTime = Math.floor(Date.now() / 1000); cache = new SeenBlockInputCache({ config, + custodyConfig, clock: new Clock({config, genesisTime, signal}), chainEvents, signal, diff --git a/packages/beacon-node/test/unit/sync/utils/downloadByRange.test.ts b/packages/beacon-node/test/unit/sync/utils/downloadByRange.test.ts index 381e942dc9f2..ef9034b86ad7 100644 --- a/packages/beacon-node/test/unit/sync/utils/downloadByRange.test.ts +++ b/packages/beacon-node/test/unit/sync/utils/downloadByRange.test.ts @@ -1,11 +1,10 @@ -import {ChainForkConfig, createChainForkConfig, defaultChainConfig} from "@lodestar/config"; import {ForkName} from "@lodestar/params"; import {DataAvailabilityStatus} from "@lodestar/state-transition"; import {SignedBeaconBlock, WithBytes, deneb, ssz} from "@lodestar/types"; import {Mock, beforeAll, beforeEach, describe, expect, it, vi} from "vitest"; -import {ChainEventEmitter} from "../../../../../src/chain/index.js"; -import {SeenBlockInputCache} from "../../../../../src/chain/seenCache/seenBlockInput.js"; -import {INetwork} from "../../../../../src/network/index.js"; +import {ChainEventEmitter} from "../../../../src/chain/index.js"; +import {SeenBlockInputCache} from "../../../../src/chain/seenCache/seenBlockInput.js"; +import {INetwork} from "../../../../src/network/index.js"; import { DownloadByRangeRequests, DownloadByRangeResponses, @@ -13,10 +12,10 @@ import { compareBlockByRangeRequestAndResponse, requestByRange, validateRequests, -} from "../../../../../src/sync/range/utils/downloadByRange.js"; -import {Clock} from "../../../../../src/util/clock.js"; -import {getMockedLogger} from "../../../../../test/mocks/loggerMock.js"; -import {buildBatchOfBlockWithBlobs, config, slots} from "../../../../utils/blocksAndData.js"; +} from "../../../../src/sync/utils/downloadByRange.js"; +import {Clock} from "../../../../src/util/clock.js"; +import {getMockedLogger} from "../../../../test/mocks/loggerMock.js"; +import {config, custodyConfig, generateChainOfBlockMaybeSidecars, slots} from "../../../utils/blocksAndData.js"; describe("downloadByRange", () => { const peerIdStr = "0x1234567890abcdef"; @@ -40,7 +39,7 @@ describe("downloadByRange", () => { blocksRequest: [{startSlot, count, step: 1}], blobsRequest: [{count, startSlot}], }; - const blockAndBlobs = buildBatchOfBlockWithBlobs(ForkName.deneb, startSlot, count, minBlobs, maxBlobs); + const blockAndBlobs = generateChainOfBlockMaybeSidecars(ForkName.deneb, startSlot, count, minBlobs, maxBlobs); const blobSidecars = blockAndBlobs.flatMap(({blobSidecars}) => blobSidecars); networkResponse = { blocks: blockAndBlobs.map(({block}) => ({bytes: new Uint8Array(), data: block})), @@ -57,6 +56,7 @@ describe("downloadByRange", () => { const signal = abortController.signal; cache = new SeenBlockInputCache({ config, + 
custodyConfig, clock: new Clock({config, signal, genesisTime: Math.floor(Date.now() / 1000)}), chainEvents: new ChainEventEmitter(), signal, diff --git a/packages/beacon-node/test/utils/blocksAndData.ts b/packages/beacon-node/test/utils/blocksAndData.ts index 438ae969c02a..c77b4a56d957 100644 --- a/packages/beacon-node/test/utils/blocksAndData.ts +++ b/packages/beacon-node/test/utils/blocksAndData.ts @@ -1,14 +1,9 @@ import {randomBytes} from "node:crypto"; import {SIGNATURE_LENGTH_UNCOMPRESSED} from "@chainsafe/blst"; +import {BYTES_PER_BLOB, BYTES_PER_FIELD_ELEMENT} from "@crate-crypto/node-eth-kzg"; +import {generateKeyPair} from "@libp2p/crypto/keys"; +import {ChainForkConfig, createChainForkConfig, defaultChainConfig} from "@lodestar/config"; import { - BYTES_PER_BLOB, - BYTES_PER_COMMITMENT, - BYTES_PER_FIELD_ELEMENT, - BYTES_PER_PROOF, -} from "@crate-crypto/node-eth-kzg"; -import {createChainForkConfig, defaultChainConfig} from "@lodestar/config"; -import { - ForkName, ForkPostCapella, ForkPostDeneb, ForkPostFulu, @@ -16,20 +11,12 @@ import { isForkPostDeneb, isForkPostFulu, } from "@lodestar/params"; -import { - blindedOrFullBlockToHeader, - blockToHeader, - computeStartSlotAtEpoch, - signedBlockToSignedHeader, -} from "@lodestar/state-transition"; -import {BeaconBlock, SignedBeaconBlock, Slot, deneb, fulu, ssz} from "@lodestar/types"; +import {computeStartSlotAtEpoch, signedBlockToSignedHeader} from "@lodestar/state-transition"; +import {SignedBeaconBlock, Slot, deneb, fulu, ssz} from "@lodestar/types"; import {toRootHex} from "@lodestar/utils"; -import {VersionedHashes} from "../../src/execution/index.js"; -import { - computeInclusionProof, - computeKzgCommitmentsInclusionProof, - kzgCommitmentToVersionedHash, -} from "../../src/util/blobs.js"; +import {computeNodeIdFromPrivateKey} from "../../src/network/subnets/index.js"; +import {computeInclusionProof, computeKzgCommitmentsInclusionProof} from "../../src/util/blobs.js"; +import {CustodyConfig, getDataColumnSidecarsFromBlock} from "../../src/util/dataColumns.js"; import {kzg} from "../../src/util/kzg.js"; import {ROOT_SIZE} from "../../src/util/sszBytes.js"; @@ -44,6 +31,9 @@ export const config = createChainForkConfig({ ELECTRA_FORK_EPOCH, FULU_FORK_EPOCH, }); +export const privateKey = await generateKeyPair("secp256k1"); +export const nodeId = computeNodeIdFromPrivateKey(privateKey); +export const custodyConfig = new CustodyConfig({config, nodeId}); export const slots: Record = { capella: computeStartSlotAtEpoch(CAPELLA_FORK_EPOCH), @@ -94,6 +84,7 @@ function generateBeaconBlock({ block.message.parentRoot = parentRoot ? 
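+  // chain onto the supplied parent root when provided; otherwise seed the chain from a random root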
parentRoot : Uint8Array.from(randomBytes(ROOT_SIZE)); block.message.stateRoot = Uint8Array.from(randomBytes(ROOT_SIZE)); block.message.proposerIndex = generateProposerIndex(); + // signature is obviously not valid so can generate it now instead of after commitments are attached block.signature = Uint8Array.from(randomBytes(SIGNATURE_LENGTH_UNCOMPRESSED)); return block; } @@ -107,9 +98,7 @@ function generateRoots( } { const blockRoot = ssz[forkName].BeaconBlock.hashTreeRoot(block.message); const rootHex = toRootHex(blockRoot); - const signed = ssz[forkName].SignedBeaconBlock.defaultValue(); return { - block: signed, blockRoot, rootHex, }; @@ -167,7 +156,7 @@ function generateColumnSidecars( } { const blobs = Array.from({length: numberOfBlobs}, () => generateRandomBlob()); const kzgCommitments = blobs.map((blob) => kzg.blobToKzgCommitment(blob)); - block.body.blobKzgCommitments = kzgCommitments; + block.message.body.blobKzgCommitments = kzgCommitments; const signedBlockHeader = signedBlockToSignedHeader(config, block); const cellsAndProofs = blobs.map((blob) => kzg.computeCellsAndKzgProofs(blob)); @@ -245,7 +234,8 @@ export function generateBlockWithBlobSidecars({ const {block, blobSidecars} = generateBlobSidecars( forkName, generateBeaconBlock({forkName, parentRoot, slot}), - generateRandomInt(1, 6) + generateRandomInt(1, 6), + oomProtection ); const {blockRoot, rootHex} = generateRoots(forkName, block); return { @@ -270,7 +260,8 @@ export function generateBlockWithColumnSidecars({ const {block, columnSidecars} = generateColumnSidecars( forkName, generateBeaconBlock({forkName, parentRoot, slot}), - generateRandomInt(1, 6) + generateRandomInt(1, 6), + oomProtection ); const {blockRoot, rootHex} = generateRoots(forkName, block); return { @@ -284,6 +275,7 @@ export function generateBlockWithColumnSidecars({ export type BlocksWithSidecars = F extends ForkPostFulu ? BlockWithColumnsTestSet[] : BlockWithBlobsTestSet[]; + export function generateChainOfBlocksWithBlobs({ forkName, count, @@ -311,6 +303,7 @@ export type ChainOfBlockMaybeSidecars = F extends For : F extends ForkPostDeneb ? 
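+  // post-Fulu forks carry column sidecars, Deneb/Electra carry blob sidecars, pre-Deneb carries blocks only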
BlockWithBlobsTestSet[] : BlockTestSet[]; + export function generateChainOfBlockMaybeSidecars( forkName: F, count: number, From c1bf66bbd234f3c24dccf365b14333223e3052bb Mon Sep 17 00:00:00 2001 From: Cayman Date: Fri, 15 Aug 2025 08:06:14 -0400 Subject: [PATCH 011/173] chore: fix up src/api --- .../src/api/impl/beacon/blocks/index.ts | 85 ++++++++++--------- .../chain/seenCache/seenGossipBlockInput.ts | 5 +- 2 files changed, 46 insertions(+), 44 deletions(-) diff --git a/packages/beacon-node/src/api/impl/beacon/blocks/index.ts b/packages/beacon-node/src/api/impl/beacon/blocks/index.ts index edaf126af244..2461ac15245a 100644 --- a/packages/beacon-node/src/api/impl/beacon/blocks/index.ts +++ b/packages/beacon-node/src/api/impl/beacon/blocks/index.ts @@ -28,7 +28,7 @@ import { sszTypesFor, } from "@lodestar/types"; import {fromHex, sleep, toHex, toRootHex} from "@lodestar/utils"; -import {BlockInput} from "../../../../chain/blocks/blockInput/index.js"; +import {BlockInputSource, isBlockInputBlobs, isBlockInputColumns} from "../../../../chain/blocks/blockInput/index.js"; import {ImportBlockOpts} from "../../../../chain/blocks/types.js"; import {verifyBlocksInEpoch} from "../../../../chain/blocks/verifyBlock.js"; import {BeaconChain} from "../../../../chain/chain.js"; @@ -80,10 +80,14 @@ export function getBeaconBlockApi({ const fork = config.getForkName(slot); const blockRoot = toRootHex(chain.config.getForkTypes(slot).BeaconBlock.hashTreeRoot(signedBlock.message)); - let blockForImport: BlockInput, blobSidecars: deneb.BlobSidecars, dataColumnSidecars: fulu.DataColumnSidecars; + const blockForImport = chain.seenBlockInputCache.getByBlock({ + block: signedBlock, + source: BlockInputSource.api, + seenTimestampSec, + }); + let blobSidecars: deneb.BlobSidecars, dataColumnSidecars: fulu.DataColumnSidecars; if (isDenebBlockContents(signedBlockContents)) { - let blockData: BlockInputAvailableData; if (isForkPostFulu(fork)) { const timer = metrics?.peerDas.dataColumnSidecarComputationTime.startTimer(); // If the block was produced by this node, we will already have computed cells @@ -101,30 +105,32 @@ export function getBeaconBlockApi({ cellsAndProofs ); timer?.(); - blockData = { - fork, - dataColumns: dataColumnSidecars, - dataColumnsBytes: dataColumnSidecars.map(() => null), - dataColumnsSource: DataColumnsSource.api, - } as BlockInputDataColumns; blobSidecars = []; } else if (isForkPostDeneb(fork)) { blobSidecars = getBlobSidecars(config, signedBlock, signedBlockContents.blobs, signedBlockContents.kzgProofs); - blockData = { - fork, - blobs: blobSidecars, - blobsSource: BlobsSource.api, - } as BlockInputBlobs; dataColumnSidecars = []; } else { throw Error(`Invalid data fork=${fork} for publish`); } - - blockForImport = getBlockInput.availableData(config, signedBlock, BlockSource.api, blockData); } else { blobSidecars = []; dataColumnSidecars = []; - blockForImport = getBlockInput.preData(config, signedBlock, BlockSource.api); + } + + if (dataColumnSidecars.length > 0 && isBlockInputColumns(blockForImport)) { + for (const dataColumnSidecar of dataColumnSidecars) { + blockForImport.addColumn({ + blockRootHex: blockRoot, + columnSidecar: dataColumnSidecar, + source: BlockInputSource.api, + seenTimestampSec, + }); + } + } + if (blobSidecars.length > 0 && isBlockInputBlobs(blockForImport)) { + for (const blobSidecar of blobSidecars) { + blockForImport.addBlob({blockRootHex: blockRoot, blobSidecar, source: BlockInputSource.api, seenTimestampSec}); + } } // check what validations have been requested 
before broadcasting and publishing the block @@ -232,18 +238,16 @@ export function getBeaconBlockApi({ // Simple implementation of a pending block queue. Keeping the block here recycles the API logic, and keeps the // REST request promise without any extra infrastructure. - const msToBlockSlot = - computeTimeAtSlot(config, blockForImport.block.message.slot, chain.genesisTime) * 1000 - Date.now(); + const msToBlockSlot = computeTimeAtSlot(config, slot, chain.genesisTime) * 1000 - Date.now(); if (msToBlockSlot <= MAX_API_CLOCK_DISPARITY_MS && msToBlockSlot > 0) { // If block is a bit early, hold it in a promise. Equivalent to a pending queue. await sleep(msToBlockSlot); } // TODO: Validate block - const delaySec = - seenTimestampSec - (chain.genesisTime + blockForImport.block.message.slot * config.SECONDS_PER_SLOT); + const delaySec = seenTimestampSec - (chain.genesisTime + slot * config.SECONDS_PER_SLOT); metrics?.gossipBlock.elapsedTimeTillReceived.observe({source: OpSource.api}, delaySec); - chain.validatorMonitor?.registerBeaconBlock(OpSource.api, delaySec, blockForImport.block.message); + chain.validatorMonitor?.registerBeaconBlock(OpSource.api, delaySec, signedBlock.message); chain.logger.info("Publishing block", valLogMeta); const publishPromises = [ @@ -302,28 +306,25 @@ export function getBeaconBlockApi({ chain.emitter.emit(routes.events.EventType.blockGossip, {slot, block: blockRoot}); } - if (blockForImport.type === BlockInputType.availableData) { - if (isForkPostFulu(blockForImport.blockData.fork)) { - const {dataColumns} = blockForImport.blockData as BlockInputDataColumns; - metrics?.dataColumns.bySource.inc({source: DataColumnsSource.api}, dataColumns.length); - - if (chain.emitter.listenerCount(routes.events.EventType.dataColumnSidecar)) { - for (const dataColumnSidecar of dataColumns) { - chain.emitter.emit(routes.events.EventType.dataColumnSidecar, { - blockRoot, - slot, - index: dataColumnSidecar.index, - kzgCommitments: dataColumnSidecar.kzgCommitments.map(toHex), - }); - } + if (isBlockInputColumns(blockForImport)) { + const dataColumns = blockForImport.getAllColumns(); + metrics?.dataColumns.bySource.inc({source: BlockInputSource.api}, dataColumns.length); + + if (chain.emitter.listenerCount(routes.events.EventType.dataColumnSidecar)) { + for (const dataColumnSidecar of dataColumns) { + chain.emitter.emit(routes.events.EventType.dataColumnSidecar, { + blockRoot, + slot, + index: dataColumnSidecar.index, + kzgCommitments: dataColumnSidecar.kzgCommitments.map(toHex), + }); } - } else if ( - isForkPostDeneb(blockForImport.blockData.fork) && - chain.emitter.listenerCount(routes.events.EventType.blobSidecar) - ) { - const {blobs} = blockForImport.blockData as BlockInputBlobs; + } + } else if (isBlockInputBlobs(blockForImport) && chain.emitter.listenerCount(routes.events.EventType.blobSidecar)) { + const blobSidecars = blockForImport.getBlobs(); - for (const blobSidecar of blobs) { + if (chain.emitter.listenerCount(routes.events.EventType.blobSidecar)) { + for (const blobSidecar of blobSidecars) { const {index, kzgCommitment} = blobSidecar; chain.emitter.emit(routes.events.EventType.blobSidecar, { blockRoot, diff --git a/packages/beacon-node/src/chain/seenCache/seenGossipBlockInput.ts b/packages/beacon-node/src/chain/seenCache/seenGossipBlockInput.ts index b62c8db8a770..edf6903ba08f 100644 --- a/packages/beacon-node/src/chain/seenCache/seenGossipBlockInput.ts +++ b/packages/beacon-node/src/chain/seenCache/seenGossipBlockInput.ts @@ -8,6 +8,7 @@ import {Metrics} from 
"../../metrics/metrics.js"; import {IClock} from "../../util/clock.js"; import {CustodyConfig} from "../../util/dataColumns.js"; import { + BlockInput, BlockInputBlobs, BlockInputColumns, BlockInputPreData, @@ -142,7 +143,7 @@ export class SeenBlockInput { this.pruneToMaxSize(); }; - getByBlock({block, source, seenTimestampSec, peerIdStr}: SourceMeta & {block: SignedBeaconBlock}): IBlockInput { + getByBlock({block, source, seenTimestampSec, peerIdStr}: SourceMeta & {block: SignedBeaconBlock}): BlockInput { const blockRoot = this.config.getForkTypes(block.message.slot).BeaconBlock.hashTreeRoot(block.message); const blockRootHex = toRootHex(blockRoot); @@ -201,7 +202,7 @@ export class SeenBlockInput { this.metrics?.seenCache.blockInput.duplicateBlockCount.inc({source}); } - return blockInput; + return blockInput as BlockInput; } getByBlob( From 17846cb575b5252a6497099eb3b5c4f4cedbdd40 Mon Sep 17 00:00:00 2001 From: Cayman Date: Fri, 15 Aug 2025 08:31:45 -0400 Subject: [PATCH 012/173] chore: halfway thru gossip handlers --- packages/beacon-node/src/network/events.ts | 5 +- .../src/network/processor/gossipHandlers.ts | 145 +++++++----------- 2 files changed, 61 insertions(+), 89 deletions(-) diff --git a/packages/beacon-node/src/network/events.ts b/packages/beacon-node/src/network/events.ts index feae977e20e4..d8fc63dc3b13 100644 --- a/packages/beacon-node/src/network/events.ts +++ b/packages/beacon-node/src/network/events.ts @@ -1,7 +1,7 @@ import {EventEmitter} from "node:events"; import {PeerId, TopicValidatorResult} from "@libp2p/interface"; import {CustodyIndex, RootHex, Status} from "@lodestar/types"; -import {BlockInput, NullBlockInput} from "../chain/blocks/types.js"; +import {BlockInput} from "../chain/blocks/blockInput/index.js"; import {PeerIdStr} from "../util/peerId.js"; import {StrictEventEmitterSingleArg} from "../util/strictEvents.js"; import {EventDirection} from "../util/workerEvents.js"; @@ -35,9 +35,8 @@ export type NetworkEventData = { }; [NetworkEvent.peerDisconnected]: {peer: PeerIdStr}; [NetworkEvent.reqRespRequest]: {request: RequestTypedContainer; peer: PeerId}; - [NetworkEvent.unknownBlockParent]: {blockInput: BlockInput; peer: PeerIdStr}; + [NetworkEvent.unknownBlockInput]: {blockInput: BlockInput; peer: PeerIdStr}; [NetworkEvent.unknownBlock]: {rootHex: RootHex; peer?: PeerIdStr}; - [NetworkEvent.unknownBlockInput]: {blockInput: BlockInput | NullBlockInput; peer?: PeerIdStr}; [NetworkEvent.pendingGossipsubMessage]: PendingGossipsubMessage; [NetworkEvent.gossipMessageValidationResult]: { msgId: string; diff --git a/packages/beacon-node/src/network/processor/gossipHandlers.ts b/packages/beacon-node/src/network/processor/gossipHandlers.ts index b8997dc70bf4..2bf2f40c70af 100644 --- a/packages/beacon-node/src/network/processor/gossipHandlers.ts +++ b/packages/beacon-node/src/network/processor/gossipHandlers.ts @@ -15,15 +15,8 @@ import { sszTypesFor, } from "@lodestar/types"; import {LogLevel, Logger, prettyBytes, toHex, toRootHex} from "@lodestar/utils"; -import { - BlobSidecarValidation, - BlockInput, - BlockInputAvailableData, - BlockInputType, - DataColumnsSource, - GossipedInputType, - NullBlockInput, -} from "../../chain/blocks/types.js"; +import {BlockInput, BlockInputSource} from "../../chain/blocks/blockInput/index.js"; +import {BlobSidecarValidation, GossipedInputType} from "../../chain/blocks/types.js"; import { AttestationError, AttestationErrorCode, @@ -135,31 +128,20 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand // 
always set block to seen cache for all forks so that we don't need to download it // TODO: validate block before adding to cache // tracked in https://github.com/ChainSafe/lodestar/issues/7957 - const blockInputRes = chain.seenGossipBlockInput.getGossipBlockInput( - config, - { - type: GossipedInputType.block, - signedBlock, - }, - metrics - ); - const blockInput = blockInputRes.blockInput; - // blockInput can't be returned null, improve by enforcing via return types - if (blockInput.block === null) { - throw Error( - `Invalid null blockInput returned by getGossipBlockInput for type=${GossipedInputType.block} blockHex=${blockShortHex} slot=${slot}` - ); - } - const blockInputMeta = - config.getForkSeq(signedBlock.message.slot) >= ForkSeq.deneb ? blockInputRes.blockInputMeta : {}; + const blockInput = chain.seenGossipBlockInput.getByBlock({ + block: signedBlock, + source: BlockInputSource.gossip, + seenTimestampSec, + peerIdStr, + }); + + const blockInputMeta = blockInput.getLogMeta(); const logCtx = { - slot: slot, - root: blockShortHex, + ...blockInputMeta, currentSlot: chain.clock.currentSlot, peerId: peerIdStr, delaySec, - ...blockInputMeta, recvToValLatency, }; @@ -186,7 +168,7 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand // Don't trigger this yet if full block and blobs haven't arrived yet if (e.type.code === BlockErrorCode.PARENT_UNKNOWN && blockInput !== null) { logger.debug("Gossip block has error", {slot, root: blockShortHex, code: e.type.code}); - events.emit(NetworkEvent.unknownBlockParent, {blockInput, peer: peerIdStr}); + events.emit(NetworkEvent.unknownBlockInput, {blockInput, peer: peerIdStr}); } if (e.action === GossipAction.REJECT) { @@ -203,7 +185,7 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand subnet: SubnetID, peerIdStr: string, seenTimestampSec: number - ): Promise { + ): Promise { const blobBlockHeader = blobSidecar.signedBlockHeader.message; const slot = blobBlockHeader.slot; const fork = config.getForkName(slot); @@ -215,14 +197,12 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand try { await validateGossipBlobSidecar(fork, chain, blobSidecar, subnet); - const {blockInput, blockInputMeta} = chain.seenGossipBlockInput.getGossipBlockInput( - config, - { - type: GossipedInputType.blob, - blobSidecar, - }, - metrics - ); + const blockInput = chain.seenGossipBlockInput.getByBlob({ + blobSidecar, + source: BlockInputSource.gossip, + seenTimestampSec, + peerIdStr, + }); const recvToValidation = Date.now() / 1000 - seenTimestampSec; const validationTime = recvToValidation - recvToValLatency; @@ -240,13 +220,11 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand } logger.debug("Received gossip blob", { - slot: slot, - root: blockShortHex, + ...blockInput.getLogMeta(), currentSlot: chain.clock.currentSlot, peerId: peerIdStr, delaySec, subnet, - ...blockInputMeta, recvToValLatency, recvToValidation, validationTime, @@ -276,33 +254,29 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand async function validateBeaconDataColumn( dataColumnSidecar: fulu.DataColumnSidecar, - dataColumnBytes: Uint8Array, + _dataColumnBytes: Uint8Array, gossipSubnet: SubnetID, peerIdStr: string, seenTimestampSec: number - ): Promise { + ): Promise { metrics?.peerDas.dataColumnSidecarProcessingRequests.inc(); const verificationTimer = metrics?.peerDas.dataColumnSidecarGossipVerificationTime.startTimer(); const 
dataColumnBlockHeader = dataColumnSidecar.signedBlockHeader.message; const slot = dataColumnBlockHeader.slot; const blockRootHex = toRootHex(ssz.phase0.BeaconBlockHeader.hashTreeRoot(dataColumnBlockHeader)); - const blockShortHex = prettyBytes(blockRootHex); const delaySec = chain.clock.secFromSlot(slot, seenTimestampSec); const recvToValLatency = Date.now() / 1000 - seenTimestampSec; try { await validateGossipDataColumnSidecar(chain, dataColumnSidecar, gossipSubnet, metrics); - const {blockInput, blockInputMeta} = chain.seenGossipBlockInput.getGossipBlockInput( - config, - { - type: GossipedInputType.dataColumn, - dataColumnSidecar, - dataColumnBytes, - }, - metrics - ); + const blockInput = chain.seenGossipBlockInput.getByColumn({ + columnSidecar: dataColumnSidecar, + source: BlockInputSource.gossip, + seenTimestampSec, + peerIdStr, + }); const recvToValidation = Date.now() / 1000 - seenTimestampSec; const validationTime = recvToValidation - recvToValLatency; @@ -319,14 +293,12 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand }); logger.debug("Received gossip dataColumn", { - slot: slot, - root: blockShortHex, + ...blockInput.getLogMeta(), currentSlot: chain.clock.currentSlot, peerId: peerIdStr, delaySec, gossipSubnet, columnIndex: dataColumnSidecar.index, - ...blockInputMeta, recvToValLatency, recvToValidation, validationTime, @@ -350,20 +322,21 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand } function handleValidBeaconBlock(blockInput: BlockInput, peerIdStr: string, seenTimestampSec: number): void { - const signedBlock = blockInput.block; + const signedBlock = blockInput.getBlock(); + const slot = signedBlock.message.slot; // Handler - MUST NOT `await`, to allow validation result to be propagated - const delaySec = seenTimestampSec - (chain.genesisTime + signedBlock.message.slot * config.SECONDS_PER_SLOT); + const delaySec = seenTimestampSec - (chain.genesisTime + slot * config.SECONDS_PER_SLOT); metrics?.gossipBlock.elapsedTimeTillReceived.observe({source: OpSource.gossip}, delaySec); chain.validatorMonitor?.registerBeaconBlock(OpSource.gossip, delaySec, signedBlock.message); - // if blobs are not yet fully available start an aggressive blob pull - if (blockInput.type === BlockInputType.dataPromise) { + // if data is not yet fully available start an aggressive pull + if (!blockInput.hasAllData()) { events.emit(NetworkEvent.unknownBlockInput, {blockInput, peer: peerIdStr}); - } else if (blockInput.type === BlockInputType.availableData) { + } else if (blockInput.hasBlockAndAllData()) { metrics?.blockInputFetchStats.totalDataAvailableBlockInputs.inc(); metrics?.blockInputFetchStats.totalDataAvailableBlockInputBlobs.inc( - (blockInput.block.message as deneb.BeaconBlock).body.blobKzgCommitments.length + (signedBlock.message as deneb.BeaconBlock).body.blobKzgCommitments.length ); } @@ -390,9 +363,9 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand }) .then(() => { // Returns the delay between the start of `block.slot` and `current time` - const delaySec = chain.clock.secFromSlot(signedBlock.message.slot); + const delaySec = chain.clock.secFromSlot(slot); metrics?.gossipBlock.elapsedTimeTillProcessed.observe(delaySec); - chain.seenGossipBlockInput.prune(); + chain.seenGossipBlockInput.prune(blockInput.blockRootHex); }) .catch((e) => { // Adjust verbosity based on error type @@ -401,7 +374,6 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand if (e 
instanceof BlockError) { switch (e.type.code) { case BlockErrorCode.DATA_UNAVAILABLE: { - const slot = signedBlock.message.slot; const forkTypes = config.getForkTypes(slot); const rootHex = toRootHex(forkTypes.BeaconBlock.hashTreeRoot(signedBlock.message)); @@ -431,24 +403,19 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand logLevel = LogLevel.error; } metrics?.gossipBlock.processBlockErrors.inc({error: e instanceof BlockError ? e.type.code : "NOT_BLOCK_ERROR"}); - logger[logLevel]("Error receiving block", {slot: signedBlock.message.slot, peer: peerIdStr}, e as Error); - chain.seenGossipBlockInput.prune(); + logger[logLevel]("Error receiving block", {slot, peer: peerIdStr}, e as Error); + chain.seenGossipBlockInput.prune(blockInput.blockRootHex); }); - if (blockInput.type === BlockInputType.dataPromise) { - const blockSlot = blockInput.block.message.slot; + if (!blockInput.hasAllData()) { // if blobs are not yet fully available start an aggressive blob pull chain.logger.debug("Block under processing is not available, racing with cutoff to add to unknownBlockInput", { - blockSlot, + blockSlot: slot, }); - raceWithCutoff( - chain, - blockSlot, - blockInput.cachedData.availabilityPromise as Promise, - BLOCK_AVAILABILITY_CUTOFF_MS - ).catch((_e) => { + const cutoffTimeMs = getCutoffTimeMs(chain, slot, BLOCK_AVAILABILITY_CUTOFF_MS); + blockInput.waitForAllData(cutoffTimeMs).catch((_e) => { chain.logger.debug("Block under processing not yet available, racing with cutoff to add to unknownBlockInput", { - blockSlot, + blockSlot: slot, }); events.emit(NetworkEvent.unknownBlockInput, {blockInput, peer: peerIdStr}); return null; @@ -486,18 +453,14 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand throw new GossipActionError(GossipAction.REJECT, {code: "PRE_DENEB_BLOCK"}); } const blockInput = await validateBeaconBlob(blobSidecar, topic.subnet, peerIdStr, seenTimestampSec); - if (blockInput.block !== null) { - if (blockInput.type === BlockInputType.dataPromise) { + if (blockInput.hasBlock()) { + if (!blockInput.hasAllData()) { chain.logger.debug("Block corresponding to blob is available but waiting for data availability", { blobSlot, index, }); - await raceWithCutoff( - chain, - blobSlot, - blockInput.cachedData.availabilityPromise as Promise, - BLOCK_AVAILABILITY_CUTOFF_MS - ).catch((_e) => { + const cutoffTimeMs = getCutoffTimeMs(chain, blobSlot, BLOCK_AVAILABILITY_CUTOFF_MS); + await blockInput.waitForAllData(cutoffTimeMs).catch((_e) => { chain.logger.debug("Block under processing not yet fully available adding to unknownBlockInput", { blobSlot, }); @@ -994,6 +957,16 @@ export async function validateGossipFnRetryUnknownRoot( } } +function getCutoffTimeMs( + chain: {config: ChainForkConfig; genesisTime: UintNum64; logger: Logger}, + blockSlot: Slot, + cutoffMsFromSlotStart: number +): number { + return Math.max( + computeTimeAtSlot(chain.config, blockSlot, chain.genesisTime) * 1000 + cutoffMsFromSlotStart - Date.now(), + 0 + ); +} async function raceWithCutoff( chain: {config: ChainForkConfig; genesisTime: UintNum64; logger: Logger}, blockSlot: Slot, From 5bde0984034fdf2a75c2a3097d0995791356fa86 Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Fri, 15 Aug 2025 21:00:35 +0700 Subject: [PATCH 013/173] fix: add fulu case to SeenBlockInput.getByBlock --- .../chain/seenCache/seenGossipBlockInput.ts | 34 +++++++++---------- 1 file changed, 16 insertions(+), 18 deletions(-) diff --git 
a/packages/beacon-node/src/chain/seenCache/seenGossipBlockInput.ts b/packages/beacon-node/src/chain/seenCache/seenGossipBlockInput.ts index edf6903ba08f..89c424c05d27 100644 --- a/packages/beacon-node/src/chain/seenCache/seenGossipBlockInput.ts +++ b/packages/beacon-node/src/chain/seenCache/seenGossipBlockInput.ts @@ -1,6 +1,6 @@ import {ChainForkConfig} from "@lodestar/config"; import {CheckpointWithHex} from "@lodestar/fork-choice"; -import {ForkName, isForkPostDeneb} from "@lodestar/params"; +import {ForkName, isForkPostDeneb, isForkPostFulu} from "@lodestar/params"; import {computeStartSlotAtEpoch} from "@lodestar/state-transition"; import {RootHex, SignedBeaconBlock, Slot, deneb, fulu} from "@lodestar/types"; import {LodestarError, Logger, toRootHex} from "@lodestar/utils"; @@ -163,23 +163,21 @@ export class SeenBlockInput { peerIdStr, }, }); - } - // else if (isForkPostFulu(forkName)) { - // blockInput = new BlockInputColumns.createFromBlock({ - // block, - // blockRootHex, - // daOutOfRange, - // forkName, - // custodyColumns: this.custodyConfig.custodyColumns, - // sampledColumns: this.custodyConfig.sampledColumns, - // source: { - // source, - // seenTimestampSec, - // peerIdStr - // } - // }) - // } - else { + } else if (isForkPostFulu(forkName)) { + blockInput = BlockInputColumns.createFromBlock({ + block, + blockRootHex, + daOutOfRange, + forkName, + custodyColumns: this.custodyConfig.custodyColumns, + sampledColumns: this.custodyConfig.sampledColumns, + source: { + source, + seenTimestampSec, + peerIdStr, + }, + }); + } else { blockInput = BlockInputBlobs.createFromBlock({ block: block as SignedBeaconBlock, blockRootHex, From 4d97aafc1654d2e8cc3a408064ca6d5ca975c9be Mon Sep 17 00:00:00 2001 From: Cayman Date: Fri, 15 Aug 2025 11:05:56 -0400 Subject: [PATCH 014/173] chore: finish updating gossip handlers --- .../src/network/processor/gossipHandlers.ts | 178 ++++-------------- 1 file changed, 35 insertions(+), 143 deletions(-) diff --git a/packages/beacon-node/src/network/processor/gossipHandlers.ts b/packages/beacon-node/src/network/processor/gossipHandlers.ts index 2bf2f40c70af..5f7e826b5f4d 100644 --- a/packages/beacon-node/src/network/processor/gossipHandlers.ts +++ b/packages/beacon-node/src/network/processor/gossipHandlers.ts @@ -16,7 +16,7 @@ import { } from "@lodestar/types"; import {LogLevel, Logger, prettyBytes, toHex, toRootHex} from "@lodestar/utils"; import {BlockInput, BlockInputSource} from "../../chain/blocks/blockInput/index.js"; -import {BlobSidecarValidation, GossipedInputType} from "../../chain/blocks/types.js"; +import {BlobSidecarValidation} from "../../chain/blocks/types.js"; import { AttestationError, AttestationErrorCode, @@ -169,6 +169,8 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand if (e.type.code === BlockErrorCode.PARENT_UNKNOWN && blockInput !== null) { logger.debug("Gossip block has error", {slot, root: blockShortHex, code: e.type.code}); events.emit(NetworkEvent.unknownBlockInput, {blockInput, peer: peerIdStr}); + // throw error (don't prune the blockInput) + throw e; } if (e.action === GossipAction.REJECT) { @@ -176,6 +178,7 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand } } + chain.seenGossipBlockInput.prune(blockRootHex); throw e; } } @@ -330,10 +333,13 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand const delaySec = seenTimestampSec - (chain.genesisTime + slot * config.SECONDS_PER_SLOT); 
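+    // For intuition (hypothetical timing): delaySec is seconds since the start of the block's
+    // slot, so a block first seen 1.5s into its slot yields delaySec ~1.5.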
metrics?.gossipBlock.elapsedTimeTillReceived.observe({source: OpSource.gossip}, delaySec); chain.validatorMonitor?.registerBeaconBlock(OpSource.gossip, delaySec, signedBlock.message); - // if data is not yet fully available start an aggressive pull - if (!blockInput.hasAllData()) { + if (!blockInput.hasBlockAndAllData()) { + chain.logger.debug("Received gossip block, attempting fetch of unavailable data", { + ...blockInput.getLogMeta(), + }); + // The data is not yet fully available, immediately trigger an aggressive pull via unknown block sync events.emit(NetworkEvent.unknownBlockInput, {blockInput, peer: peerIdStr}); - } else if (blockInput.hasBlockAndAllData()) { + } else { metrics?.blockInputFetchStats.totalDataAvailableBlockInputs.inc(); metrics?.blockInputFetchStats.totalDataAvailableBlockInputBlobs.inc( (signedBlock.message as deneb.BeaconBlock).body.blobKzgCommitments.length @@ -374,11 +380,6 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand if (e instanceof BlockError) { switch (e.type.code) { case BlockErrorCode.DATA_UNAVAILABLE: { - const forkTypes = config.getForkTypes(slot); - const rootHex = toRootHex(forkTypes.BeaconBlock.hashTreeRoot(signedBlock.message)); - - events.emit(NetworkEvent.unknownBlock, {rootHex, peer: peerIdStr}); - // Error is quite frequent and not critical logLevel = LogLevel.debug; break; @@ -404,23 +405,9 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand } metrics?.gossipBlock.processBlockErrors.inc({error: e instanceof BlockError ? e.type.code : "NOT_BLOCK_ERROR"}); logger[logLevel]("Error receiving block", {slot, peer: peerIdStr}, e as Error); + // TODO(fulu): Revisit when we prune block inputs chain.seenGossipBlockInput.prune(blockInput.blockRootHex); }); - - if (!blockInput.hasAllData()) { - // if blobs are not yet fully available start an aggressive blob pull - chain.logger.debug("Block under processing is not available, racing with cutoff to add to unknownBlockInput", { - blockSlot: slot, - }); - const cutoffTimeMs = getCutoffTimeMs(chain, slot, BLOCK_AVAILABILITY_CUTOFF_MS); - blockInput.waitForAllData(cutoffTimeMs).catch((_e) => { - chain.logger.debug("Block under processing not yet available, racing with cutoff to add to unknownBlockInput", { - blockSlot: slot, - }); - events.emit(NetworkEvent.unknownBlockInput, {blockInput, peer: peerIdStr}); - return null; - }); - } } return { @@ -453,66 +440,20 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand throw new GossipActionError(GossipAction.REJECT, {code: "PRE_DENEB_BLOCK"}); } const blockInput = await validateBeaconBlob(blobSidecar, topic.subnet, peerIdStr, seenTimestampSec); - if (blockInput.hasBlock()) { - if (!blockInput.hasAllData()) { - chain.logger.debug("Block corresponding to blob is available but waiting for data availability", { - blobSlot, - index, - }); - const cutoffTimeMs = getCutoffTimeMs(chain, blobSlot, BLOCK_AVAILABILITY_CUTOFF_MS); - await blockInput.waitForAllData(cutoffTimeMs).catch((_e) => { - chain.logger.debug("Block under processing not yet fully available adding to unknownBlockInput", { - blobSlot, - }); - events.emit(NetworkEvent.unknownBlockInput, {blockInput, peer: peerIdStr}); - }); - } - } else { - // wait for the block to arrive till some cutoff else emit unknownBlockInput event - chain.logger.debug("Block not yet available, racing with cutoff", {blobSlot, index}); - const normalBlockInput = await raceWithCutoff( - chain, - blobSlot, - blockInput.blockInputPromise, - 
BLOCK_AVAILABILITY_CUTOFF_MS - ).catch((_e) => { - return null; + if (!blockInput.hasBlockAndAllData()) { + const cutoffTimeMs = getCutoffTimeMs(chain, blobSlot, BLOCK_AVAILABILITY_CUTOFF_MS); + chain.logger.debug("Received gossip blob, waiting for full data availability", { + msToWait: cutoffTimeMs, + blobIndex: index, + ...blockInput.getLogMeta(), }); - - if (normalBlockInput !== null) { - // we can directly send it for processing but block gossip handler will queue it up anyway - // if we see any issues later, we can send it to handleValidBeaconBlock - // - // handleValidBeaconBlock(normalBlockInput, peerIdStr, seenTimestampSec); - // - // however we can emit the event which will atleast add the peer to the list of peers to pull - // data from - if (normalBlockInput.type === BlockInputType.dataPromise) { - chain.logger.debug("Block corresponding to blob is now available but waiting for data availability", { - blobSlot, - index, - }); - await raceWithCutoff( - chain, - blobSlot, - normalBlockInput.cachedData.availabilityPromise as Promise, - BLOCK_AVAILABILITY_CUTOFF_MS - ).catch((_e) => { - chain.logger.debug("Block under processing not yet fully available adding to unknownBlockInput", { - blobSlot, - }); - events.emit(NetworkEvent.unknownBlockInput, {blockInput: normalBlockInput, peer: peerIdStr}); - }); - } else { - chain.logger.debug("Block corresponding to blob is now available for processing", {blobSlot, index}); - } - } else { - chain.logger.debug( - "Block corresponding to blob not available till BLOCK_AVAILABILITY_CUTOFF_MS adding to unknownBlockInput", - {blobSlot, index} - ); + blockInput.waitForAllData(cutoffTimeMs).catch((_e) => { + chain.logger.debug("Received gossip blob, attempting fetch of unavailable data", { + blobIndex: index, + ...blockInput.getLogMeta(), + }); events.emit(NetworkEvent.unknownBlockInput, {blockInput, peer: peerIdStr}); - } + }); } }, @@ -531,7 +472,7 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand throw new GossipActionError(GossipAction.REJECT, {code: "PRE_FULU_BLOCK"}); } const delaySec = chain.clock.secFromSlot(dataColumnSlot, seenTimestampSec); - metrics?.dataColumns.elapsedTimeTillReceived.observe({source: DataColumnsSource.gossip}, delaySec); + metrics?.dataColumns.elapsedTimeTillReceived.observe({source: BlockInputSource.gossip}, delaySec); const blockInput = await validateBeaconDataColumn( dataColumnSidecar, serializedData, @@ -539,69 +480,20 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand peerIdStr, seenTimestampSec ); - if (blockInput.block !== null) { - if (blockInput.type === BlockInputType.dataPromise) { - chain.logger.debug("Block corresponding to data column is available but waiting for data availability", { - dataColumnSlot, - index, - }); - await raceWithCutoff( - chain, - dataColumnSlot, - blockInput.cachedData.availabilityPromise as Promise, - BLOCK_AVAILABILITY_CUTOFF_MS - ).catch((_e) => { - chain.logger.debug("Block under processing not yet fully available adding to unknownBlockInput", { - dataColumnSlot, - }); - events.emit(NetworkEvent.unknownBlockInput, {blockInput, peer: peerIdStr}); - }); - } - } else { - // wait for the block to arrive till some cutoff else emit unknownBlockInput event - chain.logger.debug("Block not yet available, racing with cutoff", {dataColumnSlot, index}); - const normalBlockInput = await raceWithCutoff( - chain, - dataColumnSlot, - blockInput.blockInputPromise, - BLOCK_AVAILABILITY_CUTOFF_MS - ).catch((_e) => { - return null; + 
if (!blockInput.hasBlockAndAllData()) { + const cutoffTimeMs = getCutoffTimeMs(chain, dataColumnSlot, BLOCK_AVAILABILITY_CUTOFF_MS); + chain.logger.debug("Received gossip data column, waiting for full data availability", { + msToWait: cutoffTimeMs, + dataColumnIndex: index, + ...blockInput.getLogMeta(), }); - - if (normalBlockInput !== null) { - if (normalBlockInput.type === BlockInputType.dataPromise) { - chain.logger.debug( - "Block corresponding to data column is now available but waiting for data availability", - { - dataColumnSlot, - index, - } - ); - await raceWithCutoff( - chain, - dataColumnSlot, - normalBlockInput.cachedData.availabilityPromise as Promise, - BLOCK_AVAILABILITY_CUTOFF_MS - ).catch((_e) => { - chain.logger.debug("Block under processing not yet fully available adding to unknownBlockInput", { - dataColumnSlot, - }); - events.emit(NetworkEvent.unknownBlockInput, {blockInput: normalBlockInput, peer: peerIdStr}); - }); - } else { - chain.logger.debug("Block corresponding to data column is now available for processing", { - dataColumnSlot, - index, - }); - } - } else { - chain.logger.debug("Block not available till BLOCK_AVAILABILITY_CUTOFF_MS", { - dataColumnSlot, - index, + blockInput.waitForAllData(cutoffTimeMs).catch((_e) => { + chain.logger.debug("Received gossip data column, attempting fetch of unavailable data", { + dataColumnIndex: index, + ...blockInput.getLogMeta(), }); events.emit(NetworkEvent.unknownBlockInput, {blockInput, peer: peerIdStr}); - } + }); } }, From 3fe3851026549bc8859229d1a21d459df6e7358e Mon Sep 17 00:00:00 2001 From: Cayman Date: Fri, 15 Aug 2025 11:06:56 -0400 Subject: [PATCH 015/173] chore: remove stray function --- .../src/network/processor/gossipHandlers.ts | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/packages/beacon-node/src/network/processor/gossipHandlers.ts b/packages/beacon-node/src/network/processor/gossipHandlers.ts index 5f7e826b5f4d..246097caae93 100644 --- a/packages/beacon-node/src/network/processor/gossipHandlers.ts +++ b/packages/beacon-node/src/network/processor/gossipHandlers.ts @@ -859,18 +859,3 @@ function getCutoffTimeMs( 0 ); } -async function raceWithCutoff( - chain: {config: ChainForkConfig; genesisTime: UintNum64; logger: Logger}, - blockSlot: Slot, - availabilityPromise: Promise, - cutoffMsFromSlotStart: number -): Promise { - const cutoffTimeMs = Math.max( - computeTimeAtSlot(chain.config, blockSlot, chain.genesisTime) * 1000 + cutoffMsFromSlotStart - Date.now(), - 0 - ); - const cutoffTimeout = new Promise((_resolve, reject) => setTimeout(reject, cutoffTimeMs)); - await Promise.race([availabilityPromise, cutoffTimeout]); - // we can only be here if availabilityPromise has resolved else an error will be thrown - return availabilityPromise; -} From 60c66fecced3c8ddd642ea041d43715851d77c8d Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Fri, 15 Aug 2025 23:48:02 +0700 Subject: [PATCH 016/173] feat: update how range sync Batch works --- packages/beacon-node/src/sync/range/batch.ts | 193 ++++++++++++------ packages/beacon-node/src/sync/range/chain.ts | 26 ++- .../src/sync/range/utils/peerBalancer.ts | 20 +- 3 files changed, 154 insertions(+), 85 deletions(-) diff --git a/packages/beacon-node/src/sync/range/batch.ts b/packages/beacon-node/src/sync/range/batch.ts index 6e8e8d6d6560..6da6182376d8 100644 --- a/packages/beacon-node/src/sync/range/batch.ts +++ b/packages/beacon-node/src/sync/range/batch.ts @@ -1,8 +1,9 @@ import {ChainForkConfig} from "@lodestar/config"; -import {ForkSeq} from 
"@lodestar/params"; -import {Epoch, RootHex, phase0} from "@lodestar/types"; +import {ForkName, ForkSeq, isForkPostDeneb, isForkPostFulu} from "@lodestar/params"; +import {Epoch, RootHex, Slot, deneb, fulu, phase0} from "@lodestar/types"; import {LodestarError} from "@lodestar/utils"; -import {BlockInput} from "../../chain/blocks/types.js"; +import {isBlockInputColumns} from "../../chain/blocks/blockInput/blockInput.js"; +import {IBlockInput} from "../../chain/blocks/blockInput/types.js"; import {BlockError, BlockErrorCode} from "../../chain/errors/index.js"; import {PartialDownload} from "../../network/reqresp/beaconBlocksMaybeBlobsByRange.js"; import {PeerIdStr} from "../../util/peerId.js"; @@ -33,15 +34,15 @@ export enum BatchStatus { export type Attempt = { /** The peer that made the attempt */ - peer: PeerIdStr; + peers: PeerIdStr[]; /** The hash of the blocks of the attempt */ hash: RootHex; }; export type BatchState = - | {status: BatchStatus.AwaitingDownload; partialDownload: PartialDownload} - | {status: BatchStatus.Downloading; peer: PeerIdStr; partialDownload: PartialDownload} - | {status: BatchStatus.AwaitingProcessing; peer: PeerIdStr; blocks: BlockInput[]} + | {status: BatchStatus.AwaitingDownload; blocks?: IBlockInput[]} + | {status: BatchStatus.Downloading; peer: PeerIdStr; blocks: IBlockInput[]} + | {status: BatchStatus.AwaitingProcessing; blocks: IBlockInput[]} | {status: BatchStatus.Processing; attempt: Attempt} | {status: BatchStatus.AwaitingValidation; attempt: Attempt}; @@ -53,13 +54,19 @@ export type BatchMetadata = { export type DownloadSuccessOutput = | { status: BatchStatus.AwaitingProcessing; - blocks: BlockInput[]; + blocks: IBlockInput[]; } | { status: BatchStatus.AwaitingDownload; - pendingDataColumns: number[]; + blocks: IBlockInput[]; }; +export type BatchRequests = { + blocksRequest?: phase0.BeaconBlocksByRangeRequest; + blobsRequest?: deneb.BlobSidecarsByRangeRequest; + columnsRequest?: fulu.DataColumnSidecarsByRangeRequest; +}; + /** * Batches are downloaded at the first block of the epoch. * @@ -72,11 +79,16 @@ export type DownloadSuccessOutput = * Jul2022: Offset changed from 1 to 0, see rationale in {@link BATCH_SLOT_OFFSET} */ export class Batch { + readonly forkName: ForkName; readonly startEpoch: Epoch; + readonly startSlot: Slot; + readonly count: number; + readonly requests: BatchRequests; + /** State of the batch. */ - state: BatchState = {status: BatchStatus.AwaitingDownload, partialDownload: null}; - /** BeaconBlocksByRangeRequest */ - readonly request: phase0.BeaconBlocksByRangeRequest; + state: BatchState = {status: BatchStatus.AwaitingDownload}; + /** Peers that provided good data */ + readonly goodPeers: PeerIdStr[] = []; /** The `Attempts` that have been made and failed to send us this batch. */ readonly failedProcessingAttempts: Attempt[] = []; /** The `Attempts` that have been made and failed because of execution malfunction. 
*/ @@ -86,22 +98,79 @@ export class Batch { private readonly config: ChainForkConfig; constructor(startEpoch: Epoch, config: ChainForkConfig) { - const {startSlot, count} = getBatchSlotRange(startEpoch); - this.config = config; + + const {startSlot, count} = getBatchSlotRange(startEpoch); + this.forkName = this.config.getForkName(startSlot); this.startEpoch = startEpoch; - this.request = { - startSlot, - count, - step: 1, + this.startSlot = startSlot; + this.count = count; + this.requests = this.getRequests([]); + } + + /** + * Builds ByRange requests for block, blobs and columns + */ + private getRequests(blocks: IBlockInput[]): BatchRequests { + let blockStartSlot = this.startSlot; + let dataStartSlot = this.startSlot; + const neededColumns = new Set(); + + // ensure blocks are in slot-wise order + for (const blockInput of blocks.sort((a, b) => a.slot - b.slot)) { + const blockSlot = blockInput.slot; + // check if block/data is present and if start of range is directly before blockSlot to avoid + // missing blocks/data if there is a gap. just pull remainder of range + // ie startSlot = 32 and have [32, 33, 34, 35, 36, _, 38, 39, _, _, ... _missing endSlot=63_] + // will return a startSlot of 37 and pull range 37-63 + if (blockInput.hasBlock() && blockStartSlot === blockSlot) { + blockStartSlot = blockSlot + 1; + } + if (!blockInput.hasAllData()) { + if (isBlockInputColumns(blockInput)) { + for (const missing of blockInput.getMissingSampledColumnMeta()) { + neededColumns.add(missing.index); + } + } + } else if (dataStartSlot === blockSlot) { + dataStartSlot = blockSlot + 1; + } + } + + const endSlot = this.startSlot + this.count - 1; + const requests: BatchRequests = { + blocksRequest: + blockStartSlot <= endSlot + ? { + startSlot: blockStartSlot, + // range of 40 - 63, startSlot will be inclusive but subtraction will exclusive so need to + 1 + count: endSlot - blockStartSlot + 1, + step: 1, + } + : undefined, }; + if (dataStartSlot <= this.startSlot + this.count) { + if (isForkPostFulu(this.forkName)) { + requests.columnsRequest = { + startSlot: dataStartSlot, + count: endSlot - dataStartSlot + 1, + columns: Array.from(neededColumns), + }; + } else if (isForkPostDeneb(this.forkName)) { + requests.blobsRequest = { + startSlot: dataStartSlot, + count: endSlot - dataStartSlot + 1, + }; + } + } + return requests; } /** * Gives a list of peers from which this batch has had a failed download or processing attempt. */ getFailedPeers(): PeerIdStr[] { - return [...this.failedDownloadAttempts, ...this.failedProcessingAttempts.map((a) => a.peer)]; + return [...this.failedDownloadAttempts, ...this.failedProcessingAttempts.flatMap((a) => a.peers)]; } getMetadata(): BatchMetadata { @@ -111,84 +180,82 @@ export class Batch { /** * AwaitingDownload -> Downloading */ - startDownloading(peer: PeerIdStr): PartialDownload { + startDownloading(peer: PeerIdStr): BatchRequests { if (this.state.status !== BatchStatus.AwaitingDownload) { throw new BatchError(this.wrongStatusErrorType(BatchStatus.AwaitingDownload)); } - const {partialDownload} = this.state; - this.state = {status: BatchStatus.Downloading, peer, partialDownload}; - return partialDownload; + this.state = {status: BatchStatus.Downloading, peer, blocks: this.state.blocks ?? 
[]}; } /** * Downloading -> AwaitingProcessing * pendingDataColumns is null when a complete download is done, otherwise it contains the columns that are still pending */ - downloadingSuccess(downloadResult: { - blocks: BlockInput[]; - pendingDataColumns: null | number[]; - }): DownloadSuccessOutput { + downloadingSuccess(peer: PeerIdStr, blocks: IBlockInput[]): DownloadSuccessOutput { if (this.state.status !== BatchStatus.Downloading) { throw new BatchError(this.wrongStatusErrorType(BatchStatus.Downloading)); } - let updatedPendingDataColumns = this.state.partialDownload?.pendingDataColumns ?? null; - const {blocks, pendingDataColumns} = downloadResult; - if (updatedPendingDataColumns == null) { - // state pendingDataColumns is null as initial value, just update it to pendingDataColumns in this case - updatedPendingDataColumns = pendingDataColumns; - } else { - updatedPendingDataColumns = - // pendingDataColumns = null means a complete download - pendingDataColumns == null - ? null - : // if not state pendingDataColumns should be reduced over time, see see https://github.com/ChainSafe/lodestar/issues/8036 - updatedPendingDataColumns.filter((column) => pendingDataColumns.includes(column)); - } + this.goodPeers.push(peer); - if (updatedPendingDataColumns === null) { - // complete download - this.state = {status: BatchStatus.AwaitingProcessing, peer: this.state.peer, blocks}; - return {status: BatchStatus.AwaitingProcessing, blocks}; + let allComplete = true; + const slots = new Set(); + for (const block of blocks) { + slots.add(block.slot); + if (!block.hasBlockAndAllData()) { + allComplete = false; + } } - // partial download, track updatedPendingDataColumns in state - this.state = { - status: BatchStatus.AwaitingDownload, - partialDownload: blocks.length === 0 ? 
null : {blocks, pendingDataColumns: updatedPendingDataColumns}, - }; - return {status: BatchStatus.AwaitingDownload, pendingDataColumns: updatedPendingDataColumns}; + if (slots.size > this.count) { + throw new BatchError({ + code: BatchErrorCode.INVALID_COUNT, + startEpoch: this.startEpoch, + count: slots.size, + expected: this.count, + status: this.state.status, + }); + } + if (slots.size === this.count && allComplete) { + this.state = {status: BatchStatus.AwaitingProcessing, blocks}; + } else { + this.requests = this.getRequests(blocks); + this.state = {status: BatchStatus.AwaitingDownload, blocks}; + } } /** * Downloading -> AwaitingDownload */ - downloadingError(): void { + downloadingError(peer: PeerIdStr): void { if (this.state.status !== BatchStatus.Downloading) { throw new BatchError(this.wrongStatusErrorType(BatchStatus.Downloading)); } - this.failedDownloadAttempts.push(this.state.peer); + this.failedDownloadAttempts.push(peer); if (this.failedDownloadAttempts.length > MAX_BATCH_DOWNLOAD_ATTEMPTS) { throw new BatchError(this.errorType({code: BatchErrorCode.MAX_DOWNLOAD_ATTEMPTS})); } - const {partialDownload} = this.state; - this.state = {status: BatchStatus.AwaitingDownload, partialDownload}; + this.state = {status: BatchStatus.AwaitingDownload, blocks: this.state.blocks}; } /** * AwaitingProcessing -> Processing */ - startProcessing(): BlockInput[] { + startProcessing(): IBlockInput[] { if (this.state.status !== BatchStatus.AwaitingProcessing) { throw new BatchError(this.wrongStatusErrorType(BatchStatus.AwaitingProcessing)); } const blocks = this.state.blocks; const hash = hashBlocks(blocks, this.config); // tracks blocks to report peer on processing error - this.state = {status: BatchStatus.Processing, attempt: {peer: this.state.peer, hash}}; + // Reset goodPeers in case another download attempt needs to be made. When Attempt is successful or not the peers + // that the data came from will be handled by the Attempt that goes for processing + const peers = this.goodPeers; + this.goodPeers = []; + this.state = {status: BatchStatus.Processing, attempt: {peers, hash}}; return blocks; } @@ -243,17 +310,15 @@ export class Batch { return this.state.attempt; } - isPostFulu(): boolean { - return this.config.getForkSeq(this.request.startSlot) >= ForkSeq.fulu; - } - private onExecutionEngineError(attempt: Attempt): void { this.executionErrorAttempts.push(attempt); if (this.executionErrorAttempts.length > MAX_BATCH_PROCESSING_ATTEMPTS) { throw new BatchError(this.errorType({code: BatchErrorCode.MAX_EXECUTION_ENGINE_ERROR_ATTEMPTS})); } - this.state = {status: BatchStatus.AwaitingDownload, partialDownload: null}; + // remove any downloaded blocks and re-attempt + // TODO(fulu): need to remove the bad blocks from the SeenBlockInputCache + this.state = {status: BatchStatus.AwaitingDownload}; } private onProcessingError(attempt: Attempt): void { @@ -262,7 +327,9 @@ export class Batch { throw new BatchError(this.errorType({code: BatchErrorCode.MAX_PROCESSING_ATTEMPTS})); } - this.state = {status: BatchStatus.AwaitingDownload, partialDownload: null}; + // remove any downloaded blocks and re-attempt + // TODO(fulu): need to remove the bad blocks from the SeenBlockInputCache + this.state = {status: BatchStatus.AwaitingDownload}; } /** Helper to construct typed BatchError. 
Stack traces are correct as the error is thrown above */
@@ -277,6 +344,7 @@ export class Batch {
 
 export enum BatchErrorCode {
   WRONG_STATUS = "BATCH_ERROR_WRONG_STATUS",
+  INVALID_COUNT = "BATCH_ERROR_INVALID_COUNT",
   MAX_DOWNLOAD_ATTEMPTS = "BATCH_ERROR_MAX_DOWNLOAD_ATTEMPTS",
   MAX_PROCESSING_ATTEMPTS = "BATCH_ERROR_MAX_PROCESSING_ATTEMPTS",
   MAX_EXECUTION_ENGINE_ERROR_ATTEMPTS = "MAX_EXECUTION_ENGINE_ERROR_ATTEMPTS",
@@ -284,6 +352,7 @@ export enum BatchErrorCode {
 
 type BatchErrorType =
   | {code: BatchErrorCode.WRONG_STATUS; expectedStatus: BatchStatus}
+  | {code: BatchErrorCode.INVALID_COUNT; count: number; expected: number}
   | {code: BatchErrorCode.MAX_DOWNLOAD_ATTEMPTS}
   | {code: BatchErrorCode.MAX_PROCESSING_ATTEMPTS}
   | {code: BatchErrorCode.MAX_EXECUTION_ENGINE_ERROR_ATTEMPTS};
diff --git a/packages/beacon-node/src/sync/range/chain.ts b/packages/beacon-node/src/sync/range/chain.ts
index bf239ce4b72f..409b91208c49 100644
--- a/packages/beacon-node/src/sync/range/chain.ts
+++ b/packages/beacon-node/src/sync/range/chain.ts
@@ -447,15 +447,13 @@ export class SyncChain {
       peer: prettyPrintPeerIdStr(peer.peerId),
     });
     try {
-      const partialDownload = batch.startDownloading(peer.peerId);
+      const requests = batch.startDownloading(peer.peerId);
 
       // wrapError ensures to never call both batch success() and batch error()
-      const res = await wrapError(
-        this.downloadBeaconBlocksByRange(peer, batch.request, partialDownload, this.syncType)
-      );
+      const res = await wrapError(this.downloadBeaconBlocksByRange(peer, requests, partialDownload, this.syncType));
 
       if (!res.err) {
-        const downloadSuccessOutput = batch.downloadingSuccess(res.result);
+        const downloadSuccessOutput = batch.downloadingSuccess(peer.peerId, res.result.blocks);
         if (downloadSuccessOutput.status === BatchStatus.AwaitingProcessing) {
@@ -583,12 +579,14 @@ export class SyncChain {
     const attemptOk = batch.validationSuccess();
     for (const attempt of batch.failedProcessingAttempts) {
       if (attempt.hash !== attemptOk.hash) {
-        if (attemptOk.peer === attempt.peer.toString()) {
-          // The same peer corrected its previous attempt
-          this.reportPeer(attempt.peer, PeerAction.MidToleranceError, "SyncChainInvalidBatchSelf");
-        } else {
-          // A different peer sent an bad batch
-          this.reportPeer(attempt.peer, PeerAction.LowToleranceError, "SyncChainInvalidBatchOther");
+        for (const badAttemptPeer of attempt.peers) {
+          if (attemptOk.peers.find((goodPeer) => goodPeer === badAttemptPeer)) {
+            // The same peer corrected its previous attempt
+            this.reportPeer(badAttemptPeer, PeerAction.MidToleranceError, "SyncChainInvalidBatchSelf");
+          } else {
+            // A different peer sent a bad batch
+            this.reportPeer(badAttemptPeer, PeerAction.LowToleranceError, 
"SyncChainInvalidBatchOther"); + } } } } diff --git a/packages/beacon-node/src/sync/range/utils/peerBalancer.ts b/packages/beacon-node/src/sync/range/utils/peerBalancer.ts index 2b80ac3caea5..398174feb315 100644 --- a/packages/beacon-node/src/sync/range/utils/peerBalancer.ts +++ b/packages/beacon-node/src/sync/range/utils/peerBalancer.ts @@ -1,3 +1,4 @@ +import {isForkPostFulu} from "@lodestar/params"; import {PeerSyncMeta} from "../../../network/peers/peersData.js"; import {CustodyConfig} from "../../../util/dataColumns.js"; import {PeerIdStr} from "../../../util/peerId.js"; @@ -57,8 +58,9 @@ export class ChainPeersBalancer { if (batch.state.status !== BatchStatus.AwaitingDownload) { return; } - const {partialDownload} = batch.state; - const pendingDataColumns = partialDownload?.pendingDataColumns ?? this.custodyConfig.sampledColumns; + const {columnsRequest} = batch.requests; + // TODO(fulu): This is fulu specific and hinders our peer selection PreFulu + const pendingDataColumns = columnsRequest?.columns ?? this.custodyConfig.sampledColumns; const eligiblePeers = this.filterPeers(batch, pendingDataColumns, false); const failedPeers = new Set(batch.getFailedPeers()); @@ -129,23 +131,23 @@ export class ChainPeersBalancer { continue; } - if (target.slot < batch.request.startSlot) { + if (target.slot < batch.startSlot) { continue; } - if (batch.isPostFulu() && this.syncType === RangeSyncType.Head) { + if (isForkPostFulu(batch.forkName) && this.syncType === RangeSyncType.Head) { // for head sync, target slot is head slot and each peer may have a different head slot // we don't want to retry a batch with a peer that's not as up-to-date as the previous peer // see https://github.com/ChainSafe/lodestar/issues/8193 - const blocks = batch.state.partialDownload?.blocks; - const lastBlock = blocks?.at(-1)?.block; - const lastBlockSlot = lastBlock?.message?.slot; + const blocks = batch.state?.blocks; + const lastBlock = blocks?.at(-1); + const lastBlockSlot = lastBlock?.slot; if (lastBlockSlot && lastBlockSlot > target.slot) { continue; } } - if (!batch.isPostFulu()) { + if (!isForkPostFulu(batch.forkName)) { // pre-fulu logic, we don't care columns and earliestAvailableSlot eligiblePeers.push({syncInfo: peer, columns: 0, hasEarliestAvailableSlots: false}); continue; @@ -157,7 +159,7 @@ export class ChainPeersBalancer { continue; } - if (earliestAvailableSlot > batch.request.startSlot) { + if (earliestAvailableSlot > batch.startSlot) { continue; } From c6999c553ef830fe59302897bd3efeefae253447 Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Sat, 16 Aug 2025 04:36:43 +0700 Subject: [PATCH 017/173] fix: logic errors in batch.getRequests --- packages/beacon-node/src/sync/range/batch.ts | 145 +++++++++++++------ 1 file changed, 100 insertions(+), 45 deletions(-) diff --git a/packages/beacon-node/src/sync/range/batch.ts b/packages/beacon-node/src/sync/range/batch.ts index 6da6182376d8..39478a3f0827 100644 --- a/packages/beacon-node/src/sync/range/batch.ts +++ b/packages/beacon-node/src/sync/range/batch.ts @@ -6,8 +6,10 @@ import {isBlockInputColumns} from "../../chain/blocks/blockInput/blockInput.js"; import {IBlockInput} from "../../chain/blocks/blockInput/types.js"; import {BlockError, BlockErrorCode} from "../../chain/errors/index.js"; import {PartialDownload} from "../../network/reqresp/beaconBlocksMaybeBlobsByRange.js"; +import {CustodyConfig} from "../../util/dataColumns.js"; import {PeerIdStr} from "../../util/peerId.js"; import {MAX_BATCH_DOWNLOAD_ATTEMPTS, MAX_BATCH_PROCESSING_ATTEMPTS} from 
"../constants.js"; +import {DownloadByRangeRequests} from "../utils/downloadByRange.js"; import {getBatchSlotRange, hashBlocks} from "./utils/index.js"; /** @@ -39,32 +41,27 @@ export type Attempt = { hash: RootHex; }; -export type BatchState = - | {status: BatchStatus.AwaitingDownload; blocks?: IBlockInput[]} - | {status: BatchStatus.Downloading; peer: PeerIdStr; blocks: IBlockInput[]} - | {status: BatchStatus.AwaitingProcessing; blocks: IBlockInput[]} - | {status: BatchStatus.Processing; attempt: Attempt} - | {status: BatchStatus.AwaitingValidation; attempt: Attempt}; - -export type BatchMetadata = { - startEpoch: Epoch; - status: BatchStatus; +export type BatchStateAwaitingDownload = { + status: BatchStatus.AwaitingDownload; + blocks: IBlockInput[]; }; -export type DownloadSuccessOutput = +export type DownloadSuccessState = + | BatchStateAwaitingDownload | { status: BatchStatus.AwaitingProcessing; blocks: IBlockInput[]; - } - | { - status: BatchStatus.AwaitingDownload; - blocks: IBlockInput[]; }; -export type BatchRequests = { - blocksRequest?: phase0.BeaconBlocksByRangeRequest; - blobsRequest?: deneb.BlobSidecarsByRangeRequest; - columnsRequest?: fulu.DataColumnSidecarsByRangeRequest; +export type BatchState = + | DownloadSuccessState + | {status: BatchStatus.Downloading; peer: PeerIdStr} + | {status: BatchStatus.Processing; attempt: Attempt} + | {status: BatchStatus.AwaitingValidation; attempt: Attempt}; + +export type BatchMetadata = { + startEpoch: Epoch; + status: BatchStatus; }; /** @@ -83,12 +80,13 @@ export class Batch { readonly startEpoch: Epoch; readonly startSlot: Slot; readonly count: number; - readonly requests: BatchRequests; + /** Block, blob and column requests that are used to determine the best peer and are used in downloadByRange */ + requests: DownloadByRangeRequests; /** State of the batch. */ - state: BatchState = {status: BatchStatus.AwaitingDownload}; + state: BatchState = {status: BatchStatus.AwaitingDownload, blocks: []}; /** Peers that provided good data */ - readonly goodPeers: PeerIdStr[] = []; + goodPeers: PeerIdStr[] = []; /** The `Attempts` that have been made and failed to send us this batch. */ readonly failedProcessingAttempts: Attempt[] = []; /** The `Attempts` that have been made and failed because of execution malfunction. */ @@ -96,9 +94,11 @@ export class Batch { /** The number of download retries this batch has undergone due to a failed request. 
*/
  private readonly failedDownloadAttempts: PeerIdStr[] = [];
   private readonly config: ChainForkConfig;
+  private readonly custodyConfig: CustodyConfig;
 
-  constructor(startEpoch: Epoch, config: ChainForkConfig) {
+  constructor(startEpoch: Epoch, config: ChainForkConfig, custodyConfig: CustodyConfig) {
     this.config = config;
+    this.custodyConfig = custodyConfig;
+
+    const {startSlot, count} = getBatchSlotRange(startEpoch);
     this.forkName = this.config.getForkName(startSlot);
     this.startEpoch = startEpoch;
     this.startSlot = startSlot;
     this.count = count;
     this.requests = this.getRequests([]);
   }
 
   /**
    * Builds ByRange requests for block, blobs and columns
    */
-  private getRequests(blocks: IBlockInput[]): BatchRequests {
+  private getRequests(blocks: IBlockInput[]): DownloadByRangeRequests {
+    // fresh request where no blocks have been pulled yet
+    if (!blocks.length) {
+      const blocksRequest: phase0.BeaconBlocksByRangeRequest = {
+        startSlot: this.startSlot,
+        count: this.count,
+        step: 1,
+      };
+      if (isForkPostFulu(this.forkName)) {
+        return {
+          blocksRequest,
+          columnsRequest: {
+            startSlot: this.startSlot,
+            count: this.count,
+            columns: this.custodyConfig.sampledColumns,
+          },
+        };
+      }
+      if (isForkPostDeneb(this.forkName)) {
+        return {
+          blocksRequest,
+          blobsRequest: {
+            startSlot: this.startSlot,
+            count: this.count,
+          },
+        };
+      }
+      return {
+        blocksRequest,
+      };
+    }
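+
+    // Concrete shapes for illustration (hypothetical fresh batch, startSlot=64, count=32):
+    //   post-Fulu:  {blocksRequest: {startSlot: 64, count: 32, step: 1},
+    //                columnsRequest: {startSlot: 64, count: 32, columns: sampledColumns}}
+    //   post-Deneb: the columnsRequest is replaced by blobsRequest {startSlot: 64, count: 32}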
+
+    // subsequent request where part of the epoch has already been downloaded. Figure out where the
+    // range of missing blocks/data begins so the download can resume from there
     let blockStartSlot = this.startSlot;
     let dataStartSlot = this.startSlot;
     const neededColumns = new Set();
 
     // ensure blocks are in slot-wise order
     for (const blockInput of blocks.sort((a, b) => a.slot - b.slot)) {
       const blockSlot = blockInput.slot;
-      // check if block/data is present and if start of range is directly before blockSlot to avoid
-      // missing blocks/data if there is a gap. just pull remainder of range
-      // ie startSlot = 32 and have [32, 33, 34, 35, 36, _, 38, 39, _, _, ... _missing endSlot=63_]
-      // will return a startSlot of 37 and pull range 37-63
+      // check if the block/data is present (hasBlock/hasAllData). If present, check whether startSlot equals
+      // blockSlot. If it does, that slot does not need to be pulled, so increment startSlot by 1. The check
+      // stops advancing at the first gap, even if blocks/data are present again after it; to keep the request
+      // simple, just re-pull the remainder of the range.
+      //
+      // ie startSlot = 32 and count = 32. so for slots = [32, 33, 34, 35, 36, _, 38, 39, _, _, ... _endSlot=63_]
+      // the updated startSlot will be 37 and the range 37-63 is pulled on the next request.
+      //
+      // if all slots have already been pulled then startSlot will eventually get incremented to the slot after
+      // the desired end slot
       if (blockInput.hasBlock() && blockStartSlot === blockSlot) {
         blockStartSlot = blockSlot + 1;
       }
       if (!blockInput.hasAllData()) {
         if (isBlockInputColumns(blockInput)) {
           for (const missing of blockInput.getMissingSampledColumnMeta()) {
             neededColumns.add(missing.index);
           }
         }
       } else if (dataStartSlot === blockSlot) {
         dataStartSlot = blockSlot + 1;
       }
     }
 
     // if the blockStartSlot or dataStartSlot is after the desired endSlot then no request will be made for the batch
     // because it is complete
     const endSlot = this.startSlot + this.count - 1;
-    const requests: BatchRequests = {
-      blocksRequest:
-        blockStartSlot <= endSlot
-          ? {
-              startSlot: blockStartSlot,
-              // range of 40 - 63, startSlot will be inclusive but subtraction will exclusive so need to + 1
-              count: endSlot - blockStartSlot + 1,
-              step: 1,
-            }
-          : undefined,
-    };
-    if (dataStartSlot <= this.startSlot + this.count) {
+    const requests: DownloadByRangeRequests = {};
+    if (blockStartSlot <= endSlot) {
+      requests.blocksRequest = {
+        startSlot: blockStartSlot,
+        // e.g. range 40 - 63: startSlot is inclusive but the subtraction is exclusive, so add 1
+        count: endSlot - blockStartSlot + 1,
+        step: 1,
+      };
+    }
+    if (dataStartSlot <= endSlot) {
+      // e.g. range 40 - 63: startSlot is inclusive but the subtraction is exclusive, so add 1
+      const count = endSlot - dataStartSlot + 1;
       if (isForkPostFulu(this.forkName)) {
         requests.columnsRequest = {
+          count,
           startSlot: dataStartSlot,
-          count: endSlot - dataStartSlot + 1,
           columns: Array.from(neededColumns),
         };
       } else if (isForkPostDeneb(this.forkName)) {
         requests.blobsRequest = {
+          count,
           startSlot: dataStartSlot,
-          count: endSlot - dataStartSlot + 1,
         };
       }
+      // dataStartSlot may still hold a value here, but no data request is created for pre-Deneb forks
     }
 
     return requests;
   }
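+
+  // Worked example of a resumed batch (hypothetical state): with startSlot=32 and count=32, if
+  // slots 32-36 already hold blocks with all their data and slot 37 is the first gap, the scan in
+  // getRequests leaves blockStartSlot = dataStartSlot = 37, so the rebuilt requests re-pull the
+  // whole tail 37-63 (count 27) rather than tracking each later gap individually.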
 
   /**
    * Gives a list of peers from which this batch has had a failed download or processing attempt.
    */
   getFailedPeers(): PeerIdStr[] {
-    return [...this.failedDownloadAttempts, ...this.failedProcessingAttempts.map((a) => a.peer)];
+    return [...this.failedDownloadAttempts, ...this.failedProcessingAttempts.flatMap((a) => a.peers)];
   }
 
   getMetadata(): BatchMetadata {
     return {startEpoch: this.startEpoch, status: this.state.status};
   }
 
+  getBlocks(): IBlockInput[] {
+    switch (this.state.status) {
+      case BatchStatus.Downloading:
+      case BatchStatus.AwaitingValidation:
+      case BatchStatus.Processing:
+        throw new BatchError(this.wrongStatusErrorType(BatchStatus.AwaitingDownload));
+    }
+    return this.state.blocks;
+  }
+
   /**
    * AwaitingDownload -> Downloading
    */
-  startDownloading(peer: PeerIdStr): BatchRequests {
+  startDownloading(peer: PeerIdStr): void {
     if (this.state.status !== BatchStatus.AwaitingDownload) {
       throw new BatchError(this.wrongStatusErrorType(BatchStatus.AwaitingDownload));
     }
 
-    this.state = {status: BatchStatus.Downloading, peer, blocks: this.state.blocks ?? []};
+    this.state = {status: BatchStatus.Downloading, peer, blocks: this.state.blocks};
   }
 
   /**
    * Downloading -> AwaitingProcessing
    * pendingDataColumns is null when a complete download is done, otherwise it contains the columns that are still pending
    */
-  downloadingSuccess(peer: PeerIdStr, blocks: IBlockInput[]): DownloadSuccessOutput {
+  downloadingSuccess(peer: PeerIdStr, blocks: IBlockInput[]): DownloadSuccessState {
     if (this.state.status !== BatchStatus.Downloading) {
       throw new BatchError(this.wrongStatusErrorType(BatchStatus.Downloading));
     }
@@ -276,6 +276,8 @@ export class Batch {
       this.requests = this.getRequests(blocks);
       this.state = {status: BatchStatus.AwaitingDownload, blocks};
     }
+
+    return this.state as DownloadSuccessState;
   }
 
   /**

From 6470907857101a0eb2d31f8b8941280794ea1027 Mon Sep 17 00:00:00 2001
From: matthewkeil
Date: Sat, 16 Aug 2025 07:02:04 +0700
Subject: [PATCH 018/173] feat: update chain and range to use downloadByRange
 and updated batch

---
 packages/beacon-node/src/sync/range/chain.ts | 106 ++++++-----------
 packages/beacon-node/src/sync/range/range.ts |  37 +++----
 2 files changed, 62 insertions(+), 81 deletions(-)

diff --git a/packages/beacon-node/src/sync/range/chain.ts b/packages/beacon-node/src/sync/range/chain.ts
index bf239ce4b72f..409b91208c49 100644
--- a/packages/beacon-node/src/sync/range/chain.ts
+++ b/packages/beacon-node/src/sync/range/chain.ts
@@ -2,7 +2,8 @@ import {ChainForkConfig} from "@lodestar/config";
 import {ForkName, isForkPostFulu} from "@lodestar/params";
 import {Epoch, Root, Slot, phase0} from "@lodestar/types";
 import {ErrorAborted, Logger, toRootHex} from "@lodestar/utils";
-import {BlockInput, BlockInputDataColumns, BlockInputType} from "../../chain/blocks/types.js";
+import {isBlockInputBlobs, isBlockInputColumns} from "../../chain/blocks/blockInput/blockInput.js";
+import {IBlockInput} from "../../chain/blocks/blockInput/types.js";
 import {Metrics} from "../../metrics/metrics.js";
 import {PeerAction, prettyPrintPeerIdStr} from "../../network/index.js";
 import {PeerSyncMeta} from "../../network/peers/peersData.js";
@@ -13,7 +14,7 @@ import {PeerIdStr} from "../../util/peerId.js";
 import {wrapError} from "../../util/wrapError.js";
 import {BATCH_BUFFER_SIZE, EPOCHS_PER_BATCH, MAX_LOOK_AHEAD_EPOCHS} from "../constants.js";
 import {RangeSyncType} from "../utils/remoteSyncType.js";
-import {Batch, BatchError, BatchErrorCode, BatchMetadata, BatchStatus} from "./batch.js";
+import {Batch, BatchError, BatchErrorCode, BatchMetadata, BatchStatus, DownloadByRangeRequests} from "./batch.js";
 import {
   ChainPeersBalancer,
   PeerSyncInfo,
@@ -39,14 +40,9 @@ export type SyncChainFns = {
    * Must return if ALL blocks are processed successfully
    * If SOME blocks are processed must throw BlockProcessorError()
    */
-  processChainSegment: (blocks: BlockInput[], syncType: RangeSyncType) => Promise<void>;
+  processChainSegment: (blocks: IBlockInput[], syncType: RangeSyncType) => Promise<void>;
   /** Must download blocks, and validate their range */
-  downloadBeaconBlocksByRange: (
-    peer: PeerSyncMeta,
-    request: phase0.BeaconBlocksByRangeRequest,
-    partialDownload: PartialDownload,
-    syncType: RangeSyncType
-  ) => Promise<{blocks: BlockInput[]; pendingDataColumns: null | number[]}>;
+  downloadByRange: (peer: PeerSyncMeta, batch: Batch, syncType: RangeSyncType) => Promise<IBlockInput[]>;
   /** Report peer for negative actions. 
Decouples from the full network instance */ reportPeer: (peer: PeerIdStr, action: PeerAction, actionName: string) => void; /** Gets current peer custodyColumns and earliestAvailableSlot */ @@ -117,7 +113,7 @@ export class SyncChain { private status = SyncChainStatus.Stopped; private readonly processChainSegment: SyncChainFns["processChainSegment"]; - private readonly downloadBeaconBlocksByRange: SyncChainFns["downloadBeaconBlocksByRange"]; + private readonly downloadByRange: SyncChainFns["downloadByRange"]; private readonly reportPeer: SyncChainFns["reportPeer"]; private readonly getConnectedPeerSyncMeta: SyncChainFns["getConnectedPeerSyncMeta"]; /** AsyncIterable that guarantees processChainSegment is run only at once at anytime */ @@ -143,7 +139,7 @@ export class SyncChain { this.target = initialTarget; this.syncType = syncType; this.processChainSegment = fns.processChainSegment; - this.downloadBeaconBlocksByRange = fns.downloadBeaconBlocksByRange; + this.downloadByRange = fns.downloadByRange; this.reportPeer = fns.reportPeer; this.getConnectedPeerSyncMeta = fns.getConnectedPeerSyncMeta; this.config = config; @@ -432,7 +428,7 @@ export class SyncChain { return null; } - const batch = new Batch(startEpoch, this.config); + const batch = new Batch(startEpoch, this.config, this.custodyConfig); this.batches.set(startEpoch, batch); return batch; } @@ -447,66 +443,53 @@ export class SyncChain { peer: prettyPrintPeerIdStr(peer.peerId), }); try { - const requests = batch.startDownloading(peer.peerId); + batch.startDownloading(peer.peerId); // wrapError ensures to never call both batch success() and batch error() - const res = await wrapError(this.downloadBeaconBlocksByRange(peer, requests, partialDownload, this.syncType)); + const res = await wrapError(this.downloadByRange(peer, batch, this.syncType)); - if (!res.err) { - const downloadSuccessOutput = batch.downloadingSuccess(peer.peerId, res.result.blocks); - if (downloadSuccessOutput.status === BatchStatus.AwaitingProcessing) { - const blocks = downloadSuccessOutput.blocks; - let hasPostDenebBlocks = false; - const blobs = blocks.reduce((acc, blockInput) => { - hasPostDenebBlocks ||= blockInput.type === BlockInputType.availableData; - return hasPostDenebBlocks - ? acc + - (blockInput.type === BlockInputType.availableData && - (blockInput.blockData.fork === ForkName.deneb || blockInput.blockData.fork === ForkName.electra) - ? blockInput.blockData.blobs.length - : 0) - : 0; - }, 0); - const dataColumns = blocks.reduce((acc, blockInput) => { - hasPostDenebBlocks ||= blockInput.type === BlockInputType.availableData; - return hasPostDenebBlocks - ? acc + - (blockInput.type === BlockInputType.availableData && isForkPostFulu(blockInput.blockData.fork) - ? 
(blockInput.blockData as BlockInputDataColumns).dataColumns.length
-                  : 0)
-              : 0;
-          }, 0);
-
-          const downloadInfo = {blocks: blocks.length};
-          if (hasPostDenebBlocks) {
-            Object.assign(downloadInfo, {blobs, dataColumns});
-          }
-          this.logger.debug("Downloaded batch", {
-            id: this.logId,
-            ...batch.getMetadata(),
-            ...downloadInfo,
-            peer: prettyPrintPeerIdStr(peer.peerId),
-          });
-          this.triggerBatchProcessor();
-        } else {
-          const pendingDataColumns = downloadSuccessOutput.pendingDataColumns.join(",");
-          this.logger.debug("Partially downloaded batch", {
-            id: this.logId,
-            ...batch.getMetadata(),
-            pendingDataColumns,
-            peer: peer.peerId,
-          });
-          // the flow will continue to call triggerBatchDownloader() below
-        }
-      } else {
+      if (res.err) {
         this.logger.verbose(
           "Batch download error",
           {id: this.logId, ...batch.getMetadata(), peer: prettyPrintPeerIdStr(peer.peerId)},
           res.err
         );
         batch.downloadingError(peer.peerId); // Throws after MAX_DOWNLOAD_ATTEMPTS
+      } else {
+        const downloadSuccessOutput = batch.downloadingSuccess(peer.peerId, res.result);
+        const logMeta: Record<string, number> = {
+          blockCount: downloadSuccessOutput.blocks.length,
+        };
+        for (const block of downloadSuccessOutput.blocks) {
+          if (isBlockInputBlobs(block)) {
+            logMeta.blobCount = (logMeta.blobCount ?? 0) + block.getLogMeta().receivedBlobs;
+          } else if (isBlockInputColumns(block)) {
+            logMeta.columnCount = (logMeta.columnCount ?? 0) + block.getLogMeta().receivedColumns;
+          }
+        }
+
+        let logMessage: string;
+        if (downloadSuccessOutput.status === BatchStatus.AwaitingProcessing) {
+          logMessage = "Finished downloading batch by range";
+          this.triggerBatchProcessor();
+        } else {
+          logMessage = "Partially downloaded batch by range. Attempting another round of downloads";
+          // the flow will continue to call triggerBatchDownloader() below
+        }
+
+        this.logger.debug(logMessage, {
+          id: this.logId,
+          epoch: batch.startEpoch,
+          ...logMeta,
+          peer: prettyPrintPeerIdStr(peer.peerId),
+        });
       }
 
       // Preemptively request more blocks from peers whilst we process current blocks
-      this.triggerBatchDownloader();
+      //
+      // TODO(fulu): Why is this second call here? It should fall through to the one below the catch block.
+      // Commenting it out for now; to be resolved during the PR process.
+      // this.triggerBatchDownloader();
     } catch (e) {
       // bubble the error up to the main async iterable loop
       this.batchProcessor.end(e as Error);
@@ -647,8 +630,9 @@ export function shouldReportPeerOnBatchError(
       return {action: PeerAction.LowToleranceError, reason: "SyncChainMaxProcessingAttempts"};
 
     // TODO: Should peers be reported for MAX_DOWNLOAD_ATTEMPTS?
-    case BatchErrorCode.WRONG_STATUS:
     case BatchErrorCode.MAX_DOWNLOAD_ATTEMPTS:
+    case BatchErrorCode.INVALID_COUNT:
+    case BatchErrorCode.WRONG_STATUS:
     case BatchErrorCode.MAX_EXECUTION_ENGINE_ERROR_ATTEMPTS:
       return null;
   }
 }
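The net effect of the chain.ts rewrite is that SyncChain performs one downloadByRange round per attempt and lets Batch decide whether another round is needed. A minimal sketch of driving that state machine, using only the Batch API introduced in these patches (driveBatch and its download callback are hypothetical stand-ins for the SyncChain wiring; error paths such as downloadingError and the retry limits are omitted):

import {Batch, BatchStatus} from "./batch.js";
import {IBlockInput} from "../../chain/blocks/blockInput/types.js";
import {PeerIdStr} from "../../util/peerId.js";

async function driveBatch(
  batch: Batch,
  peer: PeerIdStr,
  download: (batch: Batch) => Promise<IBlockInput[]>
): Promise<IBlockInput[]> {
  // keep downloading until every slot in the range has a block and all of its data
  while (batch.state.status === BatchStatus.AwaitingDownload) {
    batch.startDownloading(peer); // AwaitingDownload -> Downloading
    // batch.requests now only covers the still-missing tail of the epoch range
    const blocks = await download(batch);
    // -> AwaitingProcessing when complete, else back to AwaitingDownload with rebuilt requests
    batch.downloadingSuccess(peer, blocks);
  }
  return batch.startProcessing(); // AwaitingProcessing -> Processing; blocks go to processChainSegment
}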
- case BatchErrorCode.WRONG_STATUS: case BatchErrorCode.MAX_DOWNLOAD_ATTEMPTS: + case BatchErrorCode.INVALID_COUNT: + case BatchErrorCode.WRONG_STATUS: case BatchErrorCode.MAX_EXECUTION_ENGINE_ERROR_ATTEMPTS: return null; } diff --git a/packages/beacon-node/src/sync/range/range.ts b/packages/beacon-node/src/sync/range/range.ts index 45e90f68c5c1..465347f6fe76 100644 --- a/packages/beacon-node/src/sync/range/range.ts +++ b/packages/beacon-node/src/sync/range/range.ts @@ -4,13 +4,16 @@ import {computeStartSlotAtEpoch} from "@lodestar/state-transition"; import {Epoch, Status, fulu} from "@lodestar/types"; import {Logger, toRootHex} from "@lodestar/utils"; import {StrictEventEmitter} from "strict-event-emitter-types"; +import {BlockInputSource} from "../../chain/blocks/blockInput/types.js"; +import {isDaOutOfRange} from "../../chain/blocks/blockInput/utils.js"; import {AttestationImportOpt, ImportBlockOpts} from "../../chain/blocks/index.js"; import {IBeaconChain} from "../../chain/index.js"; import {Metrics} from "../../metrics/index.js"; import {INetwork} from "../../network/index.js"; -import {beaconBlocksMaybeBlobsByRange} from "../../network/reqresp/beaconBlocksMaybeBlobsByRange.js"; import {PeerIdStr} from "../../util/peerId.js"; +import {cacheByRangeResponses, downloadByRange} from "../utils/downloadByRange.js"; import {RangeSyncType, getRangeSyncTarget, rangeSyncTypes} from "../utils/remoteSyncType.js"; +import {BatchStateAwaitingDownload} from "./batch.js"; import {ChainTarget, SyncChain, SyncChainDebugState, SyncChainFns} from "./chain.js"; import {updateChains} from "./utils/index.js"; @@ -199,24 +202,18 @@ export class RangeSync extends (EventEmitter as {new (): RangeSyncEmitter}) { } }; - /** Convenience method for `SyncChain` */ - private downloadBeaconBlocksByRange: SyncChainFns["downloadBeaconBlocksByRange"] = async ( - peer, - request, - partialDownload, - syncType: RangeSyncType - ) => { - return beaconBlocksMaybeBlobsByRange( - this.config, - this.network, - peer, - request, - this.chain.clock.currentEpoch, - partialDownload, - syncType, - this.metrics, - this.logger - ); + private downloadByRange: SyncChainFns["downloadByRange"] = async (peer, batch, _syncType) => { + const byRangeResponse = await downloadByRange({ + config: this.config, + network: this.network, + logger: this.logger, + peerIdStr: peer.peerId, + daOutOfRange: isDaOutOfRange(this.config, batch.forkName, batch.startSlot, this.chain.clock.currentEpoch), + ...batch.requests, + }); + const existingBlocks = batch.getBlocks(); + const cached = cacheByRangeResponses(peer.peerId, byRangeResponse, existingBlocks, this.chain.seenBlockInputCache); + return cached; }; /** Convenience method for `SyncChain` */ @@ -247,7 +244,7 @@ export class RangeSync extends (EventEmitter as {new (): RangeSyncEmitter}) { syncType, { processChainSegment: this.processChainSegment, - downloadBeaconBlocksByRange: this.downloadBeaconBlocksByRange, + downloadByRange: this.downloadByRange, reportPeer: this.reportPeer, getConnectedPeerSyncMeta: this.getConnectedPeerSyncMeta, onEnd: this.onSyncChainEnd, From 6470907857101a0eb2d31f8b8941280794ea1027 Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Sat, 16 Aug 2025 07:02:16 +0700 Subject: [PATCH 019/173] feat: add throwOnDuplicateAdd to IBlockInput.addBlock --- .../src/chain/blocks/blockInput/blockInput.ts | 70 +++++++++++-------- .../src/chain/blocks/blockInput/types.ts | 2 +- 2 files changed, 41 insertions(+), 31 deletions(-) diff --git 
a/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts b/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts index 907a0939e6ee..9bcea3e154ce 100644 --- a/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts +++ b/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts @@ -222,14 +222,16 @@ export class BlockInputPreData extends AbstractBlockInput { return new BlockInputPreData(init, state); } - addBlock(_: AddBlock): void { - throw new BlockInputError( - { - code: BlockInputErrorCode.INVALID_CONSTRUCTION, - blockRoot: this.blockRootHex, - }, - "Cannot addBlock to BlockInputPreData" - ); + addBlock(_: AddBlock, opts = {throwOnDuplicateAdd: true}): void { + if (opts.throwOnDuplicateAdd) { + throw new BlockInputError( + { + code: BlockInputErrorCode.INVALID_CONSTRUCTION, + blockRoot: this.blockRootHex, + }, + "Cannot addBlock to BlockInputPreData" + ); + } } } @@ -335,17 +337,7 @@ export class BlockInputBlobs extends AbstractBlockInput): void { - if (this.state.hasBlock) { - throw new BlockInputError( - { - code: BlockInputErrorCode.INVALID_CONSTRUCTION, - blockRoot: this.blockRootHex, - }, - "Cannot addBlock to BlockInputBlobs after it already has a block" - ); - } - + addBlock({blockRootHex, block, source}: AddBlock, opts = {throwOnDuplicateAdd: true}): void { // this check suffices for checking slot, parentRoot, and forkName if (blockRootHex !== this.blockRootHex) { throw new BlockInputError( @@ -360,6 +352,20 @@ export class BlockInputBlobs extends AbstractBlockInput): void { - if (this.state.hasBlock) { - throw new BlockInputError( - { - code: BlockInputErrorCode.INVALID_CONSTRUCTION, - blockRoot: this.blockRootHex, - }, - "Cannot addBlock to BlockInputColumns after it already has a block" - ); - } - + addBlock(props: AddBlock, opts = {throwOnDuplicateAdd: true}): void { if (props.blockRootHex !== this.blockRootHex) { throw new BlockInputError( { @@ -680,6 +676,20 @@ export class BlockInputColumns extends AbstractBlockInput): void; + addBlock(props: AddBlock, opts?: {throwOnDuplicateAdd: boolean}): void; /** Whether the block has been seen and validated. 
If true, `getBlock` is guaranteed to not throw */ hasBlock(): boolean; getBlock(): SignedBeaconBlock; From 32adcd2f02a2ab6a3f176dcf8ae081ec7ff7ad69 Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Sat, 16 Aug 2025 07:36:49 +0700 Subject: [PATCH 020/173] feat: add throwOnDuplicate option for blobs and columns --- .../src/chain/blocks/blockInput/blockInput.ts | 63 ++++++++++++++----- 1 file changed, 47 insertions(+), 16 deletions(-) diff --git a/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts b/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts index 9bcea3e154ce..0be730804cb2 100644 --- a/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts +++ b/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts @@ -395,7 +395,27 @@ export class BlockInputBlobs extends AbstractBlockInput Date: Sat, 16 Aug 2025 07:38:02 +0700 Subject: [PATCH 021/173] feat: rough out byRange caching --- packages/beacon-node/src/sync/range/range.ts | 14 +- .../src/sync/utils/downloadByRange.ts | 163 +++++++++++++++++- 2 files changed, 165 insertions(+), 12 deletions(-) diff --git a/packages/beacon-node/src/sync/range/range.ts b/packages/beacon-node/src/sync/range/range.ts index 465347f6fe76..e4640e5b9f08 100644 --- a/packages/beacon-node/src/sync/range/range.ts +++ b/packages/beacon-node/src/sync/range/range.ts @@ -202,8 +202,8 @@ export class RangeSync extends (EventEmitter as {new (): RangeSyncEmitter}) { } }; - private downloadByRange: SyncChainFns["downloadByRange"] = async (peer, batch, _syncType) => { - const byRangeResponse = await downloadByRange({ + private downloadByRange: SyncChainFns["downloadByRange"] = async (peer, batch, syncType) => { + const responses = await downloadByRange({ config: this.config, network: this.network, logger: this.logger, @@ -211,8 +211,14 @@ export class RangeSync extends (EventEmitter as {new (): RangeSyncEmitter}) { daOutOfRange: isDaOutOfRange(this.config, batch.forkName, batch.startSlot, this.chain.clock.currentEpoch), ...batch.requests, }); - const existingBlocks = batch.getBlocks(); - const cached = cacheByRangeResponses(peer.peerId, byRangeResponse, existingBlocks, this.chain.seenBlockInputCache); + const cached = cacheByRangeResponses({ + config: this.config, + cache: this.chain.seenBlockInputCache, + syncType, + peerIdStr: peer.peerId, + responses, + batchBlocks: batch.getBlocks(), + }); return cached; }; diff --git a/packages/beacon-node/src/sync/utils/downloadByRange.ts b/packages/beacon-node/src/sync/utils/downloadByRange.ts index 757976af8d18..c1ab505b7dd3 100644 --- a/packages/beacon-node/src/sync/utils/downloadByRange.ts +++ b/packages/beacon-node/src/sync/utils/downloadByRange.ts @@ -2,12 +2,19 @@ import {ChainForkConfig} from "@lodestar/config"; import {ForkPostDeneb, isForkPostDeneb, isForkPostFulu} from "@lodestar/params"; import {DataAvailabilityStatus} from "@lodestar/state-transition"; import {RootHex, SignedBeaconBlock, Slot, deneb, fulu, phase0} from "@lodestar/types"; -import {LodestarError, Logger, prettyBytes, prettyPrintIndices} from "@lodestar/utils"; -import {BlockInputSource, IBlockInput} from "../../chain/blocks/blockInput/index.js"; +import {LodestarError, Logger, prettyBytes, prettyPrintIndices, toRootHex} from "@lodestar/utils"; +import { + BlockInputSource, + DAType, + IBlockInput, + isBlockInputBlobs, + isBlockInputColumns, +} from "../../chain/blocks/blockInput/index.js"; import {SeenBlockInput} from "../../chain/seenCache/seenGossipBlockInput.js"; import {INetwork, prettyPrintPeerIdStr} from 
"../../network/index.js"; import {linspace} from "../../util/numpy.js"; import {PeerIdStr} from "../../util/peerId.js"; +import {RangeSyncType} from "./remoteSyncType.js"; export type DownloadByRangeRequests = { blocksRequest: phase0.BeaconBlocksByRangeRequest; @@ -27,7 +34,7 @@ export type DownloadAndCacheByRangeProps = DownloadByRangeRequests & { network: INetwork; logger: Logger; peerIdStr: string; - dataAvailabilityStatus: DataAvailabilityStatus; + daOutOfRange: boolean; }; export type DownloadAndCacheByRangeResults = { @@ -37,6 +44,138 @@ export type DownloadAndCacheByRangeResults = { numberOfColumns: number; }; +export type CacheByRangeResponsesProps = { + config: ChainForkConfig; + cache: SeenBlockInput; + syncType: RangeSyncType; + peerIdStr: PeerIdStr; + responses: DownloadByRangeResponses; + batchBlocks: IBlockInput[]; +}; + +export async function cacheByRangeResponses({ + config, + cache, + syncType, + peerIdStr, + responses, + batchBlocks, +}: CacheByRangeResponsesProps): IBlockInput[] { + const source = BlockInputSource.byRange; + const seenTimestampSec = Date.now() / 1000; + const updatedBatchBlocks = [...batchBlocks]; + + for (const block of responses.blocks ?? []) { + const existing = updatedBatchBlocks.find((b) => b.slot === block.message.slot); + if (existing) { + const blockRoot = config.getForkTypes(block.message.slot).BeaconBlock.hashTreeRoot(block.message); + const blockRootHex = toRootHex(blockRoot); + // will throw if root hex does not match (meaning we are following the wrong chain) + existing.addBlock( + { + block, + blockRootHex, + source: { + source, + peerIdStr, + seenTimestampSec, + }, + }, + {throwOnDuplicateAdd: false} + ); + } else { + updatedBatchBlocks.push( + cache.getByBlock({ + block, + source, + peerIdStr, + seenTimestampSec, + }) + ); + } + } + + for (const blobSidecar of responses.blobSidecars ?? []) { + const existing = updatedBatchBlocks.find((b) => b.slot === blobSidecar.signedBlockHeader.message.slot); + if (existing) { + const blockRoot = config + .getForkTypes(existing.slot) + .BeaconBlockHeader.hashTreeRoot(blobSidecar.signedBlockHeader.message); + const blockRootHex = toRootHex(blockRoot); + if (!isBlockInputBlobs(existing)) { + throw new DownloadByRangeError({ + code: DownloadByRangeErrorCode.MISMATCH_BLOCK_INPUT_TYPE, + cachedType: existing.type, + expectedType: DAType.Blobs, + slot: existing.slot, + blockRoot: prettyBytes(existing.blockRootHex), + }); + } + // will throw if root hex does not match (meaning we are following the wrong chain) + existing.addBlob( + { + blobSidecar, + blockRootHex, + seenTimestampSec, + peerIdStr, + source, + }, + {throwOnDuplicateAdd: false} + ); + } else { + updatedBatchBlocks.push( + cache.getByBlob({ + blobSidecar, + source, + peerIdStr, + seenTimestampSec, + }) + ); + } + } + + for (const columnSidecar of responses.columnSidecars ?? 
 export async function downloadAndCacheByRange(
   request: DownloadAndCacheByRangeProps
 ): Promise<DownloadAndCacheByRangeResults> {
@@ -158,14 +297,14 @@ export async function downloadByRange({
   network,
   logger,
   peerIdStr,
-  dataAvailabilityStatus,
+  daOutOfRange,
   blocksRequest,
   blobsRequest,
   columnsRequest,
 }: Omit<DownloadAndCacheByRangeProps, "cache">): Promise<DownloadByRangeResponses> {
   const slotRangeString = validateRequests({
     config,
-    dataAvailabilityStatus,
+    daOutOfRange,
     blocksRequest,
     blobsRequest,
     columnsRequest,
@@ -206,7 +345,7 @@ export async function downloadByRange({
  */
 export function validateRequests({
   config,
-  dataAvailabilityStatus,
+  daOutOfRange,
   blocksRequest,
   blobsRequest,
   columnsRequest,
@@ -223,14 +362,14 @@ export function validateRequests({
     });
   }
 
-  if (dataAvailabilityStatus !== DataAvailabilityStatus.Available) {
+  if (daOutOfRange) {
     if (dataRequest) {
       throw new DownloadByRangeError(
         {
           code: DownloadByRangeErrorCode.INVALID_DATA_REQUEST,
           slotRange,
         },
-        "Cannot request data if it is not available"
+        "Cannot request data if it is outside of the availability range"
       );
     }
 
@@ -745,6 +884,7 @@ export enum DownloadByRangeErrorCode {
   EXTRA_COLUMNS_SOME_SLOTS = "DOWNLOAD_BY_RANGE_ERROR_EXTRA_COLUMNS_SOME_SLOTS",
   PEER_CUSTODY_FAILURE = "DOWNLOAD_BY_RANGE_ERROR_PEER_CUSTODY_FAILURE",
   CACHING_ERROR = "DOWNLOAD_BY_RANGE_CACHING_ERROR",
+  MISMATCH_BLOCK_INPUT_TYPE = "DOWNLOAD_BY_RANGE_MISMATCH_BLOCK_INPUT_TYPE",
 }
 
 export type DownloadByRangeErrorType =
@@ -841,6 +981,13 @@ export type DownloadByRangeErrorType =
       code: DownloadByRangeErrorCode.PEER_CUSTODY_FAILURE;
       peerId: string;
       missingColumns: string;
+    }
+  | {
+      code: DownloadByRangeErrorCode.MISMATCH_BLOCK_INPUT_TYPE;
+      expectedType: DAType;
+      cachedType: DAType;
+      slot: Slot;
+      blockRoot: string;
     };
 
 export class DownloadByRangeError extends LodestarError<DownloadByRangeErrorType> {}

From dfc11d4766fe6775a59186a19c29f4e6e87115aa Mon Sep 17 00:00:00 2001
From: matthewkeil
Date: Mon, 18 Aug 2025 21:08:58 +0700
Subject: [PATCH 022/173] refactor: remove beaconBlocksMaybeBlobsByRange

---
 .../reqresp/beaconBlocksMaybeBlobsByRange.ts  | 507 ------------------
 1 file changed, 507 deletions(-)
 delete mode 100644 packages/beacon-node/src/network/reqresp/beaconBlocksMaybeBlobsByRange.ts

diff --git a/packages/beacon-node/src/network/reqresp/beaconBlocksMaybeBlobsByRange.ts b/packages/beacon-node/src/network/reqresp/beaconBlocksMaybeBlobsByRange.ts
deleted file mode 100644
index 5497f4500541..000000000000
--- a/packages/beacon-node/src/network/reqresp/beaconBlocksMaybeBlobsByRange.ts
+++ /dev/null
@@ -1,507 +0,0 @@
-import {ChainForkConfig} from "@lodestar/config";
-import {ForkName, 
ForkSeq} from "@lodestar/params"; -import {computeEpochAtSlot} from "@lodestar/state-transition"; -import { - ColumnIndex, - Epoch, - SignedBeaconBlock, - Slot, - WithOptionalBytes, - deneb, - fulu, - phase0, - ssz, -} from "@lodestar/types"; -import {Logger} from "@lodestar/utils"; -import { - BlobsSource, - BlockInput, - BlockInputBlobs, - BlockInputDataColumns, - BlockInputType, - BlockSource, - CachedData, - CachedDataColumns, - DataColumnsSource, - getBlockInput, - getBlockInputDataColumns, -} from "../../chain/blocks/types.js"; -import {getEmptyBlockInputCacheEntry} from "../../chain/seenCache/seenGossipBlockInput.js"; -import {Metrics} from "../../metrics/index.js"; -import {RangeSyncType} from "../../sync/utils/remoteSyncType.js"; -import {PeerIdStr} from "../../util/peerId.js"; -import {INetwork} from "../interface.js"; -import {PeerSyncMeta} from "../peers/peersData.js"; -import {PeerAction} from "../peers/score/interface.js"; - -export type PartialDownload = null | {blocks: BlockInput[]; pendingDataColumns: number[]}; -export const SyncSourceByRoot = "ByRoot" as const; -export type SyncSource = RangeSyncType | typeof SyncSourceByRoot; - -/** - * Download blocks and blobs (prefulu) or data columns (fulu) by range. - * returns: - * - array of blocks with blobs or data columns - * - pendingDataColumns: null if all data columns are present, or array of column indexes that are missing. Also null for prefulu - */ -export async function beaconBlocksMaybeBlobsByRange( - config: ChainForkConfig, - network: INetwork, - peer: PeerSyncMeta, - request: phase0.BeaconBlocksByRangeRequest, - currentEpoch: Epoch, - partialDownload: PartialDownload, - syncSource: SyncSource, - metrics: Metrics | null, - logger?: Logger -): Promise<{blocks: BlockInput[]; pendingDataColumns: null | number[]}> { - const {peerId, client: peerClient, custodyGroups: peerColumns, earliestAvailableSlot} = peer; - // Code below assumes the request is in the same epoch - // Range sync satisfies this condition, but double check here for sanity - const {startSlot, count} = request; - if (count < 1) { - throw Error(`Invalid count=${count} in BeaconBlocksByRangeRequest`); - } - const endSlot = startSlot + count - 1; - - const startEpoch = computeEpochAtSlot(startSlot); - const endEpoch = computeEpochAtSlot(endSlot); - if (startEpoch !== endEpoch) { - throw Error( - `BeaconBlocksByRangeRequest must be in the same epoch startEpoch=${startEpoch} != endEpoch=${endEpoch}` - ); - } - - const forkSeq = config.getForkSeq(startSlot); - - // Note: Assumes all blocks in the same epoch - if (forkSeq < ForkSeq.deneb) { - const beaconBlocks = await network.sendBeaconBlocksByRange(peerId, request); - if (beaconBlocks.length === 0) { - throw Error( - `peerId=${peerId} peerClient=${peerClient} returned no blocks for BeaconBlocksByRangeRequest ${JSON.stringify(request)}` - ); - } - - const blocks = beaconBlocks.map((block) => getBlockInput.preData(config, block.data, BlockSource.byRange)); - return {blocks, pendingDataColumns: null}; - } - - // From Deneb - // Only request blobs if they are recent enough - if (startEpoch >= currentEpoch - config.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS) { - if (forkSeq < ForkSeq.fulu) { - const [allBlocks, allBlobSidecars] = await Promise.all([ - network.sendBeaconBlocksByRange(peerId, request), - network.sendBlobSidecarsByRange(peerId, request), - ]); - - if (allBlocks.length === 0) { - throw Error( - `peerId=${peerId} peerClient=${peerClient} returns no blocks allBlobSidecars=${allBlobSidecars.length} for 
BeaconBlocksByRangeRequest ${JSON.stringify(request)}` - ); - } - - const blocks = matchBlockWithBlobs( - config, - allBlocks, - allBlobSidecars, - endSlot, - BlockSource.byRange, - BlobsSource.byRange, - syncSource - ); - return {blocks, pendingDataColumns: null}; - } - - // From fulu, get columns - const sampledColumns = network.custodyConfig.sampledColumns; - const neededColumns = partialDownload ? partialDownload.pendingDataColumns : sampledColumns; - - // This should never throw. Already checking for this in ChainPeerBalancer when selecting the peer - if ((earliestAvailableSlot ?? 0) > startSlot) { - throw new Error( - `earliestAvailableSlot=${earliestAvailableSlot} not respected for ByRange startSlot=${startSlot}` - ); - } - - // get match - const columns = peerColumns.reduce((acc, elem) => { - if (neededColumns.includes(elem)) { - acc.push(elem); - } - return acc; - }, [] as number[]); - - if (columns.length === 0 && partialDownload !== null) { - // this peer has nothing to offer and should not have been selected for batch download - // throw error? - return partialDownload; - } - - const pendingDataColumns = neededColumns.reduce((acc, elem) => { - if (!columns.includes(elem)) { - acc.push(elem); - } - return acc; - }, [] as number[]); - - const dataColumnRequest = {...request, columns}; - const [allBlocks, allDataColumnSidecars] = await Promise.all([ - // TODO-das: investigate why partialDownload blocks is empty here - partialDownload && partialDownload.blocks.length > 0 - ? partialDownload.blocks.map((blockInput) => ({data: blockInput.block})) - : network.sendBeaconBlocksByRange(peerId, request), - columns.length === 0 ? [] : network.sendDataColumnSidecarsByRange(peerId, dataColumnRequest), - ]); - logger?.debug("ByRange requests", { - beaconBlocksRequest: JSON.stringify(ssz.phase0.BeaconBlocksByRangeRequest.toJson(request)), - dataColumnRequest: JSON.stringify(ssz.fulu.DataColumnSidecarsByRangeRequest.toJson(dataColumnRequest)), - [`allBlocks(${allBlocks.length})`]: allBlocks.map((blk) => blk.data.message.slot).join(" "), - [`allDataColumnSidecars(${allDataColumnSidecars.length})`]: allDataColumnSidecars - .map((dCol) => `${dCol.signedBlockHeader.message.slot}:${dCol.index}`) - .join(" "), - peerColumns: peerColumns.join(" "), - peerId, - peerClient, - prevPartialDownload: !!partialDownload, - }); - - if (allBlocks.length === 0) { - throw Error( - `peerId=${peerId} peerClient=${peerClient} returns no blocks dataColumnSidecars=${allDataColumnSidecars.length} for BeaconBlocksByRangeRequest ${JSON.stringify(request)}` - ); - } - - const blocks = matchBlockWithDataColumns( - network, - peerId, - config, - sampledColumns, - columns, - allBlocks, - allDataColumnSidecars, - endSlot, - BlockSource.byRange, - DataColumnsSource.byRange, - partialDownload, - peerClient, - syncSource, - metrics, - logger - ); - - return {blocks, pendingDataColumns: pendingDataColumns.length > 0 ? 
pendingDataColumns : null}; - } - - logger?.verbose( - `Download range is out of ${config.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS} epochs, skip Blobs and DataColumnSidecars download`, - { - startEpoch, - startSlot, - endSlot, - currentEpoch, - } - ); - - // Data is out of range, only request blocks - const blocks = await network.sendBeaconBlocksByRange(peerId, request); - if (blocks.length === 0) { - throw Error( - `peerId=${peerId} peerClient=${peerClient} returned no blocks for BeaconBlocksByRangeRequest ${JSON.stringify(request)}` - ); - } - return { - blocks: blocks.map((block) => getBlockInput.outOfRangeData(config, block.data, BlockSource.byRange)), - // null means all data columns are present - pendingDataColumns: null, - }; -} - -// Assumes that the blobs are in the same sequence as blocks, doesn't require block to be sorted -export function matchBlockWithBlobs( - config: ChainForkConfig, - allBlocks: WithOptionalBytes[], - allBlobSidecars: deneb.BlobSidecar[], - endSlot: Slot, - blockSource: BlockSource, - blobsSource: BlobsSource, - syncSource: SyncSource -): BlockInput[] { - const blockInputs: BlockInput[] = []; - let blobSideCarIndex = 0; - let lastMatchedSlot = -1; - - // Match blobSideCar with the block as some blocks would have no blobs and hence - // would be omitted from the response. If there are any inconsitencies in the - // response, the validations during import will reject the block and hence this - // entire segment. - // - // Assuming that the blocks and blobs will come in same sorted order - for (let i = 0; i < allBlocks.length; i++) { - const block = allBlocks[i]; - if (config.getForkSeq(block.data.message.slot) < ForkSeq.deneb) { - blockInputs.push(getBlockInput.preData(config, block.data, blockSource)); - } else { - const blobSidecars: deneb.BlobSidecar[] = []; - - const blockRoot = config.getForkTypes(block.data.message.slot).BeaconBlock.hashTreeRoot(block.data.message); - const matchBlob = (blobSidecar?: deneb.BlobSidecar): boolean => { - if (blobSidecar === undefined) { - return false; - } - - if (syncSource === RangeSyncType.Head || syncSource === SyncSourceByRoot) { - return ( - Buffer.compare( - ssz.phase0.BeaconBlockHeader.hashTreeRoot(blobSidecar.signedBlockHeader.message), - blockRoot - ) === 0 - ); - } - - // For finalized range sync, we can just match by slot - return blobSidecar.signedBlockHeader.message.slot === block.data.message.slot; - }; - - while (matchBlob(allBlobSidecars[blobSideCarIndex])) { - blobSidecars.push(allBlobSidecars[blobSideCarIndex]); - lastMatchedSlot = block.data.message.slot; - blobSideCarIndex++; - } - - // Quick inspect how many blobSidecars was expected - const blobKzgCommitmentsLen = (block.data.message.body as deneb.BeaconBlockBody).blobKzgCommitments.length; - if (blobKzgCommitmentsLen !== blobSidecars.length) { - throw Error( - `Missing blobSidecars for blockSlot=${block.data.message.slot} with blobKzgCommitmentsLen=${blobKzgCommitmentsLen} blobSidecars=${blobSidecars.length}` - ); - } - - const blockData = { - fork: config.getForkName(block.data.message.slot), - blobs: blobSidecars, - blobsSource, - } as BlockInputBlobs; - - blockInputs.push(getBlockInput.availableData(config, block.data, blockSource, blockData)); - } - } - - // If there are still unconsumed blobs this means that the response was inconsistent - // and matching was wrong and hence we should throw error - if ( - allBlobSidecars[blobSideCarIndex] !== undefined && - // If there are no blobs, the blobs request can give 1 block outside the requested range - 
allBlobSidecars[blobSideCarIndex].signedBlockHeader.message.slot <= endSlot - ) { - throw Error( - `Unmatched blobSidecars, blocks=${allBlocks.length}, blobs=${ - allBlobSidecars.length - } lastMatchedSlot=${lastMatchedSlot}, pending blobSidecars slots=${allBlobSidecars - .slice(blobSideCarIndex) - .map((blb) => blb.signedBlockHeader.message.slot) - .join(" ")}` - ); - } - return blockInputs; -} - -export function matchBlockWithDataColumns( - network: INetwork, - peerId: PeerIdStr, - config: ChainForkConfig, - sampledColumns: ColumnIndex[], - requestedColumns: number[], - allBlocks: WithOptionalBytes[], - allDataColumnSidecars: fulu.DataColumnSidecar[], - endSlot: Slot, - blockSource: BlockSource, - dataColumnsSource: DataColumnsSource, - prevPartialDownload: null | PartialDownload, - peerClient: string, - syncSource: SyncSource, - metrics: Metrics | null, - logger?: Logger -): BlockInput[] { - const blockInputs: BlockInput[] = []; - let dataColumnSideCarIndex = 0; - let lastMatchedSlot = -1; - const neededColumns = prevPartialDownload?.pendingDataColumns ?? sampledColumns; - const shouldHaveAllData = neededColumns.reduce((acc, elem) => acc && requestedColumns.includes(elem), true); - - // Match dataColumnSideCar with the block as some blocks would have no dataColumns and hence - // would be omitted from the response. If there are any inconsitencies in the - // response, the validations during import will reject the block and hence this - // entire segment. - // - // Assuming that the blocks and blobs will come in same sorted order - for (let i = 0; i < allBlocks.length; i++) { - const block = allBlocks[i]; - - const forkSeq = config.getForkSeq(block.data.message.slot); - if (forkSeq < ForkSeq.fulu) { - throw Error(`Invalid block forkSeq=${forkSeq} < ForSeq.fulu for matchBlockWithDataColumns`); - } - const dataColumnSidecars: fulu.DataColumnSidecar[] = []; - const blockRoot = config.getForkTypes(block.data.message.slot).BeaconBlock.hashTreeRoot(block.data.message); - const matchDataColumnSidecar = (dataColumnSidecar?: fulu.DataColumnSidecar): boolean => { - if (dataColumnSidecar === undefined) { - return false; - } - - if (syncSource === RangeSyncType.Head || syncSource === SyncSourceByRoot) { - return ( - Buffer.compare( - ssz.phase0.BeaconBlockHeader.hashTreeRoot(dataColumnSidecar.signedBlockHeader.message), - blockRoot - ) === 0 - ); - } - - // For finalized range sync, we can just match by slot - return dataColumnSidecar.signedBlockHeader.message.slot === block.data.message.slot; - }; - while (matchDataColumnSidecar(allDataColumnSidecars[dataColumnSideCarIndex])) { - dataColumnSidecars.push(allDataColumnSidecars[dataColumnSideCarIndex]); - lastMatchedSlot = block.data.message.slot; - dataColumnSideCarIndex++; - } - metrics?.dataColumns.bySource.inc({source: DataColumnsSource.byRange}, dataColumnSidecars.length); - - const blobKzgCommitmentsLen = (block.data.message.body as deneb.BeaconBlockBody).blobKzgCommitments.length; - logger?.debug("processing matchBlockWithDataColumns", { - blobKzgCommitmentsLen, - dataColumnSidecars: dataColumnSidecars.length, - shouldHaveAllData, - neededColumns: neededColumns.join(" "), - requestedColumns: requestedColumns.join(" "), - slot: block.data.message.slot, - dataColumnsSlots: dataColumnSidecars.map((dcm) => dcm.signedBlockHeader.message.slot).join(" "), - peerClient, - }); - if (blobKzgCommitmentsLen === 0) { - if (dataColumnSidecars.length > 0) { - // only penalize peer with Finalized range sync or "ByRoot" sync source - if (syncSource !== 
RangeSyncType.Head) { - network.reportPeer(peerId, PeerAction.LowToleranceError, "Missing or mismatching dataColumnSidecars"); - } - throw Error( - `Missing or mismatching dataColumnSidecars from peerId=${peerId} for blockSlot=${block.data.message.slot} with blobKzgCommitmentsLen=0 dataColumnSidecars=${dataColumnSidecars.length}>0` - ); - } - - const blockData = { - fork: config.getForkName(block.data.message.slot), - dataColumns: [], - dataColumnsBytes: [], - dataColumnsSource, - } as BlockInputDataColumns; - blockInputs.push(getBlockInput.availableData(config, block.data, blockSource, blockData)); - } else { - // Quick inspect how many blobSidecars was expected - const dataColumnIndexes = dataColumnSidecars.map((dataColumnSidecar) => dataColumnSidecar.index); - const requestedColumnsPresent = requestedColumns.reduce( - (acc, columnIndex) => acc && dataColumnIndexes.includes(columnIndex), - true - ); - - logger?.debug("matchBlockWithDataColumns2", { - dataColumnIndexes: dataColumnIndexes.join(" "), - requestedColumnsPresent, - slot: block.data.message.slot, - peerClient, - }); - - if (dataColumnSidecars.length !== requestedColumns.length || !requestedColumnsPresent) { - logger?.debug( - `Missing or mismatching dataColumnSidecars from peerId=${peerId} for blockSlot=${block.data.message.slot} with numColumns=${sampledColumns.length} dataColumnSidecars=${dataColumnSidecars.length} requestedColumnsPresent=${requestedColumnsPresent} received dataColumnIndexes=${dataColumnIndexes.join(" ")} requested=${requestedColumns.join(" ")}`, - { - allBlocks: allBlocks.length, - allDataColumnSidecars: allDataColumnSidecars.length, - peerId, - blobKzgCommitmentsLen, - peerClient, - } - ); - // only penalize peer with Finalized range sync or "ByRoot" sync source - if (syncSource !== RangeSyncType.Head) { - network.reportPeer(peerId, PeerAction.LowToleranceError, "Missing or mismatching dataColumnSidecars"); - } - throw Error( - `Missing or mismatching dataColumnSidecars from peerId=${peerId} for blockSlot=${block.data.message.slot} blobKzgCommitmentsLen=${blobKzgCommitmentsLen} with numColumns=${sampledColumns.length} dataColumnSidecars=${dataColumnSidecars.length} requestedColumnsPresent=${requestedColumnsPresent} received dataColumnIndexes=${dataColumnIndexes.join(" ")} requested=${requestedColumns.join(" ")}` - ); - } - - let cachedData: CachedData; - // TODO-das: investigate why partialDownload blocks is empty here - if (prevPartialDownload !== null && prevPartialDownload.blocks.length > 0) { - const prevBlockInput = prevPartialDownload.blocks[i]; - if (prevBlockInput.type !== BlockInputType.dataPromise) { - throw Error(`prevBlockInput.type=${prevBlockInput.type} in prevPartialDownload`); - } - cachedData = prevBlockInput.cachedData; - } else { - // biome-ignore lint/style/noNonNullAssertion: checked below for validity - cachedData = getEmptyBlockInputCacheEntry(config.getForkName(block.data.message.slot), -1).cachedData!; - if (cachedData === undefined) { - throw Error("Invalid cachedData=undefined from getEmptyBlockInputCacheEntry"); - } - } - - if (cachedData.fork !== ForkName.fulu) { - throw Error("Invalid fork for cachedData on dataColumns"); - } - - for (const dataColumnSidecar of dataColumnSidecars) { - (cachedData as CachedDataColumns).dataColumnsCache.set(dataColumnSidecar.index, { - dataColumn: dataColumnSidecar, - dataColumnBytes: null, - }); - } - - if (shouldHaveAllData) { - const {dataColumns, dataColumnsBytes} = getBlockInputDataColumns( - (cachedData as 
CachedDataColumns).dataColumnsCache, - sampledColumns - ); - - const blockData = { - fork: config.getForkName(block.data.message.slot), - dataColumns, - dataColumnsBytes, - dataColumnsSource, - } as BlockInputDataColumns; - - // TODO DENEB: instead of null, pass payload in bytes - blockInputs.push(getBlockInput.availableData(config, block.data, blockSource, blockData)); - } else { - blockInputs.push(getBlockInput.dataPromise(config, block.data, blockSource, cachedData)); - } - } - } - - // for head sync, there could be unconsumed data column sidecars because the retried peers may have higher head - if ( - allDataColumnSidecars[dataColumnSideCarIndex] !== undefined && - // If there are no data columns, the data columns request can give 1 block outside the requested range - allDataColumnSidecars[dataColumnSideCarIndex].signedBlockHeader.message.slot <= endSlot && - // only penalize peer with Finalized range sync or "ByRoot" sync source - syncSource !== RangeSyncType.Head - ) { - network.reportPeer(peerId, PeerAction.LowToleranceError, "Unmatched dataColumnSidecars"); - throw Error( - `Unmatched dataColumnSidecars, blocks=${allBlocks.length}, blobs=${ - allDataColumnSidecars.length - } lastMatchedSlot=${lastMatchedSlot}, pending dataColumnSidecars slots=${allDataColumnSidecars - .slice(dataColumnSideCarIndex) - .map((blb) => blb.signedBlockHeader.message.slot) - .join(" ")} endSlot=${endSlot}, peerId=${peerId}, peerClient=${peerClient}` - ); - } - logger?.debug("matched BlockWithDataColumns", { - peerClient, - blockInputs: blockInputs.map((bInpt) => `${bInpt.block.message.slot}=${bInpt.type}`).join(" "), - }); - return blockInputs; -} From aed92ef188d50f5b7fd5f67d0ca70324c664e53d Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Mon, 18 Aug 2025 22:33:24 +0700 Subject: [PATCH 023/173] refactor: remove beaconBlockMaybeBlobsByRoot --- .../reqresp/beaconBlocksMaybeBlobsByRoot.ts | 680 ------------------ .../beacon-node/src/network/reqresp/index.ts | 2 - 2 files changed, 682 deletions(-) delete mode 100644 packages/beacon-node/src/network/reqresp/beaconBlocksMaybeBlobsByRoot.ts diff --git a/packages/beacon-node/src/network/reqresp/beaconBlocksMaybeBlobsByRoot.ts b/packages/beacon-node/src/network/reqresp/beaconBlocksMaybeBlobsByRoot.ts deleted file mode 100644 index f24febac8c7e..000000000000 --- a/packages/beacon-node/src/network/reqresp/beaconBlocksMaybeBlobsByRoot.ts +++ /dev/null @@ -1,680 +0,0 @@ -import {toHexString} from "@chainsafe/ssz"; -import {routes} from "@lodestar/api"; -import {ChainForkConfig} from "@lodestar/config"; -import {ForkName, ForkSeq} from "@lodestar/params"; -import {signedBlockToSignedHeader} from "@lodestar/state-transition"; -import {RootHex, SignedBeaconBlock, deneb, fulu, phase0} from "@lodestar/types"; -import {BlobAndProof} from "@lodestar/types/deneb"; -import {Logger, fromHex, toHex} from "@lodestar/utils"; -import { - BlobsSource, - BlockInput, - BlockInputBlobs, - BlockInputDataColumns, - BlockInputType, - BlockSource, - CachedBlobs, - CachedDataColumns, - DataColumnsSource, - NullBlockInput, - getBlockInput, - getBlockInputBlobs, - getBlockInputDataColumns, -} from "../../chain/blocks/types.js"; -import {ChainEventEmitter} from "../../chain/emitter.js"; -import {BlockInputAvailabilitySource} from "../../chain/seenCache/seenGossipBlockInput.js"; -import {IExecutionEngine} from "../../execution/index.js"; -import {Metrics} from "../../metrics/index.js"; -import {computeInclusionProof, kzgCommitmentToVersionedHash} from "../../util/blobs.js"; -import 
{getDataColumnsFromExecution} from "../../util/dataColumns.js"; -import {PeerIdStr} from "../../util/peerId.js"; -import {INetwork} from "../interface.js"; -import { - PartialDownload, - SyncSourceByRoot, - matchBlockWithBlobs, - matchBlockWithDataColumns, -} from "./beaconBlocksMaybeBlobsByRange.js"; - -// keep 1 epoch of stuff, assmume 16 blobs -const MAX_ENGINE_GETBLOBS_CACHE = 32 * 16; -const MAX_UNAVAILABLE_RETRY_CACHE = 32; - -export async function beaconBlocksMaybeBlobsByRoot( - config: ChainForkConfig, - network: INetwork, - peerId: PeerIdStr, - request: phase0.BeaconBlocksByRootRequest, - partialDownload: null | PartialDownload, - peerClient: string, - metrics: Metrics | null, - logger?: Logger -): Promise<{blocks: BlockInput[]; pendingDataColumns: null | number[]}> { - // console.log("beaconBlocksMaybeBlobsByRoot", request); - const allBlocks = partialDownload - ? partialDownload.blocks.map((blockInput) => ({data: blockInput.block})) - : await network.sendBeaconBlocksByRoot(peerId, request); - - logger?.debug("beaconBlocksMaybeBlobsByRoot response", {allBlocks: allBlocks.length, peerClient}); - - const preDataBlocks = []; - const blobsDataBlocks = []; - const dataColumnsDataBlocks = []; - - const sampledColumns = network.custodyConfig.sampledColumns; - const neededColumns = partialDownload ? partialDownload.pendingDataColumns : sampledColumns; - const {custodyGroups: peerColumns} = network.getConnectedPeerSyncMeta(peerId); - - // get match - const columns = peerColumns.reduce((acc, elem) => { - if (neededColumns.includes(elem)) { - acc.push(elem); - } - return acc; - }, [] as number[]); - let pendingDataColumns = null; - - const blobIdentifiers: deneb.BlobIdentifier[] = []; - const dataColumnsByRootIdentifiers: fulu.DataColumnsByRootIdentifier[] = []; - - let prevFork = null; - for (const block of allBlocks) { - const slot = block.data.message.slot; - const blockRoot = config.getForkTypes(slot).BeaconBlock.hashTreeRoot(block.data.message); - const fork = config.getForkName(slot); - if (fork !== (prevFork ?? fork)) { - throw Error("beaconBlocksMaybeBlobsByRoot only accepts requests of same fork"); - } - prevFork = fork; - - if (ForkSeq[fork] < ForkSeq.deneb) { - preDataBlocks.push(block); - } else if (fork === ForkName.deneb || fork === ForkName.electra) { - blobsDataBlocks.push(block); - const blobKzgCommitmentsLen = (block.data.message.body as deneb.BeaconBlockBody).blobKzgCommitments.length; - logger?.debug("beaconBlocksMaybeBlobsByRoot", {blobKzgCommitmentsLen, peerClient}); - for (let index = 0; index < blobKzgCommitmentsLen; index++) { - // try see if the blob is available locally - blobIdentifiers.push({blockRoot, index}); - } - } else if (fork === ForkName.fulu) { - dataColumnsDataBlocks.push(block); - const blobKzgCommitmentsLen = (block.data.message.body as deneb.BeaconBlockBody).blobKzgCommitments.length; - const custodyColumnIndexes = blobKzgCommitmentsLen > 0 ? 
columns : []; - if (custodyColumnIndexes.length > 0) { - dataColumnsByRootIdentifiers.push({ - blockRoot, - columns: custodyColumnIndexes, - }); - } - } else { - throw Error(`Invalid fork=${fork} in beaconBlocksMaybeBlobsByRoot`); - } - } - - let blockInputs = preDataBlocks.map((block) => getBlockInput.preData(config, block.data, BlockSource.byRoot)); - - if (blobsDataBlocks.length > 0) { - let allBlobSidecars: deneb.BlobSidecar[]; - if (blobIdentifiers.length > 0) { - allBlobSidecars = await network.sendBlobSidecarsByRoot(peerId, blobIdentifiers); - } else { - allBlobSidecars = []; - } - - // The last arg is to provide slot to which all blobs should be exausted in matching - // and here it should be infinity since all bobs should match - const blockInputWithBlobs = matchBlockWithBlobs( - config, - allBlocks, - allBlobSidecars, - Infinity, - BlockSource.byRoot, - BlobsSource.byRoot, - SyncSourceByRoot - ); - blockInputs = [...blockInputs, ...blockInputWithBlobs]; - } - - if (dataColumnsDataBlocks.length > 0) { - pendingDataColumns = neededColumns.reduce((acc, elem) => { - if (!columns.includes(elem)) { - acc.push(elem); - } - return acc; - }, [] as number[]); - - let allDataColumnsSidecars: fulu.DataColumnSidecar[]; - logger?.debug("allDataColumnsSidecars partialDownload", { - ...(partialDownload - ? {blocks: partialDownload.blocks.length, pendingDataColumns: partialDownload.pendingDataColumns.join(" ")} - : {blocks: null, pendingDataColumns: null}), - dataColumnIdentifiers: dataColumnsByRootIdentifiers - .map((id) => `${id.blockRoot}: ${id.columns.join(" ")}`) - .join(" "), - peerClient, - }); - if (dataColumnsByRootIdentifiers.length > 0) { - allDataColumnsSidecars = await network.sendDataColumnSidecarsByRoot(peerId, dataColumnsByRootIdentifiers); - } else { - if (partialDownload !== null) { - return partialDownload; - } - allDataColumnsSidecars = []; - } - - // The last arg is to provide slot to which all blobs should be exausted in matching - // and here it should be infinity since all bobs should match - // TODO: should not call matchBlockWithDataColumns() because it's supposed for range sync - // in that function, peers should return all requested data columns, this function runs at gossip time - // and it should not expect that - const blockInputWithBlobs = matchBlockWithDataColumns( - network, - peerId, - config, - sampledColumns, - columns, - allBlocks, - allDataColumnsSidecars, - Infinity, - BlockSource.byRoot, - DataColumnsSource.byRoot, - partialDownload, - peerClient, - SyncSourceByRoot, - metrics, - logger - ); - blockInputs = [...blockInputs, ...blockInputWithBlobs]; - } - - return { - blocks: blockInputs, - pendingDataColumns: pendingDataColumns && pendingDataColumns.length > 0 ? 
pendingDataColumns : null, - }; -} - -export async function unavailableBeaconBlobsByRoot( - config: ChainForkConfig, - network: INetwork, - peerId: PeerIdStr, - peerClient: string, - unavailableBlockInput: BlockInput | NullBlockInput, - opts: { - logger?: Logger; - metrics?: Metrics | null; - executionEngine: IExecutionEngine; - emitter: ChainEventEmitter; - engineGetBlobsCache?: Map; - blockInputsRetryTrackerCache?: Set; - } -): Promise { - if (unavailableBlockInput.block !== null && unavailableBlockInput.type !== BlockInputType.dataPromise) { - return unavailableBlockInput; - } - - // resolve the block if thats unavailable - let block: SignedBeaconBlock, cachedData: NullBlockInput["cachedData"]; - if (unavailableBlockInput.block === null) { - const allBlocks = await network.sendBeaconBlocksByRoot(peerId, [fromHex(unavailableBlockInput.blockRootHex)]); - block = allBlocks[0].data; - cachedData = unavailableBlockInput.cachedData; - unavailableBlockInput = getBlockInput.dataPromise(config, block, BlockSource.byRoot, cachedData); - // console.log( - // "downloaded sendBeaconBlocksByRoot", - // ssz.fulu.SignedBeaconBlock.toJson(block as fulu.SignedBeaconBlock) - // ); - } else { - ({block, cachedData} = unavailableBlockInput); - } - - const forkSeq = config.getForkSeq(block.message.slot); - - if (forkSeq < ForkSeq.fulu) { - return unavailableBeaconBlobsByRootPreFulu( - config, - network, - peerId, - unavailableBlockInput, - block, - cachedData as CachedBlobs, - opts - ); - } - - return unavailableBeaconBlobsByRootPostFulu( - config, - network, - peerId, - peerClient, - unavailableBlockInput, - block, - cachedData, - { - metrics: opts.metrics, - executionEngine: opts.executionEngine, - emitter: opts.emitter, - logger: opts.logger, - } - ); -} - -export async function unavailableBeaconBlobsByRootPreFulu( - config: ChainForkConfig, - network: INetwork, - peerId: PeerIdStr, - unavailableBlockInput: BlockInput | NullBlockInput, - block: SignedBeaconBlock, - cachedData: CachedBlobs, - opts: { - metrics?: Metrics | null; - emitter: ChainEventEmitter; - executionEngine: IExecutionEngine; - engineGetBlobsCache?: Map; - blockInputsRetryTrackerCache?: Set; - } -): Promise { - const {executionEngine, metrics, emitter, engineGetBlobsCache, blockInputsRetryTrackerCache} = opts; - if (unavailableBlockInput.block !== null && unavailableBlockInput.type !== BlockInputType.dataPromise) { - return unavailableBlockInput; - } - - // resolve missing blobs - const slot = block.message.slot; - const fork = config.getForkName(slot); - const blockRoot = config.getForkTypes(slot).BeaconBlock.hashTreeRoot(block.message); - const blockRootHex = toHexString(blockRoot); - - const blockTriedBefore = blockInputsRetryTrackerCache?.has(blockRootHex) === true; - if (blockTriedBefore) { - metrics?.blockInputFetchStats.totalDataPromiseBlockInputsReTriedBlobsPull.inc(); - } else { - metrics?.blockInputFetchStats.totalDataPromiseBlockInputsTriedBlobsPull.inc(); - blockInputsRetryTrackerCache?.add(blockRootHex); - } - - const blobKzgCommitmentsLen = (block.message.body as deneb.BeaconBlockBody).blobKzgCommitments.length; - const signedBlockHeader = signedBlockToSignedHeader(config, block); - - const engineReqIdentifiers: (deneb.BlobIdentifier & { - kzgCommitment: deneb.KZGCommitment; - versionedHash: Uint8Array; - })[] = []; - const networkReqIdentifiers: deneb.BlobIdentifier[] = []; - - let getBlobsUseful = false; - for (let index = 0; index < blobKzgCommitmentsLen; index++) { - if (cachedData.blobsCache.has(index) === false) { - 
const kzgCommitment = (block.message.body as deneb.BeaconBlockBody).blobKzgCommitments[index]; - const versionedHash = kzgCommitmentToVersionedHash(kzgCommitment); - - // check if the getblobs cache has the data if block not been queried before - if (engineGetBlobsCache?.has(toHexString(versionedHash)) === true && !blockTriedBefore) { - const catchedBlobAndProof = engineGetBlobsCache.get(toHexString(versionedHash)) ?? null; - if (catchedBlobAndProof === null) { - metrics?.blockInputFetchStats.dataPromiseBlobsFoundInGetBlobsCacheNull.inc(); - networkReqIdentifiers.push({blockRoot, index}); - } else { - metrics?.blockInputFetchStats.dataPromiseBlobsFoundInGetBlobsCacheNotNull.inc(); - // compute TODO: also add inclusion proof cache - const {blob, proof: kzgProof} = catchedBlobAndProof; - const kzgCommitmentInclusionProof = computeInclusionProof(fork, block.message.body, index); - const blobSidecar = {index, blob, kzgCommitment, kzgProof, signedBlockHeader, kzgCommitmentInclusionProof}; - cachedData.blobsCache.set(blobSidecar.index, blobSidecar); - } - } else if (blockTriedBefore) { - // only retry it from network - networkReqIdentifiers.push({blockRoot, index}); - } else { - // see if we can pull from EL - metrics?.blockInputFetchStats.dataPromiseBlobsNotAvailableInGetBlobsCache.inc(); - engineReqIdentifiers.push({blockRoot, index, versionedHash, kzgCommitment}); - } - } else { - metrics?.blockInputFetchStats.dataPromiseBlobsAlreadyAvailable.inc(); - } - } - - if (engineReqIdentifiers.length > 0) { - metrics?.blockInputFetchStats.totalDataPromiseBlockInputsTriedGetBlobs.inc(); - } - const versionedHashes = engineReqIdentifiers.map((bi) => bi.versionedHash); - metrics?.blockInputFetchStats.dataPromiseBlobsEngineGetBlobsApiRequests.inc(versionedHashes.length); - - const blobAndProofs = await executionEngine.getBlobs(ForkName.deneb, versionedHashes).catch((_e) => { - metrics?.blockInputFetchStats.dataPromiseBlobsEngineApiGetBlobsErroredNull.inc(versionedHashes.length); - return versionedHashes.map((_vh) => null); - }); - - for (let j = 0; j < versionedHashes.length; j++) { - const blobAndProof = blobAndProofs[j] ?? null; - const versionedHash = versionedHashes[j]; - // save to cache for future reference - engineGetBlobsCache?.set(toHexString(versionedHash), blobAndProof); - if (blobAndProof !== null) { - metrics?.blockInputFetchStats.dataPromiseBlobsEngineGetBlobsApiNotNull.inc(); - - // if we already got it by now, save the compute - if (cachedData.blobsCache.has(engineReqIdentifiers[j].index) === false) { - metrics?.blockInputFetchStats.dataPromiseBlobsEngineApiGetBlobsUseful.inc(); - getBlobsUseful = true; - const {blob, proof: kzgProof} = blobAndProof; - const {kzgCommitment, index} = engineReqIdentifiers[j]; - const kzgCommitmentInclusionProof = computeInclusionProof(fork, block.message.body, index); - const blobSidecar = {index, blob, kzgCommitment, kzgProof, signedBlockHeader, kzgCommitmentInclusionProof}; - // add them in cache so that its reflected in all the blockInputs that carry this - // for e.g. 
a blockInput that might be awaiting blobs promise fullfillment in - // verifyBlocksDataAvailability - cachedData.blobsCache.set(blobSidecar.index, blobSidecar); - - if (emitter.listenerCount(routes.events.EventType.blobSidecar)) { - emitter.emit(routes.events.EventType.blobSidecar, { - blockRoot: blockRootHex, - slot, - index, - kzgCommitment: toHex(kzgCommitment), - versionedHash: toHex(versionedHash), - }); - } - } else { - metrics?.blockInputFetchStats.dataPromiseBlobsDelayedGossipAvailable.inc(); - metrics?.blockInputFetchStats.dataPromiseBlobsDelayedGossipAvailableSavedGetBlobsCompute.inc(); - } - } - // may be blobsidecar arrived in the timespan of making the request - else { - metrics?.blockInputFetchStats.dataPromiseBlobsEngineGetBlobsApiNull.inc(); - if (cachedData.blobsCache.has(engineReqIdentifiers[j].index) === false) { - const {blockRoot, index} = engineReqIdentifiers[j]; - networkReqIdentifiers.push({blockRoot, index}); - } else { - metrics?.blockInputFetchStats.dataPromiseBlobsDelayedGossipAvailable.inc(); - } - } - } - - if (engineGetBlobsCache !== undefined) { - // prune out engineGetBlobsCache - let pruneLength = Math.max(0, engineGetBlobsCache?.size - MAX_ENGINE_GETBLOBS_CACHE); - for (const key of engineGetBlobsCache.keys()) { - if (pruneLength <= 0) break; - engineGetBlobsCache.delete(key); - pruneLength--; - metrics?.blockInputFetchStats.getBlobsCachePruned.inc(); - } - metrics?.blockInputFetchStats.getBlobsCacheSize.set(engineGetBlobsCache.size); - } - if (blockInputsRetryTrackerCache !== undefined) { - // prune out engineGetBlobsCache - let pruneLength = Math.max(0, blockInputsRetryTrackerCache?.size - MAX_UNAVAILABLE_RETRY_CACHE); - for (const key of blockInputsRetryTrackerCache.keys()) { - if (pruneLength <= 0) break; - blockInputsRetryTrackerCache.delete(key); - pruneLength--; - metrics?.blockInputFetchStats.dataPromiseBlockInputRetryTrackerCachePruned.inc(); - } - metrics?.blockInputFetchStats.dataPromiseBlockInputRetryTrackerCacheSize.set(blockInputsRetryTrackerCache.size); - } - - // if clients expect sorted identifiers - networkReqIdentifiers.sort((a, b) => a.index - b.index); - let networkResBlobSidecars: deneb.BlobSidecar[]; - metrics?.blockInputFetchStats.dataPromiseBlobsFinallyQueriedFromNetwork.inc(networkReqIdentifiers.length); - if (blockTriedBefore) { - metrics?.blockInputFetchStats.dataPromiseBlobsRetriedFromNetwork.inc(networkReqIdentifiers.length); - } - - if (networkReqIdentifiers.length > 0) { - networkResBlobSidecars = await network.sendBlobSidecarsByRoot(peerId, networkReqIdentifiers); - metrics?.blockInputFetchStats.dataPromiseBlobsFinallyAvailableFromNetwork.inc(networkResBlobSidecars.length); - if (blockTriedBefore) { - metrics?.blockInputFetchStats.dataPromiseBlobsRetriedAvailableFromNetwork.inc(networkResBlobSidecars.length); - } - } else { - networkResBlobSidecars = []; - } - - // add them in cache so that its reflected in all the blockInputs that carry this - // for e.g. 
a blockInput that might be awaiting blobs promise fullfillment in - // verifyBlocksDataAvailability - for (const blobSidecar of networkResBlobSidecars) { - cachedData.blobsCache.set(blobSidecar.index, blobSidecar); - - if (emitter.listenerCount(routes.events.EventType.blobSidecar)) { - emitter.emit(routes.events.EventType.blobSidecar, { - blockRoot: blockRootHex, - slot, - index: blobSidecar.index, - kzgCommitment: toHex(blobSidecar.kzgCommitment), - versionedHash: toHex(kzgCommitmentToVersionedHash(blobSidecar.kzgCommitment)), - }); - } - } - - // check and see if all blobs are now available and in that case resolve availability - // if not this will error and the leftover blobs will be tried from another peer - const allBlobs = getBlockInputBlobs(cachedData.blobsCache); - const {blobs} = allBlobs; - if (blobs.length !== blobKzgCommitmentsLen) { - throw Error(`Not all blobs fetched missingBlobs=${blobKzgCommitmentsLen - blobs.length}`); - } - const blockData = {fork: cachedData.fork, ...allBlobs, blobsSource: BlobsSource.byRoot} as BlockInputBlobs; - cachedData.resolveAvailability(blockData); - metrics?.syncUnknownBlock.resolveAvailabilitySource.inc({source: BlockInputAvailabilitySource.UNKNOWN_SYNC}); - - metrics?.blockInputFetchStats.totalDataPromiseBlockInputsResolvedAvailable.inc(); - if (getBlobsUseful) { - metrics?.blockInputFetchStats.totalDataPromiseBlockInputsAvailableUsingGetBlobs.inc(); - if (networkReqIdentifiers.length === 0) { - metrics?.blockInputFetchStats.totalDataPromiseBlockInputsAvailableFromGetBlobs.inc(); - } - } - if (networkResBlobSidecars.length > 0) { - metrics?.blockInputFetchStats.totalDataPromiseBlockInputsFinallyAvailableFromNetworkReqResp.inc(); - } - if (blockTriedBefore) { - metrics?.blockInputFetchStats.totalDataPromiseBlockInputsRetriedAvailableFromNetwork.inc(); - } - - return getBlockInput.availableData(config, block, BlockSource.byRoot, blockData); -} - -/** - * Download more columns for a BlockInput - * - unavailableBlockInput should have block, but not enough blobs (deneb) or data columns (fulu) - * - * This function may return data promise, and consumer should continue with fetching more blobs or columns from other peers - * see UnknownBlockSync.fetchUnavailableBlockInput() - */ -export async function unavailableBeaconBlobsByRootPostFulu( - config: ChainForkConfig, - network: INetwork, - peerId: PeerIdStr, - peerClient: string, - unavailableBlockInput: BlockInput, - block: SignedBeaconBlock, - cachedData: NullBlockInput["cachedData"], - opts: { - metrics?: Metrics | null; - executionEngine: IExecutionEngine; - emitter: ChainEventEmitter; - logger?: Logger; - } -): Promise { - if (unavailableBlockInput.block !== null && unavailableBlockInput.type !== BlockInputType.dataPromise) { - return unavailableBlockInput; - } - - if (cachedData.fork === ForkName.deneb || cachedData.fork === ForkName.electra) { - const {blobsCache, resolveAvailability} = cachedData; - - // resolve missing blobs - const blobIdentifiers: deneb.BlobIdentifier[] = []; - const slot = block.message.slot; - const blockRoot = config.getForkTypes(slot).BeaconBlock.hashTreeRoot(block.message); - - const blobKzgCommitmentsLen = (block.message.body as deneb.BeaconBlockBody).blobKzgCommitments.length; - for (let index = 0; index < blobKzgCommitmentsLen; index++) { - if (blobsCache.has(index) === false) blobIdentifiers.push({blockRoot, index}); - } - - let allBlobSidecars: deneb.BlobSidecar[]; - if (blobIdentifiers.length > 0) { - allBlobSidecars = await 
network.sendBlobSidecarsByRoot(peerId, blobIdentifiers); - } else { - allBlobSidecars = []; - } - - // add them in cache so that its reflected in all the blockInputs that carry this - // for e.g. a blockInput that might be awaiting blobs promise fullfillment in - // verifyBlocksDataAvailability - for (const blobSidecar of allBlobSidecars) { - blobsCache.set(blobSidecar.index, blobSidecar); - } - - // check and see if all blobs are now available and in that case resolve availability - // if not this will error and the leftover blobs will be tried from another peer - const allBlobs = getBlockInputBlobs(blobsCache); - const {blobs} = allBlobs; - if (blobs.length !== blobKzgCommitmentsLen) { - throw Error(`Not all blobs fetched missingBlobs=${blobKzgCommitmentsLen - blobs.length}`); - } - const blockData = {fork: cachedData.fork, ...allBlobs, blobsSource: BlobsSource.byRoot} as BlockInputBlobs; - resolveAvailability(blockData); - opts.metrics?.syncUnknownBlock.resolveAvailabilitySource.inc({source: BlockInputAvailabilitySource.UNKNOWN_SYNC}); - return getBlockInput.availableData(config, block, BlockSource.byRoot, blockData); - } - - // fulu fork - const {dataColumnsCache, resolveAvailability} = cachedData as CachedDataColumns; - - // resolve missing blobs - const slot = block.message.slot; - const blockRoot = config.getForkTypes(slot).BeaconBlock.hashTreeRoot(block.message); - - const blobKzgCommitments = (block.message.body as deneb.BeaconBlockBody).blobKzgCommitments; - if (blobKzgCommitments.length === 0) { - const blockData = { - fork: cachedData.fork, - dataColumns: [], - dataColumnsBytes: [], - dataColumnsSource: DataColumnsSource.gossip, - } as BlockInputDataColumns; - - resolveAvailability(blockData); - opts.metrics?.syncUnknownBlock.resolveAvailabilitySource.inc({source: BlockInputAvailabilitySource.UNKNOWN_SYNC}); - return getBlockInput.availableData(config, block, BlockSource.byRoot, blockData); - } - - const sampledColumns = network.custodyConfig.sampledColumns; - let neededColumns = sampledColumns.reduce((acc, elem) => { - if (dataColumnsCache.get(elem) === undefined) { - acc.push(elem); - } - return acc; - }, [] as number[]); - - let resolveBlockInput: ((block: BlockInput) => void) | null = null; - const blockInputPromise = new Promise((resolveCB) => { - resolveBlockInput = resolveCB; - }); - if (resolveBlockInput === null) { - throw Error("Promise Constructor was not executed immediately"); - } - - const gotColumnsFromExecution = await getDataColumnsFromExecution( - config, - network.custodyConfig, - opts.executionEngine, - opts.emitter, - { - fork: config.getForkName(block.message.slot), - block: block, - cachedData: cachedData, - blockInputPromise, - resolveBlockInput, - }, - opts.metrics ?? 
null - ); - - if (!gotColumnsFromExecution) { - const {custodyGroups: peerColumns} = network.getConnectedPeerSyncMeta(peerId); - - // get match - const columns = peerColumns.reduce((acc, elem) => { - if (neededColumns.includes(elem)) { - acc.push(elem); - } - return acc; - }, [] as number[]); - - // this peer can't help fetching columns for this block - if (unavailableBlockInput.block !== null && columns.length === 0 && neededColumns.length > 0) { - return unavailableBlockInput; - } - - let allDataColumnSidecars: fulu.DataColumnSidecar[]; - if (columns.length > 0) { - allDataColumnSidecars = await network.sendDataColumnSidecarsByRoot(peerId, [{blockRoot, columns}]); - opts.metrics?.dataColumns.bySource.inc({source: DataColumnsSource.byRoot}, allDataColumnSidecars.length); - } else { - allDataColumnSidecars = []; - } - - const logCtx = { - slot: block.message.slot, - requestedColumns: columns.join(","), - respondedColumns: allDataColumnSidecars.map((dcs) => dcs.index).join(","), - peerClient, - }; - - opts.logger?.verbose("unavailableBeaconBlobsByRootPostFulu: Requested data columns from peer", logCtx); - - // the same to matchBlockWithDataColumns() without expecting requested data columns = responded data columns - // because at gossip time peer may not have enough column to return - for (const dataColumnSidecar of allDataColumnSidecars) { - dataColumnsCache.set(dataColumnSidecar.index, { - dataColumn: dataColumnSidecar, - // TODO: req/resp should return bytes here - dataColumnBytes: null, - }); - } - } - - // reevaluate needeColumns and resolve availability if possible - neededColumns = sampledColumns.reduce((acc, elem) => { - if (dataColumnsCache.get(elem) === undefined) { - acc.push(elem); - } - return acc; - }, [] as number[]); - - const logCtx = { - slot: block.message.slot, - neededColumns: neededColumns.join(","), - sampledColumns: sampledColumns.join(","), - }; - - if (neededColumns.length === 0) { - const {dataColumns, dataColumnsBytes} = getBlockInputDataColumns( - (cachedData as CachedDataColumns).dataColumnsCache, - sampledColumns - ); - - // don't forget to resolve availability as the block may be stuck in availability wait - const blockData = { - fork: config.getForkName(block.message.slot), - dataColumns, - dataColumnsBytes, - dataColumnsSource: DataColumnsSource.byRoot, - } as BlockInputDataColumns; - resolveAvailability(blockData); - opts.logger?.verbose( - "unavailableBeaconBlobsByRootPostFulu: Resolved availability for block with all data columns", - logCtx - ); - return getBlockInput.availableData(config, block, BlockSource.byRoot, blockData); - } - opts.logger?.verbose("unavailableBeaconBlobsByRootPostFulu: Still missing data columns for block", logCtx); - return getBlockInput.dataPromise(config, block, BlockSource.byRoot, cachedData); -} diff --git a/packages/beacon-node/src/network/reqresp/index.ts b/packages/beacon-node/src/network/reqresp/index.ts index dfce5c426c8c..033834c4eadf 100644 --- a/packages/beacon-node/src/network/reqresp/index.ts +++ b/packages/beacon-node/src/network/reqresp/index.ts @@ -1,4 +1,2 @@ export * from "./ReqRespBeaconNode.js"; export * from "./interface.js"; -export * from "./beaconBlocksMaybeBlobsByRange.js"; -export * from "./beaconBlocksMaybeBlobsByRoot.js"; From e3a093bcf17571bac96f8dbddaf261565f51be96 Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Tue, 19 Aug 2025 01:04:14 +0700 Subject: [PATCH 024/173] feat: switch "unknown*" event types from Network to Chain events --- .../src/api/impl/beacon/blocks/index.ts | 5 +-- 
packages/beacon-node/src/chain/emitter.ts | 28 +++++++++++++++- packages/beacon-node/src/network/events.ts | 9 ----- .../src/network/processor/gossipHandlers.ts | 33 ++++++++++++------- .../src/network/processor/index.ts | 3 +- packages/beacon-node/src/sync/unknownBlock.ts | 20 +++++------ 6 files changed, 63 insertions(+), 35 deletions(-) diff --git a/packages/beacon-node/src/api/impl/beacon/blocks/index.ts b/packages/beacon-node/src/api/impl/beacon/blocks/index.ts index 2461ac15245a..77838e4d73be 100644 --- a/packages/beacon-node/src/api/impl/beacon/blocks/index.ts +++ b/packages/beacon-node/src/api/impl/beacon/blocks/index.ts @@ -32,6 +32,7 @@ import {BlockInputSource, isBlockInputBlobs, isBlockInputColumns} from "../../.. import {ImportBlockOpts} from "../../../../chain/blocks/types.js"; import {verifyBlocksInEpoch} from "../../../../chain/blocks/verifyBlock.js"; import {BeaconChain} from "../../../../chain/chain.js"; +import {ChainEvent} from "../../../../chain/emitter.js"; import {BlockError, BlockErrorCode, BlockGossipError} from "../../../../chain/errors/index.js"; import {ProduceFullBellatrix, ProduceFullDeneb, ProduceFullFulu} from "../../../../chain/produceBlock/index.js"; import {validateGossipBlock} from "../../../../chain/validation/block.js"; @@ -175,7 +176,7 @@ export function getBeaconBlockApi({ if (!blockLocallyProduced) { const parentBlock = chain.forkChoice.getBlock(signedBlock.message.parentRoot); if (parentBlock === null) { - network.events.emit(NetworkEvent.unknownBlockParent, { + chain.emitter.emit(ChainEvent.unknownParent, { blockInput: blockForImport, peer: IDENTITY_PEER_ID, }); @@ -269,7 +270,7 @@ export function getBeaconBlockApi({ .processBlock(blockForImport, {...opts, eagerPersistBlock: false}) .catch((e) => { if (e instanceof BlockError && e.type.code === BlockErrorCode.PARENT_UNKNOWN) { - network.events.emit(NetworkEvent.unknownBlockParent, { + chain.emitter.emit(ChainEvent.unknownParent, { blockInput: blockForImport, peer: IDENTITY_PEER_ID, }); diff --git a/packages/beacon-node/src/chain/emitter.ts b/packages/beacon-node/src/chain/emitter.ts index 35eef5c0759a..f95ceb1802f5 100644 --- a/packages/beacon-node/src/chain/emitter.ts +++ b/packages/beacon-node/src/chain/emitter.ts @@ -4,7 +4,9 @@ import {StrictEventEmitter} from "strict-event-emitter-types"; import {routes} from "@lodestar/api"; import {CheckpointWithHex} from "@lodestar/fork-choice"; import {CachedBeaconStateAllForks} from "@lodestar/state-transition"; -import {fulu, phase0} from "@lodestar/types"; +import {RootHex, fulu, phase0} from "@lodestar/types"; +import {PeerIdStr} from "../util/peerId.js"; +import {IBlockInput} from "./blocks/blockInput/types.js"; /** * Important chain events that occur during normal chain operation. 
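A minimal sketch of consuming the strictly typed chain-scoped events this commit introduces. The emitter, event, and payload names follow the hunk below; the function and its handler body are hypothetical:

import {ChainEvent, ChainEventData, ChainEventEmitter} from "./emitter.js";

// Hypothetical subscriber: IChainEvents maps each ChainEvent to a typed callback,
// so a handler with the wrong payload shape fails at compile time, not at runtime.
function watchUnknownRoots(emitter: ChainEventEmitter): void {
  emitter.on(ChainEvent.unknownBlockRoot, (data: ChainEventData[ChainEvent.unknownBlockRoot]) => {
    // rootHex is always present; peer is optional when the trigger was local (e.g. the API)
    console.log(`unknown block root=${data.rootHex} peer=${data.peer ?? "self"}`);
  });
}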
@@ -47,6 +49,18 @@ export enum ChainEvent {
    * Trigger an update of status so reqresp by peers have current earliestAvailableSlot
    */
   updateStatus = "updateStatus",
+  /**
+   * A block was received (via gossip or the API) whose parent root is not known to the fork-choice
+   */
+  unknownParent = "unknownParent",
+  /**
+   * A block root was referenced (e.g. by an attestation) but the block itself has not been seen
+   */
+  unknownBlockRoot = "unknownBlockRoot",
+  /**
+   * A block was received but not all of its blobs/data columns have arrived within the cutoff
+   */
+  incompleteBlockInput = "incompleteBlockInput",
 }
 
 export type HeadEventData = routes.events.EventData[routes.events.EventType.head];
@@ -55,6 +69,12 @@ export type ReorgEventData = routes.events.EventData[routes.events.EventType.cha
 
 // API events are emitted through the same ChainEventEmitter for re-use internally
 type ApiEvents = {[K in routes.events.EventType]: (data: routes.events.EventData[K]) => void};
 
+export type ChainEventData = {
+  [ChainEvent.unknownParent]: {blockInput: IBlockInput; peer: PeerIdStr};
+  [ChainEvent.unknownBlockRoot]: {rootHex: RootHex; peer?: PeerIdStr};
+  [ChainEvent.incompleteBlockInput]: {blockInput: IBlockInput; peer: PeerIdStr};
+};
+
 export type IChainEvents = ApiEvents & {
   [ChainEvent.checkpoint]: (checkpoint: phase0.Checkpoint, state: CachedBeaconStateAllForks) => void;
 
@@ -66,6 +86,12 @@ export type IChainEvents = ApiEvents & {
   [ChainEvent.publishDataColumns]: (sidecars: fulu.DataColumnSidecar[]) => void;
 
   [ChainEvent.updateStatus]: () => void;
+
+  // Sync events that are chain->chain. Initiated from network requests but do not cross the network
+  // barrier so are considered ChainEvent(s).
+  [ChainEvent.unknownParent]: (data: ChainEventData[ChainEvent.unknownParent]) => void;
+  [ChainEvent.unknownBlockRoot]: (data: ChainEventData[ChainEvent.unknownBlockRoot]) => void;
+  [ChainEvent.incompleteBlockInput]: (data: ChainEventData[ChainEvent.incompleteBlockInput]) => void;
 };
 
 /**
diff --git a/packages/beacon-node/src/network/events.ts b/packages/beacon-node/src/network/events.ts
index d8fc63dc3b13..7e0944495f4d 100644
--- a/packages/beacon-node/src/network/events.ts
+++ b/packages/beacon-node/src/network/events.ts
@@ -14,10 +14,6 @@ export enum NetworkEvent {
   /** A peer has been disconnected */
   peerDisconnected = "peer-manager.peer-disconnected",
   reqRespRequest = "req-resp.request",
-  // TODO remove this event, this is not a network-level concern, rather a chain / sync concern
-  unknownBlockParent = "unknownBlockParent",
-  unknownBlock = "unknownBlock",
-  unknownBlockInput = "unknownBlockInput",
 
   // Network processor events
   /** (Network -> App) A gossip message is ready for validation */
@@ -35,8 +31,6 @@ export type NetworkEventData = {
   };
   [NetworkEvent.peerDisconnected]: {peer: PeerIdStr};
   [NetworkEvent.reqRespRequest]: {request: RequestTypedContainer; peer: PeerId};
-  [NetworkEvent.unknownBlockInput]: {blockInput: BlockInput; peer: PeerIdStr};
-  [NetworkEvent.unknownBlock]: {rootHex: RootHex; peer?: PeerIdStr};
   [NetworkEvent.pendingGossipsubMessage]: PendingGossipsubMessage;
   [NetworkEvent.gossipMessageValidationResult]: {
     msgId: string;
@@ -49,9 +43,6 @@ export const networkEventDirection: Record = {
   [NetworkEvent.peerConnected]: EventDirection.workerToMain,
   [NetworkEvent.peerDisconnected]: EventDirection.workerToMain,
   [NetworkEvent.reqRespRequest]: EventDirection.none, // Only used internally in NetworkCore
-  [NetworkEvent.unknownBlockParent]: EventDirection.workerToMain,
-  [NetworkEvent.unknownBlock]: EventDirection.workerToMain,
-  [NetworkEvent.unknownBlockInput]: EventDirection.workerToMain,
   [NetworkEvent.pendingGossipsubMessage]: EventDirection.workerToMain,
   [NetworkEvent.gossipMessageValidationResult]: EventDirection.mainToWorker,
 };
diff --git
a/packages/beacon-node/src/network/processor/gossipHandlers.ts b/packages/beacon-node/src/network/processor/gossipHandlers.ts index 246097caae93..b4603e17def4 100644 --- a/packages/beacon-node/src/network/processor/gossipHandlers.ts +++ b/packages/beacon-node/src/network/processor/gossipHandlers.ts @@ -17,6 +17,7 @@ import { import {LogLevel, Logger, prettyBytes, toHex, toRootHex} from "@lodestar/utils"; import {BlockInput, BlockInputSource} from "../../chain/blocks/blockInput/index.js"; import {BlobSidecarValidation} from "../../chain/blocks/types.js"; +import {ChainEvent} from "../../chain/emitter.js"; import { AttestationError, AttestationErrorCode, @@ -165,10 +166,12 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand return blockInput; } catch (e) { if (e instanceof BlockGossipError) { + // TODO(fulu): check that this is the only error that should trigger resolution of the block and all others + // cause the block to get thrown away // Don't trigger this yet if full block and blobs haven't arrived yet if (e.type.code === BlockErrorCode.PARENT_UNKNOWN && blockInput !== null) { logger.debug("Gossip block has error", {slot, root: blockShortHex, code: e.type.code}); - events.emit(NetworkEvent.unknownBlockInput, {blockInput, peer: peerIdStr}); + chain.emitter.emit(ChainEvent.incompleteBlockInput, {blockInput, peer: peerIdStr}); // throw error (don't prune the blockInput) throw e; } @@ -338,7 +341,7 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand ...blockInput.getLogMeta(), }); // The data is not yet fully available, immediately trigger an aggressive pull via unknown block sync - events.emit(NetworkEvent.unknownBlockInput, {blockInput, peer: peerIdStr}); + chain.emitter.emit(ChainEvent.incompleteBlockInput, {blockInput, peer: peerIdStr}); } else { metrics?.blockInputFetchStats.totalDataAvailableBlockInputs.inc(); metrics?.blockInputFetchStats.totalDataAvailableBlockInputBlobs.inc( @@ -448,11 +451,14 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand ...blockInput.getLogMeta(), }); blockInput.waitForAllData(cutoffTimeMs).catch((_e) => { - chain.logger.debug("Received gossip blob, attempting fetch of unavailable data", { - blobIndex: index, - ...blockInput.getLogMeta(), - }); - events.emit(NetworkEvent.unknownBlockInput, {blockInput, peer: peerIdStr}); + chain.logger.debug( + "Waited for data after receiving gossip blob. Cut-off reached so attempting to fetch remainder of BlockInput", + { + blobIndex: index, + ...blockInput.getLogMeta(), + } + ); + chain.emitter.emit(ChainEvent.incompleteBlockInput, {blockInput, peer: peerIdStr}); }); } }, @@ -488,11 +494,14 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand ...blockInput.getLogMeta(), }); blockInput.waitForAllData(cutoffTimeMs).catch((_e) => { - chain.logger.debug("Received gossip data column, attempting fetch of unavailable data", { - dataColumnIndex: index, - ...blockInput.getLogMeta(), - }); - events.emit(NetworkEvent.unknownBlockInput, {blockInput, peer: peerIdStr}); + chain.logger.debug( + "Waited for data after receiving gossip column. 
Cut-off reached so attempting to fetch remainder of BlockInput", + { + dataColumnIndex: index, + ...blockInput.getLogMeta(), + } + ); + chain.emitter.emit(ChainEvent.incompleteBlockInput, {blockInput, peer: peerIdStr}); }); } }, diff --git a/packages/beacon-node/src/network/processor/index.ts b/packages/beacon-node/src/network/processor/index.ts index db0596b3fe57..032e66ec4843 100644 --- a/packages/beacon-node/src/network/processor/index.ts +++ b/packages/beacon-node/src/network/processor/index.ts @@ -24,6 +24,7 @@ import {GossipHandlerOpts, ValidatorFnsModules, getGossipHandlers} from "./gossi import {createGossipQueues} from "./gossipQueues/index.js"; import {ValidatorFnModules, getGossipValidatorBatchFn, getGossipValidatorFn} from "./gossipValidatorFn.js"; import {PendingGossipsubMessage} from "./types.js"; +import {ChainEvent} from "../../chain/emitter.js"; export * from "./types.js"; @@ -235,7 +236,7 @@ export class NetworkProcessor { } // Search for the unknown block this.unknownRootsBySlot.getOrDefault(slot).add(root); - this.events.emit(NetworkEvent.unknownBlock, {rootHex: root, peer}); + this.chain.emitter.emit(ChainEvent.unknownBlockRoot, {rootHex: root, peer}); } private onPendingGossipsubMessage(message: PendingGossipsubMessage): void { diff --git a/packages/beacon-node/src/sync/unknownBlock.ts b/packages/beacon-node/src/sync/unknownBlock.ts index 5d8216ff40d4..686576e28526 100644 --- a/packages/beacon-node/src/sync/unknownBlock.ts +++ b/packages/beacon-node/src/sync/unknownBlock.ts @@ -6,7 +6,7 @@ import {Logger, fromHex, pruneSetToMax, toRootHex} from "@lodestar/utils"; import {sleep} from "@lodestar/utils"; import {BlockInput, BlockInputType, CachedDataColumns, NullBlockInput} from "../chain/blocks/types.js"; import {BlockError, BlockErrorCode} from "../chain/errors/index.js"; -import {IBeaconChain} from "../chain/index.js"; +import {ChainEvent, ChainEventData, IBeaconChain} from "../chain/index.js"; import {Metrics} from "../metrics/index.js"; import {INetwork, NetworkEvent, NetworkEventData} from "../network/index.js"; import { @@ -64,9 +64,9 @@ export class UnknownBlockSync { // cannot chain to the above if or the log will be incorrect if (!this.subscribedToNetworkEvents) { this.logger.verbose("UnknownBlockSync enabled."); - this.network.events.on(NetworkEvent.unknownBlock, this.onUnknownBlock); - this.network.events.on(NetworkEvent.unknownBlockInput, this.onUnknownBlockInput); - this.network.events.on(NetworkEvent.unknownBlockParent, this.onUnknownParent); + this.chain.emitter.on(ChainEvent.unknownBlockRoot, this.onUnknownBlock); + this.chain.emitter.on(ChainEvent.incompleteBlockInput, this.onUnknownBlockInput); + this.chain.emitter.on(ChainEvent.unknownParent, this.onUnknownParent); this.network.events.on(NetworkEvent.peerConnected, this.triggerUnknownBlockSearch); this.subscribedToNetworkEvents = true; } @@ -77,9 +77,9 @@ export class UnknownBlockSync { unsubscribeFromNetwork(): void { this.logger.verbose("UnknownBlockSync disabled."); - this.network.events.off(NetworkEvent.unknownBlock, this.onUnknownBlock); - this.network.events.off(NetworkEvent.unknownBlockInput, this.onUnknownBlockInput); - this.network.events.off(NetworkEvent.unknownBlockParent, this.onUnknownParent); + this.chain.emitter.off(ChainEvent.unknownBlockRoot, this.onUnknownBlock); + this.chain.emitter.off(ChainEvent.incompleteBlockInput, this.onUnknownBlockInput); + this.chain.emitter.off(ChainEvent.unknownParent, this.onUnknownParent); this.network.events.off(NetworkEvent.peerConnected, 
this.triggerUnknownBlockSearch); this.subscribedToNetworkEvents = false; } @@ -96,7 +96,7 @@ export class UnknownBlockSync { /** * Process an unknownBlock event and register the block in `pendingBlocks` Map. */ - private onUnknownBlock = (data: NetworkEventData[NetworkEvent.unknownBlock]): void => { + private onUnknownBlock = (data: ChainEventData[ChainEvent.unknownBlockRoot]): void => { try { const unknownBlockType = this.addUnknownBlock(data.rootHex, data.peer); this.triggerUnknownBlockSearch(); @@ -109,7 +109,7 @@ export class UnknownBlockSync { /** * Process an unknownBlockInput event and register the block in `pendingBlocks` Map. */ - private onUnknownBlockInput = (data: NetworkEventData[NetworkEvent.unknownBlockInput]): void => { + private onUnknownBlockInput = (data: ChainEventData[ChainEvent.incompleteBlockInput]): void => { try { const unknownBlockType = this.addUnknownBlock(data.blockInput, data.peer); this.triggerUnknownBlockSearch(); @@ -122,7 +122,7 @@ export class UnknownBlockSync { /** * Process an unknownBlockParent event and register the block in `pendingBlocks` Map. */ - private onUnknownParent = (data: NetworkEventData[NetworkEvent.unknownBlockParent]): void => { + private onUnknownParent = (data: ChainEventData[ChainEvent.unknownParent]): void => { try { this.addUnknownParent(data.blockInput, data.peer); this.triggerUnknownBlockSearch(); From 335891607ede5ff348e33481a9c79a62ab0bf52d Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Tue, 19 Aug 2025 04:03:27 +0700 Subject: [PATCH 025/173] fix: type issue in SeenBlockInput --- .../beacon-node/src/chain/seenCache/seenGossipBlockInput.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/beacon-node/src/chain/seenCache/seenGossipBlockInput.ts b/packages/beacon-node/src/chain/seenCache/seenGossipBlockInput.ts index 89c424c05d27..640ad1fbc26c 100644 --- a/packages/beacon-node/src/chain/seenCache/seenGossipBlockInput.ts +++ b/packages/beacon-node/src/chain/seenCache/seenGossipBlockInput.ts @@ -1,6 +1,6 @@ import {ChainForkConfig} from "@lodestar/config"; import {CheckpointWithHex} from "@lodestar/fork-choice"; -import {ForkName, isForkPostDeneb, isForkPostFulu} from "@lodestar/params"; +import {ForkName, ForkPostFulu, isForkPostDeneb, isForkPostFulu} from "@lodestar/params"; import {computeStartSlotAtEpoch} from "@lodestar/state-transition"; import {RootHex, SignedBeaconBlock, Slot, deneb, fulu} from "@lodestar/types"; import {LodestarError, Logger, toRootHex} from "@lodestar/utils"; @@ -165,7 +165,7 @@ export class SeenBlockInput { }); } else if (isForkPostFulu(forkName)) { blockInput = BlockInputColumns.createFromBlock({ - block, + block: block as SignedBeaconBlock, blockRootHex, daOutOfRange, forkName, From b116bfb09075e8eff3e110c77e7aff10c3c18ca5 Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Tue, 19 Aug 2025 04:05:14 +0700 Subject: [PATCH 026/173] refactor: change name from UnknownBlock and options to BlockInputSync --- packages/beacon-node/src/sync/options.ts | 2 +- packages/beacon-node/src/sync/sync.ts | 10 +++++----- .../beacon-node/test/unit/sync/unknownBlock.test.ts | 8 ++++---- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/packages/beacon-node/src/sync/options.ts b/packages/beacon-node/src/sync/options.ts index 7afd624b9d2f..a7248f42e338 100644 --- a/packages/beacon-node/src/sync/options.ts +++ b/packages/beacon-node/src/sync/options.ts @@ -15,7 +15,7 @@ export type SyncOptions = { /** USE FOR TESTING ONLY. 
Disable range sync completely */ disableRangeSync?: boolean; /** USE FOR TESTING ONLY. Disable unknown block sync completely */ - disableUnknownBlockSync?: boolean; + disableBlockInputSync?: boolean; /** * The batch size of slots for backfill sync can attempt to sync/process before yielding * to sync loop. This number can be increased or decreased to make a suitable resource diff --git a/packages/beacon-node/src/sync/sync.ts b/packages/beacon-node/src/sync/sync.ts index 1fa49f1f405b..3763c9bee78b 100644 --- a/packages/beacon-node/src/sync/sync.ts +++ b/packages/beacon-node/src/sync/sync.ts @@ -13,7 +13,7 @@ import {IBeaconSync, SyncModules, SyncingStatus} from "./interface.js"; import {SyncChainDebugState, SyncState, syncStateMetric} from "./interface.js"; import {SyncOptions} from "./options.js"; import {RangeSync, RangeSyncEvent, RangeSyncStatus} from "./range/range.js"; -import {UnknownBlockSync} from "./unknownBlock.js"; +import {BlockInputSync} from "./unknownBlock.js"; import {PeerSyncType, getPeerSyncType, peerSyncTypes} from "./utils/remoteSyncType.js"; export class BeaconSync implements IBeaconSync { @@ -24,7 +24,7 @@ export class BeaconSync implements IBeaconSync { private readonly opts: SyncOptions; private readonly rangeSync: RangeSync; - private readonly unknownBlockSync: UnknownBlockSync; + private readonly unknownBlockSync: BlockInputSync; /** For metrics only */ private readonly peerSyncType = new Map(); @@ -38,7 +38,7 @@ export class BeaconSync implements IBeaconSync { this.metrics = metrics; this.logger = logger; this.rangeSync = new RangeSync(modules, opts); - this.unknownBlockSync = new UnknownBlockSync(config, network, chain, logger, metrics, opts); + this.unknownBlockSync = new BlockInputSync(config, network, chain, logger, metrics, opts); this.slotImportTolerance = opts.slotImportTolerance ?? 
SLOTS_PER_EPOCH; // Subscribe to RangeSync completing a SyncChain and recompute sync state @@ -232,7 +232,7 @@ export class BeaconSync implements IBeaconSync { // also start searching for unknown blocks if (!this.unknownBlockSync.isSubscribedToNetwork()) { this.unknownBlockSync.subscribeToNetwork(); - this.metrics?.syncUnknownBlock.switchNetworkSubscriptions.inc({action: "subscribed"}); + this.metrics?.blockInputSync.switchNetworkSubscriptions.inc({action: "subscribed"}); } } @@ -256,7 +256,7 @@ export class BeaconSync implements IBeaconSync { // also stop searching for unknown blocks if (this.unknownBlockSync.isSubscribedToNetwork()) { this.unknownBlockSync.unsubscribeFromNetwork(); - this.metrics?.syncUnknownBlock.switchNetworkSubscriptions.inc({action: "unsubscribed"}); + this.metrics?.blockInputSync.switchNetworkSubscriptions.inc({action: "unsubscribed"}); } } } diff --git a/packages/beacon-node/test/unit/sync/unknownBlock.test.ts b/packages/beacon-node/test/unit/sync/unknownBlock.test.ts index c33d1c1f689d..ff8dc5e563ba 100644 --- a/packages/beacon-node/test/unit/sync/unknownBlock.test.ts +++ b/packages/beacon-node/test/unit/sync/unknownBlock.test.ts @@ -13,7 +13,7 @@ import {SeenBlockProposers} from "../../../src/chain/seenCache/seenBlockProposer import {ZERO_HASH} from "../../../src/constants/constants.js"; import {INetwork, NetworkEvent, NetworkEventBus, PeerAction} from "../../../src/network/index.js"; import {defaultSyncOptions} from "../../../src/sync/options.js"; -import {UnknownBlockSync} from "../../../src/sync/unknownBlock.js"; +import {BlockInputSync} from "../../../src/sync/unknownBlock.js"; import {ClockStopped} from "../../mocks/clock.js"; import {MockedBeaconChain, getMockedBeaconChain} from "../../mocks/mockedBeaconChain.js"; import {testLogger} from "../../utils/logger.js"; @@ -188,7 +188,7 @@ describe.skip( const setTimeoutSpy = vi.spyOn(global, "setTimeout"); const processBlockSpy = vi.spyOn(chain, "processBlock"); - const syncService = new UnknownBlockSync(config, network as INetwork, chain as IBeaconChain, logger, null, { + const syncService = new BlockInputSync(config, network as INetwork, chain as IBeaconChain, logger, null, { ...defaultSyncOptions, maxPendingBlocks, }); @@ -247,7 +247,7 @@ describe("UnknownBlockSync", () => { let network: INetwork; let chain: MockedBeaconChain; const logger = testLogger(); - let service: UnknownBlockSync; + let service: BlockInputSync; beforeEach(() => { network = { @@ -277,7 +277,7 @@ describe("UnknownBlockSync", () => { const testName = actions.map((action) => (action ? 
"subscribe" : "unsubscribe")).join(" - "); it(testName, () => { const events = network.events as EventEmitter; - service = new UnknownBlockSync(minimalConfig, network, chain, logger, null, defaultSyncOptions); + service = new BlockInputSync(minimalConfig, network, chain, logger, null, defaultSyncOptions); for (const action of actions) { if (action) { service.subscribeToNetwork(); From f387cac7bfbec4e4122be7c62f2575471a37c1a1 Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Tue, 19 Aug 2025 04:13:16 +0700 Subject: [PATCH 027/173] feat: add source to events data --- .../src/api/impl/beacon/blocks/index.ts | 2 ++ .../src/api/impl/validator/index.ts | 3 ++- packages/beacon-node/src/chain/emitter.ts | 8 +++--- packages/beacon-node/src/network/interface.ts | 3 ++- packages/beacon-node/src/network/network.ts | 5 ++-- .../src/network/processor/gossipHandlers.ts | 26 +++++++++++++++---- .../src/network/processor/index.ts | 9 ++++--- 7 files changed, 39 insertions(+), 17 deletions(-) diff --git a/packages/beacon-node/src/api/impl/beacon/blocks/index.ts b/packages/beacon-node/src/api/impl/beacon/blocks/index.ts index 77838e4d73be..2f666f7de28a 100644 --- a/packages/beacon-node/src/api/impl/beacon/blocks/index.ts +++ b/packages/beacon-node/src/api/impl/beacon/blocks/index.ts @@ -179,6 +179,7 @@ export function getBeaconBlockApi({ chain.emitter.emit(ChainEvent.unknownParent, { blockInput: blockForImport, peer: IDENTITY_PEER_ID, + source: BlockInputSource.api, }); chain.persistInvalidSszValue( chain.config.getForkTypes(slot).SignedBeaconBlock, @@ -273,6 +274,7 @@ export function getBeaconBlockApi({ chain.emitter.emit(ChainEvent.unknownParent, { blockInput: blockForImport, peer: IDENTITY_PEER_ID, + source: BlockInputSource.api, }); } throw e; diff --git a/packages/beacon-node/src/api/impl/validator/index.ts b/packages/beacon-node/src/api/impl/validator/index.ts index 076e281bc7b9..c18842ab62ae 100644 --- a/packages/beacon-node/src/api/impl/validator/index.ts +++ b/packages/beacon-node/src/api/impl/validator/index.ts @@ -57,6 +57,7 @@ import { toRootHex, } from "@lodestar/utils"; import {MAX_BUILDER_BOOST_FACTOR} from "@lodestar/validator"; +import {BlockInputSource} from "../../../chain/blocks/blockInput/types.js"; import { AttestationError, AttestationErrorCode, @@ -972,7 +973,7 @@ export function getValidatorApi( // see https://github.com/ChainSafe/lodestar/issues/5063 if (!chain.forkChoice.hasBlock(beaconBlockRoot)) { const rootHex = toRootHex(beaconBlockRoot); - network.searchUnknownSlotRoot({slot, root: rootHex}); + network.searchUnknownSlotRoot({slot, root: rootHex}, BlockInputSource.api); // if result of this call is false, i.e. block hasn't seen after 1 slot then the below notOnOptimisticBlockRoot call will throw error await chain.waitForBlock(slot, rootHex); } diff --git a/packages/beacon-node/src/chain/emitter.ts b/packages/beacon-node/src/chain/emitter.ts index f95ceb1802f5..fdaab4f005d0 100644 --- a/packages/beacon-node/src/chain/emitter.ts +++ b/packages/beacon-node/src/chain/emitter.ts @@ -6,7 +6,7 @@ import {CheckpointWithHex} from "@lodestar/fork-choice"; import {CachedBeaconStateAllForks} from "@lodestar/state-transition"; import {RootHex, fulu, phase0} from "@lodestar/types"; import {PeerIdStr} from "../util/peerId.js"; -import {IBlockInput} from "./blocks/blockInput/types.js"; +import {BlockInputSource, IBlockInput} from "./blocks/blockInput/types.js"; /** * Important chain events that occur during normal chain operation. 
@@ -70,9 +70,9 @@ export type ReorgEventData = routes.events.EventData[routes.events.EventType.cha type ApiEvents = {[K in routes.events.EventType]: (data: routes.events.EventData[K]) => void}; export type ChainEventData = { - [ChainEvent.unknownParent]: {blockInput: IBlockInput; peer: PeerIdStr}; - [ChainEvent.unknownBlockRoot]: {rootHex: RootHex; peer?: PeerIdStr}; - [ChainEvent.incompleteBlockInput]: {blockInput: IBlockInput; peer: PeerIdStr}; + [ChainEvent.unknownParent]: {blockInput: IBlockInput; peer: PeerIdStr; source: BlockInputSource}; + [ChainEvent.unknownBlockRoot]: {rootHex: RootHex; peer?: PeerIdStr; source: BlockInputSource}; + [ChainEvent.incompleteBlockInput]: {blockInput: IBlockInput; peer: PeerIdStr; source: BlockInputSource}; }; export type IChainEvents = ApiEvents & { diff --git a/packages/beacon-node/src/network/interface.ts b/packages/beacon-node/src/network/interface.ts index 70443d2eb75f..6da6c77c1b01 100644 --- a/packages/beacon-node/src/network/interface.ts +++ b/packages/beacon-node/src/network/interface.ts @@ -34,6 +34,7 @@ import { } from "@lodestar/types"; import type {Datastore} from "interface-datastore"; import {Libp2p as ILibp2p} from "libp2p"; +import {BlockInputSource} from "../chain/blocks/blockInput/types.js"; import {CustodyConfig} from "../util/dataColumns.js"; import {PeerIdStr} from "../util/peerId.js"; import {BlobSidecarsByRootRequest} from "../util/types.js"; @@ -66,7 +67,7 @@ export interface INetwork extends INetworkCorePublic { reportPeer(peer: PeerIdStr, action: PeerAction, actionName: string): void; shouldAggregate(subnet: SubnetID, slot: Slot): boolean; reStatusPeers(peers: PeerIdStr[]): Promise; - searchUnknownSlotRoot(slotRoot: SlotRootHex, peer?: PeerIdStr): void; + searchUnknownSlotRoot(slotRoot: SlotRootHex, source: BlockInputSource, peer?: PeerIdStr): void; // ReqResp sendBeaconBlocksByRange( peerId: PeerIdStr, diff --git a/packages/beacon-node/src/network/network.ts b/packages/beacon-node/src/network/network.ts index 5a51ec3b15d0..5f176be7c657 100644 --- a/packages/beacon-node/src/network/network.ts +++ b/packages/beacon-node/src/network/network.ts @@ -28,6 +28,7 @@ import { phase0, } from "@lodestar/types"; import {prettyPrintIndices, sleep} from "@lodestar/utils"; +import {BlockInputSource} from "../chain/blocks/blockInput/types.js"; import {ChainEvent, IBeaconChain} from "../chain/index.js"; import {computeSubnetForDataColumnSidecar} from "../chain/validation/dataColumnSidecar.js"; import {IBeaconDb} from "../db/interface.js"; @@ -271,8 +272,8 @@ export class Network implements INetwork { return this.core.reStatusPeers(peers); } - searchUnknownSlotRoot(slotRoot: SlotRootHex, peer?: PeerIdStr): void { - this.networkProcessor.searchUnknownSlotRoot(slotRoot, peer); + searchUnknownSlotRoot(slotRoot: SlotRootHex, source: BlockInputSource, peer?: PeerIdStr): void { + this.networkProcessor.searchUnknownSlotRoot(slotRoot, source, peer); } async reportPeer(peer: PeerIdStr, action: PeerAction, actionName: string): Promise { diff --git a/packages/beacon-node/src/network/processor/gossipHandlers.ts b/packages/beacon-node/src/network/processor/gossipHandlers.ts index b4603e17def4..70b5ae694cf7 100644 --- a/packages/beacon-node/src/network/processor/gossipHandlers.ts +++ b/packages/beacon-node/src/network/processor/gossipHandlers.ts @@ -171,7 +171,11 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand // Don't trigger this yet if full block and blobs haven't arrived yet if (e.type.code === 
BlockErrorCode.PARENT_UNKNOWN && blockInput !== null) { logger.debug("Gossip block has error", {slot, root: blockShortHex, code: e.type.code}); - chain.emitter.emit(ChainEvent.incompleteBlockInput, {blockInput, peer: peerIdStr}); + chain.emitter.emit(ChainEvent.incompleteBlockInput, { + blockInput, + peer: peerIdStr, + source: BlockInputSource.gossip, + }); // throw error (don't prune the blockInput) throw e; } @@ -341,7 +345,11 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand ...blockInput.getLogMeta(), }); // The data is not yet fully available, immediately trigger an aggressive pull via unknown block sync - chain.emitter.emit(ChainEvent.incompleteBlockInput, {blockInput, peer: peerIdStr}); + chain.emitter.emit(ChainEvent.incompleteBlockInput, { + blockInput, + peer: peerIdStr, + source: BlockInputSource.gossip, + }); } else { metrics?.blockInputFetchStats.totalDataAvailableBlockInputs.inc(); metrics?.blockInputFetchStats.totalDataAvailableBlockInputBlobs.inc( @@ -458,7 +466,11 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand ...blockInput.getLogMeta(), } ); - chain.emitter.emit(ChainEvent.incompleteBlockInput, {blockInput, peer: peerIdStr}); + chain.emitter.emit(ChainEvent.incompleteBlockInput, { + blockInput, + peer: peerIdStr, + source: BlockInputSource.gossip, + }); }); } }, @@ -501,7 +513,11 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand ...blockInput.getLogMeta(), } ); - chain.emitter.emit(ChainEvent.incompleteBlockInput, {blockInput, peer: peerIdStr}); + chain.emitter.emit(ChainEvent.incompleteBlockInput, { + blockInput, + peer: peerIdStr, + source: BlockInputSource.gossip, + }); }); } }, @@ -840,7 +856,7 @@ export async function validateGossipFnRetryUnknownRoot( if (unknownBlockRootRetries === 0) { // Trigger unknown block root search here const rootHex = toRootHex(blockRoot); - network.searchUnknownSlotRoot({slot, root: rootHex}); + network.searchUnknownSlotRoot({slot, root: rootHex}, BlockInputSource.gossip); } if (unknownBlockRootRetries++ < MAX_UNKNOWN_BLOCK_ROOT_RETRIES) { diff --git a/packages/beacon-node/src/network/processor/index.ts b/packages/beacon-node/src/network/processor/index.ts index 032e66ec4843..333e595ba9de 100644 --- a/packages/beacon-node/src/network/processor/index.ts +++ b/packages/beacon-node/src/network/processor/index.ts @@ -4,6 +4,8 @@ import {computeStartSlotAtEpoch} from "@lodestar/state-transition"; import {RootHex, Slot, SlotRootHex} from "@lodestar/types"; import {Logger, MapDef, mapValues, sleep} from "@lodestar/utils"; import {pruneSetToMax} from "@lodestar/utils"; +import {BlockInputSource} from "../../chain/blocks/blockInput/types.js"; +import {ChainEvent} from "../../chain/emitter.js"; import {GossipErrorCode} from "../../chain/errors/gossipValidation.js"; import {IBeaconChain} from "../../chain/interface.js"; import {IBeaconDb} from "../../db/interface.js"; @@ -24,7 +26,6 @@ import {GossipHandlerOpts, ValidatorFnsModules, getGossipHandlers} from "./gossi import {createGossipQueues} from "./gossipQueues/index.js"; import {ValidatorFnModules, getGossipValidatorBatchFn, getGossipValidatorFn} from "./gossipValidatorFn.js"; import {PendingGossipsubMessage} from "./types.js"; -import {ChainEvent} from "../../chain/emitter.js"; export * from "./types.js"; @@ -230,13 +231,13 @@ export class NetworkProcessor { return queue.getAll(); } - searchUnknownSlotRoot({slot, root}: SlotRootHex, peer?: PeerIdStr): void { + searchUnknownSlotRoot({slot, 
root}: SlotRootHex, source: BlockInputSource, peer?: PeerIdStr): void { if (this.chain.seenBlock(root) || this.unknownRootsBySlot.getOrDefault(slot).has(root)) { return; } // Search for the unknown block this.unknownRootsBySlot.getOrDefault(slot).add(root); - this.chain.emitter.emit(ChainEvent.unknownBlockRoot, {rootHex: root, peer}); + this.chain.emitter.emit(ChainEvent.unknownBlockRoot, {rootHex: root, peer, source}); } private onPendingGossipsubMessage(message: PendingGossipsubMessage): void { @@ -269,7 +270,7 @@ export class NetworkProcessor { // check if we processed a block with this root // no need to check if root is a descendant of the current finalized block, it will be checked once we validate the message if needed if (root && !this.chain.forkChoice.hasBlockHexUnsafe(root)) { - this.searchUnknownSlotRoot({slot, root}, message.propagationSource.toString()); + this.searchUnknownSlotRoot({slot, root}, BlockInputSource.gossip, message.propagationSource.toString()); if (this.unknownBlockGossipsubMessagesCount > MAX_QUEUED_UNKNOWN_BLOCK_GOSSIP_OBJECTS) { // TODO: Should report the dropped job to gossip? It will be eventually pruned from the mcache From 021523c5783c300e7727b6720d2b8b91d63255dc Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Tue, 19 Aug 2025 04:13:52 +0700 Subject: [PATCH 028/173] fix: convert to new IBlockInput --- packages/beacon-node/src/sync/range/chain.ts | 3 +-- .../beacon-node/src/sync/range/utils/hashBlocks.ts | 13 +++++++------ 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/packages/beacon-node/src/sync/range/chain.ts b/packages/beacon-node/src/sync/range/chain.ts index 35e34741b338..987e6e4692ef 100644 --- a/packages/beacon-node/src/sync/range/chain.ts +++ b/packages/beacon-node/src/sync/range/chain.ts @@ -7,14 +7,13 @@ import {IBlockInput} from "../../chain/blocks/blockInput/types.js"; import {Metrics} from "../../metrics/metrics.js"; import {PeerAction, prettyPrintPeerIdStr} from "../../network/index.js"; import {PeerSyncMeta} from "../../network/peers/peersData.js"; -import {PartialDownload} from "../../network/reqresp/beaconBlocksMaybeBlobsByRange.js"; import {CustodyConfig} from "../../util/dataColumns.js"; import {ItTrigger} from "../../util/itTrigger.js"; import {PeerIdStr} from "../../util/peerId.js"; import {wrapError} from "../../util/wrapError.js"; import {BATCH_BUFFER_SIZE, EPOCHS_PER_BATCH, MAX_LOOK_AHEAD_EPOCHS} from "../constants.js"; import {RangeSyncType} from "../utils/remoteSyncType.js"; -import {Batch, BatchError, BatchErrorCode, BatchMetadata, BatchStatus, DownloadByRangeRequests} from "./batch.js"; +import {Batch, BatchError, BatchErrorCode, BatchMetadata, BatchStatus} from "./batch.js"; import { ChainPeersBalancer, PeerSyncInfo, diff --git a/packages/beacon-node/src/sync/range/utils/hashBlocks.ts b/packages/beacon-node/src/sync/range/utils/hashBlocks.ts index 050700217c8a..ee27224d1054 100644 --- a/packages/beacon-node/src/sync/range/utils/hashBlocks.ts +++ b/packages/beacon-node/src/sync/range/utils/hashBlocks.ts @@ -1,23 +1,24 @@ import {ChainForkConfig} from "@lodestar/config"; -import {RootHex} from "@lodestar/types"; +import {RootHex, SignedBeaconBlock} from "@lodestar/types"; import {toRootHex} from "@lodestar/utils"; -import {BlockInput} from "../../../chain/blocks/types.js"; +import {IBlockInput} from "../../../chain/blocks/blockInput/types.js"; /** * String to uniquely identify block segments. Used for peer scoring and to compare if batches are equivalent. 
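 * Illustrative note (not part of this diff): for a segment [b0 ... bN] the id is
 * hashTreeRoot(b0) when the segment has a single block, and the concatenation
 * hashTreeRoot(b0) + hashTreeRoot(bN) otherwise, so two downloads of the same range
 * compare equal exactly when their first and last blocks match.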
*/ -export function hashBlocks(blocks: BlockInput[], config: ChainForkConfig): RootHex { +export function hashBlocks(blocks: IBlockInput[], config: ChainForkConfig): RootHex { switch (blocks.length) { case 0: return "0x"; case 1: { - const block0 = blocks[0].block; + const block0 = blocks[0].getBlock(); return toRootHex(config.getForkTypes(block0.message.slot).SignedBeaconBlock.hashTreeRoot(block0)); } default: { - const block0 = blocks[0].block; - const blockN = blocks.at(-1)?.block as BlockInput["block"]; + const block0 = blocks[0].getBlock(); + const blockN = blocks.at(-1)?.getBlock() as SignedBeaconBlock; return ( + // TODO(fulu): should we be doing checks for presence to make sure these do not blow up? toRootHex(config.getForkTypes(block0.message.slot).SignedBeaconBlock.hashTreeRoot(block0)) + toRootHex(config.getForkTypes(blockN.message.slot).SignedBeaconBlock.hashTreeRoot(blockN)) ); From 5a450aab51a261b5497ad513585a20cbf0890881 Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Tue, 19 Aug 2025 04:24:07 +0700 Subject: [PATCH 029/173] fix: convert some BlockInput to IBlockInput for type safety in sync --- packages/beacon-node/src/chain/blocks/index.ts | 10 +++++----- packages/beacon-node/src/chain/blocks/types.ts | 4 ++-- .../beacon-node/src/chain/blocks/utils/chainSegment.ts | 4 ++-- packages/beacon-node/src/chain/blocks/verifyBlock.ts | 4 ++-- .../src/chain/blocks/verifyBlocksDataAvailability.ts | 4 ++-- .../src/chain/blocks/verifyBlocksSanityChecks.ts | 8 ++++---- .../chain/blocks/verifyBlocksStateTransitionOnly.ts | 4 ++-- .../src/chain/blocks/writeBlockInputToDb.ts | 6 +++--- packages/beacon-node/src/chain/chain.ts | 6 +++--- packages/beacon-node/src/chain/interface.ts | 6 +++--- .../src/network/processor/gossipHandlers.ts | 6 +++--- 11 files changed, 31 insertions(+), 31 deletions(-) diff --git a/packages/beacon-node/src/chain/blocks/index.ts b/packages/beacon-node/src/chain/blocks/index.ts index 455a534ed54e..0a08dfaa8874 100644 --- a/packages/beacon-node/src/chain/blocks/index.ts +++ b/packages/beacon-node/src/chain/blocks/index.ts @@ -5,7 +5,7 @@ import {JobItemQueue, isQueueErrorAborted} from "../../util/queue/index.js"; import type {BeaconChain} from "../chain.js"; import {BlockError, BlockErrorCode, isBlockErrorAborted} from "../errors/index.js"; import {BlockProcessOpts} from "../options.js"; -import {BlockInput} from "./blockInput/index.js"; +import {IBlockInput} from "./blockInput/types.js"; import {importBlock} from "./importBlock.js"; import {FullyVerifiedBlock, ImportBlockOpts} from "./types.js"; import {assertLinearChainSegment} from "./utils/chainSegment.js"; @@ -20,10 +20,10 @@ const QUEUE_MAX_LENGTH = 256; * BlockProcessor processes block jobs in a queued fashion, one after the other. 
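 * A job is an array of IBlockInput imported as one linear chain segment; callers go
 * through the chain facade, e.g. (illustrative only, opts per ImportBlockOpts):
 * await chain.processChainSegment(blockInputs, {eagerPersistBlock: false});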
*/ export class BlockProcessor { - readonly jobQueue: JobItemQueue<[BlockInput[], ImportBlockOpts], void>; + readonly jobQueue: JobItemQueue<[IBlockInput[], ImportBlockOpts], void>; constructor(chain: BeaconChain, metrics: Metrics | null, opts: BlockProcessOpts, signal: AbortSignal) { - this.jobQueue = new JobItemQueue<[BlockInput[], ImportBlockOpts], void>( + this.jobQueue = new JobItemQueue<[IBlockInput[], ImportBlockOpts], void>( (job, importOpts) => { return processBlocks.call(chain, job, {...opts, ...importOpts}); }, @@ -32,7 +32,7 @@ export class BlockProcessor { ); } - async processBlocksJob(job: BlockInput[], opts: ImportBlockOpts = {}): Promise { + async processBlocksJob(job: IBlockInput[], opts: ImportBlockOpts = {}): Promise { await this.jobQueue.push(job, opts); } } @@ -49,7 +49,7 @@ export class BlockProcessor { */ export async function processBlocks( this: BeaconChain, - blocks: BlockInput[], + blocks: IBlockInput[], opts: BlockProcessOpts & ImportBlockOpts ): Promise { if (blocks.length === 0) { diff --git a/packages/beacon-node/src/chain/blocks/types.ts b/packages/beacon-node/src/chain/blocks/types.ts index c6ff8545af78..ff21450f9b40 100644 --- a/packages/beacon-node/src/chain/blocks/types.ts +++ b/packages/beacon-node/src/chain/blocks/types.ts @@ -3,7 +3,7 @@ import {MaybeValidExecutionStatus} from "@lodestar/fork-choice"; import {ForkSeq} from "@lodestar/params"; import {CachedBeaconStateAllForks, DataAvailabilityStatus, computeEpochAtSlot} from "@lodestar/state-transition"; import type {Slot, fulu} from "@lodestar/types"; -import {BlockInput} from "./blockInput/index.js"; +import {IBlockInput} from "./blockInput/types.js"; export enum GossipedInputType { block = "block", @@ -88,7 +88,7 @@ export type ImportBlockOpts = { * A wrapper around a `SignedBeaconBlock` that indicates that this block is fully verified and ready to import */ export type FullyVerifiedBlock = { - blockInput: BlockInput; + blockInput: IBlockInput; postState: CachedBeaconStateAllForks; parentBlockSlot: Slot; proposerBalanceDelta: number; diff --git a/packages/beacon-node/src/chain/blocks/utils/chainSegment.ts b/packages/beacon-node/src/chain/blocks/utils/chainSegment.ts index b205f25c2845..5c9b4d8b9d56 100644 --- a/packages/beacon-node/src/chain/blocks/utils/chainSegment.ts +++ b/packages/beacon-node/src/chain/blocks/utils/chainSegment.ts @@ -1,13 +1,13 @@ import {ChainForkConfig} from "@lodestar/config"; import {ssz} from "@lodestar/types"; import {BlockError, BlockErrorCode} from "../../errors/index.js"; -import {BlockInput} from "../blockInput/index.js"; +import {IBlockInput} from "../blockInput/types.js"; /** * Assert this chain segment of blocks is linear with slot numbers and hashes */ -export function assertLinearChainSegment(config: ChainForkConfig, blocks: BlockInput[]): void { +export function assertLinearChainSegment(config: ChainForkConfig, blocks: IBlockInput[]): void { for (let i = 0; i < blocks.length - 1; i++) { const block = blocks[i].getBlock(); const child = blocks[i + 1].getBlock(); diff --git a/packages/beacon-node/src/chain/blocks/verifyBlock.ts b/packages/beacon-node/src/chain/blocks/verifyBlock.ts index b0b6414815f4..b4512713560c 100644 --- a/packages/beacon-node/src/chain/blocks/verifyBlock.ts +++ b/packages/beacon-node/src/chain/blocks/verifyBlock.ts @@ -13,7 +13,7 @@ import type {BeaconChain} from "../chain.js"; import {BlockError, BlockErrorCode} from "../errors/index.js"; import {BlockProcessOpts} from "../options.js"; import {RegenCaller} from "../regen/index.js"; -import 
{BlockInput, DAType} from "./blockInput/index.js"; +import {BlockInput, DAType, IBlockInput} from "./blockInput/index.js"; import {ImportBlockOpts} from "./types.js"; import {DENEB_BLOWFISH_BANNER} from "./utils/blowfishBanner.js"; import {ELECTRA_GIRAFFE_BANNER} from "./utils/giraffeBanner.js"; @@ -39,7 +39,7 @@ import {writeBlockInputToDb} from "./writeBlockInputToDb.js"; export async function verifyBlocksInEpoch( this: BeaconChain, parentBlock: ProtoBlock, - blocksInput: BlockInput[], + blocksInput: IBlockInput[], opts: BlockProcessOpts & ImportBlockOpts ): Promise<{ postStates: CachedBeaconStateAllForks[]; diff --git a/packages/beacon-node/src/chain/blocks/verifyBlocksDataAvailability.ts b/packages/beacon-node/src/chain/blocks/verifyBlocksDataAvailability.ts index a983ccf3b2b1..92ebbb447ce6 100644 --- a/packages/beacon-node/src/chain/blocks/verifyBlocksDataAvailability.ts +++ b/packages/beacon-node/src/chain/blocks/verifyBlocksDataAvailability.ts @@ -1,5 +1,5 @@ import {DataAvailabilityStatus} from "@lodestar/state-transition"; -import {BlockInput, DAType} from "./blockInput/index.js"; +import {BlockInput, DAType, IBlockInput} from "./blockInput/index.js"; // we can now wait for full 12 seconds because unavailable block sync will try pulling // the blobs from the network anyway after 500ms of seeing the block @@ -12,7 +12,7 @@ const BLOB_AVAILABILITY_TIMEOUT = 12_000; * - Returns the data availability status for each block input */ export async function verifyBlocksDataAvailability( - blocks: BlockInput[], + blocks: IBlockInput[], signal: AbortSignal ): Promise<{ dataAvailabilityStatuses: DataAvailabilityStatus[]; diff --git a/packages/beacon-node/src/chain/blocks/verifyBlocksSanityChecks.ts b/packages/beacon-node/src/chain/blocks/verifyBlocksSanityChecks.ts index 9280b1f13e86..63e5c7b471c5 100644 --- a/packages/beacon-node/src/chain/blocks/verifyBlocksSanityChecks.ts +++ b/packages/beacon-node/src/chain/blocks/verifyBlocksSanityChecks.ts @@ -6,7 +6,7 @@ import {toRootHex} from "@lodestar/utils"; import {IClock} from "../../util/clock.js"; import {BlockError, BlockErrorCode} from "../errors/index.js"; import {IChainOptions} from "../options.js"; -import {BlockInput} from "./blockInput/index.js"; +import {IBlockInput} from "./blockInput/types.js"; import {ImportBlockOpts} from "./types.js"; /** @@ -29,10 +29,10 @@ export function verifyBlocksSanityChecks( opts: IChainOptions; blacklistedBlocks: Map; }, - blocks: BlockInput[], + blocks: IBlockInput[], opts: ImportBlockOpts ): { - relevantBlocks: BlockInput[]; + relevantBlocks: IBlockInput[]; parentSlots: Slot[]; parentBlock: ProtoBlock | null; } { @@ -40,7 +40,7 @@ export function verifyBlocksSanityChecks( throw Error("Empty partiallyVerifiedBlocks"); } - const relevantBlocks: BlockInput[] = []; + const relevantBlocks: IBlockInput[] = []; const parentSlots: Slot[] = []; let parentBlock: ProtoBlock | null = null; diff --git a/packages/beacon-node/src/chain/blocks/verifyBlocksStateTransitionOnly.ts b/packages/beacon-node/src/chain/blocks/verifyBlocksStateTransitionOnly.ts index 16b96af9bbed..231195b20bbe 100644 --- a/packages/beacon-node/src/chain/blocks/verifyBlocksStateTransitionOnly.ts +++ b/packages/beacon-node/src/chain/blocks/verifyBlocksStateTransitionOnly.ts @@ -12,7 +12,7 @@ import {nextEventLoop} from "../../util/eventLoop.js"; import {BlockError, BlockErrorCode} from "../errors/index.js"; import {BlockProcessOpts} from "../options.js"; import {ValidatorMonitor} from "../validatorMonitor.js"; -import {BlockInput} from 
"./blockInput/index.js"; +import {BlockInput, IBlockInput} from "./blockInput/index.js"; import {ImportBlockOpts} from "./types.js"; /** @@ -25,7 +25,7 @@ import {ImportBlockOpts} from "./types.js"; */ export async function verifyBlocksStateTransitionOnly( preState0: CachedBeaconStateAllForks, - blocks: BlockInput[], + blocks: IBlockInput[], dataAvailabilityStatuses: DataAvailabilityStatus[], logger: Logger, metrics: Metrics | null, diff --git a/packages/beacon-node/src/chain/blocks/writeBlockInputToDb.ts b/packages/beacon-node/src/chain/blocks/writeBlockInputToDb.ts index 79b984a4ea3c..78328ccd76ce 100644 --- a/packages/beacon-node/src/chain/blocks/writeBlockInputToDb.ts +++ b/packages/beacon-node/src/chain/blocks/writeBlockInputToDb.ts @@ -6,7 +6,7 @@ import {toHex} from "@lodestar/utils"; import {BlobSidecarsWrapper} from "../../db/repositories/blobSidecars.js"; import {DataColumnSidecarsWrapper} from "../../db/repositories/dataColumnSidecars.js"; import {BeaconChain} from "../chain.js"; -import {BlockInput, isBlockInputBlobs, isBlockInputColumns} from "./blockInput/index.js"; +import {IBlockInput, isBlockInputBlobs, isBlockInputColumns} from "./blockInput/index.js"; /** * Persists block input data to DB. This operation must be eventually completed if a block is imported to the fork-choice. @@ -15,7 +15,7 @@ import {BlockInput, isBlockInputBlobs, isBlockInputColumns} from "./blockInput/i * This operation may be performed before, during or after importing to the fork-choice. As long as errors * are handled properly for eventual consistency. */ -export async function writeBlockInputToDb(this: BeaconChain, blocksInputs: BlockInput[]): Promise { +export async function writeBlockInputToDb(this: BeaconChain, blocksInputs: IBlockInput[]): Promise { // track all these objects for a few batch db operations const putBlocks: KeyValue[] = []; const putSerializedBlocks: KeyValue[] = []; @@ -115,7 +115,7 @@ export async function writeBlockInputToDb(this: BeaconChain, blocksInputs: Block /** * Prunes eagerly persisted block inputs only if not known to the fork-choice */ -export async function removeEagerlyPersistedBlockInputs(this: BeaconChain, blockInputs: BlockInput[]): Promise { +export async function removeEagerlyPersistedBlockInputs(this: BeaconChain, blockInputs: IBlockInput[]): Promise { const blockToRemove = []; const blobsToRemove = []; const dataColumnsToRemove = []; diff --git a/packages/beacon-node/src/chain/chain.ts b/packages/beacon-node/src/chain/chain.ts index f097dd7b961c..10e035f5e3ce 100644 --- a/packages/beacon-node/src/chain/chain.ts +++ b/packages/beacon-node/src/chain/chain.ts @@ -57,7 +57,7 @@ import {SerializedCache} from "../util/serializedCache.js"; import {ArchiveStore} from "./archiveStore/archiveStore.js"; import {CheckpointBalancesCache} from "./balancesCache.js"; import {BeaconProposerCache} from "./beaconProposerCache.js"; -import {BlockInput} from "./blocks/blockInput/index.js"; +import {BlockInput, IBlockInput} from "./blocks/blockInput/index.js"; import {BlockProcessor, ImportBlockOpts} from "./blocks/index.js"; import {BlsMultiThreadWorkerPool, BlsSingleThreadVerifier, IBlsVerifier} from "./bls/index.js"; import {ChainEvent, ChainEventEmitter} from "./emitter.js"; @@ -792,11 +792,11 @@ export class BeaconChain implements IBeaconChain { return {block, executionPayloadValue, consensusBlockValue: gweiToWei(proposerReward), shouldOverrideBuilder}; } - async processBlock(block: BlockInput, opts?: ImportBlockOpts): Promise { + async processBlock(block: IBlockInput, 
opts?: ImportBlockOpts): Promise { return this.blockProcessor.processBlocksJob([block], opts); } - async processChainSegment(blocks: BlockInput[], opts?: ImportBlockOpts): Promise { + async processChainSegment(blocks: IBlockInput[], opts?: ImportBlockOpts): Promise { return this.blockProcessor.processBlocksJob(blocks, opts); } diff --git a/packages/beacon-node/src/chain/interface.ts b/packages/beacon-node/src/chain/interface.ts index 05c18299fce9..d9a2e8a7fa7e 100644 --- a/packages/beacon-node/src/chain/interface.ts +++ b/packages/beacon-node/src/chain/interface.ts @@ -35,7 +35,7 @@ import {SerializedCache} from "../util/serializedCache.js"; import {IArchiveStore} from "./archiveStore/interface.js"; import {CheckpointBalancesCache} from "./balancesCache.js"; import {BeaconProposerCache, ProposerPreparationData} from "./beaconProposerCache.js"; -import {BlockInput} from "./blocks/blockInput/index.js"; +import {IBlockInput} from "./blocks/blockInput/index.js"; import {ImportBlockOpts} from "./blocks/types.js"; import {IBlsVerifier} from "./bls/index.js"; import {ChainEventEmitter} from "./emitter.js"; @@ -211,9 +211,9 @@ export interface IBeaconChain { }>; /** Process a block until complete */ - processBlock(block: BlockInput, opts?: ImportBlockOpts): Promise; + processBlock(block: IBlockInput, opts?: ImportBlockOpts): Promise; /** Process a chain of blocks until complete */ - processChainSegment(blocks: BlockInput[], opts?: ImportBlockOpts): Promise; + processChainSegment(blocks: IBlockInput[], opts?: ImportBlockOpts): Promise; getStatus(): Status; diff --git a/packages/beacon-node/src/network/processor/gossipHandlers.ts b/packages/beacon-node/src/network/processor/gossipHandlers.ts index 70b5ae694cf7..fd861de665db 100644 --- a/packages/beacon-node/src/network/processor/gossipHandlers.ts +++ b/packages/beacon-node/src/network/processor/gossipHandlers.ts @@ -15,7 +15,7 @@ import { sszTypesFor, } from "@lodestar/types"; import {LogLevel, Logger, prettyBytes, toHex, toRootHex} from "@lodestar/utils"; -import {BlockInput, BlockInputSource} from "../../chain/blocks/blockInput/index.js"; +import {BlockInput, BlockInputSource, IBlockInput} from "../../chain/blocks/blockInput/index.js"; import {BlobSidecarValidation} from "../../chain/blocks/types.js"; import {ChainEvent} from "../../chain/emitter.js"; import { @@ -118,7 +118,7 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand fork: ForkName, peerIdStr: string, seenTimestampSec: number - ): Promise { + ): Promise { const slot = signedBlock.message.slot; const forkTypes = config.getForkTypes(slot); const blockRootHex = toRootHex(forkTypes.BeaconBlock.hashTreeRoot(signedBlock.message)); @@ -331,7 +331,7 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand } } - function handleValidBeaconBlock(blockInput: BlockInput, peerIdStr: string, seenTimestampSec: number): void { + function handleValidBeaconBlock(blockInput: IBlockInput, peerIdStr: string, seenTimestampSec: number): void { const signedBlock = blockInput.getBlock(); const slot = signedBlock.message.slot; From 97a5e6a29528a928aceb3ed68bee7f9692a27559 Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Tue, 19 Aug 2025 04:26:03 +0700 Subject: [PATCH 030/173] fix: remove old types that were used by UnknownBlock and move to new paradigm --- packages/beacon-node/src/sync/interface.ts | 60 ---------------------- packages/beacon-node/src/sync/types.ts | 20 +++++++- 2 files changed, 19 insertions(+), 61 deletions(-) diff --git 
a/packages/beacon-node/src/sync/interface.ts b/packages/beacon-node/src/sync/interface.ts
index 4aef2b74be8d..f9e544fe530f 100644
--- a/packages/beacon-node/src/sync/interface.ts
+++ b/packages/beacon-node/src/sync/interface.ts
@@ -2,7 +2,6 @@ import {routes} from "@lodestar/api";
 import {BeaconConfig} from "@lodestar/config";
 import {RootHex, Slot, phase0} from "@lodestar/types";
 import {Logger} from "@lodestar/utils";
-import {BlockInput, BlockInputType, NullBlockInput} from "../chain/blocks/types.js";
 import {IBeaconChain} from "../chain/index.js";
 import {IBeaconDb} from "../db/index.js";
 import {Metrics} from "../metrics/index.js";
@@ -54,62 +53,3 @@ export interface SyncModules {
   chain: IBeaconChain;
   wsCheckpoint?: phase0.Checkpoint;
 }
-
-export type UnknownAndAncestorBlocks = {
-  unknowns: UnknownBlock[];
-  ancestors: DownloadedBlock[];
-};
-
-/**
- * onUnknownBlock: store 1 record with undefined parentBlockRootHex & blockInput, blockRootHex as key, status pending
- * onUnknownBlockParent:
- *   - store 1 record with known parentBlockRootHex & blockInput, blockRootHex as key, status downloaded
- *   - store 1 record with undefined parentBlockRootHex & blockInput, parentBlockRootHex as key, status pending
- */
-export type PendingBlock = UnknownBlock | DownloadedBlock;
-
-type PendingBlockCommon = {
-  blockRootHex: RootHex;
-  peerIdStrs: Set;
-  downloadAttempts: number;
-};
-
-export type UnknownBlock = PendingBlockCommon & {
-  status: PendingBlockStatus.pending | PendingBlockStatus.fetching;
-  parentBlockRootHex: null;
-} & (
-    | {unknownBlockType: PendingBlockType.UNKNOWN_BLOCK; blockInput: null}
-    | {unknownBlockType: PendingBlockType.UNKNOWN_DATA; blockInput: BlockInput & {type: BlockInputType.dataPromise}}
-    | {unknownBlockType: PendingBlockType.UNKNOWN_BLOCKINPUT; blockInput: NullBlockInput}
-  );
-
-/**
- * either the blobs are unknown or in future some blobs and even the block is unknown
- */
-
-export type DownloadedBlock = PendingBlockCommon & {
-  status: PendingBlockStatus.downloaded | PendingBlockStatus.processing;
-  parentBlockRootHex: RootHex;
-  blockInput: BlockInput;
-};
-
-export enum PendingBlockStatus {
-  pending = "pending",
-  fetching = "fetching",
-  downloaded = "downloaded",
-  processing = "processing",
-}
-
-export enum PendingBlockType {
-  /**
-   * We got a block root (from a gossip attestation, for exxample) but we don't have the block in forkchoice.
-   */
-  UNKNOWN_BLOCK = "unknown_block",
-  /**
-   * During gossip time, we may get a block but the parent root is unknown (not in forkchoice).
-   */
-  UNKNOWN_PARENT = "unknown_parent",
-
-  UNKNOWN_BLOCKINPUT = "unknown_blockinput",
-  UNKNOWN_DATA = "unknown_data",
-}
diff --git a/packages/beacon-node/src/sync/types.ts b/packages/beacon-node/src/sync/types.ts
index c8698fb0dfe5..98f59d2292ac 100644
--- a/packages/beacon-node/src/sync/types.ts
+++ b/packages/beacon-node/src/sync/types.ts
@@ -1,6 +1,24 @@
 import {IBlockInput} from "@lodestar/beacon-node/src/chain/blocks/blockInput/index.js";
 import {RootHex} from "@lodestar/types";
 
+export enum PendingBlockType {
+  /**
+   * We got a block root (from a gossip attestation, for example) but we don't have the block in forkchoice.
+   */
+  UNKNOWN_BLOCK_ROOT = "UnknownBlockRoot",
+  /**
+   * During gossip time, we may get a block but the parent root is unknown (not in forkchoice).
+ */ + UNKNOWN_PARENT = "unknown_parent", + /** + * During gossip we wait for a set amount of time to receive the complete block input, but if it does not + * arrive in time we turn to req/resp to pull the remainder so that it can be processed + */ + INCOMPLETE_BLOCK_INPUT = "IncompleteBlockInput", + + /** + * Block is known but some of its data (blobs or columns) is still missing + */ + UNKNOWN_DATA = "unknown_data", +} + export enum PendingBlockInputStatus { pending = "pending", fetching = "fetching", @@ -17,7 +35,7 @@ export type PendingBlockInput = { }; export type PendingRootHex = { - status: PendingBlockInputStatus; + status: PendingBlockInputStatus.pending | PendingBlockInputStatus.fetching; rootHex: RootHex; timeAddedSec: number; timeSyncedSec?: number; From 66672efe685e5e35f31770d4dd9f19a0f1ad629e Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Tue, 19 Aug 2025 04:27:40 +0700 Subject: [PATCH 031/173] fix: bug in Batch (add blocks back to state) --- packages/beacon-node/src/sync/range/batch.ts | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/packages/beacon-node/src/sync/range/batch.ts b/packages/beacon-node/src/sync/range/batch.ts index 39478a3f0827..53608f39923e 100644 --- a/packages/beacon-node/src/sync/range/batch.ts +++ b/packages/beacon-node/src/sync/range/batch.ts @@ -5,7 +5,6 @@ import {LodestarError} from "@lodestar/utils"; import {isBlockInputColumns} from "../../chain/blocks/blockInput/blockInput.js"; import {IBlockInput} from "../../chain/blocks/blockInput/types.js"; import {BlockError, BlockErrorCode} from "../../chain/errors/index.js"; -import {PartialDownload} from "../../network/reqresp/beaconBlocksMaybeBlobsByRange.js"; import {CustodyConfig} from "../../util/dataColumns.js"; import {PeerIdStr} from "../../util/peerId.js"; import {MAX_BATCH_DOWNLOAD_ATTEMPTS, MAX_BATCH_PROCESSING_ATTEMPTS} from "../constants.js"; @@ -55,7 +54,7 @@ export type DownloadSuccessState = export type BatchState = | DownloadSuccessState - | {status: BatchStatus.Downloading; peer: PeerIdStr} + | {status: BatchStatus.Downloading; peer: PeerIdStr; blocks: IBlockInput[]} | {status: BatchStatus.Processing; attempt: Attempt} | {status: BatchStatus.AwaitingValidation; attempt: Attempt}; @@ -373,7 +372,7 @@ export class Batch { // remove any downloaded blocks and re-attempt // TODO(fulu): need to remove the bad blocks from the SeenBlockInputCache - this.state = {status: BatchStatus.AwaitingDownload}; + this.state = {status: BatchStatus.AwaitingDownload, blocks: []}; } private onProcessingError(attempt: Attempt): void { @@ -384,7 +383,7 @@ // remove any downloaded blocks and re-attempt // TODO(fulu): need to remove the bad blocks from the SeenBlockInputCache - this.state = {status: BatchStatus.AwaitingDownload}; + this.state = {status: BatchStatus.AwaitingDownload, blocks: []}; } /** Helper to construct typed BatchError.
Stack traces are correct as the error is thrown above */ From 6b53b8ea0441f03ebf96986bf368a93c499fce07 Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Tue, 19 Aug 2025 04:28:18 +0700 Subject: [PATCH 032/173] fix: by range types to allow for batches that do not need blocks pulled --- packages/beacon-node/src/sync/utils/downloadByRange.ts | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/beacon-node/src/sync/utils/downloadByRange.ts b/packages/beacon-node/src/sync/utils/downloadByRange.ts index c1ab505b7dd3..5a3d7bacf804 100644 --- a/packages/beacon-node/src/sync/utils/downloadByRange.ts +++ b/packages/beacon-node/src/sync/utils/downloadByRange.ts @@ -17,7 +17,7 @@ import {PeerIdStr} from "../../util/peerId.js"; import {RangeSyncType} from "./remoteSyncType.js"; export type DownloadByRangeRequests = { - blocksRequest: phase0.BeaconBlocksByRangeRequest; + blocksRequest?: phase0.BeaconBlocksByRangeRequest; blobsRequest?: deneb.BlobSidecarsByRangeRequest; columnsRequest?: fulu.DataColumnSidecarsByRangeRequest; }; @@ -53,10 +53,10 @@ export type CacheByRangeResponsesProps = { batchBlocks: IBlockInput[]; }; -export async function cacheByRangeResponses({ +export function cacheByRangeResponses({ config, cache, - syncType, + // syncType, peerIdStr, responses, batchBlocks, @@ -349,7 +349,7 @@ export function validateRequests({ blocksRequest, blobsRequest, columnsRequest, -}: DownloadByRangeRequests & Pick): string { +}: DownloadByRangeRequests & Pick): string { const startSlot = (blocksRequest?.startSlot ?? blobsRequest?.startSlot ?? columnsRequest?.startSlot) as number; const count = (blocksRequest?.count ?? blobsRequest?.count ?? columnsRequest?.count) as number; const slotRange = `${startSlot} - ${startSlot + count}`; From 738e1fc419a7d9633cd1ff3c364ef957a7257fd5 Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Tue, 19 Aug 2025 04:28:32 +0700 Subject: [PATCH 033/173] wip: start to update BlockInputSync and pendingBlocksTree --- .../src/metrics/metrics/lodestar.ts | 9 +- packages/beacon-node/src/sync/unknownBlock.ts | 305 +++++++++--------- .../src/sync/utils/pendingBlocksTree.ts | 93 ++++-- 3 files changed, 228 insertions(+), 179 deletions(-) diff --git a/packages/beacon-node/src/metrics/metrics/lodestar.ts b/packages/beacon-node/src/metrics/metrics/lodestar.ts index d741f68f57ce..3ad6282a5b60 100644 --- a/packages/beacon-node/src/metrics/metrics/lodestar.ts +++ b/packages/beacon-node/src/metrics/metrics/lodestar.ts @@ -16,7 +16,7 @@ import {ExecutionPayloadStatus} from "../../execution/index.js"; import {GossipType} from "../../network/index.js"; import {CannotAcceptWorkReason, ReprocessRejectReason} from "../../network/processor/index.js"; import {BackfillSyncMethod} from "../../sync/backfill/backfill.js"; -import {PendingBlockType} from "../../sync/index.js"; +import {PendingBlockType} from "../../sync/types.js"; import {PeerSyncType, RangeSyncType} from "../../sync/utils/remoteSyncType.js"; import {AllocSource} from "../../util/bufferPool.js"; import {RecoverResult} from "../../util/dataColumns.js"; @@ -497,7 +497,7 @@ export function createLodestarMetrics( }), }, - syncUnknownBlock: { + blockInputSync: { switchNetworkSubscriptions: register.gauge<{action: string}>({ name: "lodestar_sync_unknown_block_network_subscriptions_count", help: "Switch network subscriptions on/off", @@ -508,6 +508,11 @@ export function createLodestarMetrics( help: "Total number of unknown block events or requests", labelNames: ["type"], }), + source: register.gauge<{source: 
BlockInputSource}>({ + name: "lodestar_block_input_sync_source_total", + help: "The origination source of one of the BlockInputSync triggers", + labelNames: ["source"], + }), pendingBlocks: register.gauge({ name: "lodestar_sync_unknown_block_pending_blocks_size", help: "Current size of UnknownBlockSync pending blocks cache", diff --git a/packages/beacon-node/src/sync/unknownBlock.ts b/packages/beacon-node/src/sync/unknownBlock.ts index 686576e28526..9647490ca810 100644 --- a/packages/beacon-node/src/sync/unknownBlock.ts +++ b/packages/beacon-node/src/sync/unknownBlock.ts @@ -2,42 +2,96 @@ import {ChainForkConfig} from "@lodestar/config"; import {ForkName, INTERVALS_PER_SLOT, NUMBER_OF_COLUMNS} from "@lodestar/params"; import {ColumnIndex, Root, RootHex, deneb} from "@lodestar/types"; import {BlobAndProof} from "@lodestar/types/deneb"; -import {Logger, fromHex, pruneSetToMax, toRootHex} from "@lodestar/utils"; +import {Logger, fromHex, prettyBytes, pruneSetToMax, toRootHex} from "@lodestar/utils"; import {sleep} from "@lodestar/utils"; -import {BlockInput, BlockInputType, CachedDataColumns, NullBlockInput} from "../chain/blocks/types.js"; +import {IBlockInput} from "../chain/blocks/blockInput/types.js"; import {BlockError, BlockErrorCode} from "../chain/errors/index.js"; import {ChainEvent, ChainEventData, IBeaconChain} from "../chain/index.js"; import {Metrics} from "../metrics/index.js"; import {INetwork, NetworkEvent, NetworkEventData} from "../network/index.js"; -import { - beaconBlocksMaybeBlobsByRoot, - unavailableBeaconBlobsByRoot, -} from "../network/reqresp/beaconBlocksMaybeBlobsByRoot.js"; import {byteArrayEquals} from "../util/bytes.js"; import {PeerIdStr} from "../util/peerId.js"; import {shuffle} from "../util/shuffle.js"; import {Result, wrapError} from "../util/wrapError.js"; -import {PendingBlock, PendingBlockStatus, PendingBlockType, UnknownBlock} from "./interface.js"; import {SyncOptions} from "./options.js"; -import {getAllDescendantBlocks, getDescendantBlocks, getUnknownAndAncestorBlocks} from "./utils/pendingBlocksTree.js"; +import { + BlockInputSyncCacheItem, + PendingBlockInput, + PendingBlockInputStatus, + PendingBlockType, + PendingRootHex, + isPendingBlockInput, +} from "./types.js"; +import { + getAllDescendantBlocks, + getDescendantBlocks, + getIncompleteAndAncestorBlocks, +} from "./utils/pendingBlocksTree.js"; const MAX_ATTEMPTS_PER_BLOCK = 5; const MAX_KNOWN_BAD_BLOCKS = 500; const MAX_PENDING_BLOCKS = 100; -export class UnknownBlockSync { +function getLogMeta( + block: BlockInputSyncCacheItem, + pendingBlocks?: Map<RootHex, BlockInputSyncCacheItem> +): Record<string, string | number> { + const pendingBlocksLog: Record<string, number> = pendingBlocks ? {pendingBlocks: pendingBlocks.size} : {}; + return isPendingBlockInput(block) + ? { + type: "pendingBlockInput", + ...pendingBlocksLog, + ...block.blockInput.getLogMeta(), + } + : { + type: "pendingRootHex", + ...pendingBlocksLog, + rootHex: prettyBytes(block.rootHex), + }; +} + +/** + * BlockInputSync is a class that handles ReqResp to find blocks and data related to a specific blockRoot. The + * blockRoot may have been found via object gossip, or the API. Gossip objects that can trigger a search are blocks, + * blobs, columns, attestations, etc. In the case of blocks and data this is generally during the current slot, but it + * can also be for items that are received late and not fully verified, and thus not in fork-choice (old blocks on + * an unknown fork).
It can also be triggered via an attestation (or sync committee message or any other item that + * gets gossiped) that references a blockRoot that is not in fork-choice. In rare situations (which realistically + * should not happen) it can get triggered via the API when the validator attempts to publish a block, attestation, aggregate + * and proof, or a sync committee contribution that has unknown information included (parentRoot for instance). + * + * The goal of the class is to make sure that all information that is necessary for import into fork-choice is pulled + * from peers so that the block and data can be processed, and thus the object that triggered the search can be + * referenced and validated. + * + * The most common case for this search is a set of block/data that comes across gossip for the current slot, during + * normal chain operation, but not everything was received before the gossip cutoff window, so it is necessary + * to pull the remaining data via req/resp so that fork-choice can be updated prior to making an attestation for the + * current slot. + * + * Event sources for old UnknownBlock + * + * - publishBlock + * - gossipHandlers + * - searchUnknownSlotRoot + * = produceSyncCommitteeContribution + * = validateGossipFnRetryUnknownRoot + * * submitPoolAttestationsV2 + * * publishAggregateAndProofsV2 + * = onPendingGossipsubMessage + * * NetworkEvent.pendingGossipsubMessage + * - onGossipsubMessage + */ -export class UnknownBlockSync { +export class BlockInputSync { /** * block RootHex -> PendingBlock. To avoid finding same root at the same time */ - private readonly pendingBlocks = new Map<RootHex, PendingBlock>(); + private readonly pendingBlocks = new Map<RootHex, BlockInputSyncCacheItem>(); private readonly knownBadBlocks = new Set<RootHex>(); private readonly proposerBoostSecWindow: number; private readonly maxPendingBlocks; private subscribedToNetworkEvents = false; - private engineGetBlobsCache = new Map<RootHex, BlobAndProof | null>(); - private blockInputsRetryTrackerCache = new Set<RootHex>(); - constructor( private readonly config: ChainForkConfig, private readonly network: INetwork, @@ -50,35 +104,36 @@ export class UnknownBlockSync { this.proposerBoostSecWindow = this.config.SECONDS_PER_SLOT / INTERVALS_PER_SLOT; if (metrics) { - metrics.syncUnknownBlock.pendingBlocks.addCollect(() => - metrics.syncUnknownBlock.pendingBlocks.set(this.pendingBlocks.size) + metrics.blockInputSync.pendingBlocks.addCollect(() => + metrics.blockInputSync.pendingBlocks.set(this.pendingBlocks.size) ); - metrics.syncUnknownBlock.knownBadBlocks.addCollect(() => - metrics.syncUnknownBlock.knownBadBlocks.set(this.knownBadBlocks.size) + metrics.blockInputSync.knownBadBlocks.addCollect(() => + metrics.blockInputSync.knownBadBlocks.set(this.knownBadBlocks.size) ); } } subscribeToNetwork(): void { - if (!this.opts?.disableUnknownBlockSync) { - // cannot chain to the above if or the log will be incorrect - if (!this.subscribedToNetworkEvents) { - this.logger.verbose("UnknownBlockSync enabled."); - this.chain.emitter.on(ChainEvent.unknownBlockRoot, this.onUnknownBlock); - this.chain.emitter.on(ChainEvent.incompleteBlockInput, this.onUnknownBlockInput); - this.chain.emitter.on(ChainEvent.unknownParent, this.onUnknownParent); - this.network.events.on(NetworkEvent.peerConnected, this.triggerUnknownBlockSearch); - this.subscribedToNetworkEvents = true; - } - } else { - this.logger.verbose("UnknownBlockSync disabled by disableUnknownBlockSync option."); + if (this.opts?.disableBlockInputSync) { + this.logger.verbose("BlockInputSync disabled by disableBlockInputSync option."); + return; + } + + // cannot chain to the above if or
the log will be incorrect + if (!this.subscribedToNetworkEvents) { + this.logger.verbose("BlockInputSync enabled."); + this.chain.emitter.on(ChainEvent.unknownBlockRoot, this.onUnknownBlockRoot); + this.chain.emitter.on(ChainEvent.incompleteBlockInput, this.onIncompleteBlockInput); + this.chain.emitter.on(ChainEvent.unknownParent, this.onUnknownParent); + this.network.events.on(NetworkEvent.peerConnected, this.triggerUnknownBlockSearch); + this.subscribedToNetworkEvents = true; } } unsubscribeFromNetwork(): void { - this.logger.verbose("UnknownBlockSync disabled."); - this.chain.emitter.off(ChainEvent.unknownBlockRoot, this.onUnknownBlock); - this.chain.emitter.off(ChainEvent.incompleteBlockInput, this.onUnknownBlockInput); + this.logger.verbose("BlockInputSync disabled."); + this.chain.emitter.off(ChainEvent.unknownBlockRoot, this.onUnknownBlockRoot); + this.chain.emitter.off(ChainEvent.incompleteBlockInput, this.onIncompleteBlockInput); this.chain.emitter.off(ChainEvent.unknownParent, this.onUnknownParent); this.network.events.off(NetworkEvent.peerConnected, this.triggerUnknownBlockSearch); this.subscribedToNetworkEvents = false; @@ -86,7 +141,6 @@ export class UnknownBlockSync { close(): void { this.unsubscribeFromNetwork(); - // add more in the future if needed } isSubscribedToNetwork(): boolean { @@ -96,26 +150,28 @@ export class UnknownBlockSync { /** * Process an unknownBlock event and register the block in `pendingBlocks` Map. */ - private onUnknownBlock = (data: ChainEventData[ChainEvent.unknownBlockRoot]): void => { + private onUnknownBlockRoot = (data: ChainEventData[ChainEvent.unknownBlockRoot]): void => { try { - const unknownBlockType = this.addUnknownBlock(data.rootHex, data.peer); + this.addByRootHex(data.rootHex, data.peer); this.triggerUnknownBlockSearch(); - this.metrics?.syncUnknownBlock.requests.inc({type: unknownBlockType}); + this.metrics?.blockInputSync.requests.inc({type: PendingBlockType.UNKNOWN_BLOCK_ROOT}); + this.metrics?.blockInputSync.source.inc({source: data.source}); } catch (e) { - this.logger.debug("Error handling unknownBlock event", {}, e as Error); + this.logger.debug("Error handling unknownBlockRoot event", {}, e as Error); } }; /** * Process an unknownBlockInput event and register the block in `pendingBlocks` Map. 
*/ - private onUnknownBlockInput = (data: ChainEventData[ChainEvent.incompleteBlockInput]): void => { + private onIncompleteBlockInput = (data: ChainEventData[ChainEvent.incompleteBlockInput]): void => { try { - const unknownBlockType = this.addUnknownBlock(data.blockInput, data.peer); + this.addByBlockInput(data.blockInput, data.peer); this.triggerUnknownBlockSearch(); - this.metrics?.syncUnknownBlock.requests.inc({type: unknownBlockType}); + this.metrics?.blockInputSync.requests.inc({type: PendingBlockType.INCOMPLETE_BLOCK_INPUT}); + this.metrics?.blockInputSync.source.inc({source: data.source}); } catch (e) { - this.logger.debug("Error handling unknownBlockInput event", {}, e as Error); + this.logger.debug("Error handling incompleteBlockInput event", {}, e as Error); } }; @@ -124,119 +180,74 @@ export class UnknownBlockSync { */ private onUnknownParent = (data: ChainEventData[ChainEvent.unknownParent]): void => { try { - this.addUnknownParent(data.blockInput, data.peer); + this.addByRootHex(data.blockInput.parentRootHex, data.peer); + this.addByBlockInput(data.blockInput, data.peer); this.triggerUnknownBlockSearch(); - this.metrics?.syncUnknownBlock.requests.inc({type: PendingBlockType.UNKNOWN_PARENT}); + this.metrics?.blockInputSync.requests.inc({type: PendingBlockType.UNKNOWN_PARENT}); + this.metrics?.blockInputSync.source.inc({source: data.source}); } catch (e) { - this.logger.debug("Error handling unknownBlockParent event", {}, e as Error); + this.logger.debug("Error handling unknownParent event", {}, e as Error); } }; - /** - * When a blockInput comes with an unknown parent: - * - add the block to pendingBlocks with status downloaded or pending blockRootHex as key. This is similar to - * an `onUnknownBlock` event, but the blocks is downloaded. - * - add the parent root to pendingBlocks with status pending, parentBlockRootHex as key. This is - * the same to an `onUnknownBlock` event with parentBlockRootHex as root. - */ - private addUnknownParent(blockInput: BlockInput, peerIdStr: string): void { - const block = blockInput.block.message; - const blockRoot = this.config.getForkTypes(block.slot).BeaconBlock.hashTreeRoot(block); - const blockRootHex = toRootHex(blockRoot); - const parentBlockRootHex = toRootHex(block.parentRoot); - - // add 1 pending block with status downloaded - let pendingBlock = this.pendingBlocks.get(blockRootHex); + private addByRootHex = (rootHex: RootHex, peerIdStr?: PeerIdStr): void => { + let pendingBlock = this.pendingBlocks.get(rootHex) as PendingRootHex; if (!pendingBlock) { - pendingBlock = - blockInput.type === BlockInputType.dataPromise - ? { - unknownBlockType: PendingBlockType.UNKNOWN_DATA, - blockRootHex, - // this will be set after we download block - parentBlockRootHex: null, - blockInput, - peerIdStrs: new Set(), - status: PendingBlockStatus.pending, - downloadAttempts: 0, - } - : { - blockRootHex, - parentBlockRootHex, - blockInput, - peerIdStrs: new Set(), - status: PendingBlockStatus.downloaded, - downloadAttempts: 0, - }; - this.pendingBlocks.set(blockRootHex, pendingBlock); - this.logger.verbose("Added unknown block parent to pendingBlocks", { - root: blockRootHex, - parent: parentBlockRootHex, + pendingBlock = { + status: PendingBlockInputStatus.pending, + rootHex: rootHex, + peerIdStrings: new Set(), + timeAddedSec: Date.now() / 1000, + }; + this.pendingBlocks.set(rootHex, pendingBlock); + + this.logger.verbose("Added new rootHex to BlockInputSync.pendingBlocks", { + rootHex: prettyBytes(pendingBlock.rootHex), + peerIdStr: peerIdStr ?? 
"unknown peer", }); } - pendingBlock.peerIdStrs.add(peerIdStr); - // add 1 pending block with status pending - this.addUnknownBlock(parentBlockRootHex, peerIdStr); - } - - private addUnknownBlock( - blockInputOrRootHex: RootHex | BlockInput | NullBlockInput, - peerIdStr?: string - ): Exclude { - let blockRootHex: RootHex; - let blockInput: BlockInput | NullBlockInput | null; - let unknownBlockType: Exclude; + if (peerIdStr) { + pendingBlock.peerIdStrings.add(peerIdStr); + } - if (typeof blockInputOrRootHex === "string") { - blockRootHex = blockInputOrRootHex; - blockInput = null; - unknownBlockType = PendingBlockType.UNKNOWN_BLOCK; - } else { - if (blockInputOrRootHex.block !== null) { - const {block} = blockInputOrRootHex; - blockRootHex = toRootHex(this.config.getForkTypes(block.message.slot).BeaconBlock.hashTreeRoot(block.message)); - unknownBlockType = PendingBlockType.UNKNOWN_DATA; - } else { - unknownBlockType = PendingBlockType.UNKNOWN_BLOCKINPUT; - blockRootHex = blockInputOrRootHex.blockRootHex; - } - blockInput = blockInputOrRootHex; + // TODO: check this prune methodology + // Limit pending blocks to prevent DOS attacks that cause OOM + const prunedItemCount = pruneSetToMax(this.pendingBlocks, this.maxPendingBlocks); + if (prunedItemCount > 0) { + this.logger.verbose(`Pruned ${prunedItemCount} items from BlockInputSync.pendingBlocks`); } + }; - let pendingBlock = this.pendingBlocks.get(blockRootHex); - if (!pendingBlock) { + private addByBlockInput = (blockInput: IBlockInput, peerIdStr?: string): void => { + let pendingBlock = this.pendingBlocks.get(blockInput.blockRootHex) as PendingBlockInput; + // if entry is missing or was added via rootHex and now we have more complete information overwrite + // the existing information with the more complete cache entry + if (!pendingBlock || !isPendingBlockInput(pendingBlock)) { pendingBlock = { - unknownBlockType, - blockRootHex, - // this will be set after we download block - parentBlockRootHex: null, + // can be added via unknown parent and we may already have full block input. need to check and set correctly + // so we pull the data if its missing or handle the block correctly in getIncompleteAndAncestorBlocks + status: blockInput.hasBlockAndAllData() ? PendingBlockInputStatus.downloaded : PendingBlockInputStatus.pending, blockInput, - peerIdStrs: new Set(), - status: PendingBlockStatus.pending, - downloadAttempts: 0, - } as PendingBlock; - this.pendingBlocks.set(blockRootHex, pendingBlock); - - this.logger.verbose("Added unknown block to pendingBlocks", { - unknownBlockType, - root: blockRootHex, - slot: blockInput?.block?.message.slot ?? 
"unknown", - }); + peerIdStrings: new Set(), + timeAddedSec: Date.now() / 1000, + }; + this.pendingBlocks.set(blockInput.blockRootHex, pendingBlock); + + this.logger.verbose("Added blockInput to BlockInputSync.pendingBlocks", pendingBlock.blockInput.getLogMeta()); } if (peerIdStr) { - pendingBlock.peerIdStrs.add(peerIdStr); + pendingBlock.peerIdStrings.add(peerIdStr); } + // TODO: check this prune methodology // Limit pending blocks to prevent DOS attacks that cause OOM const prunedItemCount = pruneSetToMax(this.pendingBlocks, this.maxPendingBlocks); if (prunedItemCount > 0) { - this.logger.warn(`Pruned ${prunedItemCount} pending blocks from UnknownBlockSync`); + this.logger.verbose(`Pruned ${prunedItemCount} items from BlockInputSync.pendingBlocks`); } - - return unknownBlockType; - } + }; /** * Gather tip parent blocks with unknown parent and do a search for all of them @@ -254,15 +265,15 @@ export class UnknownBlockSync { return; } - const {unknowns, ancestors} = getUnknownAndAncestorBlocks(this.pendingBlocks); + const {incomplete, ancestors} = getIncompleteAndAncestorBlocks(this.pendingBlocks); // it's rare when there is no unknown block // see https://github.com/ChainSafe/lodestar/issues/5649#issuecomment-1594213550 - if (unknowns.length === 0) { + if (incomplete.length === 0) { let processedBlocks = 0; for (const block of ancestors) { // when this happens, it's likely the block and parent block are processed by head sync - if (this.chain.forkChoice.hasBlockHex(block.parentBlockRootHex)) { + if (this.chain.forkChoice.hasBlockHex(block.blockInput.parentRootHex)) { processedBlocks++; this.processBlock(block).catch((e) => { this.logger.debug("Unexpected error - process old downloaded block", {}, e); @@ -279,15 +290,15 @@ export class UnknownBlockSync { } // most of the time there is exactly 1 unknown block - for (const block of unknowns) { + for (const block of incomplete) { this.downloadBlock(block, connectedPeers).catch((e) => { this.logger.debug("Unexpected error - downloadBlock", {root: block.blockRootHex}, e); }); } }; - private async downloadBlock(block: PendingBlock, allPeers: PeerIdStr[]): Promise { - if (block.status !== PendingBlockStatus.pending) { + private async downloadBlock(block: BlockInputSyncCacheItem, allPeers: PeerIdStr[]): Promise { + if (block.status !== PendingBlockInputStatus.pending) { return; } @@ -301,9 +312,9 @@ export class UnknownBlockSync { this.logger.verbose("Downloading unknown block", logCtx); - block.status = PendingBlockStatus.fetching; + block.status = PendingBlockInputStatus.fetching; - let res: Result<{blockInput: BlockInput; peerIdStr: string}>; + let res: Result<{blockInput: IBlockInput; peerIdStr: string}>; let connectedPeers: string[]; if (block.blockInput === null) { connectedPeers = allPeers; @@ -355,8 +366,8 @@ export class UnknownBlockSync { res = await wrapError(this.fetchUnavailableBlockInput(block.blockInput, connectedPeers)); } - if (res.err) this.metrics?.syncUnknownBlock.downloadedBlocksError.inc(); - else this.metrics?.syncUnknownBlock.downloadedBlocksSuccess.inc(); + if (res.err) this.metrics?.blockInputSync.downloadedBlocksError.inc(); + else this.metrics?.blockInputSync.downloadedBlocksSuccess.inc(); if (!res.err) { const {blockInput, peerIdStr} = res.result; @@ -392,7 +403,7 @@ export class UnknownBlockSync { const blockSlot = blockInput.block.message.slot; const finalizedSlot = this.chain.forkChoice.getFinalizedBlock().slot; const delaySec = Date.now() / 1000 - (this.chain.genesisTime + blockSlot * this.config.SECONDS_PER_SLOT); 
- this.metrics?.syncUnknownBlock.elapsedTimeTillReceived.observe(delaySec); + this.metrics?.blockInputSync.elapsedTimeTillReceived.observe(delaySec); const parentInForkchoice = this.chain.forkChoice.hasBlock(blockInput.block.message.parentRoot); this.logger.verbose("Downloaded unknown block", { @@ -448,10 +459,10 @@ export class UnknownBlockSync { * On error, remove and downscore all descendants. * This function could run recursively for all descendant blocks */ - private async processBlock(pendingBlock: PendingBlock): Promise { + private async processBlock(pendingBlock: PendingBlockInput): Promise { // pending block status is `downloaded` right after `downloadBlock` // but could be `pending` if added by `onUnknownBlockParent` event and this function is called recursively - if (pendingBlock.status !== PendingBlockStatus.downloaded) { + if (pendingBlock.status !== PendingBlockInputStatus.downloaded) { if (pendingBlock.status === PendingBlockStatus.pending) { const connectedPeers = this.network.getConnectedPeers(); if (connectedPeers.length === 0) { @@ -502,8 +513,8 @@ export class UnknownBlockSync { }) ); - if (res.err) this.metrics?.syncUnknownBlock.processedBlocksError.inc(); - else this.metrics?.syncUnknownBlock.processedBlocksSuccess.inc(); + if (res.err) this.metrics?.blockInputSync.processedBlocksError.inc(); + else this.metrics?.blockInputSync.processedBlocksSuccess.inc(); if (!res.err) { // no need to update status to "processed", delete anyway @@ -814,7 +825,7 @@ export class UnknownBlockSync { // Get all blocks that are a descendant of this one const badPendingBlocks = [block, ...getAllDescendantBlocks(block.blockRootHex, this.pendingBlocks)]; - this.metrics?.syncUnknownBlock.removedBlocks.inc(badPendingBlocks.length); + this.metrics?.blockInputSync.removedBlocks.inc(badPendingBlocks.length); for (const block of badPendingBlocks) { this.pendingBlocks.delete(block.blockRootHex); diff --git a/packages/beacon-node/src/sync/utils/pendingBlocksTree.ts b/packages/beacon-node/src/sync/utils/pendingBlocksTree.ts index deefba91f366..f42863d557b3 100644 --- a/packages/beacon-node/src/sync/utils/pendingBlocksTree.ts +++ b/packages/beacon-node/src/sync/utils/pendingBlocksTree.ts @@ -1,20 +1,23 @@ import {RootHex} from "@lodestar/types"; import {MapDef} from "@lodestar/utils"; -import {BlockInputType} from "../../chain/blocks/types.js"; +// import {DownloadedBlock, PendingBlock, PendingBlockStatus, UnknownBlock} from "../interface.js"; import { - DownloadedBlock, - PendingBlock, - PendingBlockStatus, - UnknownAndAncestorBlocks, - UnknownBlock, -} from "../interface.js"; + BlockInputSyncCacheItem, + PendingBlockInput, + PendingBlockInputStatus, + getBlockInputSyncCacheItemRootHex, + isPendingBlockInput, +} from "../types.js"; -export function getAllDescendantBlocks(blockRootHex: RootHex, blocks: Map): PendingBlock[] { +export function getAllDescendantBlocks( + blockRootHex: RootHex, + blocks: Map +): BlockInputSyncCacheItem[] { // Do one pass over all blocks to index by parent - const byParent = new MapDef(() => []); + const byParent = new MapDef(() => []); for (const block of blocks.values()) { - if (block.parentBlockRootHex != null) { - byParent.getOrDefault(block.parentBlockRootHex).push(block); + if (isPendingBlockInput(block)) { + byParent.getOrDefault(block.blockInput.parentRootHex).push(block); } } @@ -25,9 +28,9 @@ export function getAllDescendantBlocks(blockRootHex: RootHex, blocks: Map, - descendantBlocks: PendingBlock[] = [] -): PendingBlock[] { + byParent: Map, + descendantBlocks: 
BlockInputSyncCacheItem[] = [] +): BlockInputSyncCacheItem[] { const firstDescendantBlocks = byParent.get(childBlockRootHex); if (firstDescendantBlocks) { for (const firstDescendantBlock of firstDescendantBlocks) { @@ -38,8 +41,11 @@ function addToDescendantBlocks( return descendantBlocks; } -export function getDescendantBlocks(blockRootHex: RootHex, blocks: Map<RootHex, PendingBlock>): PendingBlock[] { - const descendantBlocks: PendingBlock[] = []; +export function getDescendantBlocks( + blockRootHex: RootHex, + blocks: Map<RootHex, BlockInputSyncCacheItem> +): BlockInputSyncCacheItem[] { + const descendantBlocks: BlockInputSyncCacheItem[] = []; for (const block of blocks.values()) { - if (block.parentBlockRootHex === blockRootHex) { + if (isPendingBlockInput(block) && block.blockInput.parentRootHex === blockRootHex) { @@ -50,31 +56,58 @@ export function getDescendantBlocks(blockRootHex: RootHex, blocks: Map<RootHex, downloaded block n + 1 => downloaded block n + 2 - * return `{unknowns: [n], ancestors: []}` + * Returns two arrays: one has the items that still need to be pulled, and the other has items that + * are ready to be checked for rooting in fork-choice so the branch can be processed (or have their + * ancestor pulled to extend the branch backward until it does root in fork-choice). + * + * Given this chain segment: incomplete block n => downloaded block n + 1 => downloaded block n + 2 + * return `{incomplete: [n], ancestors: []}` * * Given this chain segment: downloaded block n => downloaded block n + 1 => downloaded block n + 2 - * return {unknowns: [], ancestors: [n]} + * return {incomplete: [], ancestors: [n]} */ -export function getUnknownAndAncestorBlocks(blocks: Map<RootHex, PendingBlock>): UnknownAndAncestorBlocks { - const unknowns: UnknownBlock[] = []; - const ancestors: DownloadedBlock[] = []; +export function getIncompleteAndAncestorBlocks( + blocks: Map<RootHex, BlockInputSyncCacheItem> +): IncompleteAndAncestorBlocks { + const incomplete = new Map<RootHex, BlockInputSyncCacheItem>(); + const ancestors = new Map<RootHex, PendingBlockInput>(); for (const block of blocks.values()) { - const parentHex = block.parentBlockRootHex; + // check if the block was already added via getAllDescendants + if (incomplete.has(getBlockInputSyncCacheItemRootHex(block))) { + continue; + } + + // block and sidecars have been fully downloaded and the parent is not in the pending blocks map; attempt to find + // parentRootHex in fork-choice to determine if it's ready to be processed if ( - block.status === PendingBlockStatus.pending && - (block.blockInput?.block == null || block.blockInput?.type === BlockInputType.dataPromise) && - parentHex == null + isPendingBlockInput(block) && + block.blockInput.hasBlockAndAllData() && + !blocks.has(block.blockInput.parentRootHex) ) { - unknowns.push(block); + ancestors.set(block.blockInput.blockRootHex, block); + const descendants = getAllDescendantBlocks(block.blockInput.blockRootHex, blocks); + for (const descendant of descendants) { + if (!isPendingBlockInput(descendant) || descendant.status !== PendingBlockInputStatus.downloaded) { + incomplete.set(getBlockInputSyncCacheItemRootHex(descendant), descendant); + } + } + continue; } - if (block.status === PendingBlockStatus.downloaded && parentHex && !blocks.has(parentHex)) { - ancestors.push(block); + if (block.status === PendingBlockInputStatus.pending) { + incomplete.set(getBlockInputSyncCacheItemRootHex(block), block); } } - return {unknowns, ancestors}; + // return plain arrays, as documented above and as the call sites expect + return { + incomplete: Array.from(incomplete.values()), + ancestors: Array.from(ancestors.values()), + }; } From 983cff0fe31914e2455631db8a2c3bf97108aedc Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Tue, 19 Aug 2025 04:33:59 +0700 Subject: [PATCH 034/173] refactor: rename unknownBlock.ts -> blockInputSync.ts --- .../beacon-node/src/sync/{unknownBlock.ts => blockInputSync.ts} | 0 packages/beacon-node/src/sync/sync.ts | 2 +- 2 files changed, 1 insertion(+), 1
deletion(-) rename packages/beacon-node/src/sync/{unknownBlock.ts => blockInputSync.ts} (100%) diff --git a/packages/beacon-node/src/sync/unknownBlock.ts b/packages/beacon-node/src/sync/blockInputSync.ts similarity index 100% rename from packages/beacon-node/src/sync/unknownBlock.ts rename to packages/beacon-node/src/sync/blockInputSync.ts diff --git a/packages/beacon-node/src/sync/sync.ts b/packages/beacon-node/src/sync/sync.ts index 3763c9bee78b..a667d93a1002 100644 --- a/packages/beacon-node/src/sync/sync.ts +++ b/packages/beacon-node/src/sync/sync.ts @@ -8,12 +8,12 @@ import {Metrics} from "../metrics/index.js"; import {INetwork, NetworkEvent, NetworkEventData} from "../network/index.js"; import {ClockEvent} from "../util/clock.js"; import {isOptimisticBlock} from "../util/forkChoice.js"; +import {BlockInputSync} from "./blockInputSync.js"; import {MIN_EPOCH_TO_START_GOSSIP} from "./constants.js"; import {IBeaconSync, SyncModules, SyncingStatus} from "./interface.js"; import {SyncChainDebugState, SyncState, syncStateMetric} from "./interface.js"; import {SyncOptions} from "./options.js"; import {RangeSync, RangeSyncEvent, RangeSyncStatus} from "./range/range.js"; -import {BlockInputSync} from "./unknownBlock.js"; import {PeerSyncType, getPeerSyncType, peerSyncTypes} from "./utils/remoteSyncType.js"; export class BeaconSync implements IBeaconSync { From 9380308e88109571db290d966738e403f47a9f9f Mon Sep 17 00:00:00 2001 From: Cayman Date: Mon, 18 Aug 2025 20:37:16 -0400 Subject: [PATCH 035/173] wip: more fixing up block input sync --- .../beacon-node/src/sync/blockInputSync.ts | 98 +++++++++---------- 1 file changed, 44 insertions(+), 54 deletions(-) diff --git a/packages/beacon-node/src/sync/blockInputSync.ts b/packages/beacon-node/src/sync/blockInputSync.ts index 9647490ca810..43934ad15eaf 100644 --- a/packages/beacon-node/src/sync/blockInputSync.ts +++ b/packages/beacon-node/src/sync/blockInputSync.ts @@ -4,7 +4,8 @@ import {ColumnIndex, Root, RootHex, deneb} from "@lodestar/types"; import {BlobAndProof} from "@lodestar/types/deneb"; import {Logger, fromHex, prettyBytes, pruneSetToMax, toRootHex} from "@lodestar/utils"; import {sleep} from "@lodestar/utils"; -import {IBlockInput} from "../chain/blocks/blockInput/types.js"; +import {isBlockInputColumns} from "../chain/blocks/blockInput/blockInput.js"; +import {BlockInputSource, IBlockInput} from "../chain/blocks/blockInput/types.js"; import {BlockError, BlockErrorCode} from "../chain/errors/index.js"; import {ChainEvent, ChainEventData, IBeaconChain} from "../chain/index.js"; import {Metrics} from "../metrics/index.js"; @@ -20,6 +21,7 @@ import { PendingBlockInputStatus, PendingBlockType, PendingRootHex, + getBlockInputSyncCacheItemRootHex, isPendingBlockInput, } from "./types.js"; import { @@ -292,7 +294,7 @@ export class BlockInputSync { // most of the time there is exactly 1 unknown block for (const block of incomplete) { this.downloadBlock(block, connectedPeers).catch((e) => { - this.logger.debug("Unexpected error - downloadBlock", {root: block.blockRootHex}, e); + this.logger.debug("Unexpected error - downloadBlock", {root: getBlockInputSyncCacheItemRootHex(block)}, e); }); } }; @@ -303,10 +305,11 @@ export class BlockInputSync { } const unknownBlockType = block.unknownBlockType; + const rootHex = getBlockInputSyncCacheItemRootHex(block); const logCtx = { - root: block.blockRootHex, + root: rootHex, pendingBlocks: this.pendingBlocks.size, - slot: block.blockInput?.block?.message.slot ?? 
"unknown", + slot: (block as PendingBlockInput).blockInput?.slot ?? "unknown", unknownBlockType, }; @@ -316,21 +319,13 @@ export class BlockInputSync { let res: Result<{blockInput: IBlockInput; peerIdStr: string}>; let connectedPeers: string[]; - if (block.blockInput === null) { + if (!isPendingBlockInput(block)) { connectedPeers = allPeers; // we only have block root, and nothing else - res = await wrapError(this.fetchUnknownBlockRoot(fromHex(block.blockRootHex), connectedPeers)); + res = await wrapError(this.fetchUnknownBlockRoot(fromHex(rootHex), connectedPeers)); } else { - const {cachedData} = block.blockInput; - if (cachedData.fork === ForkName.fulu) { - const {dataColumnsCache} = cachedData as CachedDataColumns; - const sampledColumns = this.network.custodyConfig.sampledColumns; - const neededColumns = sampledColumns.reduce((acc, elem) => { - if (dataColumnsCache.get(elem) === undefined) { - acc.push(elem); - } - return acc; - }, [] as number[]); + if (isBlockInputColumns(block.blockInput)) { + const neededColumns = block.blockInput.getMissingSampledColumnMeta().map((c) => c.index); connectedPeers = neededColumns.length <= 0 @@ -369,21 +364,21 @@ export class BlockInputSync { if (res.err) this.metrics?.blockInputSync.downloadedBlocksError.inc(); else this.metrics?.blockInputSync.downloadedBlocksSuccess.inc(); + let peerIdStr: PeerIdStr | undefined; if (!res.err) { - const {blockInput, peerIdStr} = res.result; - if (blockInput.type === BlockInputType.dataPromise) { + (block as PendingBlockInput).blockInput = res.result.blockInput; + peerIdStr = res.result.peerIdStr; + } + + if (isPendingBlockInput(block)) { + const blockInput = block.blockInput; + if (!blockInput.hasAllData()) { // if there were any peers who would have had the missing datacolumns, it would have resulted in err - block = { - ...block, - blockInput, - unknownBlockType: PendingBlockType.UNKNOWN_DATA, - } as UnknownBlock; - block.blockInput = blockInput; - this.pendingBlocks.set(block.blockRootHex, block); - block.status = PendingBlockStatus.pending; + block.status = PendingBlockInputStatus.pending; + this.pendingBlocks.set(blockInput.blockRootHex, block); // parentSlot > finalizedSlot, continue downloading parent of parent block.downloadAttempts += this.config.CUSTODY_REQUIREMENT / NUMBER_OF_COLUMNS; - const errorData = {root: block.blockRootHex, attempts: block.downloadAttempts, unknownBlockType}; + const errorData = {root: blockInput.blockRootHex, attempts: block.downloadAttempts, unknownBlockType}; if (block.downloadAttempts > MAX_ATTEMPTS_PER_BLOCK) { // Give up on this block and assume it does not exist, penalizing all peers as if it was a bad block this.logger.debug("Ignoring unknown block after many failed downloads", errorData); @@ -393,21 +388,16 @@ export class BlockInputSync { this.logger.debug("Error downloading full unknown block", errorData); } } else { - block = { - ...block, - status: PendingBlockStatus.downloaded, - blockInput, - parentBlockRootHex: toRootHex(blockInput.block.message.parentRoot), - }; - this.pendingBlocks.set(block.blockRootHex, block); - const blockSlot = blockInput.block.message.slot; + block.status = PendingBlockInputStatus.downloaded; + this.pendingBlocks.set(blockInput.blockRootHex, block); + const blockSlot = blockInput.slot; const finalizedSlot = this.chain.forkChoice.getFinalizedBlock().slot; const delaySec = Date.now() / 1000 - (this.chain.genesisTime + blockSlot * this.config.SECONDS_PER_SLOT); this.metrics?.blockInputSync.elapsedTimeTillReceived.observe(delaySec); - const 
parentInForkchoice = this.chain.forkChoice.hasBlock(blockInput.block.message.parentRoot); + const parentInForkchoice = this.chain.forkChoice.hasBlockHex(blockInput.parentRootHex); this.logger.verbose("Downloaded unknown block", { - root: block.blockRootHex, + root: blockInput.blockRootHex, pendingBlocks: this.pendingBlocks.size, parentInForkchoice, blockInputType: blockInput.type, @@ -425,7 +415,7 @@ export class BlockInputSync { // 0 - 1 - ... - n - finalizedSlot // \ // parent 1 - parent 2 - ... - unknownParent block - const blockRoot = this.config.getForkTypes(blockSlot).BeaconBlock.hashTreeRoot(blockInput.block.message); + const blockRoot = this.config.getForkTypes(blockSlot).BeaconBlock.hashTreeRoot(blockInput.getBlock().message); this.logger.debug("Downloaded block is before finalized slot", { finalizedSlot, blockSlot, @@ -434,15 +424,15 @@ export class BlockInputSync { }); this.removeAndDownscoreAllDescendants(block); } else { - this.onUnknownParent({blockInput, peer: peerIdStr}); + this.onUnknownParent({blockInput, peer: peerIdStr as string, source: BlockInputSource.byRoot}); } } } else { // this allows to retry the download of the block - block.status = PendingBlockStatus.pending; + block.status = PendingBlockInputStatus.pending; // parentSlot > finalizedSlot, continue downloading parent of parent block.downloadAttempts++; - const errorData = {root: block.blockRootHex, attempts: block.downloadAttempts, unknownBlockType}; + const errorData = {root: block.rootHex, attempts: block.downloadAttempts, unknownBlockType}; if (block.downloadAttempts > MAX_ATTEMPTS_PER_BLOCK) { // Give up on this block and assume it does not exist, penalizing all peers as if it was a bad block this.logger.debug("Ignoring unknown block root after many failed downloads", errorData, res.err); @@ -463,10 +453,12 @@ export class BlockInputSync { // pending block status is `downloaded` right after `downloadBlock` // but could be `pending` if added by `onUnknownBlockParent` event and this function is called recursively if (pendingBlock.status !== PendingBlockInputStatus.downloaded) { - if (pendingBlock.status === PendingBlockStatus.pending) { + if (pendingBlock.status === PendingBlockInputStatus.pending) { const connectedPeers = this.network.getConnectedPeers(); if (connectedPeers.length === 0) { - this.logger.debug("No connected peers, skipping download block", {blockRoot: pendingBlock.blockRootHex}); + this.logger.debug("No connected peers, skipping download block", { + blockRoot: pendingBlock.blockInput.blockRootHex, + }); return; } // if the download is a success we'll call `processBlock()` for this block @@ -475,22 +467,20 @@ export class BlockInputSync { return; } - pendingBlock.status = PendingBlockStatus.processing; + pendingBlock.status = PendingBlockInputStatus.processing; // this prevents unbundling attack // see https://lighthouse-blog.sigmaprime.io/mev-unbundling-rpc.html - const {slot: blockSlot, proposerIndex} = pendingBlock.blockInput.block.message; + const {slot: blockSlot, proposerIndex} = pendingBlock.blockInput.getBlock().message; + const blockRootHex = pendingBlock.blockInput.blockRootHex; if ( this.chain.clock.secFromSlot(blockSlot) < this.proposerBoostSecWindow && this.chain.seenBlockProposers.isKnown(blockSlot, proposerIndex) ) { // proposer is known by a gossip block already, wait a bit to make sure this block is not // eligible for proposer boost to prevent unbundling attack - const blockRoot = this.config - .getForkTypes(blockSlot) - 
.BeaconBlock.hashTreeRoot(pendingBlock.blockInput.block.message); this.logger.verbose("Avoid proposer boost for this block of known proposer", { blockSlot, - blockRoot: toRootHex(blockRoot), + blockRoot: blockRootHex, proposerIndex, }); await sleep(this.proposerBoostSecWindow * 1000); @@ -518,16 +508,16 @@ export class BlockInputSync { if (!res.err) { // no need to update status to "processed", delete anyway - this.pendingBlocks.delete(pendingBlock.blockRootHex); + this.pendingBlocks.delete(blockRootHex); // Send child blocks to the processor - for (const descendantBlock of getDescendantBlocks(pendingBlock.blockRootHex, this.pendingBlocks)) { + for (const descendantBlock of getDescendantBlocks(blockRootHex, this.pendingBlocks)) { this.processBlock(descendantBlock).catch((e) => { this.logger.debug("Unexpected error - process descendant block", {}, e); }); } } else { - const errorData = {root: pendingBlock.blockRootHex, slot: pendingBlock.blockInput.block.message.slot}; + const errorData = {root: blockRootHex, slot: blockSlot}; if (res.err instanceof BlockError) { switch (res.err.type.code) { // This cases are already handled with `{ignoreIfKnown: true}` @@ -538,7 +528,7 @@ export class BlockInputSync { case BlockErrorCode.PRESTATE_MISSING: // Should not happen, mark as downloaded to try again latter this.logger.debug("Attempted to process block but its parent was still unknown", errorData, res.err); - pendingBlock.status = PendingBlockStatus.downloaded; + pendingBlock.status = PendingBlockInputStatus.downloaded; break; case BlockErrorCode.EXECUTION_ENGINE_ERROR: @@ -574,7 +564,7 @@ export class BlockInputSync { private async fetchUnknownBlockRoot( blockRoot: Root, connectedPeers: PeerIdStr[] - ): Promise<{blockInput: BlockInput; peerIdStr: string}> { + ): Promise<{blockInput: IBlockInput; peerIdStr: string}> { const shuffledPeers = shuffle(connectedPeers); const blockRootHex = toRootHex(blockRoot); From e48d21b98541cfb3b9c084d77d9993f23c1973f9 Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Wed, 20 Aug 2025 07:25:54 +0700 Subject: [PATCH 036/173] wip: get BlockInputSync class cleaned up after merging tuyen PR --- .../beacon-node/src/sync/blockInputSync.ts | 370 ++++++------------ packages/beacon-node/src/sync/types.ts | 6 +- .../src/sync/utils/downloadByRoot.ts | 2 +- 3 files changed, 130 insertions(+), 248 deletions(-) diff --git a/packages/beacon-node/src/sync/blockInputSync.ts b/packages/beacon-node/src/sync/blockInputSync.ts index ae25989dd52c..66e45df90e86 100644 --- a/packages/beacon-node/src/sync/blockInputSync.ts +++ b/packages/beacon-node/src/sync/blockInputSync.ts @@ -2,14 +2,14 @@ import {ChainForkConfig} from "@lodestar/config"; import {ForkName, ForkSeq, INTERVALS_PER_SLOT} from "@lodestar/params"; import {ColumnIndex, Root, RootHex, deneb} from "@lodestar/types"; import {BlobAndProof} from "@lodestar/types/deneb"; -import {Logger, fromHex, prettyBytes, pruneSetToMax, toRootHex} from "@lodestar/utils"; +import {Logger, fromHex, prettyBytes, prettyPrintIndices, pruneSetToMax, toRootHex} from "@lodestar/utils"; import {sleep} from "@lodestar/utils"; -import {isBlockInputColumns} from "../chain/blocks/blockInput/blockInput.js"; +import {isBlockInputBlobs, isBlockInputColumns} from "../chain/blocks/blockInput/blockInput.js"; import {BlockInputSource, IBlockInput} from "../chain/blocks/blockInput/types.js"; import {BlockError, BlockErrorCode} from "../chain/errors/index.js"; import {ChainEvent, ChainEventData, IBeaconChain} from "../chain/index.js"; import {Metrics} from 
"../metrics/index.js"; -import {INetwork, NetworkEvent, NetworkEventData} from "../network/index.js"; +import {INetwork, NetworkEvent, NetworkEventData, prettyPrintPeerIdStr} from "../network/index.js"; import {PeerSyncMeta} from "../network/peers/peersData.js"; import {byteArrayEquals} from "../util/bytes.js"; import {CustodyConfig} from "../util/dataColumns.js"; @@ -26,8 +26,10 @@ import { PendingBlockType, PendingRootHex, getBlockInputSyncCacheItemRootHex, + getBlockInputSyncCacheItemSlot, isPendingBlockInput, } from "./types.js"; +import {downloadByRoot} from "./utils/downloadByRoot.js"; import { getAllDescendantBlocks, getDescendantBlocks, @@ -323,66 +325,43 @@ export class BlockInputSync { } }; - private async downloadBlock(block: PendingBlock): Promise { - if (block.status !== PendingBlockStatus.pending) { + private async downloadBlock(block: BlockInputSyncCacheItem): Promise { + if (block.status !== PendingBlockInputStatus.pending) { return; } - const unknownBlockType = block.unknownBlockType; + const rootHex = getBlockInputSyncCacheItemRootHex(block); const logCtx = { - root: block.blockRootHex, + blockRoot: prettyBytes(rootHex), pendingBlocks: this.pendingBlocks.size, - slot: block.blockInput?.block?.message.slot ?? "unknown", - unknownBlockType, + slot: getBlockInputSyncCacheItemSlot(block), }; - this.logger.verbose("Downloading unknown block", logCtx); + this.logger.verbose("BlockInputSync.downloadBlock()", logCtx); - block.status = PendingBlockStatus.fetching; + block.status = PendingBlockInputStatus.fetching; - let res: Result<{blockInput: BlockInput; peerIdStr: string}>; - if (block.blockInput === null) { - // we only have block root, and nothing else - res = await wrapError(this.fetchUnknownBlockRoot(fromHex(block.blockRootHex))); - } else { - res = await wrapError(this.fetchUnavailableBlockInput(block.blockInput)); - } - - if (res.err) this.metrics?.syncUnknownBlock.downloadedBlocksError.inc(); - else this.metrics?.syncUnknownBlock.downloadedBlocksSuccess.inc(); + const res = await wrapError(this.fetchBlockInput(block)); if (!res.err) { - const {blockInput, peerIdStr} = res.result; - // fetchUnknownBlockRoot and fetchUnavailableBlockInput should return available data BlockInput, throw error if not - if (blockInput.type === BlockInputType.dataPromise) { - // if there were any peers who would have had the missing datacolumns, it would have resulted in err - throw Error(`Expected BlockInput to be available, got dataPromise for ${block.blockRootHex}`); - } - - block = { - ...block, - status: PendingBlockStatus.downloaded, - blockInput, - parentBlockRootHex: toRootHex(blockInput.block.message.parentRoot), - }; - this.pendingBlocks.set(block.blockRootHex, block); - const blockSlot = blockInput.block.message.slot; + this.metrics?.blockInputSync.downloadedBlocksSuccess.inc(); + const pending = res.result; + this.pendingBlocks.set(pending.blockInput.blockRootHex, pending); + const blockSlot = pending.blockInput.slot; const finalizedSlot = this.chain.forkChoice.getFinalizedBlock().slot; const delaySec = Date.now() / 1000 - (this.chain.genesisTime + blockSlot * this.config.SECONDS_PER_SLOT); - this.metrics?.syncUnknownBlock.elapsedTimeTillReceived.observe(delaySec); + this.metrics?.blockInputSync.elapsedTimeTillReceived.observe(delaySec); - const parentInForkchoice = this.chain.forkChoice.hasBlock(blockInput.block.message.parentRoot); + const parentInForkChoice = this.chain.forkChoice.hasBlock(pending.blockInput.getBlock().message.parentRoot); this.logger.verbose("Downloaded unknown 
block", { - root: block.blockRootHex, + blockRoot: rootHex, pendingBlocks: this.pendingBlocks.size, - parentInForkchoice, - blockInputType: blockInput.type, - unknownBlockType, + parentInForkChoice, }); - if (parentInForkchoice) { + if (parentInForkChoice) { // Bingo! Process block. Add to pending blocks anyway for recycle the cache that prevents duplicate processing - this.processBlock(block).catch((e) => { + this.processBlock(pending).catch((e) => { this.logger.debug("Unexpected error - process newly downloaded block", {}, e); }); } else if (blockSlot <= finalizedSlot) { @@ -391,31 +370,29 @@ export class BlockInputSync { // 0 - 1 - ... - n - finalizedSlot // \ // parent 1 - parent 2 - ... - unknownParent block - const blockRoot = this.config.getForkTypes(blockSlot).BeaconBlock.hashTreeRoot(blockInput.block.message); this.logger.debug("Downloaded block is before finalized slot", { finalizedSlot, blockSlot, - parentRoot: toRootHex(blockRoot), - unknownBlockType, + blockRoot: pending.blockInput.blockRootHex, }); - this.removeAndDownscoreAllDescendants(block); + this.removeAndDownScoreAllDescendants(block); } else { - this.onUnknownParent({blockInput, peer: peerIdStr}); + this.onUnknownBlockRoot({rootHex: pending.blockInput.parentRootHex, source: BlockInputSource.byRoot}); } } else { + this.metrics?.blockInputSync.downloadedBlocksError.inc(); // block download has error, this allows to retry the download of the block - block.status = PendingBlockStatus.pending; - // parentSlot > finalizedSlot, continue downloading parent of parent - block.downloadAttempts++; - const errorData = {root: block.blockRootHex, attempts: block.downloadAttempts, unknownBlockType}; - if (block.downloadAttempts > MAX_ATTEMPTS_PER_BLOCK) { - // Give up on this block and assume it does not exist, penalizing all peers as if it was a bad block - this.logger.debug("Ignoring unknown block root after many failed downloads", errorData, res.err); - this.removeAndDownscoreAllDescendants(block); - } else { - // Try again when a new peer connects, its status changes, or a new unknownBlockParent event happens - this.logger.debug("Error downloading unknown block root", errorData, res.err); - } + block.status = PendingBlockInputStatus.pending; + // const errorData = {blockRoot: rootHex}; + // TODO(fulu): removed outer retry loop. Need to look at how to down score for errors here + // if (block.downloadAttempts > MAX_ATTEMPTS_PER_BLOCK) { + // // Give up on this block and assume it does not exist, penalizing all peers as if it was a bad block + // this.logger.debug("Ignoring unknown block root after many failed downloads", errorData, res.err); + this.removeAndDownScoreAllDescendants(block); + // } else { + // // Try again when a new peer connects, its status changes, or a new unknownBlockParent event happens + // this.logger.debug("Error downloading unknown block root", errorData, res.err); + // } } } @@ -424,14 +401,16 @@ export class BlockInputSync { * On error, remove and downscore all descendants. 
* This function could run recursively for all descendant blocks */ - private async processBlock(pendingBlock: PendingBlock): Promise { + private async processBlock(pendingBlock: PendingBlockInput): Promise { // pending block status is `downloaded` right after `downloadBlock` // but could be `pending` if added by `onUnknownBlockParent` event and this function is called recursively - if (pendingBlock.status !== PendingBlockStatus.downloaded) { - if (pendingBlock.status === PendingBlockStatus.pending) { + if (pendingBlock.status !== PendingBlockInputStatus.downloaded) { + if (pendingBlock.status === PendingBlockInputStatus.pending) { const connectedPeers = this.network.getConnectedPeers(); if (connectedPeers.length === 0) { - this.logger.debug("No connected peers, skipping download block", {blockRoot: pendingBlock.blockRootHex}); + this.logger.debug("No connected peers, skipping download block", { + blockRoot: pendingBlock.blockInput.blockRootHex, + }); return; } // if the download is a success we'll call `processBlock()` for this block @@ -440,22 +419,19 @@ export class BlockInputSync { return; } - pendingBlock.status = PendingBlockStatus.processing; + pendingBlock.status = PendingBlockInputStatus.processing; // this prevents unbundling attack // see https://lighthouse-blog.sigmaprime.io/mev-unbundling-rpc.html - const {slot: blockSlot, proposerIndex} = pendingBlock.blockInput.block.message; + const {slot: blockSlot, proposerIndex} = pendingBlock.blockInput.getBlock().message; if ( this.chain.clock.secFromSlot(blockSlot) < this.proposerBoostSecWindow && this.chain.seenBlockProposers.isKnown(blockSlot, proposerIndex) ) { // proposer is known by a gossip block already, wait a bit to make sure this block is not // eligible for proposer boost to prevent unbundling attack - const blockRoot = this.config - .getForkTypes(blockSlot) - .BeaconBlock.hashTreeRoot(pendingBlock.blockInput.block.message); this.logger.verbose("Avoid proposer boost for this block of known proposer", { blockSlot, - blockRoot: toRootHex(blockRoot), + blockRoot: prettyBytes(pendingBlock.blockInput.blockRootHex), proposerIndex, }); await sleep(this.proposerBoostSecWindow * 1000); @@ -478,21 +454,24 @@ export class BlockInputSync { }) ); - if (res.err) this.metrics?.syncUnknownBlock.processedBlocksError.inc(); - else this.metrics?.syncUnknownBlock.processedBlocksSuccess.inc(); + if (res.err) this.metrics?.blockInputSync.processedBlocksError.inc(); + else this.metrics?.blockInputSync.processedBlocksSuccess.inc(); if (!res.err) { // no need to update status to "processed", delete anyway - this.pendingBlocks.delete(pendingBlock.blockRootHex); + this.pendingBlocks.delete(pendingBlock.blockInput.blockRootHex); // Send child blocks to the processor - for (const descendantBlock of getDescendantBlocks(pendingBlock.blockRootHex, this.pendingBlocks)) { - this.processBlock(descendantBlock).catch((e) => { - this.logger.debug("Unexpected error - process descendant block", {}, e); - }); + for (const descendantBlock of getDescendantBlocks(pendingBlock.blockInput.blockRootHex, this.pendingBlocks)) { + // TODO(fulu): this might cause sync to get stuck... 
need to resolve + if (isPendingBlockInput(descendantBlock)) { + this.processBlock(descendantBlock).catch((e) => { + this.logger.debug("Unexpected error - process descendant block", {}, e); + }); + } } } else { - const errorData = {root: pendingBlock.blockRootHex, slot: pendingBlock.blockInput.block.message.slot}; + const errorData = {root: pendingBlock.blockInput.blockRootHex, slot: pendingBlock.blockInput.slot}; if (res.err instanceof BlockError) { switch (res.err.type.code) { // This cases are already handled with `{ignoreIfKnown: true}` @@ -503,7 +482,7 @@ export class BlockInputSync { case BlockErrorCode.PRESTATE_MISSING: // Should not happen, mark as downloaded to try again latter this.logger.debug("Attempted to process block but its parent was still unknown", errorData, res.err); - pendingBlock.status = PendingBlockStatus.downloaded; + pendingBlock.status = PendingBlockInputStatus.downloaded; break; case BlockErrorCode.EXECUTION_ENGINE_ERROR: @@ -515,14 +494,14 @@ export class BlockInputSync { default: // Block is not correct with respect to our chain. Log error loudly this.logger.debug("Error processing block from unknown parent sync", errorData, res.err); - this.removeAndDownscoreAllDescendants(pendingBlock); + this.removeAndDownScoreAllDescendants(pendingBlock); } } // Probably a queue error or something unwanted happened, mark as pending to try again latter else { this.logger.debug("Unknown error processing block from unknown block sync", errorData, res.err); - pendingBlock.status = PendingBlockStatus.downloaded; + pendingBlock.status = PendingBlockInputStatus.downloaded; } } } @@ -536,188 +515,80 @@ export class BlockInputSync { * prefulu, will attempt a max of `MAX_ATTEMPTS_PER_BLOCK` on different peers, postfulu we may attempt more as defined in `getMaxDownloadAttempts()` function * Also verifies the received block root + returns the peer that provided the block for future downscoring. */ - private async fetchUnknownBlockRoot(blockRoot: Root): Promise<{blockInput: BlockInput; peerIdStr: string}> { - const blockRootHex = toRootHex(blockRoot); - + private async fetchBlockInput(cacheItem: BlockInputSyncCacheItem): Promise { + const rootHex = getBlockInputSyncCacheItemRootHex(cacheItem); const excludedPeers = new Set(); - let partialDownload: PartialDownload | null = null; const defaultPendingColumns = this.config.getForkSeq(this.chain.clock.currentSlot) >= ForkSeq.fulu ? new Set(this.network.custodyConfig.sampleGroups) : null; - let lastError: Error | null = null; + let i = 0; while (i++ < this.getMaxDownloadAttempts()) { - // pendingDataColumns is null prefulu - const peer = this.peerBalancer.bestPeerForPendingColumns( - partialDownload ? new Set(partialDownload.pendingDataColumns) : defaultPendingColumns, - excludedPeers - ); + const pendingColumns = + isPendingBlockInput(cacheItem) && isBlockInputColumns(cacheItem.blockInput) + ? 
new Set(cacheItem.blockInput.getMissingSampledColumnMeta().map((meta) => meta.index)) + : defaultPendingColumns; + // pendingDataColumns is null pre-fulu + const peer = this.peerBalancer.bestPeerForPendingColumns(pendingColumns, excludedPeers); if (peer === null) { // no more peer with needed columns to try, throw error - throw Error( - `Error fetching UnknownBlockRoot after ${i}: cannot find peer with needed columns ${partialDownload?.pendingDataColumns.join(", ")}` - ); + let message = `Error fetching UnknownBlockRoot after ${i}: cannot find peer`; + if (pendingColumns) { + message += ` with needed columns=${prettyPrintIndices(Array.from(pendingColumns))}`; + } + throw Error(message); } const {peerId, client: peerClient} = peer; excludedPeers.add(peerId); try { - const { - blocks: [blockInput], - pendingDataColumns, - } = await beaconBlocksMaybeBlobsByRoot( - this.config, - this.network, - peerId, - [blockRoot], - partialDownload, - peerClient, - this.metrics, - this.logger - ); - - // Peer does not have the block, try with next peer - if (blockInput === undefined) { - continue; - } - - if (pendingDataColumns !== null) { - partialDownload = {blocks: [blockInput], pendingDataColumns}; - continue; - } - - // data is available, verify block root is correct - const block = blockInput.block.message; - const receivedBlockRoot = this.config.getForkTypes(block.slot).BeaconBlock.hashTreeRoot(block); - if (!byteArrayEquals(receivedBlockRoot, blockRoot)) { - throw Error(`Wrong block received by peer, got ${toRootHex(receivedBlockRoot)} expected ${blockRootHex}`); - } - - return {blockInput, peerIdStr: peerId}; + cacheItem = await downloadByRoot({ + pending: cacheItem, + peerIdStr: peer.peerId, + config: this.config, + network: this.network, + cache: this.chain.seenBlockInputCache, + executionEngine: this.chain.executionEngine, + }); } catch (e) { - this.logger.debug("Error fetching UnknownBlockRoot", {attempt: i, blockRootHex, peer: peerId}, e as Error); - lastError = e as Error; + this.logger.debug( + "Error downloading in BlockInputSync.fetchBlockInput", + {attempt: i, rootHex, peer: peerId, peerClient}, + e as Error + ); } finally { this.peerBalancer.onRequestCompleted(peerId); } - } - - if (lastError) { - lastError.message = `Error fetching UnknownBlockRoot after ${i} attempts: ${lastError.message}`; - throw lastError; - } - throw Error( - `Error fetching UnknownBlockRoot after ${i}: cannot download all blobs or data columns for block ${blockRootHex}` - ); - } - - /** - * We have partial block input: - * - we have block but not have all blobs (deneb) or needed columns (fulu) - * - we don't have block and have some blobs (deneb) or some columns (fulu) - * Fetches missing block/data columns/block for the blockinput. This function returns either preData or availableData BlockInput. 
- */ - private async fetchUnavailableBlockInput( - unavailableBlockInput: BlockInput | NullBlockInput - ): Promise<{blockInput: BlockInput; peerIdStr: string}> { - if (unavailableBlockInput.block !== null && unavailableBlockInput.type !== BlockInputType.dataPromise) { - return {blockInput: unavailableBlockInput, peerIdStr: ""}; - } - - let blockRootHex: RootHex; - let blobKzgCommitmentsLen: number | undefined; - let blockRoot: Uint8Array; - const dataMeta: Record = {}; - let sampledColumns: ColumnIndex[] = []; - - if (unavailableBlockInput.block === null) { - blockRootHex = unavailableBlockInput.blockRootHex; - blockRoot = fromHex(blockRootHex); - } else { - const {cachedData, block: unavailableBlock} = unavailableBlockInput; - blockRoot = this.config - .getForkTypes(unavailableBlock.message.slot) - .BeaconBlock.hashTreeRoot(unavailableBlock.message); - blockRootHex = toRootHex(blockRoot); - blobKzgCommitmentsLen = (unavailableBlock.message.body as deneb.BeaconBlockBody).blobKzgCommitments.length; - - if (cachedData.fork === ForkName.deneb || cachedData.fork === ForkName.electra) { - const pendingBlobs = blobKzgCommitmentsLen - cachedData.blobsCache.size; - Object.assign(dataMeta, {pendingBlobs}); - } else if (cachedData.fork === ForkName.fulu) { - sampledColumns = this.network.custodyConfig.sampledColumns; - const pendingColumns = sampledColumns.length - (cachedData as CachedDataColumns).dataColumnsCache.size; - Object.assign(dataMeta, {pendingColumns}); + if (isPendingBlockInput(cacheItem) && cacheItem.blockInput.hasBlockAndAllData()) { + cacheItem.status = PendingBlockInputStatus.downloaded; + cacheItem.peerIdStrings.add(peerId); + cacheItem.timeSyncedSec = Date.now() / 1000; + return cacheItem; } } - let lastError: Error | null = null; - let i = 0; - const excludedPeers = new Set(); - while (i++ < this.getMaxDownloadAttempts()) { - const bestPeer = this.peerBalancer.bestPeerForBlockInput(unavailableBlockInput, excludedPeers); - if (bestPeer === null) { - // no more peer to try, throw error - throw Error( - `Error fetching UnavailableBlockInput after ${i}: cannot find peer with needed columns ${sampledColumns.join(", ")}` - ); - } - const {peerId, client: peerClient} = bestPeer; - excludedPeers.add(peerId); - - try { - const blockInput = await unavailableBeaconBlobsByRoot( - this.config, - this.network, - peerId, - peerClient, - unavailableBlockInput, - { - metrics: this.metrics, - logger: this.logger, - executionEngine: this.chain.executionEngine, - emitter: this.chain.emitter, - blockInputsRetryTrackerCache: this.blockInputsRetryTrackerCache, - engineGetBlobsCache: this.engineGetBlobsCache, - } - ); - - if (unavailableBlockInput.block !== null && blockInput.type === BlockInputType.dataPromise) { - // all datacolumns were not downloaded we can continue with other peers - // as unavailableBlockInput.block's dataColumnsCache would be updated - continue; - } - - // data is available, verify block root is correct - const block = blockInput.block.message; - const receivedBlockRoot = this.config.getForkTypes(block.slot).BeaconBlock.hashTreeRoot(block); - - if (!byteArrayEquals(receivedBlockRoot, blockRoot)) { - throw Error(`Wrong block received by peer, got ${toRootHex(receivedBlockRoot)} expected ${blockRootHex}`); + let message = `Error fetching BlockInput with blockRoot=${prettyBytes(rootHex)} after ${i} attempts.`; + if (!isPendingBlockInput(cacheItem)) { + message += " No block and no data was found"; + } else { + if (!cacheItem.blockInput.hasBlock()) { + message += " Block was not 
found."; + } else if (isBlockInputBlobs(cacheItem.blockInput)) { + const missing = cacheItem.blockInput.getMissingBlobMeta().map((b) => b.index); + if (missing.length) { + message += ` Missing blob indices=${prettyPrintIndices(missing)}`; } - if (unavailableBlockInput.block === null) { - this.logger.debug("Fetched NullBlockInput", {attempts: i, blockRootHex}); - } else { - this.logger.debug("Fetched UnavailableBlockInput", {attempts: i, ...dataMeta, blobKzgCommitmentsLen}); + } else if (isBlockInputColumns(cacheItem.blockInput)) { + const missing = cacheItem.blockInput.getMissingSampledColumnMeta().map((b) => b.index); + if (missing.length) { + message += ` Missing column indices=${prettyPrintIndices(missing)}`; } - - return {blockInput, peerIdStr: peerId}; - } catch (e) { - this.logger.debug("Error fetching UnavailableBlockInput", {attempt: i, blockRootHex, peer: peerId}, e as Error); - lastError = e as Error; - } finally { - this.peerBalancer.onRequestCompleted(peerId); } } - if (lastError) { - lastError.message = `Error fetching UnavailableBlockInput after ${i} attempts: ${lastError.message}`; - throw lastError; - } - - throw Error(`Error fetching UnavailableBlockInput after ${i}: unknown error`); + throw Error(message); } /** @@ -726,21 +597,26 @@ export class BlockInputSync { * Downscore all peers that have referenced any of this bad blocks. May report peers multiple times if they have * referenced more than one bad block. */ - private removeAndDownscoreAllDescendants(block: PendingBlock): void { + private removeAndDownScoreAllDescendants(block: BlockInputSyncCacheItem): void { // Get all blocks that are a descendant of this one const badPendingBlocks = this.removeAllDescendants(block); // just console log and do not penalize on pending/bad blocks for debugging // console.log("removeAndDownscoreAllDescendants", {block}); for (const block of badPendingBlocks) { + // + // TODO(fulu): why is this commented out here? 
+ // // this.knownBadBlocks.add(block.blockRootHex); // for (const peerIdStr of block.peerIdStrs) { // // TODO: Refactor peerRpcScores to work with peerIdStr only // this.network.reportPeer(peerIdStr, PeerAction.LowToleranceError, "BadBlockByRoot"); // } this.logger.debug("ignored Banning unknown block", { - root: block.blockRootHex, - peerIdStrs: Array.from(block.peerIdStrs).join(","), + root: getBlockInputSyncCacheItemRootHex(block), + peerIdStrings: Array.from(block.peerIdStrings) + .map((id) => prettyPrintPeerIdStr(id)) + .join(","), }); } @@ -748,16 +624,18 @@ export class BlockInputSync { pruneSetToMax(this.knownBadBlocks, MAX_KNOWN_BAD_BLOCKS); } - private removeAllDescendants(block: PendingBlock): PendingBlock[] { + private removeAllDescendants(block: BlockInputSyncCacheItem): BlockInputSyncCacheItem[] { + const rootHex = getBlockInputSyncCacheItemRootHex(block); // Get all blocks that are a descendant of this one - const badPendingBlocks = [block, ...getAllDescendantBlocks(block.blockRootHex, this.pendingBlocks)]; + const badPendingBlocks = [block, ...getAllDescendantBlocks(rootHex, this.pendingBlocks)]; - this.metrics?.syncUnknownBlock.removedBlocks.inc(badPendingBlocks.length); + this.metrics?.blockInputSync.removedBlocks.inc(badPendingBlocks.length); for (const block of badPendingBlocks) { - this.pendingBlocks.delete(block.blockRootHex); - this.logger.debug("Removing unknown parent block", { - root: block.blockRootHex, + const rootHex = getBlockInputSyncCacheItemRootHex(block); + this.pendingBlocks.delete(rootHex); + this.logger.debug("Removing bad/unknown/incomplete BlockInputSyncCacheItem", { + blockRoot: rootHex, }); } diff --git a/packages/beacon-node/src/sync/types.ts b/packages/beacon-node/src/sync/types.ts index 98f59d2292ac..d37dda1b5460 100644 --- a/packages/beacon-node/src/sync/types.ts +++ b/packages/beacon-node/src/sync/types.ts @@ -1,5 +1,5 @@ import {IBlockInput} from "@lodestar/beacon-node/src/chain/blocks/blockInput/index.js"; -import {RootHex} from "@lodestar/types"; +import {RootHex, Slot} from "@lodestar/types"; export enum PendingBlockType { /** @@ -51,3 +51,7 @@ export function isPendingBlockInput(pending: BlockInputSyncCacheItem): pending i export function getBlockInputSyncCacheItemRootHex(block: BlockInputSyncCacheItem): RootHex { return isPendingBlockInput(block) ? block.blockInput.blockRootHex : block.rootHex; } + +export function getBlockInputSyncCacheItemSlot(block: BlockInputSyncCacheItem): Slot | string { + return isPendingBlockInput(block) ? 
block.blockInput.slot : "unknown"; +} diff --git a/packages/beacon-node/src/sync/utils/downloadByRoot.ts b/packages/beacon-node/src/sync/utils/downloadByRoot.ts index df8c6d475316..7ee179c38406 100644 --- a/packages/beacon-node/src/sync/utils/downloadByRoot.ts +++ b/packages/beacon-node/src/sync/utils/downloadByRoot.ts @@ -25,7 +25,7 @@ export type DownloadBlockInputByRootProps = { peerIdStr: PeerIdStr; }; -export async function downloadBlockInputByRoot({ +export async function downloadByRoot({ config, network, cache, From e675be7c4038c9bc2b57be70af429f1302ea9dc0 Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Wed, 20 Aug 2025 07:42:26 +0700 Subject: [PATCH 037/173] fix: types for UnknownBlockPeerBalancer --- .../beacon-node/src/sync/blockInputSync.ts | 34 +++---------------- 1 file changed, 5 insertions(+), 29 deletions(-) diff --git a/packages/beacon-node/src/sync/blockInputSync.ts b/packages/beacon-node/src/sync/blockInputSync.ts index 66e45df90e86..ebb5f2cc7b8a 100644 --- a/packages/beacon-node/src/sync/blockInputSync.ts +++ b/packages/beacon-node/src/sync/blockInputSync.ts @@ -710,44 +710,20 @@ export class UnknownBlockPeerBalancer { * called from fetchUnavailableBlockInput() where we have either BlockInput or NullBlockInput * excludedPeers are the peers that we requested already so we don't want to try again */ - bestPeerForBlockInput( - unavailableBlockInput: BlockInput | NullBlockInput, - excludedPeers: Set - ): PeerSyncMeta | null { - let cachedData: CachedData | undefined = undefined; - if (unavailableBlockInput.block === null) { - // NullBlockInput - cachedData = unavailableBlockInput.cachedData; - } else { - // BlockInput - if (unavailableBlockInput.type !== BlockInputType.dataPromise) { - throw Error( - `bestPeerForBlockInput called with BlockInput type ${unavailableBlockInput.type}, expected dataPromise` - ); - } - cachedData = unavailableBlockInput.cachedData; - } - + bestPeerForBlockInput(blockInput: IBlockInput, excludedPeers: Set): PeerSyncMeta | null { const eligiblePeers: PeerIdStr[] = []; - if (cachedData.fork === ForkName.fulu) { - // cached data is CachedDataColumns - const {dataColumnsCache} = cachedData; - const pendingDataColumns: Set = new Set(); - for (const column of this.custodyConfig.sampledColumns) { - if (!dataColumnsCache.has(column)) { - pendingDataColumns.add(column); - } - } + if (isBlockInputColumns(blockInput)) { + const pendingDataColumns: Set = new Set(blockInput.getMissingSampledColumnMeta().map((c) => c.index)); if (pendingDataColumns.size === 0) { // no pending columns, we can return null + // TODO(fulu): is this correct @twoeths? What if all the columns are fine but the block is missing? 
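+        // One hedged answer (an assumption, not confirmed behavior): also require the block itself before
+        // short-circuiting, e.g. `if (pendingDataColumns.size === 0 && blockInput.hasBlock()) return null;`,
+        // so a peer can still be selected to fetch a missing block even when every sampled column is already cached.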
return null; } eligiblePeers.push(...this.filterPeers(pendingDataColumns, excludedPeers)); } else { // prefulu - const pendingDataColumns = null; - eligiblePeers.push(...this.filterPeers(pendingDataColumns, excludedPeers)); + eligiblePeers.push(...this.filterPeers(null, excludedPeers)); } if (eligiblePeers.length === 0) { From 86aba63bb26c0d8d38d55eeddea200b01c49c55c Mon Sep 17 00:00:00 2001 From: Cayman Date: Wed, 20 Aug 2025 07:30:29 -0400 Subject: [PATCH 038/173] chore: fix last build errors --- .../beacon-node/src/sync/utils/downloadByRange.ts | 12 ++++++++++-- .../src/sync/utils/pendingBlocksTree.ts | 14 +++++++------- 2 files changed, 17 insertions(+), 9 deletions(-) diff --git a/packages/beacon-node/src/sync/utils/downloadByRange.ts b/packages/beacon-node/src/sync/utils/downloadByRange.ts index 5a3d7bacf804..897a4c027af2 100644 --- a/packages/beacon-node/src/sync/utils/downloadByRange.ts +++ b/packages/beacon-node/src/sync/utils/downloadByRange.ts @@ -1,6 +1,5 @@ import {ChainForkConfig} from "@lodestar/config"; import {ForkPostDeneb, isForkPostDeneb, isForkPostFulu} from "@lodestar/params"; -import {DataAvailabilityStatus} from "@lodestar/state-transition"; import {RootHex, SignedBeaconBlock, Slot, deneb, fulu, phase0} from "@lodestar/types"; import {LodestarError, Logger, prettyBytes, prettyPrintIndices, toRootHex} from "@lodestar/utils"; import { @@ -163,7 +162,7 @@ export function cacheByRangeResponses({ ); } else { updatedBatchBlocks.push( - cache.getByBlob({ + cache.getByColumn({ columnSidecar, source, peerIdStr, @@ -520,6 +519,15 @@ export function validateResponses({ "No blocks to validate requests against" ); } + if (!blocksRequest) { + throw new DownloadByRangeError( + { + code: DownloadByRangeErrorCode.MISSING_BLOCKS_RESPONSE, + slotRange: slotRangeString, + }, + "No blocks request to validate requests against" + ); + } const {missingSlots, extraSlots} = compareBlockByRangeRequestAndResponse(blocksRequest, blocks); if (missingSlots) { diff --git a/packages/beacon-node/src/sync/utils/pendingBlocksTree.ts b/packages/beacon-node/src/sync/utils/pendingBlocksTree.ts index f42863d557b3..72c47961cbb9 100644 --- a/packages/beacon-node/src/sync/utils/pendingBlocksTree.ts +++ b/packages/beacon-node/src/sync/utils/pendingBlocksTree.ts @@ -35,7 +35,7 @@ function addToDescendantBlocks( if (firstDescendantBlocks) { for (const firstDescendantBlock of firstDescendantBlocks) { descendantBlocks.push(firstDescendantBlock); - addToDescendantBlocks(firstDescendantBlock.blockRootHex, byParent, descendantBlocks); + addToDescendantBlocks(getBlockInputSyncCacheItemRootHex(firstDescendantBlock), byParent, descendantBlocks); } } return descendantBlocks; @@ -48,7 +48,7 @@ export function getDescendantBlocks( const descendantBlocks: BlockInputSyncCacheItem[] = []; for (const block of blocks.values()) { - if (block.parentBlockRootHex === blockRootHex) { + if ((isPendingBlockInput(block) ? 
block.blockInput.parentRootHex : undefined) === blockRootHex) { descendantBlocks.push(block); } } @@ -76,7 +76,7 @@ export function getIncompleteAndAncestorBlocks( blocks: Map ): IncompleteAndAncestorBlocks { const incomplete = new Map(); - const ancestors = new Map(); + const ancestors = new Map(); for (const block of blocks.values()) { // check if the block was already added via getAllDescendants @@ -92,7 +92,7 @@ export function getIncompleteAndAncestorBlocks( !blocks.has(block.blockInput.parentRootHex) ) { ancestors.set(block.blockInput.blockRootHex, block); - const descendants = getAllDescendantBlocks(block); + const descendants = getAllDescendantBlocks(block.blockInput.blockRootHex, blocks); for (const descendant of descendants) { if (!isPendingBlockInput(descendant) || descendant.status !== PendingBlockInputStatus.downloaded) { incomplete.set(getBlockInputSyncCacheItemRootHex(descendant), descendant); @@ -102,12 +102,12 @@ export function getIncompleteAndAncestorBlocks( } if (block.status === PendingBlockInputStatus.pending) { - incomplete.push(block); + incomplete.set(getBlockInputSyncCacheItemRootHex(block), block); } } return { - incomplete, - ancestors, + incomplete: Array.from(incomplete.values()), + ancestors: Array.from(ancestors.values()), }; } From 821dd39b87c49e44604ffe3d9a8191cf12e30e3a Mon Sep 17 00:00:00 2001 From: Cayman Date: Wed, 20 Aug 2025 07:42:46 -0400 Subject: [PATCH 039/173] chore: move blockInput.ts to unknownBlock.ts --- packages/beacon-node/src/sync/sync.ts | 2 +- .../beacon-node/src/sync/{blockInputSync.ts => unknownBlock.ts} | 0 packages/beacon-node/test/unit/sync/unknownBlock.test.ts | 2 +- 3 files changed, 2 insertions(+), 2 deletions(-) rename packages/beacon-node/src/sync/{blockInputSync.ts => unknownBlock.ts} (100%) diff --git a/packages/beacon-node/src/sync/sync.ts b/packages/beacon-node/src/sync/sync.ts index a667d93a1002..3763c9bee78b 100644 --- a/packages/beacon-node/src/sync/sync.ts +++ b/packages/beacon-node/src/sync/sync.ts @@ -8,12 +8,12 @@ import {Metrics} from "../metrics/index.js"; import {INetwork, NetworkEvent, NetworkEventData} from "../network/index.js"; import {ClockEvent} from "../util/clock.js"; import {isOptimisticBlock} from "../util/forkChoice.js"; -import {BlockInputSync} from "./blockInputSync.js"; import {MIN_EPOCH_TO_START_GOSSIP} from "./constants.js"; import {IBeaconSync, SyncModules, SyncingStatus} from "./interface.js"; import {SyncChainDebugState, SyncState, syncStateMetric} from "./interface.js"; import {SyncOptions} from "./options.js"; import {RangeSync, RangeSyncEvent, RangeSyncStatus} from "./range/range.js"; +import {BlockInputSync} from "./unknownBlock.js"; import {PeerSyncType, getPeerSyncType, peerSyncTypes} from "./utils/remoteSyncType.js"; export class BeaconSync implements IBeaconSync { diff --git a/packages/beacon-node/src/sync/blockInputSync.ts b/packages/beacon-node/src/sync/unknownBlock.ts similarity index 100% rename from packages/beacon-node/src/sync/blockInputSync.ts rename to packages/beacon-node/src/sync/unknownBlock.ts diff --git a/packages/beacon-node/test/unit/sync/unknownBlock.test.ts b/packages/beacon-node/test/unit/sync/unknownBlock.test.ts index 7062a42fe7fb..d04c0fa3c1a7 100644 --- a/packages/beacon-node/test/unit/sync/unknownBlock.test.ts +++ b/packages/beacon-node/test/unit/sync/unknownBlock.test.ts @@ -21,8 +21,8 @@ import {SeenBlockProposers} from "../../../src/chain/seenCache/seenBlockProposer import {ZERO_HASH} from "../../../src/constants/constants.js"; import {INetwork, NetworkEvent, 
NetworkEventBus, PeerAction} from "../../../src/network/index.js"; import {PeerSyncMeta} from "../../../src/network/peers/peersData.js"; -import {BlockInputSync, UnknownBlockPeerBalancer} from "../../../src/sync/blockInputSync.js"; import {defaultSyncOptions} from "../../../src/sync/options.js"; +import {BlockInputSync, UnknownBlockPeerBalancer} from "../../../src/sync/unknownBlock.js"; import {CustodyConfig} from "../../../src/util/dataColumns.js"; import {PeerIdStr} from "../../../src/util/peerId.js"; import {ClockStopped} from "../../mocks/clock.js"; From 0f1b2a3344d92e1430da086a200ca5a7828be873 Mon Sep 17 00:00:00 2001 From: Cayman Date: Wed, 20 Aug 2025 09:29:43 -0400 Subject: [PATCH 040/173] chore: clean up getUnknownAndAncestorBlocks --- packages/beacon-node/src/sync/unknownBlock.ts | 22 ++++------ .../src/sync/utils/pendingBlocksTree.ts | 44 ++++++------------- 2 files changed, 21 insertions(+), 45 deletions(-) diff --git a/packages/beacon-node/src/sync/unknownBlock.ts b/packages/beacon-node/src/sync/unknownBlock.ts index ebb5f2cc7b8a..ed02578344b1 100644 --- a/packages/beacon-node/src/sync/unknownBlock.ts +++ b/packages/beacon-node/src/sync/unknownBlock.ts @@ -1,8 +1,7 @@ import {ChainForkConfig} from "@lodestar/config"; -import {ForkName, ForkSeq, INTERVALS_PER_SLOT} from "@lodestar/params"; -import {ColumnIndex, Root, RootHex, deneb} from "@lodestar/types"; -import {BlobAndProof} from "@lodestar/types/deneb"; -import {Logger, fromHex, prettyBytes, prettyPrintIndices, pruneSetToMax, toRootHex} from "@lodestar/utils"; +import {ForkSeq, INTERVALS_PER_SLOT} from "@lodestar/params"; +import {RootHex} from "@lodestar/types"; +import {Logger, prettyBytes, prettyPrintIndices, pruneSetToMax} from "@lodestar/utils"; import {sleep} from "@lodestar/utils"; import {isBlockInputBlobs, isBlockInputColumns} from "../chain/blocks/blockInput/blockInput.js"; import {BlockInputSource, IBlockInput} from "../chain/blocks/blockInput/types.js"; @@ -11,12 +10,11 @@ import {ChainEvent, ChainEventData, IBeaconChain} from "../chain/index.js"; import {Metrics} from "../metrics/index.js"; import {INetwork, NetworkEvent, NetworkEventData, prettyPrintPeerIdStr} from "../network/index.js"; import {PeerSyncMeta} from "../network/peers/peersData.js"; -import {byteArrayEquals} from "../util/bytes.js"; import {CustodyConfig} from "../util/dataColumns.js"; import {PeerIdStr} from "../util/peerId.js"; import {shuffle} from "../util/shuffle.js"; import {sortBy} from "../util/sortBy.js"; -import {Result, wrapError} from "../util/wrapError.js"; +import {wrapError} from "../util/wrapError.js"; import {MAX_CONCURRENT_REQUESTS} from "./constants.js"; import {SyncOptions} from "./options.js"; import { @@ -30,11 +28,7 @@ import { isPendingBlockInput, } from "./types.js"; import {downloadByRoot} from "./utils/downloadByRoot.js"; -import { - getAllDescendantBlocks, - getDescendantBlocks, - getIncompleteAndAncestorBlocks, -} from "./utils/pendingBlocksTree.js"; +import {getAllDescendantBlocks, getDescendantBlocks, getUnknownAndAncestorBlocks} from "./utils/pendingBlocksTree.js"; const MAX_ATTEMPTS_PER_BLOCK = 5; const MAX_KNOWN_BAD_BLOCKS = 500; @@ -293,10 +287,10 @@ export class BlockInputSync { return; } - const {incomplete, ancestors} = getIncompleteAndAncestorBlocks(this.pendingBlocks); + const {unknowns, ancestors} = getUnknownAndAncestorBlocks(this.pendingBlocks); // it's rare when there is no unknown block // see https://github.com/ChainSafe/lodestar/issues/5649#issuecomment-1594213550 - if (incomplete.length === 0) { + 
if (unknowns.length === 0) { let processedBlocks = 0; for (const block of ancestors) { @@ -318,7 +312,7 @@ export class BlockInputSync { } // most of the time there is exactly 1 unknown block - for (const block of incomplete) { + for (const block of unknowns) { this.downloadBlock(block).catch((e) => { this.logger.debug("Unexpected error - downloadBlock", {root: getBlockInputSyncCacheItemRootHex(block)}, e); }); diff --git a/packages/beacon-node/src/sync/utils/pendingBlocksTree.ts b/packages/beacon-node/src/sync/utils/pendingBlocksTree.ts index 72c47961cbb9..d3731bca60d0 100644 --- a/packages/beacon-node/src/sync/utils/pendingBlocksTree.ts +++ b/packages/beacon-node/src/sync/utils/pendingBlocksTree.ts @@ -56,58 +56,40 @@ export function getDescendantBlocks( return descendantBlocks; } -export type IncompleteAndAncestorBlocks = { - incomplete: BlockInputSyncCacheItem[]; +export type UnknownAndAncestorBlocks = { + unknowns: BlockInputSyncCacheItem[]; ancestors: PendingBlockInput[]; }; /** - * Returns two arrays, one has the items that need to be pulled still and the other is items that - * are ready to be checked for rooting in fork-choice so the branch can be processed (or have their - * ancestor pulled to extend the branch backward until it does root in fork-choice) + * Returns two arrays. + * The first one has the earliest blocks that are not linked to fork-choice yet, meaning they require parent blocks to be pulled. + * The second one has the earliest blocks that are linked to fork-choice, meaning they are ready to be processed. * - * Given this chain segment incomplete block n => downloaded block n + 1 => downloaded block n + 2 + * Given this chain segment unknown block n => downloaded block n + 1 => downloaded block n + 2 * return `{incomplete: [n], ancestors: []}` * * Given this chain segment: downloaded block n => downloaded block n + 1 => downloaded block n + 2 * return {incomplete: [], ancestors: [n]} */ -export function getIncompleteAndAncestorBlocks( - blocks: Map -): IncompleteAndAncestorBlocks { - const incomplete = new Map(); +export function getUnknownAndAncestorBlocks(blocks: Map): UnknownAndAncestorBlocks { + const unknowns = new Map(); const ancestors = new Map(); for (const block of blocks.values()) { - // check if the block was already added via getAllDescendants - if (incomplete.has(getBlockInputSyncCacheItemRootHex(block))) { - continue; - } - - // block and sidecars have bee fully downloaded and the parent is not in the pending block, attempt to find - // parentRootHex in fork-choice to determine if its ready to be processed - if ( + if (!isPendingBlockInput(block) && block.status !== PendingBlockInputStatus.pending) { + unknowns.set(getBlockInputSyncCacheItemRootHex(block), block); + } else if ( isPendingBlockInput(block) && - block.blockInput.hasBlockAndAllData() && + block.status === PendingBlockInputStatus.downloaded && !blocks.has(block.blockInput.parentRootHex) ) { ancestors.set(block.blockInput.blockRootHex, block); - const descendants = getAllDescendantBlocks(block.blockInput.blockRootHex, blocks); - for (const descendant of descendants) { - if (!isPendingBlockInput(descendant) || descendant.status !== PendingBlockInputStatus.downloaded) { - incomplete.set(getBlockInputSyncCacheItemRootHex(descendant), descendant); - } - } - continue; - } - - if (block.status === PendingBlockInputStatus.pending) { - incomplete.set(getBlockInputSyncCacheItemRootHex(block), block); } } return { - incomplete: Array.from(incomplete.values()), + unknowns: 
Array.from(unknowns.values()), ancestors: Array.from(ancestors.values()), }; } From 1c49e124f1d5574130ccbd0b3f16dce22243fe36 Mon Sep 17 00:00:00 2001 From: Cayman Date: Wed, 20 Aug 2025 09:32:59 -0400 Subject: [PATCH 041/173] chore: fix getUnknownAndAncestorBlocks --- packages/beacon-node/src/sync/utils/pendingBlocksTree.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/beacon-node/src/sync/utils/pendingBlocksTree.ts b/packages/beacon-node/src/sync/utils/pendingBlocksTree.ts index d3731bca60d0..fc6976486738 100644 --- a/packages/beacon-node/src/sync/utils/pendingBlocksTree.ts +++ b/packages/beacon-node/src/sync/utils/pendingBlocksTree.ts @@ -77,7 +77,7 @@ export function getUnknownAndAncestorBlocks(blocks: Map(); for (const block of blocks.values()) { - if (!isPendingBlockInput(block) && block.status !== PendingBlockInputStatus.pending) { + if (!isPendingBlockInput(block) && block.status === PendingBlockInputStatus.pending) { unknowns.set(getBlockInputSyncCacheItemRootHex(block), block); } else if ( isPendingBlockInput(block) && From edd7dc2910b5fc03a54f3364ccd5404c7614d8be Mon Sep 17 00:00:00 2001 From: Cayman Date: Wed, 20 Aug 2025 10:11:44 -0400 Subject: [PATCH 042/173] chore: another fix to getUnknownAndAncestorBlocks --- packages/beacon-node/src/sync/utils/pendingBlocksTree.ts | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/packages/beacon-node/src/sync/utils/pendingBlocksTree.ts b/packages/beacon-node/src/sync/utils/pendingBlocksTree.ts index fc6976486738..dadb05861205 100644 --- a/packages/beacon-node/src/sync/utils/pendingBlocksTree.ts +++ b/packages/beacon-node/src/sync/utils/pendingBlocksTree.ts @@ -77,7 +77,10 @@ export function getUnknownAndAncestorBlocks(blocks: Map(); for (const block of blocks.values()) { - if (!isPendingBlockInput(block) && block.status === PendingBlockInputStatus.pending) { + if ( + block.status === PendingBlockInputStatus.pending && + (isPendingBlockInput(block) ? 
!block.blockInput.hasBlockAndAllData() : true) + ) { unknowns.set(getBlockInputSyncCacheItemRootHex(block), block); } else if ( isPendingBlockInput(block) && From bf70001f8081f17de37f796c9b0d4fb3c7c56a23 Mon Sep 17 00:00:00 2001 From: Cayman Date: Wed, 20 Aug 2025 10:30:07 -0400 Subject: [PATCH 043/173] chore: fix up downloadBlock and processBlock --- packages/beacon-node/src/sync/unknownBlock.ts | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) diff --git a/packages/beacon-node/src/sync/unknownBlock.ts b/packages/beacon-node/src/sync/unknownBlock.ts index ed02578344b1..d1e069e19a23 100644 --- a/packages/beacon-node/src/sync/unknownBlock.ts +++ b/packages/beacon-node/src/sync/unknownBlock.ts @@ -346,7 +346,7 @@ export class BlockInputSync { const delaySec = Date.now() / 1000 - (this.chain.genesisTime + blockSlot * this.config.SECONDS_PER_SLOT); this.metrics?.blockInputSync.elapsedTimeTillReceived.observe(delaySec); - const parentInForkChoice = this.chain.forkChoice.hasBlock(pending.blockInput.getBlock().message.parentRoot); + const parentInForkChoice = this.chain.forkChoice.hasBlockHex(pending.blockInput.parentRootHex); this.logger.verbose("Downloaded unknown block", { blockRoot: rootHex, pendingBlocks: this.pendingBlocks.size, @@ -375,18 +375,8 @@ export class BlockInputSync { } } else { this.metrics?.blockInputSync.downloadedBlocksError.inc(); - // block download has error, this allows to retry the download of the block - block.status = PendingBlockInputStatus.pending; - // const errorData = {blockRoot: rootHex}; - // TODO(fulu): removed outer retry loop. Need to look at how to down score for errors here - // if (block.downloadAttempts > MAX_ATTEMPTS_PER_BLOCK) { - // // Give up on this block and assume it does not exist, penalizing all peers as if it was a bad block - // this.logger.debug("Ignoring unknown block root after many failed downloads", errorData, res.err); + this.logger.debug("Ignoring unknown block root after many failed downloads", {blockRoot: rootHex}, res.err); this.removeAndDownScoreAllDescendants(block); - // } else { - // // Try again when a new peer connects, its status changes, or a new unknownBlockParent event happens - // this.logger.debug("Error downloading unknown block root", errorData, res.err); - // } } } @@ -457,7 +447,6 @@ export class BlockInputSync { // Send child blocks to the processor for (const descendantBlock of getDescendantBlocks(pendingBlock.blockInput.blockRootHex, this.pendingBlocks)) { - // TODO(fulu): this might cause sync to get stuck... 
need to resolve if (isPendingBlockInput(descendantBlock)) { this.processBlock(descendantBlock).catch((e) => { this.logger.debug("Unexpected error - process descendant block", {}, e); From ed4275213ee6923ee61abe8aec72ee42ed506775 Mon Sep 17 00:00:00 2001 From: Cayman Date: Wed, 20 Aug 2025 10:53:21 -0400 Subject: [PATCH 044/173] chore: remove unused code in unknownBlock.ts --- packages/beacon-node/src/sync/unknownBlock.ts | 25 ++----------------- .../test/unit/sync/unknownBlock.test.ts | 2 +- 2 files changed, 3 insertions(+), 24 deletions(-) diff --git a/packages/beacon-node/src/sync/unknownBlock.ts b/packages/beacon-node/src/sync/unknownBlock.ts index d1e069e19a23..d0deefec3cc8 100644 --- a/packages/beacon-node/src/sync/unknownBlock.ts +++ b/packages/beacon-node/src/sync/unknownBlock.ts @@ -10,7 +10,6 @@ import {ChainEvent, ChainEventData, IBeaconChain} from "../chain/index.js"; import {Metrics} from "../metrics/index.js"; import {INetwork, NetworkEvent, NetworkEventData, prettyPrintPeerIdStr} from "../network/index.js"; import {PeerSyncMeta} from "../network/peers/peersData.js"; -import {CustodyConfig} from "../util/dataColumns.js"; import {PeerIdStr} from "../util/peerId.js"; import {shuffle} from "../util/shuffle.js"; import {sortBy} from "../util/sortBy.js"; @@ -34,24 +33,6 @@ const MAX_ATTEMPTS_PER_BLOCK = 5; const MAX_KNOWN_BAD_BLOCKS = 500; const MAX_PENDING_BLOCKS = 100; -function getLogMeta( - block: BlockInputSyncCacheItem, - pendingBlocks?: Map -): Record { - const pendingBlocksLog: Record = pendingBlocks ? {pendingBlocks: pendingBlocks.size} : {}; - return isPendingBlockInput(block) - ? { - type: "pendingBlockInput", - ...pendingBlocksLog, - ...block.blockInput.getLogMeta(), - } - : { - type: "pendingRootHex", - ...pendingBlocksLog, - rootHex: prettyBytes(block.rootHex), - }; -} - /** * BlockInputSync is a class that handles ReqResp to find blocks and data related to a specific blockRoot. The * blockRoot may have been found via object gossip, or the API. Gossip objects that can trigger a search are block, @@ -105,7 +86,7 @@ export class BlockInputSync { ) { this.maxPendingBlocks = opts?.maxPendingBlocks ?? 
MAX_PENDING_BLOCKS; this.proposerBoostSecWindow = this.config.SECONDS_PER_SLOT / INTERVALS_PER_SLOT; - this.peerBalancer = new UnknownBlockPeerBalancer(this.network.custodyConfig); + this.peerBalancer = new UnknownBlockPeerBalancer(); if (metrics) { metrics.blockInputSync.pendingBlocks.addCollect(() => @@ -645,12 +626,10 @@ export class BlockInputSync { export class UnknownBlockPeerBalancer { readonly peersMeta: Map; readonly activeRequests: Map; - private readonly custodyConfig: CustodyConfig; - constructor(custodyConfig: CustodyConfig) { + constructor() { this.peersMeta = new Map(); this.activeRequests = new Map(); - this.custodyConfig = custodyConfig; } /** Trigger on each peer re-status */ diff --git a/packages/beacon-node/test/unit/sync/unknownBlock.test.ts b/packages/beacon-node/test/unit/sync/unknownBlock.test.ts index d04c0fa3c1a7..e5bd4954fcd9 100644 --- a/packages/beacon-node/test/unit/sync/unknownBlock.test.ts +++ b/packages/beacon-node/test/unit/sync/unknownBlock.test.ts @@ -360,7 +360,7 @@ describe("UnknownBlockPeerBalancer", async () => { let peerBalancer: UnknownBlockPeerBalancer; beforeEach(() => { - peerBalancer = new UnknownBlockPeerBalancer(custodyConfig); + peerBalancer = new UnknownBlockPeerBalancer(); for (const [peerId, peerMeta] of peersMeta.entries()) { peerBalancer.onPeerConnected(peerId, peerMeta); } From be7e3a15d45670ca6e643e3dd38ca03e646e76cf Mon Sep 17 00:00:00 2001 From: Cayman Date: Wed, 20 Aug 2025 11:03:07 -0400 Subject: [PATCH 045/173] chore: use sampledColumns vs sampledGroups --- packages/beacon-node/src/sync/unknownBlock.ts | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/packages/beacon-node/src/sync/unknownBlock.ts b/packages/beacon-node/src/sync/unknownBlock.ts index d0deefec3cc8..a93bbf7aa2c9 100644 --- a/packages/beacon-node/src/sync/unknownBlock.ts +++ b/packages/beacon-node/src/sync/unknownBlock.ts @@ -10,6 +10,7 @@ import {ChainEvent, ChainEventData, IBeaconChain} from "../chain/index.js"; import {Metrics} from "../metrics/index.js"; import {INetwork, NetworkEvent, NetworkEventData, prettyPrintPeerIdStr} from "../network/index.js"; import {PeerSyncMeta} from "../network/peers/peersData.js"; +import {computeColumnsForCustodyGroup} from "../util/dataColumns.js"; import {PeerIdStr} from "../util/peerId.js"; import {shuffle} from "../util/shuffle.js"; import {sortBy} from "../util/sortBy.js"; @@ -484,7 +485,7 @@ export class BlockInputSync { const excludedPeers = new Set(); const defaultPendingColumns = this.config.getForkSeq(this.chain.clock.currentSlot) >= ForkSeq.fulu - ? new Set(this.network.custodyConfig.sampleGroups) + ? 
new Set(this.network.custodyConfig.sampledColumns) : null; let i = 0; From b25ab647397474b1cca785bead5ce998701c10be Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Thu, 21 Aug 2025 04:59:26 +0700 Subject: [PATCH 046/173] feat: build out downloadByRoot --- .../src/chain/validation/blobSidecar.ts | 10 +- .../src/sync/utils/downloadByRoot.ts | 593 ++++++++++++++---- 2 files changed, 469 insertions(+), 134 deletions(-) diff --git a/packages/beacon-node/src/chain/validation/blobSidecar.ts b/packages/beacon-node/src/chain/validation/blobSidecar.ts index ef77fed3258d..5d31d6a948c8 100644 --- a/packages/beacon-node/src/chain/validation/blobSidecar.ts +++ b/packages/beacon-node/src/chain/validation/blobSidecar.ts @@ -135,7 +135,7 @@ export async function validateGossipBlobSidecar( } // verify if the blob inclusion proof is correct - if (!validateInclusionProof(blobSidecar)) { + if (!validateBlobSidecarInclusionProof(blobSidecar)) { throw new BlobSidecarGossipError(GossipAction.REJECT, { code: BlobSidecarErrorCode.INCLUSION_PROOF_INVALID, slot: blobSidecar.signedBlockHeader.message.slot, @@ -164,7 +164,7 @@ export async function validateGossipBlobSidecar( // blob, proof and commitment as a valid BLS G1 point gets verified in batch validation try { - await validateBlobsAndProofs([blobSidecar.kzgCommitment], [blobSidecar.blob], [blobSidecar.kzgProof]); + await validateBlobsAndBlobProofs([blobSidecar.kzgCommitment], [blobSidecar.blob], [blobSidecar.kzgProof]); } catch (_e) { throw new BlobSidecarGossipError(GossipAction.REJECT, { code: BlobSidecarErrorCode.INVALID_KZG_PROOF, @@ -214,12 +214,12 @@ export async function validateBlobSidecars( } if (!opts.skipProofsCheck) { - await validateBlobsAndProofs(expectedKzgCommitments, blobs, proofs); + await validateBlobsAndBlobProofs(expectedKzgCommitments, blobs, proofs); } } } -async function validateBlobsAndProofs( +export async function validateBlobsAndBlobProofs( expectedKzgCommitments: deneb.BlobKzgCommitments, blobs: deneb.Blobs, proofs: deneb.KZGProofs @@ -237,7 +237,7 @@ async function validateBlobsAndProofs( } } -function validateInclusionProof(blobSidecar: deneb.BlobSidecar): boolean { +export function validateBlobSidecarInclusionProof(blobSidecar: deneb.BlobSidecar): boolean { return verifyMerkleBranch( ssz.deneb.KZGCommitment.hashTreeRoot(blobSidecar.kzgCommitment), blobSidecar.kzgCommitmentInclusionProof, diff --git a/packages/beacon-node/src/sync/utils/downloadByRoot.ts b/packages/beacon-node/src/sync/utils/downloadByRoot.ts index 7ee179c38406..124ae06c4926 100644 --- a/packages/beacon-node/src/sync/utils/downloadByRoot.ts +++ b/packages/beacon-node/src/sync/utils/downloadByRoot.ts @@ -1,182 +1,517 @@ import {ChainForkConfig} from "@lodestar/config"; -import {ForkPreFulu} from "@lodestar/params"; -import {signedBlockToSignedHeader} from "@lodestar/state-transition"; -import {deneb} from "@lodestar/types"; -import {LodestarError, fromHex, prettyBytes, toHex} from "@lodestar/utils"; -import {BlockInputSource, DAType, IBlockInput, isBlockInputBlobs} from "../../chain/blocks/blockInput/index.js"; -import {SeenBlockInput} from "../../chain/seenCache/seenGossipBlockInput.js"; -import {IExecutionEngine} from "../../execution/index.js"; -import {INetwork} from "../../network/index.js"; -import {computeInclusionProof} from "../../util/blobs.js"; -import {PeerIdStr} from "../../util/peerId.js"; +import {ForkPostDeneb, NUMBER_OF_COLUMNS, isForkPostDeneb, isForkPostFulu} from "@lodestar/params"; +import {BlobIndex, ColumnIndex, RootHex, SignedBeaconBlock, 
deneb, fulu, phase0} from "@lodestar/types"; +import {LodestarError, fromHex, prettyBytes, toRootHex} from "@lodestar/utils"; +import {isBlockInputBlobs, isBlockInputColumns} from "../../chain/blocks/blockInput/blockInput.js"; +import {IBlockInput} from "../../chain/blocks/blockInput/types.js"; +import {validateBlobSidecarInclusionProof, validateBlobsAndBlobProofs} from "../../chain/validation/blobSidecar.js"; import { - BlockInputSyncCacheItem, - PendingBlockInput, - getBlockInputSyncCacheItemRootHex, - isPendingBlockInput, -} from "../types.js"; + verifyDataColumnSidecarInclusionProof, + verifyDataColumnSidecarKzgProofs, +} from "../../chain/validation/dataColumnSidecar.js"; +import {INetwork} from "../../network/interface.js"; +import {prettyPrintPeerIdStr} from "../../network/util.js"; +import {byteArrayEquals} from "../../util/bytes.js"; +import {PeerIdStr} from "../../util/peerId.js"; +import {BlobSidecarsByRootRequest} from "../../util/types.js"; +import {BlockInputSyncCacheItem, getBlockInputSyncCacheItemRootHex, isPendingBlockInput} from "../types.js"; -export type DownloadBlockInputByRootProps = { +export type DownloadByRootCoreProps = { config: ChainForkConfig; network: INetwork; - cache: SeenBlockInput; - executionEngine?: IExecutionEngine; - pending: BlockInputSyncCacheItem; peerIdStr: PeerIdStr; }; +export type DownloadByRootProps = DownloadByRootCoreProps & { + cacheItem: BlockInputSyncCacheItem; +}; +export type DownloadAndValidateBlockProps = DownloadByRootCoreProps & {blockRoot: Uint8Array}; +export type DownloadAndValidateBlobsProps = DownloadAndValidateBlockProps & {blobIndices: BlobIndex[]}; +export type DownloadAndValidateColumnsProps = DownloadAndValidateBlockProps & {columnIndices: ColumnIndex[]}; +export type DownloadByRootResponses = { + block: SignedBeaconBlock; + blobSidecars?: deneb.BlobSidecars; + columnSidecars?: fulu.DataColumnSidecars; +}; export async function downloadByRoot({ config, network, - cache, - executionEngine, - pending, peerIdStr, -}: DownloadBlockInputByRootProps): Promise { - if (!isPendingBlockInput(pending) || !pending.blockInput.hasBlock()) { - pending = await downloadAndCacheBlock({ - network, - cache, - pending, - peerIdStr, - }); - } + cacheItem, +}: DownloadByRootProps): Promise { + let block: SignedBeaconBlock; + let blobSidecars: deneb.BlobSidecars | undefined; + let columnSidecars: fulu.DataColumnSidecars | undefined; + + const rootHex = getBlockInputSyncCacheItemRootHex(cacheItem); + const blockRoot = fromHex(rootHex); - if (!pending.blockInput.hasAllData()) { - await downloadAndCacheData({ + if (isPendingBlockInput(cacheItem)) { + if (cacheItem.blockInput.hasBlock()) { + block = cacheItem.blockInput.getBlock(); + } else { + block = await downloadAndValidateBlock({ + config, + network, + peerIdStr, + blockRoot, + }); + } + + if (!cacheItem.blockInput.hasAllData()) { + if (isBlockInputBlobs(cacheItem.blockInput)) { + blobSidecars = await downloadAndValidateBlobs({ + config, + network, + peerIdStr, + blockRoot, + blobIndices: cacheItem.blockInput.getMissingBlobMeta().map((b) => b.index), + }); + } + if (isBlockInputColumns(cacheItem.blockInput)) { + columnSidecars = await downloadAndValidateColumns({ + config, + network, + peerIdStr, + blockRoot, + columnIndices: cacheItem.blockInput.getMissingSampledColumnMeta().map((c) => c.index), + }); + } + } + } else { + block = await downloadAndValidateBlock({ config, network, - executionEngine, peerIdStr, - blockInput: pending.blockInput, + blockRoot, }); + const forkName = 
config.getForkName(block.message.slot); + if (isForkPostFulu(forkName)) { + columnSidecars = await downloadAndValidateColumns({ + config, + network, + peerIdStr, + blockRoot, + columnIndices: network.custodyConfig.sampledColumns, + }); + } else if (isForkPostDeneb(forkName)) { + const blobCount = (block as SignedBeaconBlock<ForkPostDeneb>).message.body.blobKzgCommitments.length; + blobSidecars = await downloadAndValidateBlobs({ + config, + network, + peerIdStr, + blockRoot, + blobIndices: Array.from({length: blobCount}, (_, i) => i), + }); + } } return { block, blobSidecars, columnSidecars, }; } -export async function downloadAndCacheBlock({ +export async function downloadAndValidateBlock({ + config, network, - cache, - pending, peerIdStr, -}: Omit): Promise { - const blockRootHex = getBlockInputSyncCacheItemRootHex(pending); - const blockRoot = fromHex(blockRootHex); - const [response] = await network.sendBeaconBlocksByRoot(peerIdStr, [blockRoot]); - if (isPendingBlockInput(pending)) { - pending.blockInput.addBlock({ - blockRootHex, - block: response.data, - source: { - seenTimestampSec: Date.now() / 1000, - source: BlockInputSource.byRoot, - peerIdStr, + blockRoot, +}: DownloadAndValidateBlockProps): Promise<SignedBeaconBlock> { + const response = await network.sendBeaconBlocksByRoot(peerIdStr, [blockRoot]); + const block = response.at(0)?.data; + if (!block) { + throw new DownloadByRootError({ + code: DownloadByRootErrorCode.MISSING_BLOCK_RESPONSE, + peer: prettyPrintPeerIdStr(peerIdStr), + blockRoot: prettyBytes(toRootHex(blockRoot)), + }); + } + const receivedRoot = config.getForkTypes(block.message.slot).BeaconBlock.hashTreeRoot(block.message); + if (!byteArrayEquals(receivedRoot, blockRoot)) { + throw new DownloadByRootError( + { + code: DownloadByRootErrorCode.MISMATCH_BLOCK_ROOT, + peer: prettyPrintPeerIdStr(peerIdStr), + requestedBlockRoot: prettyBytes(toRootHex(blockRoot)), + receivedBlockRoot: prettyBytes(toRootHex(receivedRoot)), }, + "block does not match requested root" + ); + } + return block; +} + +export async function downloadAndValidateBlobs({ + config, + network, + peerIdStr, + blockRoot, + blobIndices, +}: DownloadAndValidateBlobsProps): Promise<deneb.BlobSidecars> { + const blobsRequest = blobIndices.map((index) => ({blockRoot, index})); + const blobSidecars = await network.sendBlobSidecarsByRoot(peerIdStr, blobsRequest); + + for (const blobSidecar of blobSidecars) { + if (!blobIndices.includes(blobSidecar.index)) { + throw new DownloadByRootError( + { + code: DownloadByRootErrorCode.EXTRA_SIDECAR_RECEIVED, + peer: prettyPrintPeerIdStr(peerIdStr), + blockRoot: prettyBytes(toRootHex(blockRoot)), + invalidIndex: blobSidecar.index, + }, + "received a blobSidecar that was not requested" + ); + } + const headerRoot = config + .getForkTypes(blobSidecar.signedBlockHeader.message.slot) + .BeaconBlockHeader.hashTreeRoot(blobSidecar.signedBlockHeader.message); + if (!byteArrayEquals(blockRoot, headerRoot)) { + throw new DownloadByRootError( + { + code: DownloadByRootErrorCode.MISMATCH_BLOCK_ROOT, + peer: prettyPrintPeerIdStr(peerIdStr), + requestedBlockRoot: prettyBytes(toRootHex(blockRoot)), + receivedBlockRoot: prettyBytes(toRootHex(headerRoot)), + }, + `blobSidecar.signedBlockHeader does not match requested blockRoot for index=${blobSidecar.index}` + ); + } + + if (!validateBlobSidecarInclusionProof(blobSidecar)) { + throw new DownloadByRootError({ + code: DownloadByRootErrorCode.INVALID_INCLUSION_PROOF, + peer: prettyPrintPeerIdStr(peerIdStr), + blockRoot: prettyBytes(toRootHex(blockRoot)), +
sidecarIndex: blobSidecar.index, + }); + } + } + + try { + await validateBlobsAndBlobProofs( + blobSidecars.map((b) => b.kzgCommitment), + blobSidecars.map((b) => b.blob), + blobSidecars.map((b) => b.kzgProof) + ); + } catch { + throw new DownloadByRootError({ + code: DownloadByRootErrorCode.INVALID_KZG_PROOF, + peer: prettyPrintPeerIdStr(peerIdStr), + blockRoot: prettyBytes(toRootHex(blockRoot)), }); - return pending; } - const blockInput = cache.getByBlock({ - block: response.data, - source: BlockInputSource.byRoot, - seenTimestampSec: Date.now() / 1000, - peerIdStr, - }); - return { - status: pending.status, - blockInput, - timeAddedSec: pending.timeAddedSec, - peerIdStrings: pending.peerIdStrings, - timeSyncedSec: pending.timeSyncedSec, - }; + return blobSidecars; } -export async function downloadAndCacheData({ +export async function downloadAndValidateColumns({ config, network, - executionEngine, - blockInput, peerIdStr, -}: Omit & {blockInput: IBlockInput}): Promise { - if (isBlockInputBlobs(blockInput)) { - const missingBlobsMeta = blockInput.getMissingBlobMeta(); - if (executionEngine) { - const forkName = blockInput.forkName as ForkPreFulu; - const response = await executionEngine.getBlobs( - forkName, - missingBlobsMeta.map(({versionHash}) => versionHash) + blockRoot, + columnIndices, +}: DownloadAndValidateColumnsProps): Promise<fulu.DataColumnSidecars> { + const columnSidecars = await network.sendDataColumnSidecarsByRoot(peerIdStr, [{blockRoot, columns: columnIndices}]); + + for (const columnSidecar of columnSidecars) { + if (!columnIndices.includes(columnSidecar.index)) { + throw new DownloadByRootError( + { + code: DownloadByRootErrorCode.EXTRA_SIDECAR_RECEIVED, + peer: prettyPrintPeerIdStr(peerIdStr), + blockRoot: prettyBytes(toRootHex(blockRoot)), + invalidIndex: columnSidecar.index, + }, + "received a columnSidecar that was not requested" ); - const signedBeaconBlock = blockInput.getBlock(); - const blockBody = signedBeaconBlock.message.body; - for (const [requestIndex, blobAndProof] of response.entries()) { - if (blobAndProof) { - const {blob, proof} = blobAndProof; - const {index} = missingBlobsMeta[requestIndex]; - const kzgCommitmentInclusionProof = computeInclusionProof(forkName, blockBody, index); - const blobSidecar: deneb.BlobSidecar = { - blob, - index, - kzgProof: proof, - kzgCommitment: blockBody.blobKzgCommitments[index], - kzgCommitmentInclusionProof, - signedBlockHeader: signedBlockToSignedHeader(config, signedBeaconBlock), - }; - blockInput.addBlob({ - blobSidecar, - blockRootHex: blockInput.blockRootHex, - seenTimestampSec: Date.now() / 1000, - source: BlockInputSource.engine, - }); - } - } + } - if (blockInput.hasAllData()) { - return; - } + const headerRoot = config + .getForkTypes(columnSidecar.signedBlockHeader.message.slot) + .BeaconBlockHeader.hashTreeRoot(columnSidecar.signedBlockHeader.message); + if (!byteArrayEquals(blockRoot, headerRoot)) { + throw new DownloadByRootError( + { + code: DownloadByRootErrorCode.MISMATCH_BLOCK_ROOT, + peer: prettyPrintPeerIdStr(peerIdStr), + requestedBlockRoot: prettyBytes(toRootHex(blockRoot)), + receivedBlockRoot: prettyBytes(toRootHex(headerRoot)), + }, + `columnSidecar.signedBlockHeader does not match requested blockRoot for index=${columnSidecar.index}` + ); } - const response = await network.sendBlobSidecarsByRoot( - peerIdStr, - missingBlobsMeta.map(({blockRoot, index}) => ({blockRoot, index})) - ); - const seenTimestampSec = Date.now() / 1000; - - for (const blobSidecar of response) { - const blockRoot = config -
.getForkTypes(blobSidecar.signedBlockHeader.message.slot) - .BeaconBlockHeader.hashTreeRoot(blobSidecar.signedBlockHeader.message); - blockInput.addBlob({ - blobSidecar, - peerIdStr, - seenTimestampSec, - blockRootHex: toHex(blockRoot), - source: BlockInputSource.byRoot, + if (!verifyDataColumnSidecarInclusionProof(columnSidecar)) { + throw new DownloadByRootError({ + code: DownloadByRootErrorCode.INVALID_INCLUSION_PROOF, + peer: prettyPrintPeerIdStr(peerIdStr), + blockRoot: prettyBytes(toRootHex(blockRoot)), + sidecarIndex: columnSidecar.index, }); } + } - return; + try { + // TODO(fulu): need to double check that the construction of these arrays is correct + await verifyDataColumnSidecarKzgProofs( + columnSidecars.flatMap((c) => c.kzgCommitments), + columnSidecars.flatMap((c) => Array.from({length: c.column.length}, () => c.index)), + columnSidecars.flatMap((c) => c.column), + columnSidecars.flatMap((c) => c.kzgProofs) + ); + } catch { + throw new DownloadByRootError({ + code: DownloadByRootErrorCode.INVALID_KZG_PROOF, + peer: prettyPrintPeerIdStr(peerIdStr), + blockRoot: prettyBytes(toRootHex(blockRoot)), + }); } - throw new DownloadByRootError({ - code: DownloadByRootErrorCode.INVALID_BLOCK_INPUT_TYPE, - blockRoot: prettyBytes(blockInput.blockRootHex), - type: blockInput.type, - }); + return columnSidecars; } +// export function compareIndices( +// expected: number[], +// received: number[] +// ): { +// missingIndices: number; +// extraIndices: number; +// } { +// const missingIndices: number[] = []; +// const extraIndices: number[] = []; + +// for (const index of received) { +// if (!expected.includes(index)) { +// extraIndices.push(index); +// } +// } +// for (const index of expected) { +// if (!received.includes(index)) { +// missingIndices.push(index); +// } +// } + +// return { +// missingIndices, +// extraIndices, +// }; +// } + +// export async function validateColumnSidecars( +// config: ChainForkConfig, +// rootHex: RootHex, +// requestedIndices: ColumnIndex[], +// columnSidecars: fulu.DataColumnSidecars +// ): void { +// for (const columnSidecar of columnSidecars) { +// if (!requestedIndices.includes(columnSidecar.index)) { +// throw new DownloadByRootError(); +// } + +// const headerRoot = config +// .getForkTypes(columnSidecar.signedBlockHeader.message.slot) +// .BeaconBlockHeader.hashTreeRoot(columnSidecar.signedBlockHeader.message); +// if (rootHex !== toRootHex(headerRoot)) { +// throw new DownloadByRootError(); +// } + +// if (!verifyDataColumnSidecarInclusionProof(columnSidecar)) { +// throw new DownloadByRootError(); +// } +// } + +// try { +// // TODO(fulu): need to double check that the construction of these arrays is correct +// await verifyDataColumnSidecarKzgProofs( +// columnSidecars.flatMap((c) => c.kzgCommitments), +// columnSidecars.flatMap((c) => Array.from({length: c.column.length}, () => c.index)), +// columnSidecars.flatMap((c) => c.column), +// columnSidecars.flatMap((c) => c.kzgProofs) +// ); +// } catch { +// throw new DownloadByRootError(); +// } +// } +// export async function validateBlobSidecars( +// config: ChainForkConfig, +// rootHex: RootHex, +// requestedIndices: ColumnIndex[], +// blobSidecars: fulu.DataColumnSidecars +// ): void { +// for (const blobSidecar of blobSidecars) { +// if (!requestedIndices.includes(blobSidecar.index)) { +// throw new DownloadByRootError(); +// } +// const headerRoot = config +// .getForkTypes(blobSidecar.signedBlockHeader.message.slot) +// .BeaconBlockHeader.hashTreeRoot(blobSidecar.signedBlockHeader.message); 
+// if (rootHex !== toRootHex(headerRoot)) { +// throw new DownloadByRootError(); +// } + +// if (!validateBlobSidecarInclusionProof(blobSidecar)) { +// throw new DownloadByRootError(); +// } +// } + +// try { +// await validateBlobsAndBlobProofs( +// blobSidecars.map((b) => b.kzgCommitment), +// blobSidecars.map((b) => b.blob), +// blobSidecars.map((b) => b.kzgProof) +// ); +// } catch { +// throw new DownloadByRootError(); +// } +// } + +// export async function fetchByRoot({ +// config, +// peerIdStr, +// network, +// blockRoot, +// block, +// blobIndices, +// columnIndices, +// }: FetchByRootProps): DownloadByRootResponses { +// let blobSidecars: deneb.BlobSidecars | undefined; +// let columnSidecars: fulu.DataColumnSidecars | undefined; + +// if (!block) { +// block = await network.sendBeaconBlocksByRoot(peerIdStr, [blockRoot]); +// } + +// const forkName = config.getForkName(block.message.slot); +// if (isForkPostFulu(forkName)) { +// if (!columnIndices) { +// throw new DownloadByRootError({ +// code: DownloadByRootErrorCode.MISSING_COLUMN_INDICES, +// blockRoot: prettyBytes(toRootHex(blockRoot)), +// }); +// } +// columnSidecars = await network.sendDataColumnSidecarsByRoot(peerIdStr, [{blockRoot, columns: columnIndices}]); +// } else if (isForkPostDeneb(forkName)) { +// if (!blobIndices) { +// const blobCount = (block as SignedBeaconBlock).message.body.blobKzgCommitments?.length; +// blobIndices = Array.from({length: blobCount}, (_, i) => i); +// } +// const blobsRequest = blobIndices.map((index) => ({blockRoot, index})); +// blobSidecars = await network.sendBlobSidecarsByRoot(peerIdStr, blobsRequest); +// } + +// return { +// block, +// blobSidecars, +// columnSidecars, +// }; +// } + +// export type ValidateByRootResponses = DownloadByRootResponses & {cacheItem: BlockInputSyncCacheItem}; +// export function validateByRootResponses({ +// cacheItem, +// block, +// blobSidecars, +// columnSidecars, +// }: ValidateByRootResponses): void { +// const blockRoot = this.config.getForkTypes(block.message.slot).BeaconBlock.hashTreeRoot(block.message); +// const blockRootHex = toRootHex(blockRoot); + +// const rootHex = getBlockInputSyncCacheItemRootHex(cacheItem); +// if (rootHex !== blockRootHex) { +// } +// } + +// export type ValidateByRootResponses = DownloadByRootRequests & DownloadByRootResponses & {config: ChainForkConfig}; +// export function validateByRootResponses({ +// config, +// blocksRequest: blockRequest, +// blocks: block, +// blobsRequest, +// blobSidecars, +// columnsRequest, +// columnSidecars, +// }: ValidateByRootResponses): string { +// let blockRootHex: string | undefined; +// if (blockRequest) { +// if (!block) { +// throw new DownloadByRootError({ +// code: DownloadByRootErrorCode.MISSING_BLOCK_RESPONSE, +// }); +// } +// const blockRoot = config.getForkTypes(block.message.slot).BeaconBlock.hashTreeRoot(block.message); +// blockRootHex = toRootHex(blockRoot); +// } +// if (blobsRequest) { +// if (!blobSidecars) { +// throw new DownloadByRootError({ +// code: DownloadByRootErrorCode.MISSING_BLOBS_RESPONSE, +// }); +// } +// for (const blobSidecar of blobSidecars) { +// const blockRoot = config +// .getForkTypes(blobSidecar.signedBlockHeader.message.slot) +// .BeaconBlockHeader.hashTreeRoot(blobSidecar.signedBlockHeader.message); +// const rootHex = toRootHex(blockRoot); +// if (!blockRootHex) { +// blockRootHex = rootHex; +// } else if (blockRootHex !== rootHex) { +// } +// } +// if (blockRootHex) { +// } +// } +// if (columnsRequest) { +// if (!columnSidecars) { +// 
throw new DownloadByRootError({ +// code: DownloadByRootErrorCode.MISSING_BLOBS_RESPONSE, +// }); +// } + +// const blockRoot = config.getForkTypes(block.message.slot).BeaconBlock.hashTreeRoot(block.message); +// blockRootHex = toRootHex(blockRoot); +// } + +// return blockRootHex; +// } + export enum DownloadByRootErrorCode { - INVALID_BLOCK_INPUT_TYPE = "DOWNLOAD_BY_ROOT_ERROR_INVALID_BLOCK_INPUT_TYPE", - BLOCK_NOT_DOWNLOADED = "DOWNLOAD_BY_ROOT_ERROR_BLOCK_NOT_DOWNLOADED", + MISMATCH_BLOCK_ROOT = "DOWNLOAD_BY_ROOT_ERROR_MISMATCH_BLOCK_ROOT", + EXTRA_SIDECAR_RECEIVED = "DOWNLOAD_BY_ROOT_ERROR_EXTRA_SIDECAR_RECEIVED", + INVALID_INCLUSION_PROOF = "DOWNLOAD_BY_ROOT_ERROR_INVALID_INCLUSION_PROOF", + INVALID_KZG_PROOF = "DOWNLOAD_BY_ROOT_ERROR_INVALID_KZG_PROOF", + MISSING_BLOCK_RESPONSE = "DOWNLOAD_BY_ROOT_ERROR_MISSING_BLOCK_RESPONSE", + Z = "DOWNLOAD_BY_ROOT_ERROR_Z", } export type DownloadByRootErrorType = | { - code: DownloadByRootErrorCode.INVALID_BLOCK_INPUT_TYPE; + code: DownloadByRootErrorCode.MISMATCH_BLOCK_ROOT; + peer: string; + requestedBlockRoot: string; + receivedBlockRoot: string; + } + | { + code: DownloadByRootErrorCode.EXTRA_SIDECAR_RECEIVED; + peer: string; + blockRoot: string; + invalidIndex: number; + } + | { + code: DownloadByRootErrorCode.INVALID_INCLUSION_PROOF; + peer: string; + blockRoot: string; + sidecarIndex: number; + } + | { + code: DownloadByRootErrorCode.INVALID_KZG_PROOF; + peer: string; blockRoot: string; - type: DAType; } | { - code: DownloadByRootErrorCode.BLOCK_NOT_DOWNLOADED; + code: DownloadByRootErrorCode.MISSING_BLOCK_RESPONSE; + peer: string; blockRoot: string; }; From ab5b4d60e6ea8ff87bb4e885401512731291ecba Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Thu, 21 Aug 2025 05:04:30 +0700 Subject: [PATCH 047/173] refactor: rename download to fetch --- .../src/sync/utils/downloadByRoot.ts | 40 +++++++++---------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/packages/beacon-node/src/sync/utils/downloadByRoot.ts b/packages/beacon-node/src/sync/utils/downloadByRoot.ts index 124ae06c4926..efa00f64d182 100644 --- a/packages/beacon-node/src/sync/utils/downloadByRoot.ts +++ b/packages/beacon-node/src/sync/utils/downloadByRoot.ts @@ -16,29 +16,29 @@ import {PeerIdStr} from "../../util/peerId.js"; import {BlobSidecarsByRootRequest} from "../../util/types.js"; import {BlockInputSyncCacheItem, getBlockInputSyncCacheItemRootHex, isPendingBlockInput} from "../types.js"; -export type DownloadByRootCoreProps = { +export type FetchByRootCoreProps = { config: ChainForkConfig; network: INetwork; peerIdStr: PeerIdStr; }; -export type DownloadByRootProps = DownloadByRootCoreProps & { +export type FetchByRootProps = FetchByRootCoreProps & { cacheItem: BlockInputSyncCacheItem; }; -export type DownloadAndValidateBlockProps = DownloadByRootCoreProps & {blockRoot: Uint8Array}; -export type DownloadAndValidateBlobsProps = DownloadAndValidateBlockProps & {blobIndices: BlobIndex[]}; -export type DownloadAndValidateColumnsProps = DownloadAndValidateBlockProps & {columnIndices: ColumnIndex[]}; -export type DownloadByRootResponses = { +export type FetchByRootAndValidateBlockProps = FetchByRootCoreProps & {blockRoot: Uint8Array}; +export type FetchByRootAndValidateBlobsProps = FetchByRootAndValidateBlockProps & {blobIndices: BlobIndex[]}; +export type FetchByRootAndValidateColumnsProps = FetchByRootAndValidateBlockProps & {columnIndices: ColumnIndex[]}; +export type FetchByRootResponses = { block: SignedBeaconBlock; blobSidecars?: deneb.BlobSidecars; 
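  // At most one of the sidecar fields below is populated per block: blob sidecars from Deneb up to
  // Fulu, data column sidecars from Fulu onward, and neither for pre-Deneb forks.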
columnSidecars?: fulu.DataColumnSidecars; }; -export async function downloadByRoot({ +export async function fetchByRoot({ config, network, peerIdStr, cacheItem, -}: DownloadByRootProps): Promise { +}: FetchByRootProps): Promise { let block: SignedBeaconBlock; let blobSidecars: deneb.BlobSidecars | undefined; let columnSidecars: fulu.DataColumnSidecars | undefined; @@ -50,7 +50,7 @@ export async function downloadByRoot({ if (cacheItem.blockInput.hasBlock()) { block = cacheItem.blockInput.getBlock(); } else { - block = await downloadAndValidateBlock({ + block = await fetchAndValidateBlock({ config, network, peerIdStr, @@ -60,7 +60,7 @@ export async function downloadByRoot({ if (!cacheItem.blockInput.hasAllData()) { if (isBlockInputBlobs(cacheItem.blockInput)) { - blobSidecars = await downloadAndValidateBlobs({ + blobSidecars = await fetchAndValidateBlobs({ config, network, peerIdStr, @@ -69,7 +69,7 @@ export async function downloadByRoot({ }); } if (isBlockInputColumns(cacheItem.blockInput)) { - columnSidecars = await downloadAndValidateColumns({ + columnSidecars = await fetchAndValidateColumns({ config, network, peerIdStr, @@ -79,7 +79,7 @@ export async function downloadByRoot({ } } } else { - block = await downloadAndValidateBlock({ + block = await fetchAndValidateBlock({ config, network, peerIdStr, @@ -87,7 +87,7 @@ export async function downloadByRoot({ }); const forkName = config.getForkName(block.message.slot); if (isForkPostFulu(forkName)) { - columnSidecars = await downloadAndValidateColumns({ + columnSidecars = await fetchAndValidateColumns({ config, network, peerIdStr, @@ -96,7 +96,7 @@ export async function downloadByRoot({ }); } else if (isForkPostDeneb(forkName)) { const blobCount = (block as SignedBeaconBlock).message.body.blobKzgCommitments.length; - blobSidecars = await downloadAndValidateBlobs({ + blobSidecars = await fetchAndValidateBlobs({ config, network, peerIdStr, @@ -113,12 +113,12 @@ export async function downloadByRoot({ }; } -export async function downloadAndValidateBlock({ +export async function fetchAndValidateBlock({ config, network, peerIdStr, blockRoot, -}: DownloadAndValidateBlockProps): Promise { +}: FetchByRootAndValidateBlockProps): Promise { const response = await network.sendBeaconBlocksByRoot(peerIdStr, [blockRoot]); const block = response.at(0)?.data; if (!block) { @@ -143,13 +143,13 @@ export async function downloadAndValidateBlock({ return block; } -export async function downloadAndValidateBlobs({ +export async function fetchAndValidateBlobs({ config, network, peerIdStr, blockRoot, blobIndices, -}: DownloadAndValidateBlobsProps): Promise { +}: FetchByRootAndValidateBlobsProps): Promise { const blobsRequest = blobIndices.map((index) => ({blockRoot, index})); const blobSidecars = await network.sendBlobSidecarsByRoot(peerIdStr, blobsRequest); @@ -207,13 +207,13 @@ export async function downloadAndValidateBlobs({ return blobSidecars; } -export async function downloadAndValidateColumns({ +export async function fetchAndValidateColumns({ config, network, peerIdStr, blockRoot, columnIndices, -}: DownloadAndValidateColumnsProps): Promise { +}: FetchByRootAndValidateColumnsProps): Promise { const columnSidecars = await network.sendDataColumnSidecarsByRoot(peerIdStr, [{blockRoot, columns: columnIndices}]); for (const columnSidecar of columnSidecars) { From 612af54d39ef08ab2e51bc2385e3f8675d74cec5 Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Thu, 21 Aug 2025 06:34:53 +0700 Subject: [PATCH 048/173] feat: update BlockInputCache to pass in blockRootHex instead 
of calculate it --- .../src/api/impl/beacon/blocks/index.ts | 1 + .../src/chain/blocks/blockInput/blockInput.ts | 61 +++++--- .../src/chain/blocks/blockInput/types.ts | 5 +- .../chain/seenCache/seenGossipBlockInput.ts | 58 ++++--- .../src/network/processor/gossipHandlers.ts | 3 + .../src/sync/utils/downloadByRange.ts | 147 ++---------------- 6 files changed, 91 insertions(+), 184 deletions(-) diff --git a/packages/beacon-node/src/api/impl/beacon/blocks/index.ts b/packages/beacon-node/src/api/impl/beacon/blocks/index.ts index 8b960330a051..a017fc244cd9 100644 --- a/packages/beacon-node/src/api/impl/beacon/blocks/index.ts +++ b/packages/beacon-node/src/api/impl/beacon/blocks/index.ts @@ -85,6 +85,7 @@ export function getBeaconBlockApi({ block: signedBlock, source: BlockInputSource.api, seenTimestampSec, + blockRootHex: blockRoot, }); let blobSidecars: deneb.BlobSidecars, dataColumnSidecars: fulu.DataColumnSidecars; diff --git a/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts b/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts index 0be730804cb2..a1430f0f7eb9 100644 --- a/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts +++ b/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts @@ -206,7 +206,7 @@ export class BlockInputPreData extends AbstractBlockInput { static createFromBlock(props: AddBlock & CreateBlockInputMeta): BlockInputPreData { const init: BlockInputInit = { daOutOfRange: props.daOutOfRange, - timeCreated: props.source.seenTimestampSec, + timeCreated: props.seenTimestampSec, forkName: props.forkName, slot: props.block.message.slot, blockRootHex: props.blockRootHex, @@ -216,8 +216,12 @@ export class BlockInputPreData extends AbstractBlockInput { hasBlock: true, hasAllData: true, block: props.block, - source: props.source, - timeCompleteSec: props.source.seenTimestampSec, + source: { + source: props.source, + seenTimestampSec: props.seenTimestampSec, + peerIdStr: props.peerIdStr, + }, + timeCompleteSec: props.seenTimestampSec, }; return new BlockInputPreData(init, state); } @@ -285,12 +289,16 @@ export class BlockInputBlobs extends AbstractBlockInput, opts = {throwOnDuplicateAdd: true}): void { + addBlock( + {blockRootHex, block, source, seenTimestampSec, peerIdStr}: AddBlock, + opts = {throwOnDuplicateAdd: true} + ): void { // this check suffices for checking slot, parentRoot, and forkName if (blockRootHex !== this.blockRootHex) { throw new BlockInputError( @@ -345,8 +356,8 @@ export class BlockInputBlobs extends AbstractBlockInput = { +export type AddBlock = SourceMeta & { block: SignedBeaconBlock; blockRootHex: string; - source: SourceMeta; }; export type AddBlob = BlobWithSource & { diff --git a/packages/beacon-node/src/chain/seenCache/seenGossipBlockInput.ts b/packages/beacon-node/src/chain/seenCache/seenGossipBlockInput.ts index 640ad1fbc26c..d4d7b0fff57f 100644 --- a/packages/beacon-node/src/chain/seenCache/seenGossipBlockInput.ts +++ b/packages/beacon-node/src/chain/seenCache/seenGossipBlockInput.ts @@ -12,6 +12,7 @@ import { BlockInputBlobs, BlockInputColumns, BlockInputPreData, + BlockWithSource, DAType, ForkBlobsDA, IBlockInput, @@ -143,10 +144,7 @@ export class SeenBlockInput { this.pruneToMaxSize(); }; - getByBlock({block, source, seenTimestampSec, peerIdStr}: SourceMeta & {block: SignedBeaconBlock}): BlockInput { - const blockRoot = this.config.getForkTypes(block.message.slot).BeaconBlock.hashTreeRoot(block.message); - const blockRootHex = toRootHex(blockRoot); - + getByBlock({blockRootHex, block, source, 
seenTimestampSec, peerIdStr}: BlockWithSource): BlockInput { // TODO(peerDAS): Why is it necessary to static cast this here. All conditional paths result in a valid value so should be defined correctly below let blockInput = this.blockInputs.get(blockRootHex) as IBlockInput; if (!blockInput) { @@ -157,11 +155,9 @@ export class SeenBlockInput { blockRootHex, daOutOfRange, forkName, - source: { - source, - seenTimestampSec, - peerIdStr, - }, + source, + seenTimestampSec, + peerIdStr, }); } else if (isForkPostFulu(forkName)) { blockInput = BlockInputColumns.createFromBlock({ @@ -171,11 +167,9 @@ export class SeenBlockInput { forkName, custodyColumns: this.custodyConfig.custodyColumns, sampledColumns: this.custodyConfig.sampledColumns, - source: { - source, - seenTimestampSec, - peerIdStr, - }, + source, + seenTimestampSec, + peerIdStr, }); } else { blockInput = BlockInputBlobs.createFromBlock({ @@ -183,18 +177,16 @@ export class SeenBlockInput { blockRootHex, daOutOfRange, forkName, - source: { - source, - seenTimestampSec, - peerIdStr, - }, + source, + seenTimestampSec, + peerIdStr, }); } this.blockInputs.set(blockInput.blockRootHex, blockInput); } if (!blockInput.hasBlock()) { - blockInput.addBlock({block, blockRootHex, source: {source, seenTimestampSec, peerIdStr}}); + blockInput.addBlock({block, blockRootHex, source, seenTimestampSec, peerIdStr}); } else { this.logger?.debug("Attempt to cache block but is already cached on BlockInput", blockInput.getLogMeta()); this.metrics?.seenCache.blockInput.duplicateBlockCount.inc({source}); @@ -204,14 +196,15 @@ export class SeenBlockInput { } getByBlob( - {blobSidecar, source, seenTimestampSec, peerIdStr}: SourceMeta & {blobSidecar: deneb.BlobSidecar}, + { + blockRootHex, + blobSidecar, + source, + seenTimestampSec, + peerIdStr, + }: SourceMeta & {blockRootHex: RootHex; blobSidecar: deneb.BlobSidecar}, opts: GetByBlobOptions = {} ): BlockInputBlobs { - const blockRoot = this.config - .getForkTypes(blobSidecar.signedBlockHeader.message.slot) - .BeaconBlockHeader.hashTreeRoot(blobSidecar.signedBlockHeader.message); - const blockRootHex = toRootHex(blockRoot); - // TODO(peerDAS): Why is it necessary to static cast this here. 
All conditional paths result in a valid value so should be defined correctly below let blockInput = this.blockInputs.get(blockRootHex) as IBlockInput; let created = false; @@ -263,14 +256,15 @@ export class SeenBlockInput { } getByColumn( - {columnSidecar, seenTimestampSec, source, peerIdStr}: SourceMeta & {columnSidecar: fulu.DataColumnSidecar}, + { + blockRootHex, + columnSidecar, + seenTimestampSec, + source, + peerIdStr, + }: SourceMeta & {blockRootHex: RootHex; columnSidecar: fulu.DataColumnSidecar}, opts: GetByBlobOptions = {} ): BlockInputColumns { - const blockRoot = this.config - .getForkTypes(columnSidecar.signedBlockHeader.message.slot) - .BeaconBlockHeader.hashTreeRoot(columnSidecar.signedBlockHeader.message); - const blockRootHex = toRootHex(blockRoot); - let blockInput = this.blockInputs.get(blockRootHex); let created = false; if (!blockInput) { diff --git a/packages/beacon-node/src/network/processor/gossipHandlers.ts b/packages/beacon-node/src/network/processor/gossipHandlers.ts index fd861de665db..90c16d4aa026 100644 --- a/packages/beacon-node/src/network/processor/gossipHandlers.ts +++ b/packages/beacon-node/src/network/processor/gossipHandlers.ts @@ -131,6 +131,7 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand // tracked in https://github.com/ChainSafe/lodestar/issues/7957 const blockInput = chain.seenGossipBlockInput.getByBlock({ block: signedBlock, + blockRootHex, source: BlockInputSource.gossip, seenTimestampSec, peerIdStr, @@ -208,6 +209,7 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand try { await validateGossipBlobSidecar(fork, chain, blobSidecar, subnet); const blockInput = chain.seenGossipBlockInput.getByBlob({ + blockRootHex, blobSidecar, source: BlockInputSource.gossip, seenTimestampSec, @@ -282,6 +284,7 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand try { await validateGossipDataColumnSidecar(chain, dataColumnSidecar, gossipSubnet, metrics); const blockInput = chain.seenGossipBlockInput.getByColumn({ + blockRootHex, columnSidecar: dataColumnSidecar, source: BlockInputSource.gossip, seenTimestampSec, diff --git a/packages/beacon-node/src/sync/utils/downloadByRange.ts b/packages/beacon-node/src/sync/utils/downloadByRange.ts index 897a4c027af2..84926581763c 100644 --- a/packages/beacon-node/src/sync/utils/downloadByRange.ts +++ b/packages/beacon-node/src/sync/utils/downloadByRange.ts @@ -66,19 +66,17 @@ export function cacheByRangeResponses({ for (const block of responses.blocks ?? []) { const existing = updatedBatchBlocks.find((b) => b.slot === block.message.slot); + const blockRoot = config.getForkTypes(block.message.slot).BeaconBlock.hashTreeRoot(block.message); + const blockRootHex = toRootHex(blockRoot); if (existing) { - const blockRoot = config.getForkTypes(block.message.slot).BeaconBlock.hashTreeRoot(block.message); - const blockRootHex = toRootHex(blockRoot); // will throw if root hex does not match (meaning we are following the wrong chain) existing.addBlock( { block, blockRootHex, - source: { - source, - peerIdStr, - seenTimestampSec, - }, + source, + peerIdStr, + seenTimestampSec, }, {throwOnDuplicateAdd: false} ); @@ -86,6 +84,7 @@ export function cacheByRangeResponses({ updatedBatchBlocks.push( cache.getByBlock({ block, + blockRootHex, source, peerIdStr, seenTimestampSec, @@ -95,12 +94,12 @@ export function cacheByRangeResponses({ } for (const blobSidecar of responses.blobSidecars ?? 
[]) { + const blockRoot = config + .getForkTypes(blobSidecar.signedBlockHeader.message.slot) + .BeaconBlockHeader.hashTreeRoot(blobSidecar.signedBlockHeader.message); + const blockRootHex = toRootHex(blockRoot); const existing = updatedBatchBlocks.find((b) => b.slot === blobSidecar.signedBlockHeader.message.slot); if (existing) { - const blockRoot = config - .getForkTypes(existing.slot) - .BeaconBlockHeader.hashTreeRoot(blobSidecar.signedBlockHeader.message); - const blockRootHex = toRootHex(blockRoot); if (!isBlockInputBlobs(existing)) { throw new DownloadByRangeError({ code: DownloadByRangeErrorCode.MISMATCH_BLOCK_INPUT_TYPE, @@ -124,6 +123,7 @@ export function cacheByRangeResponses({ } else { updatedBatchBlocks.push( cache.getByBlob({ + blockRootHex, blobSidecar, source, peerIdStr, @@ -134,12 +134,12 @@ export function cacheByRangeResponses({ } for (const columnSidecar of responses.columnSidecars ?? []) { + const blockRoot = config + .getForkTypes(columnSidecar.signedBlockHeader.message.slot) + .BeaconBlockHeader.hashTreeRoot(columnSidecar.signedBlockHeader.message); + const blockRootHex = toRootHex(blockRoot); const existing = updatedBatchBlocks.find((b) => b.slot === columnSidecar.signedBlockHeader.message.slot); if (existing) { - const blockRoot = config - .getForkTypes(existing.slot) - .BeaconBlockHeader.hashTreeRoot(columnSidecar.signedBlockHeader.message); - const blockRootHex = toRootHex(blockRoot); if (!isBlockInputColumns(existing)) { throw new DownloadByRangeError({ code: DownloadByRangeErrorCode.MISMATCH_BLOCK_INPUT_TYPE, @@ -163,6 +163,7 @@ export function cacheByRangeResponses({ } else { updatedBatchBlocks.push( cache.getByColumn({ + blockRootHex, columnSidecar, source, peerIdStr, @@ -175,122 +176,6 @@ export function cacheByRangeResponses({ return updatedBatchBlocks; } -export async function downloadAndCacheByRange( - request: DownloadAndCacheByRangeProps -): Promise { - const {logger, cache, peerIdStr} = request; - const {blocks, blobSidecars, columnSidecars} = await downloadByRange(request); - const blockInputs = new Map(); - const seenTimestampSec = Date.now() / 1000; - - function uncache() { - for (const [rootHex] of blockInputs) { - try { - cache.remove(rootHex); - } catch (e) { - logger.error( - "Error removing blockInput from seenBlockInputCache", - {blockRoot: prettyBytes(rootHex)}, - e as Error - ); - } - } - } - - let numberOfBlocks = 0; - if (blocks) { - try { - for (const block of blocks) { - const blockInput = cache.getByBlock({ - block, - seenTimestampSec, - source: BlockInputSource.byRange, - peerIdStr, - }); - numberOfBlocks++; - blockInputs.set(blockInput.blockRootHex, blockInput); - } - } catch (err) { - uncache(); - throw new DownloadByRangeError( - { - code: DownloadByRangeErrorCode.CACHING_ERROR, - peerId: prettyPrintPeerIdStr(peerIdStr), - message: (err as Error).message, - }, - "Error caching ByRange fetched block" - ); - } - } - - const processedBlobs = new Map(); - let numberOfBlobs = 0; - if (blobSidecars) { - try { - for (const blobSidecar of blobSidecars) { - const blockInput = cache.getByBlob({ - peerIdStr, - blobSidecar, - seenTimestampSec, - source: BlockInputSource.byRange, - }); - numberOfBlobs++; - blockInputs.set(blockInput.blockRootHex, blockInput); - const indices = processedBlobs.get(blockInput.blockRootHex) ?? 
[]; - indices.push(blobSidecar.index); - processedBlobs.set(blockInput.blockRootHex, indices); - } - } catch (err) { - uncache(); - throw new DownloadByRangeError( - { - code: DownloadByRangeErrorCode.CACHING_ERROR, - peerId: prettyPrintPeerIdStr(peerIdStr), - message: (err as Error).message, - }, - "Error caching ByRange fetched blob" - ); - } - } - - const processedColumns = new Map(); - let numberOfColumns = 0; - if (columnSidecars) { - try { - for (const columnSidecar of columnSidecars) { - const blockInput = cache.getByColumn({ - peerIdStr, - columnSidecar, - seenTimestampSec, - source: BlockInputSource.byRange, - }); - numberOfColumns++; - blockInputs.set(blockInput.blockRootHex, blockInput); - const indices = processedColumns.get(blockInput.blockRootHex) ?? []; - indices.push(columnSidecar.index); - processedColumns.set(blockInput.blockRootHex, indices); - } - } catch (err) { - uncache(); - throw new DownloadByRangeError( - { - code: DownloadByRangeErrorCode.CACHING_ERROR, - peerId: prettyPrintPeerIdStr(peerIdStr), - message: (err as Error).message, - }, - "Error caching ByRange fetched column" - ); - } - } - - return { - blockInputs: Array.from(blockInputs.values()), - numberOfBlocks, - numberOfBlobs, - numberOfColumns, - }; -} - export async function downloadByRange({ config, network, From 43ec65f84eae4fc5f1405bea429cfa521c93a312 Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Thu, 21 Aug 2025 06:35:46 +0700 Subject: [PATCH 049/173] fix: add caching to downloadByRoot and pass in executionEngine --- packages/beacon-node/src/sync/unknownBlock.ts | 15 +- .../src/sync/utils/downloadByRoot.ts | 354 +++++++----------- 2 files changed, 151 insertions(+), 218 deletions(-) diff --git a/packages/beacon-node/src/sync/unknownBlock.ts b/packages/beacon-node/src/sync/unknownBlock.ts index 1f64a857e4c3..4e0cf89bb074 100644 --- a/packages/beacon-node/src/sync/unknownBlock.ts +++ b/packages/beacon-node/src/sync/unknownBlock.ts @@ -506,14 +506,16 @@ export class BlockInputSync { const {peerId, client: peerClient} = peer; excludedPeers.add(peerId); + cacheItem.peerIdStrings.add(peerId); + try { cacheItem = await downloadByRoot({ - pending: cacheItem, - peerIdStr: peer.peerId, config: this.config, network: this.network, - cache: this.chain.seenBlockInputCache, + seenCache: this.chain.seenGossipBlockInput, executionEngine: this.chain.executionEngine, + peerIdStr: peerId, + cacheItem, }); } catch (e) { this.logger.debug( @@ -525,10 +527,9 @@ export class BlockInputSync { this.peerBalancer.onRequestCompleted(peerId); } - if (isPendingBlockInput(cacheItem) && cacheItem.blockInput.hasBlockAndAllData()) { - cacheItem.status = PendingBlockInputStatus.downloaded; - cacheItem.peerIdStrings.add(peerId); - cacheItem.timeSyncedSec = Date.now() / 1000; + this.pendingBlocks.set(getBlockInputSyncCacheItemRootHex(cacheItem), cacheItem); + + if (cacheItem.status === PendingBlockInputStatus.downloaded) { return cacheItem; } } diff --git a/packages/beacon-node/src/sync/utils/downloadByRoot.ts b/packages/beacon-node/src/sync/utils/downloadByRoot.ts index efa00f64d182..c4f6caac62d0 100644 --- a/packages/beacon-node/src/sync/utils/downloadByRoot.ts +++ b/packages/beacon-node/src/sync/utils/downloadByRoot.ts @@ -3,18 +3,26 @@ import {ForkPostDeneb, NUMBER_OF_COLUMNS, isForkPostDeneb, isForkPostFulu} from import {BlobIndex, ColumnIndex, RootHex, SignedBeaconBlock, deneb, fulu, phase0} from "@lodestar/types"; import {LodestarError, fromHex, prettyBytes, toRootHex} from "@lodestar/utils"; import {isBlockInputBlobs, 
isBlockInputColumns} from "../../chain/blocks/blockInput/blockInput.js"; -import {IBlockInput} from "../../chain/blocks/blockInput/types.js"; +import {BlockInputSource, IBlockInput} from "../../chain/blocks/blockInput/types.js"; +import {SeenBlockInput} from "../../chain/seenCache/seenGossipBlockInput.js"; import {validateBlobSidecarInclusionProof, validateBlobsAndBlobProofs} from "../../chain/validation/blobSidecar.js"; import { verifyDataColumnSidecarInclusionProof, verifyDataColumnSidecarKzgProofs, } from "../../chain/validation/dataColumnSidecar.js"; +import {IExecutionEngine} from "../../execution/index.js"; import {INetwork} from "../../network/interface.js"; import {prettyPrintPeerIdStr} from "../../network/util.js"; import {byteArrayEquals} from "../../util/bytes.js"; import {PeerIdStr} from "../../util/peerId.js"; import {BlobSidecarsByRootRequest} from "../../util/types.js"; -import {BlockInputSyncCacheItem, getBlockInputSyncCacheItemRootHex, isPendingBlockInput} from "../types.js"; +import { + BlockInputSyncCacheItem, + PendingBlockInput, + PendingBlockInputStatus, + getBlockInputSyncCacheItemRootHex, + isPendingBlockInput, +} from "../types.js"; export type FetchByRootCoreProps = { config: ChainForkConfig; @@ -23,29 +31,139 @@ export type FetchByRootCoreProps = { }; export type FetchByRootProps = FetchByRootCoreProps & { cacheItem: BlockInputSyncCacheItem; + executionEngine: IExecutionEngine; + blockRoot: Uint8Array; }; export type FetchByRootAndValidateBlockProps = FetchByRootCoreProps & {blockRoot: Uint8Array}; -export type FetchByRootAndValidateBlobsProps = FetchByRootAndValidateBlockProps & {blobIndices: BlobIndex[]}; -export type FetchByRootAndValidateColumnsProps = FetchByRootAndValidateBlockProps & {columnIndices: ColumnIndex[]}; +export type FetchByRootAndValidateBlobsProps = FetchByRootAndValidateBlockProps & { + executionEngine: IExecutionEngine; + blobIndices: BlobIndex[]; +}; +export type FetchByRootAndValidateColumnsProps = FetchByRootAndValidateBlockProps & { + executionEngine: IExecutionEngine; + columnIndices: ColumnIndex[]; +}; export type FetchByRootResponses = { block: SignedBeaconBlock; blobSidecars?: deneb.BlobSidecars; columnSidecars?: fulu.DataColumnSidecars; }; +export type DownloadByRootProps = FetchByRootCoreProps & { + cacheItem: BlockInputSyncCacheItem; + seenCache: SeenBlockInput; + executionEngine: IExecutionEngine; +}; +export async function downloadByRoot({ + config, + seenCache, + network, + executionEngine, + peerIdStr, + cacheItem, +}: DownloadByRootProps): Promise { + const rootHex = getBlockInputSyncCacheItemRootHex(cacheItem); + const blockRoot = fromHex(rootHex); + + const {block, blobSidecars, columnSidecars} = await fetchByRoot({ + config, + network, + executionEngine, + cacheItem, + blockRoot, + peerIdStr, + }); + + let blockInput: IBlockInput; + if (isPendingBlockInput(cacheItem)) { + blockInput = cacheItem.blockInput; + if (!blockInput.hasBlock()) { + blockInput.addBlock({ + block, + blockRootHex: rootHex, + source: BlockInputSource.byRoot, + seenTimestampSec: Date.now(), + peerIdStr, + }); + } + } else { + blockInput = seenCache.getByBlock({ + block, + peerIdStr, + blockRootHex: rootHex, + seenTimestampSec: Date.now(), + source: BlockInputSource.byRoot, + }); + } + + if (isBlockInputBlobs(blockInput)) { + if (!blobSidecars) { + throw new DownloadByRootError({ + code: DownloadByRootErrorCode.MISSING_BLOB_RESPONSE, + blockRoot: prettyBytes(rootHex), + peer: peerIdStr, + }); + } + for (const blobSidecar of blobSidecars) { + 
blockInput.addBlob({ + blobSidecar, + blockRootHex: rootHex, + seenTimestampSec: Date.now(), + source: BlockInputSource.byRoot, + peerIdStr, + }); + } + } + + if (isBlockInputColumns(blockInput)) { + if (!columnSidecars) { + throw new DownloadByRootError({ + code: DownloadByRootErrorCode.MISSING_COLUMN_RESPONSE, + blockRoot: prettyBytes(rootHex), + peer: peerIdStr, + }); + } + for (const columnSidecar of columnSidecars) { + blockInput.addColumn({ + columnSidecar, + blockRootHex: rootHex, + seenTimestampSec: Date.now(), + source: BlockInputSource.byRoot, + peerIdStr, + }); + } + } + + let status: PendingBlockInputStatus; + let timeSyncedSec: number | undefined; + if (blockInput.hasBlockAndAllData()) { + status = PendingBlockInputStatus.downloaded; + timeSyncedSec = Date.now() / 1000; + } else { + status = PendingBlockInputStatus.pending; + } + + return { + status, + blockInput, + timeSyncedSec, + timeAddedSec: cacheItem.timeAddedSec, + peerIdStrings: cacheItem.peerIdStrings, + }; +} + export async function fetchByRoot({ config, network, + executionEngine, peerIdStr, + blockRoot, cacheItem, }: FetchByRootProps): Promise { let block: SignedBeaconBlock; let blobSidecars: deneb.BlobSidecars | undefined; let columnSidecars: fulu.DataColumnSidecars | undefined; - const rootHex = getBlockInputSyncCacheItemRootHex(cacheItem); - const blockRoot = fromHex(rootHex); - if (isPendingBlockInput(cacheItem)) { if (cacheItem.blockInput.hasBlock()) { block = cacheItem.blockInput.getBlock(); @@ -63,6 +181,7 @@ export async function fetchByRoot({ blobSidecars = await fetchAndValidateBlobs({ config, network, + executionEngine, peerIdStr, blockRoot, blobIndices: cacheItem.blockInput.getMissingBlobMeta().map((b) => b.index), @@ -72,6 +191,7 @@ export async function fetchByRoot({ columnSidecars = await fetchAndValidateColumns({ config, network, + executionEngine, peerIdStr, blockRoot, columnIndices: cacheItem.blockInput.getMissingSampledColumnMeta().map((c) => c.index), @@ -90,6 +210,7 @@ export async function fetchByRoot({ columnSidecars = await fetchAndValidateColumns({ config, network, + executionEngine, peerIdStr, blockRoot, columnIndices: network.custodyConfig.sampledColumns, @@ -99,6 +220,7 @@ export async function fetchByRoot({ blobSidecars = await fetchAndValidateBlobs({ config, network, + executionEngine, peerIdStr, blockRoot, blobIndices: Array.from({length: blobCount}, (_, i) => i), @@ -146,6 +268,7 @@ export async function fetchAndValidateBlock({ export async function fetchAndValidateBlobs({ config, network, + // executionEngine, peerIdStr, blockRoot, blobIndices, @@ -210,6 +333,7 @@ export async function fetchAndValidateBlobs({ export async function fetchAndValidateColumns({ config, network, + // executionEngine, peerIdStr, blockRoot, columnIndices, @@ -273,216 +397,14 @@ export async function fetchAndValidateColumns({ return columnSidecars; } -// export function compareIndices( -// expected: number[], -// received: number[] -// ): { -// missingIndices: number; -// extraIndices: number; -// } { -// const missingIndices: number[] = []; -// const extraIndices: number[] = []; - -// for (const index of received) { -// if (!expected.includes(index)) { -// extraIndices.push(index); -// } -// } -// for (const index of expected) { -// if (!received.includes(index)) { -// missingIndices.push(index); -// } -// } - -// return { -// missingIndices, -// extraIndices, -// }; -// } - -// export async function validateColumnSidecars( -// config: ChainForkConfig, -// rootHex: RootHex, -// requestedIndices: 
ColumnIndex[], -// columnSidecars: fulu.DataColumnSidecars -// ): void { -// for (const columnSidecar of columnSidecars) { -// if (!requestedIndices.includes(columnSidecar.index)) { -// throw new DownloadByRootError(); -// } - -// const headerRoot = config -// .getForkTypes(columnSidecar.signedBlockHeader.message.slot) -// .BeaconBlockHeader.hashTreeRoot(columnSidecar.signedBlockHeader.message); -// if (rootHex !== toRootHex(headerRoot)) { -// throw new DownloadByRootError(); -// } - -// if (!verifyDataColumnSidecarInclusionProof(columnSidecar)) { -// throw new DownloadByRootError(); -// } -// } - -// try { -// // TODO(fulu): need to double check that the construction of these arrays is correct -// await verifyDataColumnSidecarKzgProofs( -// columnSidecars.flatMap((c) => c.kzgCommitments), -// columnSidecars.flatMap((c) => Array.from({length: c.column.length}, () => c.index)), -// columnSidecars.flatMap((c) => c.column), -// columnSidecars.flatMap((c) => c.kzgProofs) -// ); -// } catch { -// throw new DownloadByRootError(); -// } -// } -// export async function validateBlobSidecars( -// config: ChainForkConfig, -// rootHex: RootHex, -// requestedIndices: ColumnIndex[], -// blobSidecars: fulu.DataColumnSidecars -// ): void { -// for (const blobSidecar of blobSidecars) { -// if (!requestedIndices.includes(blobSidecar.index)) { -// throw new DownloadByRootError(); -// } -// const headerRoot = config -// .getForkTypes(blobSidecar.signedBlockHeader.message.slot) -// .BeaconBlockHeader.hashTreeRoot(blobSidecar.signedBlockHeader.message); -// if (rootHex !== toRootHex(headerRoot)) { -// throw new DownloadByRootError(); -// } - -// if (!validateBlobSidecarInclusionProof(blobSidecar)) { -// throw new DownloadByRootError(); -// } -// } - -// try { -// await validateBlobsAndBlobProofs( -// blobSidecars.map((b) => b.kzgCommitment), -// blobSidecars.map((b) => b.blob), -// blobSidecars.map((b) => b.kzgProof) -// ); -// } catch { -// throw new DownloadByRootError(); -// } -// } - -// export async function fetchByRoot({ -// config, -// peerIdStr, -// network, -// blockRoot, -// block, -// blobIndices, -// columnIndices, -// }: FetchByRootProps): DownloadByRootResponses { -// let blobSidecars: deneb.BlobSidecars | undefined; -// let columnSidecars: fulu.DataColumnSidecars | undefined; - -// if (!block) { -// block = await network.sendBeaconBlocksByRoot(peerIdStr, [blockRoot]); -// } - -// const forkName = config.getForkName(block.message.slot); -// if (isForkPostFulu(forkName)) { -// if (!columnIndices) { -// throw new DownloadByRootError({ -// code: DownloadByRootErrorCode.MISSING_COLUMN_INDICES, -// blockRoot: prettyBytes(toRootHex(blockRoot)), -// }); -// } -// columnSidecars = await network.sendDataColumnSidecarsByRoot(peerIdStr, [{blockRoot, columns: columnIndices}]); -// } else if (isForkPostDeneb(forkName)) { -// if (!blobIndices) { -// const blobCount = (block as SignedBeaconBlock).message.body.blobKzgCommitments?.length; -// blobIndices = Array.from({length: blobCount}, (_, i) => i); -// } -// const blobsRequest = blobIndices.map((index) => ({blockRoot, index})); -// blobSidecars = await network.sendBlobSidecarsByRoot(peerIdStr, blobsRequest); -// } - -// return { -// block, -// blobSidecars, -// columnSidecars, -// }; -// } - -// export type ValidateByRootResponses = DownloadByRootResponses & {cacheItem: BlockInputSyncCacheItem}; -// export function validateByRootResponses({ -// cacheItem, -// block, -// blobSidecars, -// columnSidecars, -// }: ValidateByRootResponses): void { -// const 
blockRoot = this.config.getForkTypes(block.message.slot).BeaconBlock.hashTreeRoot(block.message); -// const blockRootHex = toRootHex(blockRoot); - -// const rootHex = getBlockInputSyncCacheItemRootHex(cacheItem); -// if (rootHex !== blockRootHex) { -// } -// } - -// export type ValidateByRootResponses = DownloadByRootRequests & DownloadByRootResponses & {config: ChainForkConfig}; -// export function validateByRootResponses({ -// config, -// blocksRequest: blockRequest, -// blocks: block, -// blobsRequest, -// blobSidecars, -// columnsRequest, -// columnSidecars, -// }: ValidateByRootResponses): string { -// let blockRootHex: string | undefined; -// if (blockRequest) { -// if (!block) { -// throw new DownloadByRootError({ -// code: DownloadByRootErrorCode.MISSING_BLOCK_RESPONSE, -// }); -// } -// const blockRoot = config.getForkTypes(block.message.slot).BeaconBlock.hashTreeRoot(block.message); -// blockRootHex = toRootHex(blockRoot); -// } -// if (blobsRequest) { -// if (!blobSidecars) { -// throw new DownloadByRootError({ -// code: DownloadByRootErrorCode.MISSING_BLOBS_RESPONSE, -// }); -// } -// for (const blobSidecar of blobSidecars) { -// const blockRoot = config -// .getForkTypes(blobSidecar.signedBlockHeader.message.slot) -// .BeaconBlockHeader.hashTreeRoot(blobSidecar.signedBlockHeader.message); -// const rootHex = toRootHex(blockRoot); -// if (!blockRootHex) { -// blockRootHex = rootHex; -// } else if (blockRootHex !== rootHex) { -// } -// } -// if (blockRootHex) { -// } -// } -// if (columnsRequest) { -// if (!columnSidecars) { -// throw new DownloadByRootError({ -// code: DownloadByRootErrorCode.MISSING_BLOBS_RESPONSE, -// }); -// } - -// const blockRoot = config.getForkTypes(block.message.slot).BeaconBlock.hashTreeRoot(block.message); -// blockRootHex = toRootHex(blockRoot); -// } - -// return blockRootHex; -// } - export enum DownloadByRootErrorCode { MISMATCH_BLOCK_ROOT = "DOWNLOAD_BY_ROOT_ERROR_MISMATCH_BLOCK_ROOT", EXTRA_SIDECAR_RECEIVED = "DOWNLOAD_BY_ROOT_ERROR_EXTRA_SIDECAR_RECEIVED", INVALID_INCLUSION_PROOF = "DOWNLOAD_BY_ROOT_ERROR_INVALID_INCLUSION_PROOF", INVALID_KZG_PROOF = "DOWNLOAD_BY_ROOT_ERROR_INVALID_KZG_PROOF", MISSING_BLOCK_RESPONSE = "DOWNLOAD_BY_ROOT_ERROR_MISSING_BLOCK_RESPONSE", + MISSING_BLOB_RESPONSE = "DOWNLOAD_BY_ROOT_ERROR_MISSING_BLOB_RESPONSE", + MISSING_COLUMN_RESPONSE = "DOWNLOAD_BY_ROOT_ERROR_MISSING_COLUMN_RESPONSE", Z = "DOWNLOAD_BY_ROOT_ERROR_Z", } export type DownloadByRootErrorType = @@ -513,6 +435,16 @@ export type DownloadByRootErrorType = code: DownloadByRootErrorCode.MISSING_BLOCK_RESPONSE; peer: string; blockRoot: string; + } + | { + code: DownloadByRootErrorCode.MISSING_BLOB_RESPONSE; + peer: string; + blockRoot: string; + } + | { + code: DownloadByRootErrorCode.MISSING_COLUMN_RESPONSE; + peer: string; + blockRoot: string; }; export class DownloadByRootError extends LodestarError {} From dd819bca842863bb3c7a82f90da6302fafe81876 Mon Sep 17 00:00:00 2001 From: Cayman Date: Thu, 21 Aug 2025 12:05:46 -0400 Subject: [PATCH 050/173] chore: fix some of check-types --- .../src/api/impl/beacon/blocks/index.ts | 1 - .../src/chain/blocks/verifyBlock.ts | 2 +- .../blocks/verifyBlocksDataAvailability.ts | 2 +- .../blocks/verifyBlocksStateTransitionOnly.ts | 2 +- .../src/chain/blocks/writeBlockInputToDb.ts | 4 - packages/beacon-node/src/chain/chain.ts | 2 +- .../chain/seenCache/seenGossipBlockInput.ts | 2 +- packages/beacon-node/src/network/events.ts | 3 +- .../src/network/processor/gossipHandlers.ts | 4 +- .../onWorker/dataSerialization.test.ts | 32 
-------- .../perf/chain/verifyImportBlocks.test.ts | 21 +++++- .../test/spec/presets/fork_choice.test.ts | 74 +++++++++++++------ .../test/unit/chain/blocks/blockInput.test.ts | 6 +- .../blocks/verifyBlocksSanityChecks.test.ts | 20 ++++- .../chain/seenCache/seenBlockInput.test.ts | 66 +++++++++++++---- 15 files changed, 148 insertions(+), 93 deletions(-) diff --git a/packages/beacon-node/src/api/impl/beacon/blocks/index.ts b/packages/beacon-node/src/api/impl/beacon/blocks/index.ts index a017fc244cd9..13d8e478d02a 100644 --- a/packages/beacon-node/src/api/impl/beacon/blocks/index.ts +++ b/packages/beacon-node/src/api/impl/beacon/blocks/index.ts @@ -37,7 +37,6 @@ import {BlockError, BlockErrorCode, BlockGossipError} from "../../../../chain/er import {ProduceFullBellatrix, ProduceFullDeneb, ProduceFullFulu} from "../../../../chain/produceBlock/index.js"; import {validateGossipBlock} from "../../../../chain/validation/block.js"; import {OpSource} from "../../../../chain/validatorMonitor.js"; -import {NetworkEvent} from "../../../../network/index.js"; import {getBlobSidecars, kzgCommitmentToVersionedHash, reconstructBlobs} from "../../../../util/blobs.js"; import {getDataColumnSidecarsFromBlock} from "../../../../util/dataColumns.js"; import {isOptimisticBlock} from "../../../../util/forkChoice.js"; diff --git a/packages/beacon-node/src/chain/blocks/verifyBlock.ts b/packages/beacon-node/src/chain/blocks/verifyBlock.ts index b4512713560c..5d5ec5a4e0d7 100644 --- a/packages/beacon-node/src/chain/blocks/verifyBlock.ts +++ b/packages/beacon-node/src/chain/blocks/verifyBlock.ts @@ -13,7 +13,7 @@ import type {BeaconChain} from "../chain.js"; import {BlockError, BlockErrorCode} from "../errors/index.js"; import {BlockProcessOpts} from "../options.js"; import {RegenCaller} from "../regen/index.js"; -import {BlockInput, DAType, IBlockInput} from "./blockInput/index.js"; +import {DAType, IBlockInput} from "./blockInput/index.js"; import {ImportBlockOpts} from "./types.js"; import {DENEB_BLOWFISH_BANNER} from "./utils/blowfishBanner.js"; import {ELECTRA_GIRAFFE_BANNER} from "./utils/giraffeBanner.js"; diff --git a/packages/beacon-node/src/chain/blocks/verifyBlocksDataAvailability.ts b/packages/beacon-node/src/chain/blocks/verifyBlocksDataAvailability.ts index 92ebbb447ce6..004fc64036ba 100644 --- a/packages/beacon-node/src/chain/blocks/verifyBlocksDataAvailability.ts +++ b/packages/beacon-node/src/chain/blocks/verifyBlocksDataAvailability.ts @@ -1,5 +1,5 @@ import {DataAvailabilityStatus} from "@lodestar/state-transition"; -import {BlockInput, DAType, IBlockInput} from "./blockInput/index.js"; +import {DAType, IBlockInput} from "./blockInput/index.js"; // we can now wait for full 12 seconds because unavailable block sync will try pulling // the blobs from the network anyway after 500ms of seeing the block diff --git a/packages/beacon-node/src/chain/blocks/verifyBlocksStateTransitionOnly.ts b/packages/beacon-node/src/chain/blocks/verifyBlocksStateTransitionOnly.ts index 231195b20bbe..b877dfa0910b 100644 --- a/packages/beacon-node/src/chain/blocks/verifyBlocksStateTransitionOnly.ts +++ b/packages/beacon-node/src/chain/blocks/verifyBlocksStateTransitionOnly.ts @@ -12,7 +12,7 @@ import {nextEventLoop} from "../../util/eventLoop.js"; import {BlockError, BlockErrorCode} from "../errors/index.js"; import {BlockProcessOpts} from "../options.js"; import {ValidatorMonitor} from "../validatorMonitor.js"; -import {BlockInput, IBlockInput} from "./blockInput/index.js"; +import {IBlockInput} from 
"./blockInput/index.js"; import {ImportBlockOpts} from "./types.js"; /** diff --git a/packages/beacon-node/src/chain/blocks/writeBlockInputToDb.ts b/packages/beacon-node/src/chain/blocks/writeBlockInputToDb.ts index 642e29c10f0d..781d9c2619f8 100644 --- a/packages/beacon-node/src/chain/blocks/writeBlockInputToDb.ts +++ b/packages/beacon-node/src/chain/blocks/writeBlockInputToDb.ts @@ -1,9 +1,5 @@ -import {KeyValue} from "@lodestar/db"; -import {NUMBER_OF_COLUMNS} from "@lodestar/params"; -import {ForkName, isForkPostDeneb, isForkPostFulu} from "@lodestar/params"; import {fulu} from "@lodestar/types"; import {prettyPrintIndices, toRootHex} from "@lodestar/utils"; -import {BlobSidecarsWrapper} from "../../db/repositories/blobSidecars.js"; import {BeaconChain} from "../chain.js"; import {IBlockInput, isBlockInputBlobs, isBlockInputColumns} from "./blockInput/index.js"; diff --git a/packages/beacon-node/src/chain/chain.ts b/packages/beacon-node/src/chain/chain.ts index afbe0d8d3143..7962eaf14447 100644 --- a/packages/beacon-node/src/chain/chain.ts +++ b/packages/beacon-node/src/chain/chain.ts @@ -57,7 +57,7 @@ import {SerializedCache} from "../util/serializedCache.js"; import {ArchiveStore} from "./archiveStore/archiveStore.js"; import {CheckpointBalancesCache} from "./balancesCache.js"; import {BeaconProposerCache} from "./beaconProposerCache.js"; -import {BlockInput, IBlockInput} from "./blocks/blockInput/index.js"; +import {IBlockInput} from "./blocks/blockInput/index.js"; import {BlockProcessor, ImportBlockOpts} from "./blocks/index.js"; import {BlsMultiThreadWorkerPool, BlsSingleThreadVerifier, IBlsVerifier} from "./bls/index.js"; import {ChainEvent, ChainEventEmitter} from "./emitter.js"; diff --git a/packages/beacon-node/src/chain/seenCache/seenGossipBlockInput.ts b/packages/beacon-node/src/chain/seenCache/seenGossipBlockInput.ts index d4d7b0fff57f..7be4c5e5c578 100644 --- a/packages/beacon-node/src/chain/seenCache/seenGossipBlockInput.ts +++ b/packages/beacon-node/src/chain/seenCache/seenGossipBlockInput.ts @@ -3,7 +3,7 @@ import {CheckpointWithHex} from "@lodestar/fork-choice"; import {ForkName, ForkPostFulu, isForkPostDeneb, isForkPostFulu} from "@lodestar/params"; import {computeStartSlotAtEpoch} from "@lodestar/state-transition"; import {RootHex, SignedBeaconBlock, Slot, deneb, fulu} from "@lodestar/types"; -import {LodestarError, Logger, toRootHex} from "@lodestar/utils"; +import {LodestarError, Logger} from "@lodestar/utils"; import {Metrics} from "../../metrics/metrics.js"; import {IClock} from "../../util/clock.js"; import {CustodyConfig} from "../../util/dataColumns.js"; diff --git a/packages/beacon-node/src/network/events.ts b/packages/beacon-node/src/network/events.ts index 7e0944495f4d..8650483f2447 100644 --- a/packages/beacon-node/src/network/events.ts +++ b/packages/beacon-node/src/network/events.ts @@ -1,7 +1,6 @@ import {EventEmitter} from "node:events"; import {PeerId, TopicValidatorResult} from "@libp2p/interface"; -import {CustodyIndex, RootHex, Status} from "@lodestar/types"; -import {BlockInput} from "../chain/blocks/blockInput/index.js"; +import {CustodyIndex, Status} from "@lodestar/types"; import {PeerIdStr} from "../util/peerId.js"; import {StrictEventEmitterSingleArg} from "../util/strictEvents.js"; import {EventDirection} from "../util/workerEvents.js"; diff --git a/packages/beacon-node/src/network/processor/gossipHandlers.ts b/packages/beacon-node/src/network/processor/gossipHandlers.ts index 90c16d4aa026..cb08f00b8f68 100644 --- 
a/packages/beacon-node/src/network/processor/gossipHandlers.ts +++ b/packages/beacon-node/src/network/processor/gossipHandlers.ts @@ -54,7 +54,7 @@ import {OpSource} from "../../chain/validatorMonitor.js"; import {Metrics} from "../../metrics/index.js"; import {kzgCommitmentToVersionedHash} from "../../util/blobs.js"; import {INetworkCore} from "../core/index.js"; -import {NetworkEvent, NetworkEventBus} from "../events.js"; +import {NetworkEventBus} from "../events.js"; import { BatchGossipHandlers, GossipHandlerParamGeneric, @@ -111,7 +111,7 @@ export function getGossipHandlers(modules: ValidatorFnsModules, options: GossipH * We only have a choice to do batch validation for beacon_attestation topic. */ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHandlerOpts): SequentialGossipHandlers { - const {chain, config, metrics, events, logger, core} = modules; + const {chain, config, metrics, logger, core} = modules; async function validateBeaconBlock( signedBlock: SignedBeaconBlock, diff --git a/packages/beacon-node/test/e2e/network/onWorker/dataSerialization.test.ts b/packages/beacon-node/test/e2e/network/onWorker/dataSerialization.test.ts index c247aeb1da7f..ae903cfc550f 100644 --- a/packages/beacon-node/test/e2e/network/onWorker/dataSerialization.test.ts +++ b/packages/beacon-node/test/e2e/network/onWorker/dataSerialization.test.ts @@ -5,7 +5,6 @@ import {config} from "@lodestar/config/default"; import {ForkName} from "@lodestar/params"; import {ssz} from "@lodestar/types"; import {afterAll, beforeAll, describe, expect, it} from "vitest"; -import {BlockInput, BlockInputType, BlockSource, DataColumnsSource} from "../../../../src/chain/blocks/types.js"; import {ZERO_HASH, ZERO_HASH_HEX} from "../../../../src/constants/constants.js"; import {ReqRespBridgeEventData} from "../../../../src/network/core/events.js"; import {ReqRespBridgeEvent} from "../../../../src/network/core/events.js"; @@ -79,22 +78,6 @@ describe("data serialization through worker boundary", () => { request: {method: ReqRespMethod.Status, body: statusZero}, peer: getValidPeerId(), }, - [NetworkEvent.unknownBlockParent]: { - blockInput: { - type: BlockInputType.preData, - block: ssz.capella.SignedBeaconBlock.defaultValue(), - source: BlockSource.gossip, - }, - peer, - }, - [NetworkEvent.unknownBlock]: { - rootHex: ZERO_HASH_HEX, - peer, - }, - [NetworkEvent.unknownBlockInput]: { - blockInput: getEmptyBlockInput(), - peer, - }, [NetworkEvent.pendingGossipsubMessage]: { topic: {type: GossipType.beacon_block, boundary: {fork: ForkName.altair, epoch: config.ALTAIR_FORK_EPOCH}}, msg: { @@ -253,18 +236,3 @@ describe("data serialization through worker boundary", () => { }); type Resolves> = T extends Promise ? (U extends void ? 
null : U) : never; - -function getEmptyBlockInput(): BlockInput { - // cannot return BlockInputType.dataPromise because it cannot be cloned through worker boundary - return { - block: ssz.fulu.SignedBeaconBlock.defaultValue(), - source: BlockSource.gossip, - type: BlockInputType.availableData, - blockData: { - fork: ForkName.fulu, - dataColumns: ssz.fulu.DataColumnSidecars.defaultValue(), - dataColumnsBytes: [], - dataColumnsSource: DataColumnsSource.gossip, - }, - }; -} diff --git a/packages/beacon-node/test/perf/chain/verifyImportBlocks.test.ts b/packages/beacon-node/test/perf/chain/verifyImportBlocks.test.ts index 0a37f6c6bf8d..ec62707ee936 100644 --- a/packages/beacon-node/test/perf/chain/verifyImportBlocks.test.ts +++ b/packages/beacon-node/test/perf/chain/verifyImportBlocks.test.ts @@ -3,12 +3,14 @@ import {generateKeyPair} from "@libp2p/crypto/keys"; import {config} from "@lodestar/config/default"; import {LevelDbController} from "@lodestar/db"; import {SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY, SLOTS_PER_EPOCH} from "@lodestar/params"; -import {sleep} from "@lodestar/utils"; +import {sleep, toHex} from "@lodestar/utils"; import {defaultOptions as defaultValidatorOptions} from "@lodestar/validator"; import {rangeSyncTest} from "../../../../state-transition/test/perf/params.js"; import {beforeValue} from "../../../../state-transition/test/utils/beforeValueBenchmark.js"; import {getNetworkCachedBlock, getNetworkCachedState} from "../../../../state-transition/test/utils/testFileCache.js"; -import {AttestationImportOpt, BlockSource, getBlockInput} from "../../../src/chain/blocks/types.js"; +import {BlockInputPreData} from "../../../src/chain/blocks/blockInput/blockInput.js"; +import {BlockInputSource} from "../../../src/chain/blocks/blockInput/types.js"; +import {AttestationImportOpt} from "../../../src/chain/blocks/types.js"; import {BeaconChain} from "../../../src/chain/index.js"; import {Eth1ForBlockProductionDisabled} from "../../../src/eth1/index.js"; import {ExecutionEngineDisabled} from "../../../src/execution/engine/index.js"; @@ -110,7 +112,20 @@ describe.skip("verify+import blocks - range sync perf test", () => { return chain; }, fn: async (chain) => { - const blocksImport = blocks.value.map((block) => getBlockInput.preData(chain.config, block, BlockSource.byRange)); + const blocksImport = blocks.value.map((block) => { + const blockRootHex = toHex( + chain.config.getForkTypes(block.message.slot).BeaconBlock.hashTreeRoot(block.message) + ); + const forkName = chain.config.getForkName(block.message.slot); + return BlockInputPreData.createFromBlock({ + block, + blockRootHex, + forkName, + daOutOfRange: true, + source: BlockInputSource.byRange, + seenTimestampSec: Math.floor(Date.now() / 1000), + }); + }); await chain.processChainSegment(blocksImport, { // Only skip importing attestations for finalized sync. For head sync attestation are valuable. 
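A note on the hunk above: from PATCH 048 onward, callers hash the block once at the point it enters the node and thread the resulting hex root through createFromBlock/getByBlock, rather than re-hashing it in every consumer. The sketch below restates that convention in isolation; it uses only ChainForkConfig, SignedBeaconBlock, and toRootHex as they already appear in these diffs, and the helper name computeBlockRootHex is illustrative, not part of the series.

import type {ChainForkConfig} from "@lodestar/config";
import type {SignedBeaconBlock} from "@lodestar/types";
import {toRootHex} from "@lodestar/utils";

// Hash the block exactly once at the boundary where it enters the node
// (API, gossip, req/resp byRoot/byRange) and reuse the hex root for every
// subsequent cache lookup and BlockInput call.
export function computeBlockRootHex(config: ChainForkConfig, block: SignedBeaconBlock): string {
  const root = config.getForkTypes(block.message.slot).BeaconBlock.hashTreeRoot(block.message);
  return toRootHex(root);
}

Call sites then pass blockRootHex alongside the block itself, which is the argument shape the updated getByBlock/getByBlob/getByColumn signatures expect.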
diff --git a/packages/beacon-node/test/spec/presets/fork_choice.test.ts b/packages/beacon-node/test/spec/presets/fork_choice.test.ts index 3c4b5249d64f..21293221ed06 100644 --- a/packages/beacon-node/test/spec/presets/fork_choice.test.ts +++ b/packages/beacon-node/test/spec/presets/fork_choice.test.ts @@ -3,7 +3,7 @@ import {toHexString} from "@chainsafe/ssz"; import {generateKeyPair} from "@libp2p/crypto/keys"; import {createBeaconConfig} from "@lodestar/config"; import {CheckpointWithHex, ForkChoice} from "@lodestar/fork-choice"; -import {ACTIVE_PRESET, ForkName, ForkSeq} from "@lodestar/params"; +import {ACTIVE_PRESET, ForkPostDeneb, ForkPostFulu, ForkSeq} from "@lodestar/params"; import {InputType} from "@lodestar/spec-test-util"; import {BeaconStateAllForks, isExecutionStateType, signedBlockToSignedHeader} from "@lodestar/state-transition"; import { @@ -18,17 +18,15 @@ import { ssz, sszTypesFor, } from "@lodestar/types"; -import {bnToNum, fromHex} from "@lodestar/utils"; +import {bnToNum, fromHex, toHex} from "@lodestar/utils"; import {expect} from "vitest"; import { - AttestationImportOpt, - BlobSidecarValidation, - BlobsSource, - BlockInputDataColumns, - BlockSource, - DataColumnsSource, - getBlockInput, -} from "../../../src/chain/blocks/types.js"; + BlockInputBlobs, + BlockInputColumns, + BlockInputPreData, + BlockInputSource, +} from "../../../src/chain/blocks/blockInput/index.js"; +import {AttestationImportOpt, BlobSidecarValidation} from "../../../src/chain/blocks/types.js"; import {BeaconChain, ChainEvent} from "../../../src/chain/index.js"; import {defaultChainOptions} from "../../../src/chain/options.js"; import { @@ -205,6 +203,7 @@ const forkChoiceTest = const blockRoot = config .getForkTypes(signedBlock.message.slot) .BeaconBlock.hashTreeRoot(signedBlock.message); + const blockRootHex = toHex(blockRoot); logger.debug(`Step ${i}/${stepsLen} block`, { slot, id: step.block, @@ -233,14 +232,25 @@ const forkChoiceTest = ); } - const blockData = { - fork, - dataColumns: columns, - dataColumnsBytes: columns.map(() => null), - dataColumnsSource: DataColumnsSource.gossip, - } as BlockInputDataColumns; - - blockImport = getBlockInput.availableData(config, signedBlock, BlockSource.gossip, blockData); + blockImport = BlockInputColumns.createFromBlock({ + forkName: fork, + block: signedBlock as SignedBeaconBlock, + blockRootHex, + custodyColumns: columns.map((c) => c.index), + sampledColumns: columns.map((c) => c.index), + source: BlockInputSource.gossip, + seenTimestampSec: 0, + daOutOfRange: false, + }); + for (const column of columns) { + blockImport.addColumn({ + blockRootHex, + columnSidecar: column, + source: BlockInputSource.gossip, + seenTimestampSec: 0, + }); + } + // getBlockInput.availableData(config, signedBlock, BlockSource.gossip, blockData); } else if (forkSeq >= ForkSeq.deneb && forkSeq < ForkSeq.fulu) { if (blobs === undefined) { // seems like some deneb tests don't have this and we are supposed to assume empty @@ -270,13 +280,31 @@ const forkChoiceTest = }; }); - blockImport = getBlockInput.availableData(config, signedBlock, BlockSource.gossip, { - fork: ForkName.deneb, - blobs: blobSidecars, - blobsSource: BlobsSource.gossip, + blockImport = BlockInputBlobs.createFromBlock({ + forkName: fork, + block: signedBlock as SignedBeaconBlock, + blockRootHex, + source: BlockInputSource.gossip, + seenTimestampSec: 0, + daOutOfRange: false, }); + for (const blob of blobSidecars) { + blockImport.addBlob({ + blockRootHex, + blobSidecar: blob, + source: BlockInputSource.gossip, + 
seenTimestampSec: 0, + }); + } } else { - blockImport = getBlockInput.preData(config, signedBlock, BlockSource.gossip); + blockImport = BlockInputPreData.createFromBlock({ + forkName: fork, + block: signedBlock as SignedBeaconBlock, + blockRootHex, + source: BlockInputSource.gossip, + seenTimestampSec: 0, + daOutOfRange: false, + }); } await chain.processBlock(blockImport, { diff --git a/packages/beacon-node/test/unit/chain/blocks/blockInput.test.ts b/packages/beacon-node/test/unit/chain/blocks/blockInput.test.ts index d4b44ee9d127..f3dc93538dba 100644 --- a/packages/beacon-node/test/unit/chain/blocks/blockInput.test.ts +++ b/packages/beacon-node/test/unit/chain/blocks/blockInput.test.ts @@ -135,10 +135,8 @@ describe("BlockInput", () => { blockRootHex: rootHex, daOutOfRange: false, forkName: ForkName.deneb, - source: { - source: BlockInputSource.gossip, - seenTimestampSec: Date.now(), - }, + source: BlockInputSource.gossip, + seenTimestampSec: Date.now(), } as AddBlock & CreateBlockInputMeta); for (const blobSidecar of blobSidecars) { testArray.push({ diff --git a/packages/beacon-node/test/unit/chain/blocks/verifyBlocksSanityChecks.test.ts b/packages/beacon-node/test/unit/chain/blocks/verifyBlocksSanityChecks.test.ts index abd81e7b3e29..02a519ce1298 100644 --- a/packages/beacon-node/test/unit/chain/blocks/verifyBlocksSanityChecks.test.ts +++ b/packages/beacon-node/test/unit/chain/blocks/verifyBlocksSanityChecks.test.ts @@ -5,7 +5,8 @@ import {SignedBeaconBlock, Slot, ssz} from "@lodestar/types"; import {toHex} from "@lodestar/utils"; import {toRootHex} from "@lodestar/utils"; import {beforeEach, describe, expect, it} from "vitest"; -import {BlockSource, getBlockInput} from "../../../../src/chain/blocks/types.js"; +import {BlockInputPreData} from "../../../../src/chain/blocks/blockInput/blockInput.js"; +import {BlockInputSource} from "../../../../src/chain/blocks/blockInput/index.js"; import {verifyBlocksSanityChecks as verifyBlocksImportSanityChecks} from "../../../../src/chain/blocks/verifyBlocksSanityChecks.js"; import {BlockErrorCode} from "../../../../src/chain/errors/index.js"; import {IChainOptions} from "../../../../src/chain/options.js"; @@ -131,11 +132,24 @@ function verifyBlocksSanityChecks( ): {relevantBlocks: SignedBeaconBlock[]; parentSlots: Slot[]; parentBlock: ProtoBlock | null} { const {relevantBlocks, parentSlots, parentBlock} = verifyBlocksImportSanityChecks( modules, - blocks.map((block) => getBlockInput.preData(config, block, BlockSource.byRange)), + blocks.map((block) => { + const blockRootHex = toHex( + modules.config.getForkTypes(block.message.slot).BeaconBlock.hashTreeRoot(block.message) + ); + const forkName = modules.config.getForkName(block.message.slot); + return BlockInputPreData.createFromBlock({ + block, + blockRootHex, + forkName, + daOutOfRange: true, + source: BlockInputSource.byRange, + seenTimestampSec: Math.floor(Date.now() / 1000), + }); + }), opts ); return { - relevantBlocks: relevantBlocks.map(({block}) => block), + relevantBlocks: relevantBlocks.map((blockInput) => blockInput.getBlock()), parentSlots, parentBlock, }; diff --git a/packages/beacon-node/test/unit/chain/seenCache/seenBlockInput.test.ts b/packages/beacon-node/test/unit/chain/seenCache/seenBlockInput.test.ts index 326ea0120a1b..8d0464be8946 100644 --- a/packages/beacon-node/test/unit/chain/seenCache/seenBlockInput.test.ts +++ b/packages/beacon-node/test/unit/chain/seenCache/seenBlockInput.test.ts @@ -1,6 +1,6 @@ import {generateKeyPair} from "@libp2p/crypto/keys"; import 
{createChainForkConfig, defaultChainConfig} from "@lodestar/config"; -import {ForkName, ForkPostCapella, ForkPostDeneb} from "@lodestar/params"; +import {ForkName, ForkPostCapella, ForkPostDeneb, ForkPostFulu} from "@lodestar/params"; import {computeStartSlotAtEpoch, signedBlockToSignedHeader} from "@lodestar/state-transition"; import {SignedBeaconBlock, deneb, ssz} from "@lodestar/types"; import {toRootHex} from "@lodestar/utils"; @@ -13,14 +13,14 @@ import { isBlockInputPreDeneb, } from "../../../../src/chain/blocks/blockInput/index.js"; import {ChainEvent, ChainEventEmitter} from "../../../../src/chain/emitter.js"; -import {SeenBlockInputCache} from "../../../../src/chain/seenCache/seenBlockInput.js"; +import {SeenBlockInput} from "../../../../src/chain/seenCache/seenGossipBlockInput.js"; import {computeNodeIdFromPrivateKey} from "../../../../src/network/subnets/index.js"; import {Clock} from "../../../../src/util/clock.js"; import {CustodyConfig} from "../../../../src/util/dataColumns.js"; import {testLogger} from "../../../utils/logger.js"; describe("SeenBlockInputCache", async () => { - let cache: SeenBlockInputCache; + let cache: SeenBlockInput; let abortController: AbortController; let chainEvents: ChainEventEmitter; @@ -118,7 +118,7 @@ describe("SeenBlockInputCache", async () => { abortController = new AbortController(); const signal = abortController.signal; const genesisTime = Math.floor(Date.now() / 1000); - cache = new SeenBlockInputCache({ + cache = new SeenBlockInput({ config, custodyConfig, clock: new Clock({config, genesisTime, signal}), @@ -133,6 +133,7 @@ describe("SeenBlockInputCache", async () => { const {block, rootHex} = buildBlockTestSet(ForkName.capella); cache.getByBlock({ block, + blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); @@ -142,6 +143,7 @@ describe("SeenBlockInputCache", async () => { const {block, blockRoot, rootHex} = buildBlockTestSet(ForkName.capella); cache.getByBlock({ block, + blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); @@ -157,6 +159,7 @@ describe("SeenBlockInputCache", async () => { const {block, rootHex} = buildBlockTestSet(ForkName.capella); const blockInput = cache.getByBlock({ block, + blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); @@ -166,6 +169,7 @@ describe("SeenBlockInputCache", async () => { const {block, blockRoot, rootHex} = buildBlockTestSet(ForkName.capella); const blockInput = cache.getByBlock({ block, + blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); @@ -181,6 +185,7 @@ describe("SeenBlockInputCache", async () => { const {block, rootHex} = buildBlockTestSet(ForkName.capella); const blockInput = cache.getByBlock({ block, + blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); @@ -192,6 +197,7 @@ describe("SeenBlockInputCache", async () => { const {block, blockRoot, rootHex} = buildBlockTestSet(ForkName.capella); const blockInput = cache.getByBlock({ block, + blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); @@ -208,6 +214,7 @@ describe("SeenBlockInputCache", async () => { const {block, rootHex} = buildBlockTestSet(ForkName.capella); const blockInput = cache.getByBlock({ block, + blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); @@ -215,11 +222,13 @@ describe("SeenBlockInputCache", async () => { cache.prune(rootHex); 
expect(cache.get(rootHex)).toBeUndefined(); }); + it("should remove all ancestors of a BlockInput", () => { const {parentBlock, parentRootHex, childBlock, childRootHex} = buildParentAndChildBlockTestSet(ForkName.capella); const parentBlockInput = cache.getByBlock({ block: parentBlock, + blockRootHex: parentRootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); @@ -227,6 +236,7 @@ describe("SeenBlockInputCache", async () => { const childBlockInput = cache.getByBlock({ block: childBlock, + blockRootHex: childRootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); @@ -256,6 +266,7 @@ describe("SeenBlockInputCache", async () => { parentBlockInput = cache.getByBlock({ block: parentBlock, + blockRootHex: parentRootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); @@ -263,6 +274,7 @@ describe("SeenBlockInputCache", async () => { childBlockInput = cache.getByBlock({ block: childBlock, + blockRootHex: childRootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); @@ -293,6 +305,7 @@ describe("SeenBlockInputCache", async () => { expect(cache.get(rootHex)).toBeUndefined(); const blockInput = cache.getByBlock({ block, + blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); @@ -300,27 +313,30 @@ describe("SeenBlockInputCache", async () => { }); describe("should return the correct type of BlockInput for a given block root", () => { it("should return a BlockInputPreDeneb", () => { - const {block} = buildBlockTestSet(ForkName.capella); + const {block, rootHex} = buildBlockTestSet(ForkName.capella); const blockInput = cache.getByBlock({ block, + blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); expect(isBlockInputPreDeneb(blockInput)).toBeTruthy(); }); it("should return a BlockInputBlobs", () => { - const {block} = buildBlockTestSet(ForkName.deneb); + const {block, rootHex} = buildBlockTestSet(ForkName.deneb); const blockInput = cache.getByBlock({ block, + blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); expect(isBlockInputBlobs(blockInput)).toBeTruthy(); }); it("should return a BlockInputColumns", () => { - const {block} = buildBlockTestSet(ForkName.fulu); + const {block, rootHex} = buildBlockTestSet(ForkName.fulu); const blockInput = cache.getByBlock({ block, + blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); @@ -331,12 +347,14 @@ describe("SeenBlockInputCache", async () => { const {block, rootHex} = buildBlockTestSet(ForkName.capella); const blockInput1 = cache.getByBlock({ block, + blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); expect(cache.get(rootHex)).toBe(blockInput1); const blockInput2 = cache.getByBlock({ block, + blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); @@ -346,34 +364,39 @@ describe("SeenBlockInputCache", async () => { const {block, rootHex} = buildBlockTestSet(ForkName.capella); const blockInput = cache.getByBlock({ block, + blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); expect(() => blockInput.addBlock({ - block, + block: block as SignedBeaconBlock, blockRootHex: rootHex, - source: {source: BlockInputSource.gossip, seenTimestampSec: Date.now()}, + source: BlockInputSource.gossip, + seenTimestampSec: Date.now(), }) ).toThrow(); expect(() => cache.getByBlock({ block, + blockRootHex: rootHex, source: BlockInputSource.gossip, 
seenTimestampSec: Date.now(), }) ).not.toThrow(); }); it("should return the correct BlockInput for a BlockInput created by blob", () => { - const {block, blobSidecar} = buildBlockAndBlobTestSet(ForkName.deneb); + const {block, blobSidecar, rootHex} = buildBlockAndBlobTestSet(ForkName.deneb); const blockInput1 = cache.getByBlob({ blobSidecar, + blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); const blockInput2 = cache.getByBlock({ block, + blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); @@ -401,6 +424,7 @@ describe("SeenBlockInputCache", async () => { expect(cache.get(rootHex)).toBeUndefined(); const blockInput = cache.getByBlob({ blobSidecar, + blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); @@ -411,42 +435,52 @@ describe("SeenBlockInputCache", async () => { const blockInput1 = cache.getByBlob({ blobSidecar, + blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); expect(cache.get(rootHex)).toBe(blockInput1); const blockInput2 = cache.getByBlob({ blobSidecar, + blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); expect(blockInput1).toBe(blockInput2); }); it("should throw if attempting to add a blob to wrong type of BlockInput", () => { - const {block} = buildBlockTestSet(ForkName.capella); + const {block, rootHex} = buildBlockTestSet(ForkName.capella); const blockInput = cache.getByBlock({ block, + blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); expect(isBlockInputPreDeneb(blockInput)).toBeTruthy(); - const {blobSidecar} = buildBlockAndBlobTestSet(ForkName.electra); + const {blobSidecar, rootHex: rootHex2} = buildBlockAndBlobTestSet(ForkName.electra); blobSidecar.signedBlockHeader = signedBlockToSignedHeader(config, block); expect(() => - cache.getByBlob({blobSidecar, source: BlockInputSource.gossip, seenTimestampSec: Date.now()}) + cache.getByBlob({ + blobSidecar, + blockRootHex: rootHex2, + source: BlockInputSource.gossip, + seenTimestampSec: Date.now(), + }) ).toThrow(); }); it("should add blob to an existing BlockInput", () => { - const {block, blobSidecar} = buildBlockAndBlobTestSet(ForkName.electra); + const {block, blobSidecar, rootHex} = buildBlockAndBlobTestSet(ForkName.electra); const blockInput1 = cache.getByBlock({ block, + blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); const blockInput2 = cache.getByBlob({ blobSidecar, + blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); @@ -460,6 +494,7 @@ describe("SeenBlockInputCache", async () => { expect(cache.get(rootHex)).toBeUndefined(); const blockInput = cache.getByBlob({ blobSidecar, + blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); @@ -475,6 +510,7 @@ describe("SeenBlockInputCache", async () => { expect(() => cache.getByBlob({ blobSidecar, + blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }) @@ -487,6 +523,7 @@ describe("SeenBlockInputCache", async () => { const blockInput = cache.getByBlob( { blobSidecar, + blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }, @@ -497,6 +534,7 @@ describe("SeenBlockInputCache", async () => { cache.getByBlob( { blobSidecar, + blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }, From 8a48222b6768e21c3e542ca8d6e64630987a143d Mon Sep 17 
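// The recurring edit in the cache tests above: SeenBlockInputCache is renamed to
// SeenBlockInput (now imported from seenGossipBlockInput.js), and getByBlock /
// getByBlob take an explicit blockRootHex instead of deriving it internally. A
// minimal sketch of the new call-site shape; the helper name seenByBlock is an
// illustrative assumption, the option fields come from the diff.
import type {RootHex, SignedBeaconBlock} from "@lodestar/types";
import {BlockInputSource} from "../../../../src/chain/blocks/blockInput/index.js";
import {SeenBlockInput} from "../../../../src/chain/seenCache/seenGossipBlockInput.js";

function seenByBlock(cache: SeenBlockInput, block: SignedBeaconBlock, rootHex: RootHex) {
  return cache.getByBlock({
    block,
    blockRootHex: rootHex, // the root is now computed once by the caller and passed through
    source: BlockInputSource.gossip,
    seenTimestampSec: Date.now(),
  });
}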
00:00:00 2001 From: Cayman Date: Thu, 21 Aug 2025 12:09:13 -0400 Subject: [PATCH 051/173] chore: delete test --- .../seenCache/seenGossipBlockInput.test.ts | 207 ------------------ 1 file changed, 207 deletions(-) delete mode 100644 packages/beacon-node/test/unit/chain/seenCache/seenGossipBlockInput.test.ts diff --git a/packages/beacon-node/test/unit/chain/seenCache/seenGossipBlockInput.test.ts b/packages/beacon-node/test/unit/chain/seenCache/seenGossipBlockInput.test.ts deleted file mode 100644 index 36ca1bdc9f92..000000000000 --- a/packages/beacon-node/test/unit/chain/seenCache/seenGossipBlockInput.test.ts +++ /dev/null @@ -1,207 +0,0 @@ -import {createBeaconConfig, createChainForkConfig, defaultChainConfig} from "@lodestar/config"; -import {ssz} from "@lodestar/types"; -import {describe, expect, it} from "vitest"; - -import {ZERO_HASH_HEX} from "@lodestar/params"; -import {BlockInput, BlockInputType, GossipedInputType} from "../../../../src/chain/blocks/types.js"; -import {ChainEventEmitter} from "../../../../src/chain/emitter.js"; -import { - BlockInputMetaPendingBlockWithBlobs, - SeenGossipBlockInput, -} from "../../../../src/chain/seenCache/seenGossipBlockInput.js"; -import {getExecutionEngineFromBackend} from "../../../../src/execution/engine/index.js"; -import {ExecutionEngineMockBackend} from "../../../../src/execution/engine/mock.js"; -import {computeNodeId} from "../../../../src/network/subnets/index.js"; -import {IClock} from "../../../../src/util/clock.js"; -import {CustodyConfig} from "../../../../src/util/dataColumns.js"; -import {testLogger} from "../../../utils/logger.js"; -import {getValidPeerId} from "../../../utils/peer.js"; - -describe("SeenGossipBlockInput", () => { - const chainConfig = createChainForkConfig({ - ...defaultChainConfig, - ALTAIR_FORK_EPOCH: 0, - BELLATRIX_FORK_EPOCH: 0, - DENEB_FORK_EPOCH: 0, - }); - const genesisValidatorsRoot = Buffer.alloc(32, 0xaa); - const config = createBeaconConfig(chainConfig, genesisValidatorsRoot); - const nodeId = computeNodeId(getValidPeerId()); - - // Execution engine - const executionEngineBackend = new ExecutionEngineMockBackend({ - onlyPredefinedResponses: false, - genesisBlockHash: ZERO_HASH_HEX, - }); - const controller = new AbortController(); - const executionEngine = getExecutionEngineFromBackend(executionEngineBackend, { - signal: controller.signal, - logger: testLogger("executionEngine"), - }); - - const emitter = new ChainEventEmitter(); - // Not used in this test, but required by the constructor - const unusedClock = {} as unknown as IClock; - - const seenGossipBlockInput = new SeenGossipBlockInput( - new CustodyConfig({nodeId, config}), - executionEngine, - emitter, - unusedClock, - testLogger("seenGossipBlockInput") - ); - - // array of numBlobs, events where events are array of - // [block|blob11|blob2, pd | bp | null | error string reflecting the expected result] - const testCases: [string, number, [string, string | null][]][] = [ - ["no blobs", 0, [["block", "pd"]]], - [ - "1 blob, block first", - 1, - [ - ["block", "bp"], - ["blob0", "pd"], - ], - ], - [ - "1 blob, blob first", - 1, - [ - ["blob0", null], - ["block", "pd"], - ], - ], - [ - "6 blobs, block first", - 6, - [ - ["block", "bp"], - ["blob1", "bp"], - ["blob0", "bp"], - ["blob5", "bp"], - ["blob4", "bp"], - ["blob2", "bp"], - ["blob3", "pd"], - ], - ], - [ - "4 blobs, block in mid", - 4, - [ - ["blob1", null], - ["blob3", null], - ["block", "bp"], - ["blob0", "bp"], - ["blob2", "pd"], - ], - ], - [ - "3 blobs, block in end", - 3, - [ - ["blob1", 
null], - ["blob0", null], - ["blob2", null], - ["block", "pd"], - ], - ], - ]; - - // lets start from a random slot to build cases - let slot = 7456; - for (const testCase of testCases) { - const [testName, numBlobs, events] = testCase; - - it(`${testName}`, () => { - const signedBlock = ssz.deneb.SignedBeaconBlock.defaultValue(); - // assign slot and increment for the next block so as to keep these block testcases distinguished - // in the cache - signedBlock.message.slot = slot++; - signedBlock.message.body.blobKzgCommitments = Array.from({length: numBlobs}, () => - ssz.deneb.KZGCommitment.defaultValue() - ); - - // create a dummy signed block header with matching body root - const bodyRoot = ssz.deneb.BeaconBlockBody.hashTreeRoot(signedBlock.message.body); - const signedBlockHeader = ssz.phase0.SignedBeaconBlockHeader.defaultValue(); - signedBlockHeader.message.slot = signedBlock.message.slot; - signedBlockHeader.message.bodyRoot = bodyRoot; - - const blobSidecars = Array.from({length: numBlobs}, (_val, index) => { - const message = {...ssz.deneb.BlobSidecar.defaultValue(), signedBlockHeader, index}; - return message; - }); - - for (const testEvent of events) { - const [inputEvent, expectedRes] = testEvent; - const eventType = inputEvent.includes("block") ? GossipedInputType.block : GossipedInputType.blob; - const expectedResponseType = parseResponseType(expectedRes); - - try { - if (eventType === GossipedInputType.block) { - const blockInputRes = seenGossipBlockInput.getGossipBlockInput( - config, - { - type: GossipedInputType.block, - signedBlock, - }, - null - ); - - if (expectedResponseType instanceof Error) { - expect.fail(`expected to fail with error: ${expectedResponseType.message}`); - } else if (expectedResponseType === null) { - expect(blockInputRes).toBeNull(); - } else { - expect((blockInputRes.blockInput as BlockInput)?.type).toEqual(expectedResponseType); - } - } else { - const index = parseInt(inputEvent.split("blob")[1] ?? 
"0"); - const blobSidecar = blobSidecars[index]; - expect(blobSidecar).not.toBeUndefined(); - - const blobInputRes = seenGossipBlockInput.getGossipBlockInput( - config, - { - type: GossipedInputType.blob, - blobSidecar, - }, - null - ); - - if (expectedResponseType instanceof Error) { - expect.fail(`expected to fail with error: ${expectedResponseType.message}`); - } else if (expectedResponseType === null) { - expect(blobInputRes.blockInput.block).toBeNull(); - expect((blobInputRes.blockInputMeta as BlockInputMetaPendingBlockWithBlobs).expectedBlobs).toBeNull(); - } else { - expect((blobInputRes.blockInput as BlockInput)?.type).toEqual(expectedResponseType); - } - } - } catch (e) { - if ( - !(e as Error).message.includes("expected to fail with error") && - !(expectedResponseType instanceof Error) - ) { - expect.fail( - `expected not to fail with response=${expectedResponseType} but errored: ${(e as Error).message}` - ); - } - } - } - }); - } -}); - -function parseResponseType(expectedRes: string | null): BlockInputType | null | Error { - switch (expectedRes) { - case null: - return null; - case "pd": - return BlockInputType.availableData; - case "bp": - return BlockInputType.dataPromise; - default: - return Error(expectedRes); - } -} From a1e33519b921a9d392b17d7892f6153a36c3b9c4 Mon Sep 17 00:00:00 2001 From: Cayman Date: Thu, 21 Aug 2025 12:11:23 -0400 Subject: [PATCH 052/173] chore: remove old test files --- .../beaconBlocksMaybeBlobsByRange.test.ts | 136 -------- .../unavailableBeaconBlobsByRoot.test.ts | 315 ------------------ 2 files changed, 451 deletions(-) delete mode 100644 packages/beacon-node/test/unit/network/beaconBlocksMaybeBlobsByRange.test.ts delete mode 100644 packages/beacon-node/test/unit/network/unavailableBeaconBlobsByRoot.test.ts diff --git a/packages/beacon-node/test/unit/network/beaconBlocksMaybeBlobsByRange.test.ts b/packages/beacon-node/test/unit/network/beaconBlocksMaybeBlobsByRange.test.ts deleted file mode 100644 index ba3993b7b23c..000000000000 --- a/packages/beacon-node/test/unit/network/beaconBlocksMaybeBlobsByRange.test.ts +++ /dev/null @@ -1,136 +0,0 @@ -import {createBeaconConfig, createChainForkConfig, defaultChainConfig} from "@lodestar/config"; -import {ForkName} from "@lodestar/params"; -import {deneb, ssz} from "@lodestar/types"; -import {describe, expect, it} from "vitest"; - -import {BlobsSource, BlockSource, getBlockInput} from "../../../src/chain/blocks/types.js"; -import {ZERO_HASH} from "../../../src/constants/constants.js"; -import {INetwork} from "../../../src/network/interface.js"; -import {beaconBlocksMaybeBlobsByRange} from "../../../src/network/reqresp/index.js"; -import {RangeSyncType} from "../../../src/sync/utils/remoteSyncType.js"; -import {CustodyConfig} from "../../../src/util/dataColumns.js"; - -describe.skip("beaconBlocksMaybeBlobsByRange", () => { - const peerId = "Qma9T5YraSnpRDZqRR4krcSJabThc8nwZuJV3LercPHufi"; - - const chainConfig = createChainForkConfig({ - ...defaultChainConfig, - ALTAIR_FORK_EPOCH: 0, - BELLATRIX_FORK_EPOCH: 0, - CAPELLA_FORK_EPOCH: 0, - DENEB_FORK_EPOCH: 0, - ELECTRA_FORK_EPOCH: 0, - FULU_FORK_EPOCH: 0, - }); - const genesisValidatorsRoot = Buffer.alloc(32, 0xaa); - const config = createBeaconConfig(chainConfig, genesisValidatorsRoot); - const rangeRequest = ssz.phase0.BeaconBlocksByRangeRequest.defaultValue(); - rangeRequest.count = 1; - - const block1 = ssz.deneb.SignedBeaconBlock.defaultValue(); - const blockheader1 = ssz.phase0.SignedBeaconBlockHeader.defaultValue(); - blockheader1.message.slot = 1; - 
block1.message.slot = 1; - block1.message.body.blobKzgCommitments.push(ssz.deneb.KZGCommitment.defaultValue()); - const blobSidecar1 = ssz.deneb.BlobSidecar.defaultValue(); - blobSidecar1.signedBlockHeader = blockheader1; - - const block2 = ssz.deneb.SignedBeaconBlock.defaultValue(); - block2.message.slot = 2; - const blockheader2 = ssz.phase0.SignedBeaconBlockHeader.defaultValue(); - blockheader2.message.slot = 2; - - block2.message.body.blobKzgCommitments.push(ssz.deneb.KZGCommitment.defaultValue()); - const blobSidecar2 = ssz.deneb.BlobSidecar.defaultValue(); - blobSidecar2.signedBlockHeader = blockheader2; - - const block3 = ssz.deneb.SignedBeaconBlock.defaultValue(); - block3.message.slot = 3; - // no blobsidecar for block3 - - const block4 = ssz.deneb.SignedBeaconBlock.defaultValue(); - block4.message.slot = 4; - const blockheader4 = ssz.phase0.SignedBeaconBlockHeader.defaultValue(); - blockheader4.message.slot = 4; - - // two blobsidecars - block4.message.body.blobKzgCommitments.push(ssz.deneb.KZGCommitment.defaultValue()); - block4.message.body.blobKzgCommitments.push(ssz.deneb.KZGCommitment.defaultValue()); - const blobSidecar41 = ssz.deneb.BlobSidecar.defaultValue(); - - blobSidecar41.signedBlockHeader = blockheader4; - - const blobSidecar42 = ssz.deneb.BlobSidecar.defaultValue(); - blobSidecar42.signedBlockHeader = blockheader4; - blobSidecar42.index = 1; - - // Array of testcases which are array of matched blocks with/without (if empty) sidecars - const testCases: [string, [deneb.SignedBeaconBlock, deneb.BlobSidecar[] | undefined][]][] = [ - ["one block with sidecar", [[block1, [blobSidecar1]]]], - [ - "two blocks with sidecar", - [ - [block1, [blobSidecar1]], - [block2, [blobSidecar2]], - ], - ], - ["block with skipped sidecar", [[block3, undefined]]], - ["multiple blob sidecars per block", [[block4, [blobSidecar41, blobSidecar42]]]], - [ - "all blocks together", - [ - [block1, [blobSidecar1]], - [block2, [blobSidecar2]], - [block3, undefined], - [block4, [blobSidecar41, blobSidecar42]], - ], - ], - ]; - testCases.map(([testName, blocksWithBlobs]) => { - it(testName, async () => { - const blocks = blocksWithBlobs.map(([block, _blobs]) => block); - - const blobSidecars = blocksWithBlobs - .map(([_block, blobs]) => blobs as deneb.BlobSidecars) - .filter((blobs) => blobs !== undefined) - .reduce((acc, elem) => acc.concat(elem), []); - - const expectedResponse = blocksWithBlobs.map(([block, blobSidecars]) => { - const blobs = blobSidecars !== undefined ? 
blobSidecars : []; - return getBlockInput.availableData(config, block, BlockSource.byRange, { - fork: ForkName.electra, - blobs, - blobsSource: BlobsSource.byRange, - }); - }); - - const custodyConfig = new CustodyConfig({ - nodeId: new Uint8Array(32), - config, - }); - custodyConfig.sampledColumns = [2, 4, 6, 8]; - - const network = { - sendBeaconBlocksByRange: async () => - blocks.map((data) => ({ - data, - bytes: ZERO_HASH, - })), - sendBlobSidecarsByRange: async () => blobSidecars, - custodyConfig, - } as Partial as INetwork; - - const response = await beaconBlocksMaybeBlobsByRange( - config, - network, - {peerId, client: "PEER_CLIENT", custodyGroups: []}, - rangeRequest, - 0, - null, - RangeSyncType.Finalized, - null - ); - expect(response).toEqual(expectedResponse); - }); - }); -}); diff --git a/packages/beacon-node/test/unit/network/unavailableBeaconBlobsByRoot.test.ts b/packages/beacon-node/test/unit/network/unavailableBeaconBlobsByRoot.test.ts deleted file mode 100644 index b54c3ed63613..000000000000 --- a/packages/beacon-node/test/unit/network/unavailableBeaconBlobsByRoot.test.ts +++ /dev/null @@ -1,315 +0,0 @@ -import {toHexString} from "@chainsafe/ssz"; -import {createBeaconConfig, createChainForkConfig, defaultChainConfig} from "@lodestar/config"; -import {BYTES_PER_FIELD_ELEMENT, FIELD_ELEMENTS_PER_BLOB, ForkName} from "@lodestar/params"; -import {signedBlockToSignedHeader} from "@lodestar/state-transition"; -import {deneb, fulu, ssz} from "@lodestar/types"; -import {describe, expect, it, vi} from "vitest"; -import { - BlobsSource, - BlockInput, - BlockInputAvailableData, - BlockInputType, - BlockSource, - CachedData, - getBlockInput, -} from "../../../src/chain/blocks/types.js"; -import {ChainEventEmitter} from "../../../src/chain/emitter.js"; -import {getEmptyBlockInputCacheEntry} from "../../../src/chain/seenCache/seenGossipBlockInput.js"; -import {IExecutionEngine} from "../../../src/execution/index.js"; -import {INetwork} from "../../../src/network/interface.js"; -import {unavailableBeaconBlobsByRoot} from "../../../src/network/reqresp/index.js"; -import {computeNodeId} from "../../../src/network/subnets/index.js"; -import {computeInclusionProof, kzgCommitmentToVersionedHash} from "../../../src/util/blobs.js"; -import {CustodyConfig, getDataColumnSidecarsFromBlock} from "../../../src/util/dataColumns.js"; -import {kzg} from "../../../src/util/kzg.js"; -import {getValidPeerId} from "../../utils/peer.js"; - -describe("unavailableBeaconBlobsByRoot", () => { - describe("blobs", () => { - const chainConfig = createChainForkConfig({ - ...defaultChainConfig, - ALTAIR_FORK_EPOCH: 0, - BELLATRIX_FORK_EPOCH: 0, - CAPELLA_FORK_EPOCH: 0, - DENEB_FORK_EPOCH: 0, - }); - const genesisValidatorsRoot = Buffer.alloc(32, 0xaa); - const config = createBeaconConfig(chainConfig, genesisValidatorsRoot); - - const executionEngine = { - getBlobs: vi.fn(), - }; - - const network = { - sendBeaconBlocksByRoot: vi.fn(), - sendBlobSidecarsByRoot: vi.fn(), - }; - - const peerId = "mockPeerId"; - const engineGetBlobsCache = new Map(); - - it("should successfully resolve all blobs from engine and network", async () => { - // Simulate a block 1 with 5 blobs - const signedBlock = ssz.deneb.SignedBeaconBlock.defaultValue(); - signedBlock.message.slot = 1; - const blobscommitmentsandproofs = generateBlobs(5); - signedBlock.message.body.blobKzgCommitments.push(...blobscommitmentsandproofs.kzgCommitments); - const blockheader = signedBlockToSignedHeader(config, signedBlock); - - const unavailableBlockInput = 
{ - block: signedBlock, - source: BlockSource.gossip, - blockBytes: null, - type: BlockInputType.dataPromise, - cachedData: getEmptyBlockInputCacheEntry(ForkName.deneb, 1).cachedData, - } as BlockInput; - - // total of 5 blobs - // blob 0. not in cache & to resolved by getBlobs - // blob 1. not in cache & to resolved by getBlobs - // blob 2. to be found in engineGetBlobsCache - // blob 3. null cached earlier so should directly go to network query and skip engine query - // blob 4. to hit getBlobs first with null response and then go to the network query - // - // engineGetBlobsCache caches 2 fully, and null for 3 - // getBlobs should see 0,1,4 and return first two non null and last null - // network should see 3,4 - - engineGetBlobsCache.set(toHexString(blobscommitmentsandproofs.blobVersionedHashes[2]), { - blob: blobscommitmentsandproofs.blobs[2], - proof: blobscommitmentsandproofs.kzgProofs[2], - }); - engineGetBlobsCache.set(toHexString(blobscommitmentsandproofs.blobVersionedHashes[3]), null); - - // Mock execution engine to return 2 blobs - executionEngine.getBlobs.mockResolvedValueOnce([ - { - blob: blobscommitmentsandproofs.blobs[0], - proof: blobscommitmentsandproofs.kzgProofs[0], - }, - { - blob: blobscommitmentsandproofs.blobs[1], - proof: blobscommitmentsandproofs.kzgProofs[1], - }, - null, - ]); - - // Mock network to return 2 blobs - network.sendBlobSidecarsByRoot.mockResolvedValueOnce([ - { - index: 3, - blob: blobscommitmentsandproofs.blobs[3], - kzgCommitment: blobscommitmentsandproofs.kzgCommitments[3], - kzgProof: blobscommitmentsandproofs.kzgProofs[3], - signedBlockHeader: blockheader, - kzgCommitmentInclusionProof: computeInclusionProof(ForkName.deneb, signedBlock.message.body, 3), - }, - { - index: 4, - blob: blobscommitmentsandproofs.blobs[4], - kzgCommitment: blobscommitmentsandproofs.kzgCommitments[4], - kzgProof: blobscommitmentsandproofs.kzgProofs[4], - signedBlockHeader: blockheader, - kzgCommitmentInclusionProof: computeInclusionProof(ForkName.deneb, signedBlock.message.body, 4), - }, - ]); - - const result = await unavailableBeaconBlobsByRoot( - config, - network as unknown as INetwork, - peerId, - "peerClient", - unavailableBlockInput, - { - executionEngine: executionEngine as unknown as IExecutionEngine, - emitter: new ChainEventEmitter(), - engineGetBlobsCache, - } - ); - - // Check if all blobs are aggregated - const allBlobs = [ - { - index: 0, - blob: blobscommitmentsandproofs.blobs[0], - kzgCommitment: blobscommitmentsandproofs.kzgCommitments[0], - kzgProof: blobscommitmentsandproofs.kzgProofs[0], - signedBlockHeader: blockheader, - kzgCommitmentInclusionProof: computeInclusionProof(ForkName.deneb, signedBlock.message.body, 0), - }, - { - index: 1, - blob: blobscommitmentsandproofs.blobs[1], - kzgCommitment: blobscommitmentsandproofs.kzgCommitments[1], - kzgProof: blobscommitmentsandproofs.kzgProofs[1], - signedBlockHeader: blockheader, - kzgCommitmentInclusionProof: computeInclusionProof(ForkName.deneb, signedBlock.message.body, 1), - }, - { - index: 2, - blob: blobscommitmentsandproofs.blobs[2], - kzgCommitment: blobscommitmentsandproofs.kzgCommitments[2], - kzgProof: blobscommitmentsandproofs.kzgProofs[2], - signedBlockHeader: blockheader, - kzgCommitmentInclusionProof: computeInclusionProof(ForkName.deneb, signedBlock.message.body, 2), - }, - { - index: 3, - blob: blobscommitmentsandproofs.blobs[3], - kzgCommitment: blobscommitmentsandproofs.kzgCommitments[3], - kzgProof: blobscommitmentsandproofs.kzgProofs[3], - signedBlockHeader: blockheader, - 
kzgCommitmentInclusionProof: computeInclusionProof(ForkName.deneb, signedBlock.message.body, 3), - }, - { - index: 4, - blob: blobscommitmentsandproofs.blobs[4], - kzgCommitment: blobscommitmentsandproofs.kzgCommitments[4], - kzgProof: blobscommitmentsandproofs.kzgProofs[4], - signedBlockHeader: blockheader, - kzgCommitmentInclusionProof: computeInclusionProof(ForkName.deneb, signedBlock.message.body, 4), - }, - ]; - - const blockData: BlockInputAvailableData = { - fork: ForkName.deneb, - blobs: allBlobs, - blobsSource: BlobsSource.byRoot, - }; - const resolvedBlobs = getBlockInput.availableData(config, signedBlock, BlockSource.byRoot, blockData); - - const engineReqIdentifiers = [...blobscommitmentsandproofs.blobVersionedHashes]; - // versionedHashes: 1,2,4 - engineReqIdentifiers.splice(2, 2); - expect(result).toBeDefined(); - expect(executionEngine.getBlobs).toHaveBeenCalledWith("deneb", engineReqIdentifiers); - expect(result).toEqual(resolvedBlobs); - }); - }); - - describe("data columns", () => { - const chainConfig = createChainForkConfig({ - ...defaultChainConfig, - ALTAIR_FORK_EPOCH: 0, - BELLATRIX_FORK_EPOCH: 0, - CAPELLA_FORK_EPOCH: 0, - DENEB_FORK_EPOCH: 0, - ELECTRA_FORK_EPOCH: 0, - FULU_FORK_EPOCH: 0, - }); - const genesisValidatorsRoot = Buffer.alloc(32, 0xaa); - const config = createBeaconConfig(chainConfig, genesisValidatorsRoot); - - const executionEngine = { - getBlobs: vi.fn(), - }; - - const network = { - sendBeaconBlocksByRoot: vi.fn(), - sendBlobSidecarsByRoot: vi.fn(), - custodyConfig: new CustodyConfig({ - nodeId: computeNodeId(getValidPeerId()), - config, - }), - }; - - const peerId = "mockPeerId"; - const engineGetBlobsCache = new Map(); - - it("should successfully resolve all data columns from engine", async () => { - // Simulate a block 1 with 3 blobs - const signedBlock = ssz.fulu.SignedBeaconBlock.defaultValue(); - signedBlock.message.slot = 1; - const blobscommitmentsandproofs = generateBlobsWithCellProofs(3); - signedBlock.message.body.blobKzgCommitments.push(...blobscommitmentsandproofs.map((b) => b.kzgCommitment)); - - const unavailableBlockInput: BlockInput = { - block: signedBlock, - source: BlockSource.gossip, - type: BlockInputType.dataPromise, - cachedData: getEmptyBlockInputCacheEntry(ForkName.fulu, 1).cachedData as CachedData, - }; - - const blobAndProof: fulu.BlobAndProofV2[] = blobscommitmentsandproofs.map((b) => ({ - blob: b.blob, - proofs: b.cellsAndProofs.proofs, - })); - - // Mock execution engine to return all blobs - executionEngine.getBlobs.mockImplementationOnce( - (): Promise => Promise.resolve(blobAndProof) - ); - - const result = await unavailableBeaconBlobsByRoot( - config, - network as unknown as INetwork, - peerId, - "peerClient", - unavailableBlockInput, - { - executionEngine: executionEngine as unknown as IExecutionEngine, - emitter: new ChainEventEmitter(), - engineGetBlobsCache, - } - ); - - const sampledSidecars = getDataColumnSidecarsFromBlock( - config, - signedBlock, - blobscommitmentsandproofs.map((b) => b.cellsAndProofs) - ).filter((s) => network.custodyConfig.sampledColumns.includes(s.index)); - - expect(executionEngine.getBlobs).toHaveBeenCalledWith( - ForkName.fulu, - blobscommitmentsandproofs.map((b) => kzgCommitmentToVersionedHash(b.kzgCommitment)) - ); - expect(result.type).toEqual(BlockInputType.availableData); - if (result.type !== BlockInputType.availableData) throw new Error("Should not get here"); - expect(result.blockData.fork).toEqual(ForkName.fulu); - if (result.blockData.fork !== ForkName.fulu) throw new 
Error("Should not get here"); - expect(result.blockData.dataColumns).toEqual(sampledSidecars); - }); - }); -}); - -function generateBlobs(count: number): { - blobs: Uint8Array[]; - kzgCommitments: Uint8Array[]; - blobVersionedHashes: Uint8Array[]; - kzgProofs: Uint8Array[]; -} { - const blobs = Array.from({length: count}, (_, index) => generateRandomBlob(index)); - const kzgCommitments = blobs.map((blob) => kzg.blobToKzgCommitment(blob)); - const versionedHash = kzgCommitments.map((kzgCommitment) => kzgCommitmentToVersionedHash(kzgCommitment)); - const kzgProofs = blobs.map((blob, index) => kzg.computeBlobKzgProof(blob, kzgCommitments[index])); - - return { - blobs, - kzgCommitments, - blobVersionedHashes: versionedHash.map((hash) => hash), - kzgProofs, - }; -} - -function generateBlobsWithCellProofs( - count: number -): {blob: Uint8Array; cellsAndProofs: {cells: Uint8Array[]; proofs: Uint8Array[]}; kzgCommitment: Uint8Array}[] { - const blobs = Array.from({length: count}, (_, index) => generateRandomBlob(index)); - - return blobs.map((blob) => ({ - blob, - cellsAndProofs: kzg.computeCellsAndKzgProofs(blob), - kzgCommitment: kzg.blobToKzgCommitment(blob), - })); -} - -function generateRandomBlob(index: number): deneb.Blob { - const blob = new Uint8Array(FIELD_ELEMENTS_PER_BLOB * BYTES_PER_FIELD_ELEMENT); - const dv = new DataView(blob.buffer, blob.byteOffset, blob.byteLength); - - for (let i = 0; i < FIELD_ELEMENTS_PER_BLOB; i++) { - // Generate a unique value based on the index - dv.setUint32(i * BYTES_PER_FIELD_ELEMENT, index + i); - } - return blob; -} From 511d9f6f758c67f799dcf5b4e9af621fee57d7cf Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Fri, 22 Aug 2025 06:32:52 +0700 Subject: [PATCH 053/173] feat: modify ColumnMeta to support getBlobsV2 --- .../src/chain/blocks/blockInput/blockInput.ts | 21 ++++++++++++------- .../src/chain/blocks/blockInput/types.ts | 15 ++++++++----- packages/beacon-node/src/sync/range/batch.ts | 4 ++-- packages/beacon-node/src/sync/unknownBlock.ts | 6 +++--- 4 files changed, 28 insertions(+), 18 deletions(-) diff --git a/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts b/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts index a1430f0f7eb9..cf454a8c4724 100644 --- a/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts +++ b/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts @@ -11,7 +11,6 @@ import { BlobMeta, BlobWithSource, BlockInputInit, - ColumnMeta, ColumnWithSource, CreateBlockInputMeta, DAData, @@ -20,6 +19,7 @@ import { LogMetaBasic, LogMetaBlobs, LogMetaColumns, + MissingColumnMeta, PromiseParts, SourceMeta, } from "./types.js"; @@ -500,7 +500,7 @@ export class BlockInputBlobs extends AbstractBlockInput columnSidecar); } - getMissingSampledColumnMeta(): ColumnMeta[] { + getMissingSampledColumnMeta(): MissingColumnMeta { if (this.state.hasAllData) { - return []; + return { + missing: [], + versionedHashes: this.state.versionedHashes, + }; } - const needed: ColumnMeta[] = []; - const blockRoot = fromHex(this.blockRootHex); + const missing: number[] = []; for (const index of this.sampledColumns) { if (!this.columnsCache.has(index)) { - needed.push({index, blockRoot}); + missing.push(index); } } - return needed; + return { + missing, + versionedHashes: this.state.versionedHashes, + }; } } diff --git a/packages/beacon-node/src/chain/blocks/blockInput/types.ts b/packages/beacon-node/src/chain/blocks/blockInput/types.ts index dbd8b99921d3..9cb73c9154ca 100644 --- 
a/packages/beacon-node/src/chain/blocks/blockInput/types.ts +++ b/packages/beacon-node/src/chain/blocks/blockInput/types.ts @@ -1,5 +1,6 @@ import {ForkName} from "@lodestar/params"; -import {RootHex, SignedBeaconBlock, Slot, deneb, fulu} from "@lodestar/types"; +import {ColumnIndex, RootHex, SignedBeaconBlock, Slot, deneb, fulu} from "@lodestar/types"; +import {VersionedHashes} from "../../../execution"; export enum DAType { PreData = "pre-data", @@ -86,11 +87,15 @@ export type AddColumn = ColumnWithSource & { blockRootHex: RootHex; }; -export type BlobMeta = ColumnMeta & {versionHash: Uint8Array}; - -export type ColumnMeta = { - blockRoot: Uint8Array; +export type BlobMeta = { index: number; + blockRoot: Uint8Array; + versionedHash: Uint8Array; +}; + +export type MissingColumnMeta = { + missing: ColumnIndex[]; + versionedHashes: VersionedHashes; }; /** diff --git a/packages/beacon-node/src/sync/range/batch.ts b/packages/beacon-node/src/sync/range/batch.ts index 53608f39923e..e697f8907762 100644 --- a/packages/beacon-node/src/sync/range/batch.ts +++ b/packages/beacon-node/src/sync/range/batch.ts @@ -166,8 +166,8 @@ export class Batch { } if (!blockInput.hasAllData()) { if (isBlockInputColumns(blockInput)) { - for (const missing of blockInput.getMissingSampledColumnMeta()) { - neededColumns.add(missing.index); + for (const index of blockInput.getMissingSampledColumnMeta().missing) { + neededColumns.add(index); } } } else if (dataStartSlot === blockSlot) { diff --git a/packages/beacon-node/src/sync/unknownBlock.ts b/packages/beacon-node/src/sync/unknownBlock.ts index 4e0cf89bb074..dbe95db09556 100644 --- a/packages/beacon-node/src/sync/unknownBlock.ts +++ b/packages/beacon-node/src/sync/unknownBlock.ts @@ -491,7 +491,7 @@ export class BlockInputSync { while (i++ < this.getMaxDownloadAttempts()) { const pendingColumns = isPendingBlockInput(cacheItem) && isBlockInputColumns(cacheItem.blockInput) - ? new Set(cacheItem.blockInput.getMissingSampledColumnMeta().map((meta) => meta.index)) + ? 
new Set(cacheItem.blockInput.getMissingSampledColumnMeta().missing) : defaultPendingColumns; // pendingDataColumns is null pre-fulu const peer = this.peerBalancer.bestPeerForPendingColumns(pendingColumns, excludedPeers); @@ -546,7 +546,7 @@ export class BlockInputSync { message += ` Missing blob indices=${prettyPrintIndices(missing)}`; } } else if (isBlockInputColumns(cacheItem.blockInput)) { - const missing = cacheItem.blockInput.getMissingSampledColumnMeta().map((b) => b.index); + const missing = cacheItem.blockInput.getMissingSampledColumnMeta().missing; if (missing.length) { message += ` Missing column indices=${prettyPrintIndices(missing)}`; } @@ -677,7 +677,7 @@ export class UnknownBlockPeerBalancer { const eligiblePeers: PeerIdStr[] = []; if (isBlockInputColumns(blockInput)) { - const pendingDataColumns: Set = new Set(blockInput.getMissingSampledColumnMeta().map((c) => c.index)); + const pendingDataColumns: Set = new Set(blockInput.getMissingSampledColumnMeta().missing); // there could be no pending column in case when block is still missing eligiblePeers.push(...this.filterPeers(pendingDataColumns, excludedPeers)); } else { From 57a76999a1ca7bd1e78d2642263599d042e679b5 Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Fri, 22 Aug 2025 06:33:12 +0700 Subject: [PATCH 054/173] feat: make logger public on network --- packages/beacon-node/src/network/interface.ts | 2 ++ packages/beacon-node/src/network/network.ts | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/packages/beacon-node/src/network/interface.ts b/packages/beacon-node/src/network/interface.ts index 6da6c77c1b01..5b580586ccc4 100644 --- a/packages/beacon-node/src/network/interface.ts +++ b/packages/beacon-node/src/network/interface.ts @@ -15,6 +15,7 @@ import { Upgrader, } from "@libp2p/interface"; import type {AddressManager, ConnectionManager, Registrar, TransportManager} from "@libp2p/interface-internal"; +import {LoggerNode} from "@lodestar/logger/node"; import { AttesterSlashing, LightClientFinalityUpdate, @@ -58,6 +59,7 @@ export interface INetwork extends INetworkCorePublic { readonly peerId: PeerId; readonly custodyConfig: CustodyConfig; readonly closed: boolean; + readonly logger: LoggerNode; events: INetworkEventBus; getConnectedPeers(): PeerIdStr[]; diff --git a/packages/beacon-node/src/network/network.ts b/packages/beacon-node/src/network/network.ts index 41e3625c59ba..1e4c1c38a6ff 100644 --- a/packages/beacon-node/src/network/network.ts +++ b/packages/beacon-node/src/network/network.ts @@ -97,10 +97,10 @@ export type NetworkInitModules = { export class Network implements INetwork { readonly peerId: PeerId; readonly custodyConfig: CustodyConfig; + readonly logger: LoggerNode; // TODO: Make private readonly events: INetworkEventBus; - private readonly logger: LoggerNode; private readonly config: BeaconConfig; private readonly clock: IClock; private readonly chain: IBeaconChain; From a7c69e55a8cb664784df8581a5785285254ad3cf Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Fri, 22 Aug 2025 06:33:27 +0700 Subject: [PATCH 055/173] feat: make getCellsAndProofs async --- packages/beacon-node/src/util/dataColumns.ts | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/packages/beacon-node/src/util/dataColumns.ts b/packages/beacon-node/src/util/dataColumns.ts index 14250d827cf0..42396d4e51f2 100644 --- a/packages/beacon-node/src/util/dataColumns.ts +++ b/packages/beacon-node/src/util/dataColumns.ts @@ -238,11 +238,15 @@ export function getDataColumns(config: ChainForkConfig, 
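// Sketch of what the new MissingColumnMeta shape enables: one call now yields
// both the column indices still to fetch and the versioned hashes needed to key
// an engine getBlobs (V2) request, instead of per-column {index, blockRoot}
// records. The function name fetchBundlesForMissingColumns and the engine
// return typing are assumptions for illustration.
import type {ForkPostFulu} from "@lodestar/params";
import type {fulu} from "@lodestar/types";
import type {BlockInputColumns} from "../../chain/blocks/blockInput/index.js";
import type {IExecutionEngine} from "../../execution/index.js";

async function fetchBundlesForMissingColumns(
  blockInput: BlockInputColumns,
  executionEngine: IExecutionEngine,
  forkName: ForkPostFulu
): Promise<fulu.BlobAndProofV2[] | null> {
  const {missing, versionedHashes} = blockInput.getMissingSampledColumnMeta();
  if (missing.length === 0) {
    return null; // hasAllData() was true; nothing left to sample
  }
  // getBlobsV2 is keyed by versioned hash, which is why the meta now carries them
  const response = await executionEngine.getBlobs(forkName, versionedHashes);
  return response ?? null;
}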
nodeId: NodeId, custodyG * SPEC FUNCTION (note: spec currently computes proofs, but we already have them) * https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.4/specs/fulu/das-core.md#compute_matrix */ -export function getCellsAndProofs(blobBundles: fulu.BlobAndProofV2[]): {cells: Uint8Array[]; proofs: Uint8Array[]}[] { - return blobBundles.map(({blob, proofs}) => { - const cells = kzg.computeCells(blob); - return {cells, proofs}; - }); +export async function getCellsAndProofs( + blobBundles: fulu.BlobAndProofV2[] +): Promise<{cells: Uint8Array[]; proofs: Uint8Array[]}[]> { + const blobsAndProofs: {cells: Uint8Array[]; proofs: Uint8Array[]}[] = []; + for (const {blob, proofs} of blobBundles) { + const cells = await kzg.asyncComputeCells(blob); + blobsAndProofs.push({cells, proofs}); + } + return blobsAndProofs; } /** From 7265027798b1b3ba52b49ed3eb4b0ef597b3df37 Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Fri, 22 Aug 2025 06:33:52 +0700 Subject: [PATCH 056/173] feat: add getBlobsV1 and V2 to downloadByRoot --- .../src/sync/utils/downloadByRoot.ts | 348 +++++++++++++++--- 1 file changed, 298 insertions(+), 50 deletions(-) diff --git a/packages/beacon-node/src/sync/utils/downloadByRoot.ts b/packages/beacon-node/src/sync/utils/downloadByRoot.ts index c4f6caac62d0..99fe469ad22f 100644 --- a/packages/beacon-node/src/sync/utils/downloadByRoot.ts +++ b/packages/beacon-node/src/sync/utils/downloadByRoot.ts @@ -1,9 +1,18 @@ import {ChainForkConfig} from "@lodestar/config"; -import {ForkPostDeneb, NUMBER_OF_COLUMNS, isForkPostDeneb, isForkPostFulu} from "@lodestar/params"; +import { + ForkName, + ForkPostDeneb, + ForkPostFulu, + ForkPreFulu, + NUMBER_OF_COLUMNS, + isForkPostDeneb, + isForkPostFulu, +} from "@lodestar/params"; +import {signedBlockToSignedHeader} from "@lodestar/state-transition"; import {BlobIndex, ColumnIndex, RootHex, SignedBeaconBlock, deneb, fulu, phase0} from "@lodestar/types"; import {LodestarError, fromHex, prettyBytes, toRootHex} from "@lodestar/utils"; import {isBlockInputBlobs, isBlockInputColumns} from "../../chain/blocks/blockInput/blockInput.js"; -import {BlockInputSource, IBlockInput} from "../../chain/blocks/blockInput/types.js"; +import {BlobMeta, BlockInputSource, IBlockInput, MissingColumnMeta} from "../../chain/blocks/blockInput/types.js"; import {SeenBlockInput} from "../../chain/seenCache/seenGossipBlockInput.js"; import {validateBlobSidecarInclusionProof, validateBlobsAndBlobProofs} from "../../chain/validation/blobSidecar.js"; import { @@ -13,7 +22,10 @@ import { import {IExecutionEngine} from "../../execution/index.js"; import {INetwork} from "../../network/interface.js"; import {prettyPrintPeerIdStr} from "../../network/util.js"; +import {computeInclusionProof, kzgCommitmentToVersionedHash} from "../../util/blobs.js"; import {byteArrayEquals} from "../../util/bytes.js"; +import {getCellsAndProofs, getDataColumnSidecarsFromBlock} from "../../util/dataColumns.js"; +import {kzg} from "../../util/kzg.js"; import {PeerIdStr} from "../../util/peerId.js"; import {BlobSidecarsByRootRequest} from "../../util/types.js"; import { @@ -37,11 +49,15 @@ export type FetchByRootProps = FetchByRootCoreProps & { export type FetchByRootAndValidateBlockProps = FetchByRootCoreProps & {blockRoot: Uint8Array}; export type FetchByRootAndValidateBlobsProps = FetchByRootAndValidateBlockProps & { executionEngine: IExecutionEngine; - blobIndices: BlobIndex[]; + forkName: ForkPreFulu; + block: SignedBeaconBlock; + blobMeta: BlobMeta[]; }; export type 
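// getCellsAndProofs is now async (each blob's cells come from
// kzg.asyncComputeCells, which returns a promise), so sidecar construction must
// await it before calling getDataColumnSidecarsFromBlock. A minimal sketch of
// the intended call pattern; the import paths and the generic on
// SignedBeaconBlock are assumptions.
import type {ChainForkConfig} from "@lodestar/config";
import type {ForkPostFulu} from "@lodestar/params";
import type {SignedBeaconBlock, fulu} from "@lodestar/types";
import {getCellsAndProofs, getDataColumnSidecarsFromBlock} from "../../util/dataColumns.js";

async function buildColumnSidecars(
  config: ChainForkConfig,
  block: SignedBeaconBlock<ForkPostFulu>,
  blobBundles: fulu.BlobAndProofV2[]
): Promise<fulu.DataColumnSidecars> {
  // cells are recomputed from each blob; the proofs in the bundle are reused as-is
  const cellsAndProofs = await getCellsAndProofs(blobBundles);
  return getDataColumnSidecarsFromBlock(config, block, cellsAndProofs);
}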
FetchByRootAndValidateColumnsProps = FetchByRootAndValidateBlockProps & { executionEngine: IExecutionEngine; - columnIndices: ColumnIndex[]; + forkName: ForkPostFulu; + block: SignedBeaconBlock; + columnMeta: MissingColumnMeta; }; export type FetchByRootResponses = { block: SignedBeaconBlock; @@ -176,6 +192,7 @@ export async function fetchByRoot({ }); } + const forkName = config.getForkName(block.message.slot); if (!cacheItem.blockInput.hasAllData()) { if (isBlockInputBlobs(cacheItem.blockInput)) { blobSidecars = await fetchAndValidateBlobs({ @@ -183,8 +200,10 @@ export async function fetchByRoot({ network, executionEngine, peerIdStr, + forkName: forkName as ForkPreFulu, + block: block as SignedBeaconBlock, blockRoot, - blobIndices: cacheItem.blockInput.getMissingBlobMeta().map((b) => b.index), + blobMeta: cacheItem.blockInput.getMissingBlobMeta(), }); } if (isBlockInputColumns(cacheItem.blockInput)) { @@ -193,8 +212,10 @@ export async function fetchByRoot({ network, executionEngine, peerIdStr, + forkName: forkName as ForkPostFulu, + block: block as SignedBeaconBlock, blockRoot, - columnIndices: cacheItem.blockInput.getMissingSampledColumnMeta().map((c) => c.index), + columnMeta: cacheItem.blockInput.getMissingSampledColumnMeta(), }); } } @@ -212,18 +233,32 @@ export async function fetchByRoot({ network, executionEngine, peerIdStr, + forkName, blockRoot, - columnIndices: network.custodyConfig.sampledColumns, + block: block as SignedBeaconBlock, + columnMeta: { + missing: network.custodyConfig.sampledColumns, + versionedHashes: (block as SignedBeaconBlock).message.body.blobKzgCommitments.map((c) => + kzgCommitmentToVersionedHash(c) + ), + }, }); } else if (isForkPostDeneb(forkName)) { - const blobCount = (block as SignedBeaconBlock).message.body.blobKzgCommitments.length; + const commitments = (block as SignedBeaconBlock).message.body.blobKzgCommitments; + const blobCount = commitments.length; blobSidecars = await fetchAndValidateBlobs({ config, network, executionEngine, peerIdStr, + forkName: forkName as ForkPreFulu, blockRoot, - blobIndices: Array.from({length: blobCount}, (_, i) => i), + block: block as SignedBeaconBlock, + blobMeta: Array.from({length: blobCount}, (_, i) => ({ + index: i, + blockRoot, + versionedHash: kzgCommitmentToVersionedHash(commitments[i]), + })), }); } } @@ -247,7 +282,7 @@ export async function fetchAndValidateBlock({ throw new DownloadByRootError({ code: DownloadByRootErrorCode.MISSING_BLOCK_RESPONSE, peer: prettyPrintPeerIdStr(peerIdStr), - blockRoot: prettyBytes(toRootHex(blockRoot)), + blockRoot: prettyBytes(blockRoot), }); } const receivedRoot = config.getForkTypes(block.message.slot).BeaconBlock.hashTreeRoot(block.message); @@ -256,7 +291,7 @@ export async function fetchAndValidateBlock({ { code: DownloadByRootErrorCode.MISMATCH_BLOCK_ROOT, peer: prettyPrintPeerIdStr(peerIdStr), - requestedBlockRoot: prettyBytes(toRootHex(blockRoot)), + requestedBlockRoot: prettyBytes(blockRoot), receivedBlockRoot: prettyBytes(toRootHex(receivedRoot)), }, "block does not match requested root" @@ -268,26 +303,117 @@ export async function fetchAndValidateBlock({ export async function fetchAndValidateBlobs({ config, network, - // executionEngine, + executionEngine, + forkName, peerIdStr, blockRoot, - blobIndices, + block, + blobMeta, }: FetchByRootAndValidateBlobsProps): Promise { - const blobsRequest = blobIndices.map((index) => ({blockRoot, index})); - const blobSidecars = await network.sendBlobSidecarsByRoot(peerIdStr, blobsRequest); + const blobSidecars = await 
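// fetchByRoot now precomputes one BlobMeta entry (index, blockRoot,
// versionedHash) per commitment, so the execution engine can be queried by
// versioned hash before falling back to ReqResp. A sketch of that mapping;
// buildBlobMeta is an illustrative name, the field names come from the new
// types.ts in this patch.
import type {ForkPostDeneb} from "@lodestar/params";
import type {SignedBeaconBlock} from "@lodestar/types";
import type {BlobMeta} from "../../chain/blocks/blockInput/types.js";
import {kzgCommitmentToVersionedHash} from "../../util/blobs.js";

function buildBlobMeta(block: SignedBeaconBlock<ForkPostDeneb>, blockRoot: Uint8Array): BlobMeta[] {
  return block.message.body.blobKzgCommitments.map((commitment, index) => ({
    index,
    blockRoot,
    versionedHash: kzgCommitmentToVersionedHash(commitment),
  }));
}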
fetchGetBlobsV1AndBuildSidecars({ + config, + executionEngine, + forkName, + block, + blobMeta, + }); + + // not all needed blobs were fetched via getBlobs, need to use ReqResp + if (blobSidecars.length !== blobMeta.length) { + const networkResponse = await fetchBlobByRoot({ + network, + peerIdStr, + blockRoot, + blobMeta, + indicesInPossession: blobSidecars.map((b) => b.index), + }); + blobSidecars.push(...networkResponse); + } + + await validateBlobs({config, peerIdStr, blockRoot, blobMeta, blobSidecars}); + + return blobSidecars; +} + +export async function fetchGetBlobsV1AndBuildSidecars({ + config, + executionEngine, + forkName, + block, + blobMeta, +}: Pick< + FetchByRootAndValidateBlobsProps, + "config" | "executionEngine" | "forkName" | "block" | "blobMeta" +>): Promise { + const blobSidecars: deneb.BlobSidecars = []; + + const enginedResponse = await executionEngine.getBlobs( + forkName, + blobMeta.map(({versionedHash: versionHash}) => versionHash) + ); + + if (enginedResponse.length > 0) { + // response.length should always match blobMeta.length and they should be in the same order + for (let i = 0; i < blobMeta.length; i++) { + const blobAndProof = enginedResponse[i]; + if (blobAndProof) { + const {blob, proof} = blobAndProof; + const index = blobMeta[i].index; + const kzgCommitment = block.message.body.blobKzgCommitments[i]; + const sidecar: deneb.BlobSidecar = { + index, + blob, + kzgProof: proof, + kzgCommitment, + kzgCommitmentInclusionProof: computeInclusionProof(forkName, block.message.body, index), + signedBlockHeader: signedBlockToSignedHeader(config, block), + }; + blobSidecars.push(sidecar); + } + } + } + + return blobSidecars; +} +export async function fetchBlobByRoot({ + network, + peerIdStr, + blockRoot, + blobMeta, + indicesInPossession, +}: Pick & { + indicesInPossession: number[]; +}): Promise { + const blobsRequest = blobMeta + .filter(({index}) => !indicesInPossession.includes(index)) + .map(({index}) => ({blockRoot, index})); + return await network.sendBlobSidecarsByRoot(peerIdStr, blobsRequest); +} + +export async function validateBlobs({ + config, + blockRoot, + peerIdStr, + blobMeta, + blobSidecars, +}: Pick & { + blobSidecars: deneb.BlobSidecars; +}): Promise { + const requestedIndices = blobMeta.map((b) => b.index); for (const blobSidecar of blobSidecars) { - if (!blobIndices.includes(blobSidecar.index)) { + if (!requestedIndices.includes(blobSidecar.index)) { throw new DownloadByRootError( { code: DownloadByRootErrorCode.EXTRA_SIDECAR_RECEIVED, peer: prettyPrintPeerIdStr(peerIdStr), - blockRoot: prettyBytes(toRootHex(blockRoot)), + blockRoot: prettyBytes(blockRoot), invalidIndex: blobSidecar.index, }, "received a blobSidecar that was not requested" ); } + const headerRoot = config .getForkTypes(blobSidecar.signedBlockHeader.message.slot) .BeaconBlockHeader.hashTreeRoot(blobSidecar.signedBlockHeader.message); @@ -296,7 +422,7 @@ export async function fetchAndValidateBlobs({ { code: DownloadByRootErrorCode.MISMATCH_BLOCK_ROOT, peer: prettyPrintPeerIdStr(peerIdStr), - requestedBlockRoot: prettyBytes(toRootHex(blockRoot)), + requestedBlockRoot: prettyBytes(blockRoot), receivedBlockRoot: prettyBytes(toRootHex(headerRoot)), }, `blobSidecar.signedBlockHeader not match requested blockRoot for index=${blobSidecar.index}` @@ -307,7 +433,7 @@ export async function fetchAndValidateBlobs({ throw new DownloadByRootError({ code: DownloadByRootErrorCode.INVALID_INCLUSION_PROOF, peer: prettyPrintPeerIdStr(peerIdStr), - blockRoot: prettyBytes(toRootHex(blockRoot)), + 
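// A condensed sketch of the control flow implemented above: the execution
// engine is asked first (getBlobsV1, keyed by versioned hash), ReqResp is used
// only for the indices the engine could not supply, and the combined set is
// validated once. fetchBlobsEngineFirst is an illustrative name; error handling
// is elided and the prop plumbing assumes the Pick-based signatures in the diff.
import type {deneb} from "@lodestar/types";

async function fetchBlobsEngineFirst(props: FetchByRootAndValidateBlobsProps): Promise<deneb.BlobSidecars> {
  const blobSidecars = await fetchGetBlobsV1AndBuildSidecars(props);
  if (blobSidecars.length !== props.blobMeta.length) {
    // only request what getBlobsV1 could not provide
    const fromNetwork = await fetchBlobByRoot({...props, indicesInPossession: blobSidecars.map((s) => s.index)});
    blobSidecars.push(...fromNetwork);
  }
  await validateBlobs({...props, blobSidecars});
  return blobSidecars;
}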
blockRoot: prettyBytes(blockRoot), sidecarIndex: blobSidecar.index, }); } @@ -323,61 +449,120 @@ export async function fetchAndValidateBlobs({ throw new DownloadByRootError({ code: DownloadByRootErrorCode.INVALID_KZG_PROOF, peer: prettyPrintPeerIdStr(peerIdStr), - blockRoot: prettyBytes(toRootHex(blockRoot)), + blockRoot: prettyBytes(blockRoot), }); } - - return blobSidecars; } -export async function fetchAndValidateColumns({ +export async function fetchGetBlobsV2AndBuildSidecars({ config, + executionEngine, + forkName, + block, + columnMeta, +}: Pick< + FetchByRootAndValidateColumnsProps, + "config" | "executionEngine" | "forkName" | "block" | "columnMeta" +>): Promise { + const response = await executionEngine.getBlobs(forkName, columnMeta.versionedHashes); + if (!response) { + return []; + } + + const cellsAndProofs = await getCellsAndProofs(response); + return getDataColumnSidecarsFromBlock(config, block, cellsAndProofs); +} + +export async function fetchColumnsByRoot({ network, - // executionEngine, peerIdStr, blockRoot, - columnIndices, + columnMeta, }: FetchByRootAndValidateColumnsProps): Promise { - const columnSidecars = await network.sendDataColumnSidecarsByRoot(peerIdStr, [{blockRoot, columns: columnIndices}]); + return await network.sendDataColumnSidecarsByRoot(peerIdStr, [{blockRoot, columns: columnMeta.missing}]); +} + +export function validateColumnSidecar({ + config, + peerIdStr, + blockRoot, + columnSidecar, +}: Pick & { + columnSidecar: fulu.DataColumnSidecar; +}): void { + const headerRoot = config + .getForkTypes(columnSidecar.signedBlockHeader.message.slot) + .BeaconBlockHeader.hashTreeRoot(columnSidecar.signedBlockHeader.message); + if (byteArrayEquals(blockRoot, headerRoot)) { + throw new DownloadByRootError( + { + code: DownloadByRootErrorCode.MISMATCH_BLOCK_ROOT, + peer: prettyPrintPeerIdStr(peerIdStr), + requestedBlockRoot: prettyBytes(blockRoot), + receivedBlockRoot: prettyBytes(toRootHex(headerRoot)), + }, + `columnSidecar.signedBlockHeader not match requested blockRoot for index=${columnSidecar.index}` + ); + } - for (const columnSidecar of columnSidecars) { - if (!columnIndices.includes(columnSidecar.index)) { + if (!verifyDataColumnSidecarInclusionProof(columnSidecar)) { + throw new DownloadByRootError({ + code: DownloadByRootErrorCode.INVALID_INCLUSION_PROOF, + peer: prettyPrintPeerIdStr(peerIdStr), + blockRoot: prettyBytes(blockRoot), + sidecarIndex: columnSidecar.index, + }); + } +} + +export async function validateColumnSidecars({ + config, + peerIdStr, + blockRoot, + columnMeta, + needed, + needToPublish = [], +}: Pick & { + needed: fulu.DataColumnSidecars; + needToPublish?: fulu.DataColumnSidecars; +}): Promise { + const requestedIndices = columnMeta.missing; + for (const columnSidecar of needed) { + if (!requestedIndices.includes(columnSidecar.index)) { throw new DownloadByRootError( { code: DownloadByRootErrorCode.EXTRA_SIDECAR_RECEIVED, peer: prettyPrintPeerIdStr(peerIdStr), - blockRoot: prettyBytes(toRootHex(blockRoot)), + blockRoot: prettyBytes(blockRoot), invalidIndex: columnSidecar.index, }, "received a columnSidecar that was not requested" ); } - const headerRoot = config - .getForkTypes(columnSidecar.signedBlockHeader.message.slot) - .BeaconBlockHeader.hashTreeRoot(columnSidecar.signedBlockHeader.message); - if (byteArrayEquals(blockRoot, headerRoot)) { - throw new DownloadByRootError( - { - code: DownloadByRootErrorCode.MISMATCH_BLOCK_ROOT, - peer: prettyPrintPeerIdStr(peerIdStr), - requestedBlockRoot: prettyBytes(toRootHex(blockRoot)), - 
receivedBlockRoot: prettyBytes(toRootHex(headerRoot)), - }, - `columnSidecar.signedBlockHeader not match requested blockRoot for index=${columnSidecar.index}` - ); - } + validateColumnSidecar({ + config, + peerIdStr, + blockRoot, + columnSidecar, + }); + } - if (!verifyDataColumnSidecarInclusionProof(columnSidecar)) { - throw new DownloadByRootError({ - code: DownloadByRootErrorCode.INVALID_INCLUSION_PROOF, - peer: prettyPrintPeerIdStr(peerIdStr), - blockRoot: prettyBytes(toRootHex(blockRoot)), - sidecarIndex: columnSidecar.index, + const checkedIndices = needed.map((c) => c.index); + const needToCheckProof: fulu.DataColumnSidecars = []; + for (const columnSidecar of needToPublish) { + if (!checkedIndices.includes(columnSidecar.index)) { + validateColumnSidecar({ + config, + peerIdStr, + blockRoot, + columnSidecar, }); + needToCheckProof.push(columnSidecar); } } + const columnSidecars = [...needed, ...needToCheckProof]; try { // TODO(fulu): need to double check that the construction of these arrays is correct await verifyDataColumnSidecarKzgProofs( @@ -390,9 +575,72 @@ export async function fetchAndValidateColumns({ throw new DownloadByRootError({ code: DownloadByRootErrorCode.INVALID_KZG_PROOF, peer: prettyPrintPeerIdStr(peerIdStr), - blockRoot: prettyBytes(toRootHex(blockRoot)), + blockRoot: prettyBytes(blockRoot), }); } +} + +export async function fetchAndValidateColumns({ + config, + network, + executionEngine, + forkName, + peerIdStr, + block, + blockRoot, + columnMeta, +}: FetchByRootAndValidateColumnsProps): Promise { + let columnSidecars = await fetchGetBlobsV2AndBuildSidecars({ + config, + executionEngine, + forkName, + block, + columnMeta, + }); + + if (columnSidecars.length) { + // limit reconstructed to only the ones we need + const needed = columnSidecars.filter((c) => columnMeta.missing.includes(c.index)); + // spec states that reconstructed sidecars need to be published to the network, but only requires + // publishing the ones that we custody and have not already been published. 
+    const alreadyPublished = network.custodyConfig.custodyColumns.filter(
+      (index) => !columnMeta.missing.includes(index)
+    );
+    const needToPublish = columnSidecars.filter(
+      (c) => network.custodyConfig.custodyColumns.includes(c.index) && !alreadyPublished.includes(c.index)
+    );
+    // need to validate the ones we sample and will process
+    await validateColumnSidecars({
+      config,
+      peerIdStr,
+      blockRoot,
+      columnMeta,
+      needed,
+      needToPublish,
+    });
+    needToPublish.map((column) =>
+      network.publishDataColumnSidecar(column).catch((err) =>
+        network.logger.error(
+          "Error publishing column after getBlobsV2 reconstruct",
+          {
+            index: column.index,
+            blockRoot: prettyBytes(blockRoot),
+          },
+          err
+        )
+      )
+    );
+    return needed;
+  }
+
+  columnSidecars = await network.sendDataColumnSidecarsByRoot(peerIdStr, [{blockRoot, columns: columnMeta.missing}]);
+  await validateColumnSidecars({
+    config,
+    peerIdStr,
+    blockRoot,
+    columnMeta,
+    needed: columnSidecars,
+  });

   return columnSidecars;
 }

From 4c248a3a47a6249e240f3cd0e21ef70f99d488c8 Mon Sep 17 00:00:00 2001
From: matthewkeil
Date: Fri, 22 Aug 2025 07:30:42 +0700
Subject: [PATCH 057/173] test: rough out unit tests for downloadByRoot

---
 .../unit/sync/utils/downloadByRoot.test.ts    | 558 ++++++++++++++++++
 1 file changed, 558 insertions(+)
 create mode 100644 packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts

diff --git a/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts b/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts
new file mode 100644
index 000000000000..6c5cdf70557c
--- /dev/null
+++ b/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts
@@ -0,0 +1,558 @@
+import {ForkName} from "@lodestar/params";
+import {SignedBeaconBlock, deneb, fulu, ssz} from "@lodestar/types";
+import {fromHex, prettyBytes, toRootHex} from "@lodestar/utils";
+import {Mock, beforeAll, beforeEach, describe, expect, it, vi} from "vitest";
+import {BlockInputSource, IBlockInput} from "../../../../src/chain/blocks/blockInput/types.js";
+import {ChainEventEmitter} from "../../../../src/chain/index.js";
+import {SeenBlockInput} from "../../../../src/chain/seenCache/seenGossipBlockInput.js";
+import {IExecutionEngine} from "../../../../src/execution/index.js";
+import {INetwork} from "../../../../src/network/index.js";
+import {BlockInputSyncCacheItem, PendingBlockInput, PendingBlockInputStatus} from "../../../../src/sync/types.js";
+import {
+  DownloadByRootError,
+  DownloadByRootErrorCode,
+  downloadByRoot,
+  fetchAndValidateBlobs,
+  fetchAndValidateBlock,
+  fetchAndValidateColumns,
+  fetchBlobByRoot,
+  fetchByRoot,
+  fetchColumnsByRoot,
+  fetchGetBlobsV1AndBuildSidecars,
+  fetchGetBlobsV2AndBuildSidecars,
+  validateBlobs,
+  validateColumnSidecar,
+  validateColumnSidecars,
+} from "../../../../src/sync/utils/downloadByRoot.js";
+import {kzgCommitmentToVersionedHash} from "../../../../src/util/blobs.js";
+import {Clock} from "../../../../src/util/clock.js";
+import {getMockedLogger} from "../../../../test/mocks/loggerMock.js";
+import {
+  config,
+  custodyConfig,
+  generateBlockWithBlobSidecars,
+  generateBlockWithColumnSidecars,
+  generateChainOfBlocks,
+  slots,
+} from "../../../utils/blocksAndData.js";
+
+describe("downloadByRoot.ts", () => {
+  const peerIdStr = "0x1234567890abcdef";
+  // let cache: SeenBlockInput;
+  // let network: INetwork;
+  // let executionEngine: IExecutionEngine;
+  const logger = getMockedLogger();
+
+  // Test data
+  // let capellaBlock: SignedBeaconBlock;
+  let denebBlockWithBlobs: ReturnType<typeof generateBlockWithBlobSidecars>;
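+  // generateBlockWithBlobSidecars and generateBlockWithColumnSidecars come from
+  // test/utils/blocksAndData.ts and return the generated block together with its roots
+  // and sidecars (blockRoot, rootHex, blobSidecars/columnSidecars are used throughout
+  // this file), hence the ReturnType<typeof ...> declarations.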
+  let fuluBlockWithColumns: ReturnType<typeof generateBlockWithColumnSidecars>;
+  let blockRoot: Uint8Array;
+  // let rootHex: string;
+
+  beforeAll(() => {
+    // Generate test blocks
+    // const capellaBlocks = generateChainOfBlocks({forkName: ForkName.capella, count: 1});
+    // capellaBlock = capellaBlocks[0].block;
+
+    denebBlockWithBlobs = generateBlockWithBlobSidecars({forkName: ForkName.deneb});
+    fuluBlockWithColumns = generateBlockWithColumnSidecars({forkName: ForkName.fulu});
+
+    blockRoot = denebBlockWithBlobs.blockRoot;
+    // rootHex = denebBlockWithBlobs.rootHex;
+  });
+
+  beforeEach(() => {
+    // const abortController = new AbortController();
+    // const signal = abortController.signal;
+    // cache = new SeenBlockInput({
+    //   config,
+    //   custodyConfig,
+    //   clock: new Clock({config, signal, genesisTime: Math.floor(Date.now() / 1000)}),
+    //   chainEvents: new ChainEventEmitter(),
+    //   signal,
+    //   metrics: null,
+    //   logger,
+    // });
+    // network = {
+    //   sendBeaconBlocksByRoot: vi.fn(),
+    //   sendBlobSidecarsByRoot: vi.fn(),
+    //   sendDataColumnSidecarsByRoot: vi.fn(),
+    //   publishDataColumnSidecar: vi.fn(),
+    //   custodyConfig,
+    //   logger,
+    // } as unknown as INetwork;
+    // executionEngine = {
+    //   getBlobs: vi.fn(),
+    // } as unknown as IExecutionEngine;
+  });
+
+  describe("downloadByRoot", () => {
+    it("should successfully download block with blobs for post-Deneb fork", () => {
+      // Test downloading a block with blob sidecars in post-Deneb fork
+    });
+
+    it("should successfully download block with columns for post-Fulu fork", () => {
+      // Test downloading a block with column sidecars in post-Fulu fork
+    });
+
+    it("should successfully download block without additional data for pre-Deneb fork", () => {
+      // Test downloading a simple block in pre-Deneb fork
+    });
+
+    it("should handle pending block input that already has block", () => {
+      // Test case where cacheItem is PendingBlockInput and already has the block
+    });
+
+    it("should handle pending block input that needs block and data", () => {
+      // Test case where cacheItem is PendingBlockInput but missing block and data
+    });
+
+    it("should handle non-pending cache item", () => {
+      // Test case where cacheItem is not PendingBlockInput
+    });
+
+    it("should throw error when blob sidecars are missing for blob input", () => {
+      // Test MISSING_BLOB_RESPONSE error
+    });
+
+    it("should throw error when column sidecars are missing for column input", () => {
+      // Test MISSING_COLUMN_RESPONSE error
+    });
+
+    it("should return downloaded status when block has all data", () => {
+      // Test status is set to downloaded when blockInput.hasBlockAndAllData() returns true
+    });
+
+    it("should return pending status when block is missing data", () => {
+      // Test status is set to pending when blockInput.hasBlockAndAllData() returns false
+    });
+  });
+
+  describe("fetchByRoot", () => {
+    it("should fetch block and blobs for pending block input in post-Deneb fork", () => {
+      // Test fetching when cacheItem is PendingBlockInput and fork is post-Deneb
+    });
+
+    it("should fetch block and columns for pending block input in post-Fulu fork", () => {
+      // Test fetching when cacheItem is PendingBlockInput and fork is post-Fulu
+    });
+
+    it("should use existing block from pending block input", () => {
+      // Test when cacheItem.blockInput.hasBlock() returns true
+    });
+
+    it("should fetch new block when pending block input doesn't have block", () => {
+      // Test when cacheItem.blockInput.hasBlock() returns false
+    });
+
+    it("should skip data fetching when pending block input has all data", () => {
+      //
Test when cacheItem.blockInput.hasAllData() returns true + }); + + it("should fetch blobs when pending block input is missing blob data", () => { + // Test blob fetching for incomplete blob input + }); + + it("should fetch columns when pending block input is missing column data", () => { + // Test column fetching for incomplete column input + }); + + it("should fetch block and blobs for non-pending cache item in post-Deneb fork", () => { + // Test fetching for non-PendingBlockInput in post-Deneb + }); + + it("should fetch block and columns for non-pending cache item in post-Fulu fork", () => { + // Test fetching for non-PendingBlockInput in post-Fulu + }); + + it("should fetch only block for non-pending cache item in pre-Deneb fork", () => { + // Test fetching for non-PendingBlockInput in pre-Deneb + }); + }); + + describe("fetchAndValidateBlock", () => { + it("should successfully fetch and validate block with matching root", () => { + // Test successful block fetch and validation + }); + + it("should throw error when no block is returned from network", () => { + // Test MISSING_BLOCK_RESPONSE error + }); + + it("should throw error when block root doesn't match requested root", () => { + // Test MISMATCH_BLOCK_ROOT error + }); + + it("should handle network request failure", () => { + // Test network failure scenarios + }); + }); + + describe("fetchAndValidateBlobs", () => { + it("should successfully fetch blobs from execution engine only", () => { + // Test when all blobs are available from execution engine + }); + + it("should fetch remaining blobs from network when execution engine is incomplete", () => { + // Test when some blobs are from execution engine, others from network + }); + + it("should fetch all blobs from network when execution engine returns none", () => { + // Test when execution engine returns no blobs + }); + + it("should validate all fetched blobs successfully", () => { + // Test successful blob validation + }); + + it("should throw error when blob validation fails", () => { + // Test blob validation failure scenarios + }); + }); + + describe("fetchGetBlobsV1AndBuildSidecars", () => { + it("should build blob sidecars from execution engine response", () => { + // Test successful sidecar building from execution engine blobs + }); + + it("should return empty array when execution engine returns no blobs", () => { + // Test when execution engine returns empty response + }); + + it("should handle partial blob response from execution engine", () => { + // Test when execution engine returns some but not all requested blobs + }); + + it("should correctly compute inclusion proofs for blob sidecars", () => { + // Test inclusion proof computation + }); + + it("should handle execution engine errors gracefully", () => { + // Test execution engine failure scenarios + }); + }); + + describe("fetchBlobByRoot", () => { + it("should fetch blob sidecars by root from network", () => { + // Test successful network blob fetch + }); + + it("should filter out blobs already in possession", () => { + // Test that only missing blobs are requested + }); + + it("should handle empty blob request when all blobs are in possession", () => { + // Test when indicesInPossession includes all needed blobs + }); + + it("should handle network request failure", () => { + // Test network failure scenarios + }); + }); + + describe("validateBlobs", () => { + it("should successfully validate all blob sidecars", () => { + // Test successful blob validation + }); + + it("should throw error for extra unrequested blob 
sidecar", () => { + // Test EXTRA_SIDECAR_RECEIVED error + }); + + it("should throw error for mismatched block root in blob header", () => { + // Test MISMATCH_BLOCK_ROOT error for blob sidecar + }); + + it("should throw error for invalid inclusion proof", () => { + // Test INVALID_INCLUSION_PROOF error + }); + + it("should throw error for invalid KZG proof", () => { + // Test INVALID_KZG_PROOF error + }); + + it("should validate multiple blob sidecars correctly", () => { + // Test validation of multiple blobs + }); + }); + + describe("fetchGetBlobsV2AndBuildSidecars", () => { + it("should build column sidecars from execution engine blobs", () => { + // Test successful column sidecar building + }); + + it("should return empty array when execution engine returns no response", () => { + // Test when execution engine returns null/undefined + }); + + it("should handle execution engine errors", () => { + // Test execution engine failure scenarios + }); + + it("should correctly process cells and proofs", () => { + // Test getCellsAndProofs processing + }); + }); + + describe("fetchColumnsByRoot", () => { + it("should fetch column sidecars by root from network", () => { + // Test successful network column fetch + }); + + it("should handle network request failure", () => { + // Test network failure scenarios + }); + + it("should request correct column indices", () => { + // Test that correct missing columns are requested + }); + }); + + // describe("validateColumnSidecar", () => { + // it("should successfully validate column sidecar", () => { + // const columnSidecar = fuluBlockWithColumns.columnSidecars[0]; + // const testBlockRoot = fuluBlockWithColumns.blockRoot; + + // // This should not throw + // expect(() => { + // validateColumnSidecar({ + // config, + // peerIdStr, + // blockRoot: testBlockRoot, + // columnSidecar, + // }); + // }).not.toThrow(); + // }); + + // it("should throw error for mismatched block root in column header", () => { + // const columnSidecar = fuluBlockWithColumns.columnSidecars[0]; + // const wrongBlockRoot = new Uint8Array(32).fill(1); // Different block root + + // expect(() => { + // validateColumnSidecar({ + // config, + // peerIdStr, + // blockRoot: wrongBlockRoot, + // columnSidecar, + // }); + // }).toThrow(DownloadByRootError); + + // try { + // validateColumnSidecar({ + // config, + // peerIdStr, + // blockRoot: wrongBlockRoot, + // columnSidecar, + // }); + // } catch (error) { + // expect(error).toBeInstanceOf(DownloadByRootError); + // expect((error as DownloadByRootError).type.code).toBe(DownloadByRootErrorCode.MISMATCH_BLOCK_ROOT); + // expect((error as DownloadByRootError).type.peer).toBe(peerIdStr); + // expect((error as DownloadByRootError).type.requestedBlockRoot).toBe(prettyBytes(wrongBlockRoot)); + // } + // }); + + // it("should throw error for invalid inclusion proof", () => { + // const columnSidecar = ssz.fulu.DataColumnSidecar.clone(fuluBlockWithColumns.columnSidecars[0]); + // // Corrupt the inclusion proof to make it invalid + // columnSidecar.kzgCommitmentsInclusionProof[0] = new Uint8Array(32).fill(255); + + // expect(() => { + // validateColumnSidecar({ + // config, + // peerIdStr, + // blockRoot: fuluBlockWithColumns.blockRoot, + // columnSidecar, + // }); + // }).toThrow(DownloadByRootError); + + // try { + // validateColumnSidecar({ + // config, + // peerIdStr, + // blockRoot: fuluBlockWithColumns.blockRoot, + // columnSidecar, + // }); + // } catch (error) { + // expect(error).toBeInstanceOf(DownloadByRootError); + // expect((error as 
DownloadByRootError).type.code).toBe(DownloadByRootErrorCode.INVALID_INCLUSION_PROOF); + // expect((error as DownloadByRootError).type.peer).toBe(peerIdStr); + // expect((error as DownloadByRootError).type.blockRoot).toBe(prettyBytes(fuluBlockWithColumns.blockRoot)); + // expect((error as DownloadByRootError).type.sidecarIndex).toBe(columnSidecar.index); + // } + // }); + // }); + + describe("validateColumnSidecars", () => { + it("should successfully validate all needed column sidecars", () => { + // Test successful validation of needed columns + }); + + it("should successfully validate needed and publish columns", () => { + // Test validation with both needed and needToPublish columns + }); + + it("should throw error for extra unrequested column sidecar", () => { + // Test EXTRA_SIDECAR_RECEIVED error for columns + }); + + it("should throw error for invalid KZG proofs", () => { + // Test INVALID_KZG_PROOF error for columns + }); + + it("should validate individual column sidecars correctly", () => { + // Test individual column validation within the batch + }); + + it("should handle empty needToPublish array", () => { + // Test when needToPublish is empty or not provided + }); + + it("should avoid duplicate validation for columns in both arrays", () => { + // Test that columns present in both needed and needToPublish are not validated twice + }); + }); + + describe("fetchAndValidateColumns", () => { + it("should fetch columns from execution engine and validate", () => { + // Test successful fetch from execution engine + }); + + it("should fetch columns from network when execution engine returns empty", () => { + // Test fallback to network when execution engine fails + }); + + it("should publish reconstructed columns to network", () => { + // Test column publishing after reconstruction + }); + + it("should filter needed columns from reconstructed set", () => { + // Test that only needed columns are returned + }); + + it("should handle publishing errors gracefully", () => { + // Test that publishing errors don't fail the main operation + }); + + it("should validate columns correctly in both scenarios", () => { + // Test validation works for both execution engine and network paths + }); + + it("should determine correct columns to publish based on custody config", () => { + // Test needToPublish logic with custody configuration + }); + }); + + describe("DownloadByRootError", () => { + it("should create error with MISMATCH_BLOCK_ROOT code", () => { + const error = new DownloadByRootError({ + code: DownloadByRootErrorCode.MISMATCH_BLOCK_ROOT, + peer: peerIdStr, + requestedBlockRoot: prettyBytes(blockRoot), + receivedBlockRoot: prettyBytes(new Uint8Array(32).fill(1)), + }); + + expect(error).toBeInstanceOf(DownloadByRootError); + expect(error.type.code).toBe(DownloadByRootErrorCode.MISMATCH_BLOCK_ROOT); + expect(error.type.peer).toBe(peerIdStr); + expect(error.type.requestedBlockRoot).toBe(prettyBytes(blockRoot)); + expect(error.type.receivedBlockRoot).toBe(prettyBytes(new Uint8Array(32).fill(1))); + }); + + it("should create error with EXTRA_SIDECAR_RECEIVED code", () => { + const error = new DownloadByRootError({ + code: DownloadByRootErrorCode.EXTRA_SIDECAR_RECEIVED, + peer: peerIdStr, + blockRoot: prettyBytes(blockRoot), + invalidIndex: 5, + }); + + expect(error).toBeInstanceOf(DownloadByRootError); + expect(error.type.code).toBe(DownloadByRootErrorCode.EXTRA_SIDECAR_RECEIVED); + expect(error.type.peer).toBe(peerIdStr); + expect(error.type.blockRoot).toBe(prettyBytes(blockRoot)); + 
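+      // DownloadByRootError follows the LodestarError pattern used across the codebase:
+      // the typed object passed to the constructor is exposed as error.type, which is what
+      // the assertions in this describe block inspect (the code plus its per-code metadata).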
expect(error.type.invalidIndex).toBe(5); + }); + + it("should create error with INVALID_INCLUSION_PROOF code", () => { + const error = new DownloadByRootError({ + code: DownloadByRootErrorCode.INVALID_INCLUSION_PROOF, + peer: peerIdStr, + blockRoot: prettyBytes(blockRoot), + sidecarIndex: 2, + }); + + expect(error).toBeInstanceOf(DownloadByRootError); + expect(error.type.code).toBe(DownloadByRootErrorCode.INVALID_INCLUSION_PROOF); + expect(error.type.peer).toBe(peerIdStr); + expect(error.type.blockRoot).toBe(prettyBytes(blockRoot)); + expect(error.type.sidecarIndex).toBe(2); + }); + + it("should create error with INVALID_KZG_PROOF code", () => { + const error = new DownloadByRootError({ + code: DownloadByRootErrorCode.INVALID_KZG_PROOF, + peer: peerIdStr, + blockRoot: prettyBytes(blockRoot), + }); + + expect(error).toBeInstanceOf(DownloadByRootError); + expect(error.type.code).toBe(DownloadByRootErrorCode.INVALID_KZG_PROOF); + expect(error.type.peer).toBe(peerIdStr); + expect(error.type.blockRoot).toBe(prettyBytes(blockRoot)); + }); + + it("should create error with MISSING_BLOCK_RESPONSE code", () => { + const error = new DownloadByRootError({ + code: DownloadByRootErrorCode.MISSING_BLOCK_RESPONSE, + peer: peerIdStr, + blockRoot: prettyBytes(blockRoot), + }); + + expect(error).toBeInstanceOf(DownloadByRootError); + expect(error.type.code).toBe(DownloadByRootErrorCode.MISSING_BLOCK_RESPONSE); + expect(error.type.peer).toBe(peerIdStr); + expect(error.type.blockRoot).toBe(prettyBytes(blockRoot)); + }); + + it("should create error with MISSING_BLOB_RESPONSE code", () => { + const error = new DownloadByRootError({ + code: DownloadByRootErrorCode.MISSING_BLOB_RESPONSE, + peer: peerIdStr, + blockRoot: prettyBytes(blockRoot), + }); + + expect(error).toBeInstanceOf(DownloadByRootError); + expect(error.type.code).toBe(DownloadByRootErrorCode.MISSING_BLOB_RESPONSE); + expect(error.type.peer).toBe(peerIdStr); + expect(error.type.blockRoot).toBe(prettyBytes(blockRoot)); + }); + + it("should create error with MISSING_COLUMN_RESPONSE code", () => { + const error = new DownloadByRootError({ + code: DownloadByRootErrorCode.MISSING_COLUMN_RESPONSE, + peer: peerIdStr, + blockRoot: prettyBytes(blockRoot), + }); + + expect(error).toBeInstanceOf(DownloadByRootError); + expect(error.type.code).toBe(DownloadByRootErrorCode.MISSING_COLUMN_RESPONSE); + expect(error.type.peer).toBe(peerIdStr); + expect(error.type.blockRoot).toBe(prettyBytes(blockRoot)); + }); + + it("should include correct error details in error object", () => { + const errorData = { + code: DownloadByRootErrorCode.MISMATCH_BLOCK_ROOT, + peer: peerIdStr, + requestedBlockRoot: prettyBytes(blockRoot), + receivedBlockRoot: prettyBytes(new Uint8Array(32).fill(1)), + }; + const error = new DownloadByRootError(errorData); + + expect(error.type).toEqual(errorData); + expect(Object.keys(error.type)).toEqual(Object.keys(errorData)); + }); + }); +}); From cbda219a0748246bb78d878b7d0298c28dd06933 Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Mon, 25 Aug 2025 23:45:55 +0700 Subject: [PATCH 058/173] fix: debug blocksAndData test utils --- packages/beacon-node/src/util/blobs.ts | 8 +++++-- packages/beacon-node/src/util/dataColumns.ts | 4 ++-- .../test/spec/presets/fork_choice.test.ts | 8 +++++-- .../beacon-node/test/utils/blocksAndData.ts | 24 ++++++++++++------- 4 files changed, 29 insertions(+), 15 deletions(-) diff --git a/packages/beacon-node/src/util/blobs.ts b/packages/beacon-node/src/util/blobs.ts index 245771780135..d34a699c9cb5 100644 --- 
a/packages/beacon-node/src/util/blobs.ts +++ b/packages/beacon-node/src/util/blobs.ts @@ -25,7 +25,7 @@ export function kzgCommitmentToVersionedHash(kzgCommitment: deneb.KZGCommitment) return hash; } -export function computeInclusionProof( +export function computePreFuluKzgCommitmentsInclusionProof( fork: ForkName, body: BeaconBlockBody, index: number @@ -56,7 +56,11 @@ export function getBlobSidecars( return blobKzgCommitments.map((kzgCommitment, index) => { const blob = blobs[index]; const kzgProof = proofs[index]; - const kzgCommitmentInclusionProof = computeInclusionProof(fork, signedBlock.message.body, index); + const kzgCommitmentInclusionProof = computePreFuluKzgCommitmentsInclusionProof( + fork, + signedBlock.message.body, + index + ); return {index, blob, kzgCommitment, kzgProof, signedBlockHeader, kzgCommitmentInclusionProof}; }); diff --git a/packages/beacon-node/src/util/dataColumns.ts b/packages/beacon-node/src/util/dataColumns.ts index 42396d4e51f2..337f80af3ca6 100644 --- a/packages/beacon-node/src/util/dataColumns.ts +++ b/packages/beacon-node/src/util/dataColumns.ts @@ -217,7 +217,7 @@ export function getCustodyGroups(config: ChainForkConfig, nodeId: NodeId, custod return custodyGroups; } -export function computeKzgCommitmentsInclusionProof( +export function computePostFuluKzgCommitmentsInclusionProof( fork: ForkName, body: BeaconBlockBody ): fulu.KzgCommitmentsInclusionProof { @@ -302,7 +302,7 @@ export function getDataColumnSidecarsFromBlock( const fork = config.getForkName(signedBlock.message.slot); const signedBlockHeader = signedBlockToSignedHeader(config, signedBlock); - const kzgCommitmentsInclusionProof = computeKzgCommitmentsInclusionProof(fork, signedBlock.message.body); + const kzgCommitmentsInclusionProof = computePostFuluKzgCommitmentsInclusionProof(fork, signedBlock.message.body); return getDataColumnSidecars(signedBlockHeader, blobKzgCommitments, kzgCommitmentsInclusionProof, cellsAndKzgProofs); } diff --git a/packages/beacon-node/test/spec/presets/fork_choice.test.ts b/packages/beacon-node/test/spec/presets/fork_choice.test.ts index 21293221ed06..7d4a16d04efc 100644 --- a/packages/beacon-node/test/spec/presets/fork_choice.test.ts +++ b/packages/beacon-node/test/spec/presets/fork_choice.test.ts @@ -40,7 +40,7 @@ import {PowMergeBlock} from "../../../src/eth1/interface.js"; import {ExecutionPayloadStatus} from "../../../src/execution/engine/interface.js"; import {ExecutionEngineMockBackend} from "../../../src/execution/engine/mock.js"; import {getExecutionEngineFromBackend} from "../../../src/execution/index.js"; -import {computeInclusionProof} from "../../../src/util/blobs.js"; +import {computePreFuluKzgCommitmentsInclusionProof} from "../../../src/util/blobs.js"; import {ClockEvent} from "../../../src/util/clock.js"; import {ClockStopped} from "../../mocks/clock.js"; import {getMockedBeaconDb} from "../../mocks/mockedBeaconDb.js"; @@ -276,7 +276,11 @@ const forkChoiceTest = kzgCommitment: commitments[index], kzgProof: (proofs ?? 
[])[index], signedBlockHeader: signedBlockToSignedHeader(config, signedBlock), - kzgCommitmentInclusionProof: computeInclusionProof(fork, signedBlock.message.body, index), + kzgCommitmentInclusionProof: computePreFuluKzgCommitmentsInclusionProof( + fork, + signedBlock.message.body, + index + ), }; }); diff --git a/packages/beacon-node/test/utils/blocksAndData.ts b/packages/beacon-node/test/utils/blocksAndData.ts index c77b4a56d957..cb6e357b0848 100644 --- a/packages/beacon-node/test/utils/blocksAndData.ts +++ b/packages/beacon-node/test/utils/blocksAndData.ts @@ -15,8 +15,12 @@ import {computeStartSlotAtEpoch, signedBlockToSignedHeader} from "@lodestar/stat import {SignedBeaconBlock, Slot, deneb, fulu, ssz} from "@lodestar/types"; import {toRootHex} from "@lodestar/utils"; import {computeNodeIdFromPrivateKey} from "../../src/network/subnets/index.js"; -import {computeInclusionProof, computeKzgCommitmentsInclusionProof} from "../../src/util/blobs.js"; -import {CustodyConfig, getDataColumnSidecarsFromBlock} from "../../src/util/dataColumns.js"; +import {computePreFuluKzgCommitmentsInclusionProof} from "../../src/util/blobs.js"; +import { + CustodyConfig, + computePostFuluKzgCommitmentsInclusionProof, + getDataColumnSidecarsFromBlock, +} from "../../src/util/dataColumns.js"; import {kzg} from "../../src/util/kzg.js"; import {ROOT_SIZE} from "../../src/util/sszBytes.js"; @@ -71,7 +75,7 @@ function generateRandomInt(min: number, max: number): number { return Math.floor(Math.random() * (max - min + 1)) + min; } function generateProposerIndex(min = 0, max = 100_000): number { - return generateRandomInt(max, min); + return generateRandomInt(min, max); } function generateBeaconBlock({ @@ -122,9 +126,13 @@ function generateBlobSidecars( const blobSidecar = ssz[forkName].BlobSidecar.defaultValue(); blobSidecar.index = index; blobSidecar.signedBlockHeader = signedBlockHeader; - blobSidecar.blob = generateRandomBlob(index); + blobSidecar.blob = generateRandomBlob(); blobSidecar.kzgCommitment = kzg.blobToKzgCommitment(blobSidecar.blob); - blobSidecar.kzgCommitmentInclusionProof = computeInclusionProof(forkName, block.message.body, index); + blobSidecar.kzgCommitmentInclusionProof = computePreFuluKzgCommitmentsInclusionProof( + forkName, + block.message.body, + index + ); blobSidecar.kzgProof = kzg.computeBlobKzgProof(blobSidecar.blob, blobSidecar.kzgCommitment); if (oomProtection) { @@ -160,9 +168,7 @@ function generateColumnSidecars( const signedBlockHeader = signedBlockToSignedHeader(config, block); const cellsAndProofs = blobs.map((blob) => kzg.computeCellsAndKzgProofs(blob)); - const kzgCommitmentsInclusionProof = Array.from({length: blobs.length}, () => - computeKzgCommitmentsInclusionProof(forkName, block.body) - ); + const kzgCommitmentsInclusionProof = computePostFuluKzgCommitmentsInclusionProof(forkName, block.message.body); const columnSidecars = Array.from({length: NUMBER_OF_COLUMNS}, (_, columnIndex) => { const column = oomProtection @@ -204,7 +210,7 @@ export function generateChainOfBlocks({ for (; slot < slot + count; slot++) { const block = generateBeaconBlock({forkName, parentRoot, slot}); const {blockRoot, rootHex} = generateRoots(forkName, block); - parentRoot = block.message.parentRoot; + parentRoot = blockRoot; blocks.push({ block, blockRoot, From 93222b59b7583a9d0a81d069104dda4a9fbb7b31 Mon Sep 17 00:00:00 2001 From: Cayman Date: Mon, 25 Aug 2025 17:20:28 -0400 Subject: [PATCH 059/173] chore: fix some check-type errors --- packages/beacon-node/src/sync/interface.ts | 2 +- 
packages/beacon-node/src/sync/range/batch.ts | 4 ++-- packages/beacon-node/src/sync/range/chain.ts | 3 +-- packages/beacon-node/src/sync/range/range.ts | 2 -- .../beacon-node/src/sync/utils/downloadByRange.ts | 2 +- .../beacon-node/src/sync/utils/downloadByRoot.ts | 14 ++------------ 6 files changed, 7 insertions(+), 20 deletions(-) diff --git a/packages/beacon-node/src/sync/interface.ts b/packages/beacon-node/src/sync/interface.ts index f9e544fe530f..546d457eb618 100644 --- a/packages/beacon-node/src/sync/interface.ts +++ b/packages/beacon-node/src/sync/interface.ts @@ -1,6 +1,6 @@ import {routes} from "@lodestar/api"; import {BeaconConfig} from "@lodestar/config"; -import {RootHex, Slot, phase0} from "@lodestar/types"; +import {Slot, phase0} from "@lodestar/types"; import {Logger} from "@lodestar/utils"; import {IBeaconChain} from "../chain/index.js"; import {IBeaconDb} from "../db/index.js"; diff --git a/packages/beacon-node/src/sync/range/batch.ts b/packages/beacon-node/src/sync/range/batch.ts index e697f8907762..b990079edbee 100644 --- a/packages/beacon-node/src/sync/range/batch.ts +++ b/packages/beacon-node/src/sync/range/batch.ts @@ -1,6 +1,6 @@ import {ChainForkConfig} from "@lodestar/config"; -import {ForkName, ForkSeq, isForkPostDeneb, isForkPostFulu} from "@lodestar/params"; -import {Epoch, RootHex, Slot, deneb, fulu, phase0} from "@lodestar/types"; +import {ForkName, isForkPostDeneb, isForkPostFulu} from "@lodestar/params"; +import {Epoch, RootHex, Slot, phase0} from "@lodestar/types"; import {LodestarError} from "@lodestar/utils"; import {isBlockInputColumns} from "../../chain/blocks/blockInput/blockInput.js"; import {IBlockInput} from "../../chain/blocks/blockInput/types.js"; diff --git a/packages/beacon-node/src/sync/range/chain.ts b/packages/beacon-node/src/sync/range/chain.ts index 987e6e4692ef..78f7a65d5b47 100644 --- a/packages/beacon-node/src/sync/range/chain.ts +++ b/packages/beacon-node/src/sync/range/chain.ts @@ -1,6 +1,5 @@ import {ChainForkConfig} from "@lodestar/config"; -import {ForkName, isForkPostFulu} from "@lodestar/params"; -import {Epoch, Root, Slot, phase0} from "@lodestar/types"; +import {Epoch, Root, Slot} from "@lodestar/types"; import {ErrorAborted, Logger, toRootHex} from "@lodestar/utils"; import {isBlockInputBlobs, isBlockInputColumns} from "../../chain/blocks/blockInput/blockInput.js"; import {IBlockInput} from "../../chain/blocks/blockInput/types.js"; diff --git a/packages/beacon-node/src/sync/range/range.ts b/packages/beacon-node/src/sync/range/range.ts index e4640e5b9f08..0335720bed07 100644 --- a/packages/beacon-node/src/sync/range/range.ts +++ b/packages/beacon-node/src/sync/range/range.ts @@ -4,7 +4,6 @@ import {computeStartSlotAtEpoch} from "@lodestar/state-transition"; import {Epoch, Status, fulu} from "@lodestar/types"; import {Logger, toRootHex} from "@lodestar/utils"; import {StrictEventEmitter} from "strict-event-emitter-types"; -import {BlockInputSource} from "../../chain/blocks/blockInput/types.js"; import {isDaOutOfRange} from "../../chain/blocks/blockInput/utils.js"; import {AttestationImportOpt, ImportBlockOpts} from "../../chain/blocks/index.js"; import {IBeaconChain} from "../../chain/index.js"; @@ -13,7 +12,6 @@ import {INetwork} from "../../network/index.js"; import {PeerIdStr} from "../../util/peerId.js"; import {cacheByRangeResponses, downloadByRange} from "../utils/downloadByRange.js"; import {RangeSyncType, getRangeSyncTarget, rangeSyncTypes} from "../utils/remoteSyncType.js"; -import {BatchStateAwaitingDownload} from 
"./batch.js"; import {ChainTarget, SyncChain, SyncChainDebugState, SyncChainFns} from "./chain.js"; import {updateChains} from "./utils/index.js"; diff --git a/packages/beacon-node/src/sync/utils/downloadByRange.ts b/packages/beacon-node/src/sync/utils/downloadByRange.ts index 84926581763c..8c931c2ebb75 100644 --- a/packages/beacon-node/src/sync/utils/downloadByRange.ts +++ b/packages/beacon-node/src/sync/utils/downloadByRange.ts @@ -1,6 +1,6 @@ import {ChainForkConfig} from "@lodestar/config"; import {ForkPostDeneb, isForkPostDeneb, isForkPostFulu} from "@lodestar/params"; -import {RootHex, SignedBeaconBlock, Slot, deneb, fulu, phase0} from "@lodestar/types"; +import {SignedBeaconBlock, Slot, deneb, fulu, phase0} from "@lodestar/types"; import {LodestarError, Logger, prettyBytes, prettyPrintIndices, toRootHex} from "@lodestar/utils"; import { BlockInputSource, diff --git a/packages/beacon-node/src/sync/utils/downloadByRoot.ts b/packages/beacon-node/src/sync/utils/downloadByRoot.ts index 99fe469ad22f..94264306df14 100644 --- a/packages/beacon-node/src/sync/utils/downloadByRoot.ts +++ b/packages/beacon-node/src/sync/utils/downloadByRoot.ts @@ -1,15 +1,7 @@ import {ChainForkConfig} from "@lodestar/config"; -import { - ForkName, - ForkPostDeneb, - ForkPostFulu, - ForkPreFulu, - NUMBER_OF_COLUMNS, - isForkPostDeneb, - isForkPostFulu, -} from "@lodestar/params"; +import {ForkPostDeneb, ForkPostFulu, ForkPreFulu, isForkPostDeneb, isForkPostFulu} from "@lodestar/params"; import {signedBlockToSignedHeader} from "@lodestar/state-transition"; -import {BlobIndex, ColumnIndex, RootHex, SignedBeaconBlock, deneb, fulu, phase0} from "@lodestar/types"; +import {SignedBeaconBlock, deneb, fulu} from "@lodestar/types"; import {LodestarError, fromHex, prettyBytes, toRootHex} from "@lodestar/utils"; import {isBlockInputBlobs, isBlockInputColumns} from "../../chain/blocks/blockInput/blockInput.js"; import {BlobMeta, BlockInputSource, IBlockInput, MissingColumnMeta} from "../../chain/blocks/blockInput/types.js"; @@ -25,9 +17,7 @@ import {prettyPrintPeerIdStr} from "../../network/util.js"; import {computeInclusionProof, kzgCommitmentToVersionedHash} from "../../util/blobs.js"; import {byteArrayEquals} from "../../util/bytes.js"; import {getCellsAndProofs, getDataColumnSidecarsFromBlock} from "../../util/dataColumns.js"; -import {kzg} from "../../util/kzg.js"; import {PeerIdStr} from "../../util/peerId.js"; -import {BlobSidecarsByRootRequest} from "../../util/types.js"; import { BlockInputSyncCacheItem, PendingBlockInput, From 6603a16afea540c1e76265aae81f95fca902e6ba Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Tue, 26 Aug 2025 19:20:06 +0700 Subject: [PATCH 060/173] test: partial testing of downloadByRoot with code changes from bugs found --- .../src/sync/utils/downloadByRoot.ts | 293 ++--- .../unit/sync/utils/downloadByRoot.test.ts | 1013 ++++++++++++----- .../beacon-node/test/utils/blocksAndData.ts | 76 +- 3 files changed, 939 insertions(+), 443 deletions(-) diff --git a/packages/beacon-node/src/sync/utils/downloadByRoot.ts b/packages/beacon-node/src/sync/utils/downloadByRoot.ts index 99fe469ad22f..bcc970dd59c0 100644 --- a/packages/beacon-node/src/sync/utils/downloadByRoot.ts +++ b/packages/beacon-node/src/sync/utils/downloadByRoot.ts @@ -22,7 +22,7 @@ import { import {IExecutionEngine} from "../../execution/index.js"; import {INetwork} from "../../network/interface.js"; import {prettyPrintPeerIdStr} from "../../network/util.js"; -import {computeInclusionProof, kzgCommitmentToVersionedHash} from 
"../../util/blobs.js"; +import {computePreFuluKzgCommitmentsInclusionProof, kzgCommitmentToVersionedHash} from "../../util/blobs.js"; import {byteArrayEquals} from "../../util/bytes.js"; import {getCellsAndProofs, getDataColumnSidecarsFromBlock} from "../../util/dataColumns.js"; import {kzg} from "../../util/kzg.js"; @@ -320,16 +320,18 @@ export async function fetchAndValidateBlobs({ // not all needed blobs were fetched via getBlobs, need to use ReqResp if (blobSidecars.length !== blobMeta.length) { - const networkResponse = await fetchBlobByRoot({ + const networkResponse = await fetchBlobsByRoot({ network, peerIdStr, - blockRoot, blobMeta, indicesInPossession: blobSidecars.map((b) => b.index), }); blobSidecars.push(...networkResponse); } + // responses can be sparse for both types of requests to sort to make sure its in sequential order + blobSidecars.sort((a, b) => a.index - b.index); + await validateBlobs({config, peerIdStr, blockRoot, blobMeta, blobSidecars}); return blobSidecars; @@ -352,42 +354,46 @@ export async function fetchGetBlobsV1AndBuildSidecars({ blobMeta.map(({versionedHash: versionHash}) => versionHash) ); - if (enginedResponse.length > 0) { - // response.length should always match blobMeta.length and they should be in the same order - for (let i = 0; i < blobMeta.length; i++) { - const blobAndProof = enginedResponse[i]; - if (blobAndProof) { - const {blob, proof} = blobAndProof; - const index = blobMeta[i].index; - const kzgCommitment = block.message.body.blobKzgCommitments[i]; - const sidecar: deneb.BlobSidecar = { - index, - blob, - kzgProof: proof, - kzgCommitment, - kzgCommitmentInclusionProof: computeInclusionProof(forkName, block.message.body, index), - signedBlockHeader: signedBlockToSignedHeader(config, block), - }; - blobSidecars.push(sidecar); - } + if (!enginedResponse.length) { + return blobSidecars; + } + + // response.length should always match blobMeta.length and they should be in the same order + for (let i = 0; i < blobMeta.length; i++) { + const blobAndProof = enginedResponse[i]; + if (blobAndProof) { + const {blob, proof} = blobAndProof; + const index = blobMeta[i].index; + const kzgCommitment = block.message.body.blobKzgCommitments[i]; + const sidecar: deneb.BlobSidecar = { + index, + blob, + kzgProof: proof, + kzgCommitment, + kzgCommitmentInclusionProof: computePreFuluKzgCommitmentsInclusionProof(forkName, block.message.body, index), + signedBlockHeader: signedBlockToSignedHeader(config, block), + }; + blobSidecars.push(sidecar); } } return blobSidecars; } -export async function fetchBlobByRoot({ +export async function fetchBlobsByRoot({ network, peerIdStr, - blockRoot, blobMeta, - indicesInPossession, -}: Pick & { - indicesInPossession: number[]; + indicesInPossession = [], +}: Pick & { + indicesInPossession?: number[]; }): Promise { const blobsRequest = blobMeta .filter(({index}) => !indicesInPossession.includes(index)) - .map(({index}) => ({blockRoot, index})); + .map(({blockRoot, index}) => ({blockRoot, index})); + if (!blobsRequest.length) { + return []; + } return await network.sendBlobSidecarsByRoot(peerIdStr, blobsRequest); } @@ -417,25 +423,28 @@ export async function validateBlobs({ const headerRoot = config .getForkTypes(blobSidecar.signedBlockHeader.message.slot) .BeaconBlockHeader.hashTreeRoot(blobSidecar.signedBlockHeader.message); - if (byteArrayEquals(blockRoot, headerRoot)) { + if (!byteArrayEquals(blockRoot, headerRoot)) { throw new DownloadByRootError( { code: DownloadByRootErrorCode.MISMATCH_BLOCK_ROOT, peer: 
prettyPrintPeerIdStr(peerIdStr),
          requestedBlockRoot: prettyBytes(blockRoot),
-          receivedBlockRoot: prettyBytes(toRootHex(headerRoot)),
+          receivedBlockRoot: prettyBytes(headerRoot),
        },
-        `blobSidecar.signedBlockHeader not match requested blockRoot for index=${blobSidecar.index}`
+        `blobSidecar header root did not match requested blockRoot for index=${blobSidecar.index}`
      );
    }

    if (!validateBlobSidecarInclusionProof(blobSidecar)) {
-      throw new DownloadByRootError({
-        code: DownloadByRootErrorCode.INVALID_INCLUSION_PROOF,
-        peer: prettyPrintPeerIdStr(peerIdStr),
-        blockRoot: prettyBytes(blockRoot),
-        sidecarIndex: blobSidecar.index,
-      });
+      throw new DownloadByRootError(
+        {
+          code: DownloadByRootErrorCode.INVALID_INCLUSION_PROOF,
+          peer: prettyPrintPeerIdStr(peerIdStr),
+          blockRoot: prettyBytes(blockRoot),
+          sidecarIndex: blobSidecar.index,
+        },
+        `invalid inclusion proof for blobSidecar at index=${blobSidecar.index}`
+      );
    }
  }
@@ -454,6 +463,80 @@
+export async function fetchAndValidateColumns({
+  config,
+  network,
+  executionEngine,
+  forkName,
+  peerIdStr,
+  block,
+  blockRoot,
+  columnMeta,
+}: FetchByRootAndValidateColumnsProps): Promise<fulu.DataColumnSidecars> {
+  let columnSidecars: fulu.DataColumnSidecars = [];
+  try {
+    columnSidecars = await fetchGetBlobsV2AndBuildSidecars({
+      config,
+      executionEngine,
+      forkName,
+      block,
+      columnMeta,
+    });
+  } catch (err) {
+    network.logger.error(
+      `error building columnSidecars for blockRoot=${prettyBytes(blockRoot)} via getBlobsV2`,
+      {},
+      err
+    );
+  }
+
+  if (columnSidecars.length) {
+    // limit reconstructed to only the ones we need
+    const needed = columnSidecars.filter((c) => columnMeta.missing.includes(c.index));
+    // spec states that reconstructed sidecars need to be published to the network, but only requires
+    // publishing the ones that we custody and have not already been published.
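+    // As a worked example of the filtering below (the indices are assumed for
+    // illustration, not taken from the code): with custodyColumns = [2, 9, 17] and
+    // columnMeta.missing = [9, 17], column 2 already arrived via gossip and was
+    // published, so alreadyPublished -> [2] and needToPublish -> the reconstructed
+    // sidecars with indices 9 and 17; only custodied columns reconstructed here get
+    // republished.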
+ const alreadyPublished = network.custodyConfig.custodyColumns.filter( + (index) => !columnMeta.missing.includes(index) + ); + const needToPublish = columnSidecars.filter( + (c) => network.custodyConfig.custodyColumns.includes(c.index) && !alreadyPublished.includes(c.index) + ); + // need to validate the ones we sample and will process + await validateColumnSidecars({ + config, + peerIdStr, + blockRoot, + columnMeta, + needed, + needToPublish, + }); + needToPublish.map((column) => + network.publishDataColumnSidecar(column).catch((err) => + network.logger.error( + "Error publishing column after getBlobsV2 reconstruct", + { + index: column.index, + blockRoot: prettyBytes(blockRoot), + }, + err + ) + ) + ); + return needed; + } + + columnSidecars = await network.sendDataColumnSidecarsByRoot(peerIdStr, [{blockRoot, columns: columnMeta.missing}]); + await validateColumnSidecars({ + config, + peerIdStr, + blockRoot, + columnMeta, + needed: columnSidecars, + }); + + return columnSidecars; +} + export async function fetchGetBlobsV2AndBuildSidecars({ config, executionEngine, @@ -478,22 +561,24 @@ export async function fetchColumnsByRoot({ peerIdStr, blockRoot, columnMeta, -}: FetchByRootAndValidateColumnsProps): Promise { +}: Pick< + FetchByRootAndValidateColumnsProps, + "network" | "peerIdStr" | "blockRoot" | "columnMeta" +>): Promise { return await network.sendDataColumnSidecarsByRoot(peerIdStr, [{blockRoot, columns: columnMeta.missing}]); } -export function validateColumnSidecar({ - config, - peerIdStr, - blockRoot, - columnSidecar, -}: Pick & { +export type ValidateColumnSidecarProps = Pick< + FetchByRootAndValidateColumnsProps, + "config" | "peerIdStr" | "blockRoot" +> & { columnSidecar: fulu.DataColumnSidecar; -}): void { +}; +export function validateColumnSidecar({config, peerIdStr, blockRoot, columnSidecar}: ValidateColumnSidecarProps): void { const headerRoot = config .getForkTypes(columnSidecar.signedBlockHeader.message.slot) .BeaconBlockHeader.hashTreeRoot(columnSidecar.signedBlockHeader.message); - if (byteArrayEquals(blockRoot, headerRoot)) { + if (!byteArrayEquals(blockRoot, headerRoot)) { throw new DownloadByRootError( { code: DownloadByRootErrorCode.MISMATCH_BLOCK_ROOT, @@ -515,17 +600,24 @@ export function validateColumnSidecar({ } } +export type ValidateColumnSidecarsProps = Pick< + FetchByRootAndValidateColumnsProps, + "config" | "peerIdStr" | "blockRoot" | "columnMeta" +> & { + needed?: fulu.DataColumnSidecars; + needToPublish?: fulu.DataColumnSidecars; + /* should only be used for testing purposes */ + validateFn?: (props: ValidateColumnSidecarProps) => void; +}; export async function validateColumnSidecars({ config, peerIdStr, blockRoot, columnMeta, - needed, + needed = [], needToPublish = [], -}: Pick & { - needed: fulu.DataColumnSidecars; - needToPublish?: fulu.DataColumnSidecars; -}): Promise { + validateFn = validateColumnSidecar, +}: ValidateColumnSidecarsProps): Promise { const requestedIndices = columnMeta.missing; for (const columnSidecar of needed) { if (!requestedIndices.includes(columnSidecar.index)) { @@ -536,28 +628,38 @@ export async function validateColumnSidecars({ blockRoot: prettyBytes(blockRoot), invalidIndex: columnSidecar.index, }, - "received a columnSidecar that was not requested" + "Received a columnSidecar that was not requested" ); } - validateColumnSidecar({ - config, - peerIdStr, - blockRoot, - columnSidecar, - }); + try { + validateFn({ + config, + peerIdStr, + blockRoot, + columnSidecar, + }); + } catch (err) { + err.message = `Error validating 
needed columnSidecar index=${columnSidecar.index}. Validation error: ${err.message}`; + throw err; + } } const checkedIndices = needed.map((c) => c.index); const needToCheckProof: fulu.DataColumnSidecars = []; for (const columnSidecar of needToPublish) { if (!checkedIndices.includes(columnSidecar.index)) { - validateColumnSidecar({ - config, - peerIdStr, - blockRoot, - columnSidecar, - }); + try { + validateFn({ + config, + peerIdStr, + blockRoot, + columnSidecar, + }); + } catch (err) { + err.message = `Error validating needToPublish columnSidecar index=${columnSidecar.index}. Validation error: ${err.message}`; + throw err; + } needToCheckProof.push(columnSidecar); } } @@ -580,71 +682,6 @@ export async function validateColumnSidecars({ } } -export async function fetchAndValidateColumns({ - config, - network, - executionEngine, - forkName, - peerIdStr, - block, - blockRoot, - columnMeta, -}: FetchByRootAndValidateColumnsProps): Promise { - let columnSidecars = await fetchGetBlobsV2AndBuildSidecars({ - config, - executionEngine, - forkName, - block, - columnMeta, - }); - - if (columnSidecars.length) { - // limit reconstructed to only the ones we need - const needed = columnSidecars.filter((c) => columnMeta.missing.includes(c.index)); - // spec states that reconstructed sidecars need to be published to the network, but only requires - // publishing the ones that we custody and have not already been published. - const alreadyPublished = network.custodyConfig.custodyColumns.filter( - (index) => !columnMeta.missing.includes(index) - ); - const needToPublish = columnSidecars.filter( - (c) => network.custodyConfig.custodyColumns.includes(c.index) && !alreadyPublished.includes(c.index) - ); - // need to validate the ones we sample and will process - await validateColumnSidecars({ - config, - peerIdStr, - blockRoot, - columnMeta, - needed, - needToPublish, - }); - needToPublish.map((column) => - network.publishDataColumnSidecar(column).catch((err) => - network.logger.error( - "Error publishing column after getBlobsV2 reconstruct", - { - index: column.index, - blockRoot: prettyBytes(blockRoot), - }, - err - ) - ) - ); - return needed; - } - - columnSidecars = await network.sendDataColumnSidecarsByRoot(peerIdStr, [{blockRoot, columns: columnMeta.missing}]); - await validateColumnSidecars({ - config, - peerIdStr, - blockRoot, - columnMeta, - needed: columnSidecars, - }); - - return columnSidecars; -} - export enum DownloadByRootErrorCode { MISMATCH_BLOCK_ROOT = "DOWNLOAD_BY_ROOT_ERROR_MISMATCH_BLOCK_ROOT", EXTRA_SIDECAR_RECEIVED = "DOWNLOAD_BY_ROOT_ERROR_EXTRA_SIDECAR_RECEIVED", diff --git a/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts b/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts index 6c5cdf70557c..14fa58a4c2b3 100644 --- a/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts +++ b/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts @@ -1,21 +1,29 @@ -import {ForkName} from "@lodestar/params"; +import {randomBytes} from "node:crypto"; +import {BYTES_PER_CELL, BYTES_PER_COMMITMENT, BYTES_PER_PROOF} from "@crate-crypto/node-eth-kzg"; +import {ForkName, NUMBER_OF_COLUMNS} from "@lodestar/params"; import {SignedBeaconBlock, deneb, fulu, ssz} from "@lodestar/types"; import {fromHex, prettyBytes, toRootHex} from "@lodestar/utils"; -import {Mock, beforeAll, beforeEach, describe, expect, it, vi} from "vitest"; -import {BlockInputSource, IBlockInput} from "../../../../src/chain/blocks/blockInput/types.js"; +import {Mock, afterAll, afterEach, 
beforeAll, beforeEach, describe, expect, it, vi} from "vitest"; +import { + BlobMeta, + BlockInputSource, + IBlockInput, + MissingColumnMeta, +} from "../../../../src/chain/blocks/blockInput/types.js"; import {ChainEventEmitter} from "../../../../src/chain/index.js"; import {SeenBlockInput} from "../../../../src/chain/seenCache/seenGossipBlockInput.js"; import {IExecutionEngine} from "../../../../src/execution/index.js"; -import {INetwork} from "../../../../src/network/index.js"; +import {INetwork, prettyPrintPeerIdStr} from "../../../../src/network/index.js"; import {BlockInputSyncCacheItem, PendingBlockInput, PendingBlockInputStatus} from "../../../../src/sync/types.js"; import { DownloadByRootError, DownloadByRootErrorCode, + ValidateColumnSidecarsProps, downloadByRoot, fetchAndValidateBlobs, fetchAndValidateBlock, fetchAndValidateColumns, - fetchBlobByRoot, + fetchBlobsByRoot, fetchByRoot, fetchColumnsByRoot, fetchGetBlobsV1AndBuildSidecars, @@ -26,6 +34,8 @@ import { } from "../../../../src/sync/utils/downloadByRoot.js"; import {kzgCommitmentToVersionedHash} from "../../../../src/util/blobs.js"; import {Clock} from "../../../../src/util/clock.js"; +import {kzg} from "../../../../src/util/kzg.js"; +import {ROOT_SIZE} from "../../../../src/util/sszBytes.js"; import {getMockedLogger} from "../../../../test/mocks/loggerMock.js"; import { config, @@ -37,28 +47,27 @@ import { } from "../../../utils/blocksAndData.js"; describe("downloadByRoot.ts", () => { - const peerIdStr = "0x1234567890abcdef"; + const peerIdStr = "1234567890abcdef1234567890abcdef"; + const prettyPeerIdStr = prettyPrintPeerIdStr(peerIdStr); + let network: INetwork; // let cache: SeenBlockInput; - // let network: INetwork; - // let executionEngine: IExecutionEngine; + let executionEngine: IExecutionEngine; const logger = getMockedLogger(); // Test data // let capellaBlock: SignedBeaconBlock; - let denebBlockWithBlobs: ReturnType; - let fuluBlockWithColumns: ReturnType; - let blockRoot: Uint8Array; + // let denebBlockWithBlobs: ReturnType; + // let fuluBlockWithColumns: ReturnType; + // let blockRoot: Uint8Array; // let rootHex: string; beforeAll(() => { // Generate test blocks // const capellaBlocks = generateChainOfBlocks({forkName: ForkName.capella, count: 1}); // capellaBlock = capellaBlocks[0].block; - - denebBlockWithBlobs = generateBlockWithBlobSidecars({forkName: ForkName.deneb}); - fuluBlockWithColumns = generateBlockWithColumnSidecars({forkName: ForkName.fulu}); - - blockRoot = denebBlockWithBlobs.blockRoot; + // denebBlockWithBlobs = generateBlockWithBlobSidecars({forkName: ForkName.deneb}); + // fuluBlockWithColumns = generateBlockWithColumnSidecars({forkName: ForkName.fulu}); + // blockRoot = denebBlockWithBlobs.blockRoot; // rootHex = denebBlockWithBlobs.rootHex; }); @@ -87,129 +96,129 @@ describe("downloadByRoot.ts", () => { // } as unknown as IExecutionEngine; }); - describe("downloadByRoot", () => { - it("should successfully download block with blobs for post-Deneb fork", () => { - // Test downloading a block with blob sidecars in post-Deneb fork - }); + // describe("downloadByRoot", () => { + // it("should successfully download block with blobs for post-Deneb fork", () => { + // // Test downloading a block with blob sidecars in post-Deneb fork + // }); - it("should successfully download block with columns for post-Fulu fork", () => { - // Test downloading a block with column sidecars in post-Fulu fork - }); + // it("should successfully download block with columns for post-Fulu fork", () => { + // // Test 
downloading a block with column sidecars in post-Fulu fork + // }); - it("should successfully download block without additional data for pre-Deneb fork", () => { - // Test downloading a simple block in pre-Deneb fork - }); + // it("should successfully download block without additional data for pre-Deneb fork", () => { + // // Test downloading a simple block in pre-Deneb fork + // }); - it("should handle pending block input that already has block", () => { - // Test case where cacheItem is PendingBlockInput and already has the block - }); + // it("should handle pending block input that already has block", () => { + // // Test case where cacheItem is PendingBlockInput and already has the block + // }); - it("should handle pending block input that needs block and data", () => { - // Test case where cacheItem is PendingBlockInput but missing block and data - }); + // it("should handle pending block input that needs block and data", () => { + // // Test case where cacheItem is PendingBlockInput but missing block and data + // }); - it("should handle non-pending cache item", () => { - // Test case where cacheItem is not PendingBlockInput - }); + // it("should handle non-pending cache item", () => { + // // Test case where cacheItem is not PendingBlockInput + // }); - it("should throw error when blob sidecars are missing for blob input", () => { - // Test MISSING_BLOB_RESPONSE error - }); + // it("should throw error when blob sidecars are missing for blob input", () => { + // // Test MISSING_BLOB_RESPONSE error + // }); - it("should throw error when column sidecars are missing for column input", () => { - // Test MISSING_COLUMN_RESPONSE error - }); + // it("should throw error when column sidecars are missing for column input", () => { + // // Test MISSING_COLUMN_RESPONSE error + // }); - it("should return downloaded status when block has all data", () => { - // Test status is set to downloaded when blockInput.hasBlockAndAllData() returns true - }); + // it("should return downloaded status when block has all data", () => { + // // Test status is set to downloaded when blockInput.hasBlockAndAllData() returns true + // }); - it("should return pending status when block is missing data", () => { - // Test status is set to pending when blockInput.hasBlockAndAllData() returns false - }); - }); + // it("should return pending status when block is missing data", () => { + // // Test status is set to pending when blockInput.hasBlockAndAllData() returns false + // }); + // }); - describe("fetchByRoot", () => { - it("should fetch block and blobs for pending block input in post-Deneb fork", () => { - // Test fetching when cacheItem is PendingBlockInput and fork is post-Deneb - }); + // describe("fetchByRoot", () => { + // it("should fetch block and blobs for pending block input in post-Deneb fork", () => { + // // Test fetching when cacheItem is PendingBlockInput and fork is post-Deneb + // }); - it("should fetch block and columns for pending block input in post-Fulu fork", () => { - // Test fetching when cacheItem is PendingBlockInput and fork is post-Fulu - }); + // it("should fetch block and columns for pending block input in post-Fulu fork", () => { + // // Test fetching when cacheItem is PendingBlockInput and fork is post-Fulu + // }); - it("should use existing block from pending block input", () => { - // Test when cacheItem.blockInput.hasBlock() returns true - }); + // it("should use existing block from pending block input", () => { + // // Test when cacheItem.blockInput.hasBlock() returns true + // }); 
- it("should fetch new block when pending block input doesn't have block", () => { - // Test when cacheItem.blockInput.hasBlock() returns false - }); + // it("should fetch new block when pending block input doesn't have block", () => { + // // Test when cacheItem.blockInput.hasBlock() returns false + // }); - it("should skip data fetching when pending block input has all data", () => { - // Test when cacheItem.blockInput.hasAllData() returns true - }); + // it("should skip data fetching when pending block input has all data", () => { + // // Test when cacheItem.blockInput.hasAllData() returns true + // }); - it("should fetch blobs when pending block input is missing blob data", () => { - // Test blob fetching for incomplete blob input - }); + // it("should fetch blobs when pending block input is missing blob data", () => { + // // Test blob fetching for incomplete blob input + // }); - it("should fetch columns when pending block input is missing column data", () => { - // Test column fetching for incomplete column input - }); + // it("should fetch columns when pending block input is missing column data", () => { + // // Test column fetching for incomplete column input + // }); - it("should fetch block and blobs for non-pending cache item in post-Deneb fork", () => { - // Test fetching for non-PendingBlockInput in post-Deneb - }); + // it("should fetch block and blobs for non-pending cache item in post-Deneb fork", () => { + // // Test fetching for non-PendingBlockInput in post-Deneb + // }); - it("should fetch block and columns for non-pending cache item in post-Fulu fork", () => { - // Test fetching for non-PendingBlockInput in post-Fulu - }); + // it("should fetch block and columns for non-pending cache item in post-Fulu fork", () => { + // // Test fetching for non-PendingBlockInput in post-Fulu + // }); - it("should fetch only block for non-pending cache item in pre-Deneb fork", () => { - // Test fetching for non-PendingBlockInput in pre-Deneb - }); - }); + // it("should fetch only block for non-pending cache item in pre-Deneb fork", () => { + // // Test fetching for non-PendingBlockInput in pre-Deneb + // }); + // }); - describe("fetchAndValidateBlock", () => { - it("should successfully fetch and validate block with matching root", () => { - // Test successful block fetch and validation - }); + // describe("fetchAndValidateBlock", () => { + // it("should successfully fetch and validate block with matching root", () => { + // // Test successful block fetch and validation + // }); - it("should throw error when no block is returned from network", () => { - // Test MISSING_BLOCK_RESPONSE error - }); + // it("should throw error when no block is returned from network", () => { + // // Test MISSING_BLOCK_RESPONSE error + // }); - it("should throw error when block root doesn't match requested root", () => { - // Test MISMATCH_BLOCK_ROOT error - }); + // it("should throw error when block root doesn't match requested root", () => { + // // Test MISMATCH_BLOCK_ROOT error + // }); - it("should handle network request failure", () => { - // Test network failure scenarios - }); - }); + // it("should handle network request failure", () => { + // // Test network failure scenarios + // }); + // }); - describe("fetchAndValidateBlobs", () => { - it("should successfully fetch blobs from execution engine only", () => { - // Test when all blobs are available from execution engine - }); + // describe("fetchAndValidateBlobs", () => { + // it("should successfully fetch blobs from execution engine only", () => { 
+  //     // Test when all blobs are available from execution engine
+  //   });

-    it("should fetch remaining blobs from network when execution engine is incomplete", () => {
-      // Test when some blobs are from execution engine, others from network
-    });
+  //   it("should fetch remaining blobs from network when execution engine is incomplete", () => {
+  //     // Test when some blobs are from execution engine, others from network
+  //   });

-    it("should fetch all blobs from network when execution engine returns none", () => {
-      // Test when execution engine returns no blobs
-    });
+  //   it("should fetch all blobs from network when execution engine returns none", () => {
+  //     // Test when execution engine returns no blobs
+  //   });

-    it("should validate all fetched blobs successfully", () => {
-      // Test successful blob validation
-    });
+  //   it("should validate all fetched blobs successfully", () => {
+  //     // Test successful blob validation
+  //   });

-    it("should throw error when blob validation fails", () => {
-      // Test blob validation failure scenarios
-    });
-  });
+  //   it("should throw error when blob validation fails", () => {
+  //     // Test blob validation failure scenarios
+  //   });
+  // });

   describe("fetchGetBlobsV1AndBuildSidecars", () => {
     it("should build blob sidecars from execution engine response", () => {
@@ -227,224 +236,668 @@ describe("downloadByRoot.ts", () => {
     it("should correctly compute inclusion proofs for blob sidecars", () => {
       // Test inclusion proof computation
     });
-
-    it("should handle execution engine errors gracefully", () => {
-      // Test execution engine failure scenarios
-    });
   });

-  describe("fetchBlobByRoot", () => {
-    it("should fetch blob sidecars by root from network", () => {
-      // Test successful network blob fetch
-    });
-
-    it("should filter out blobs already in possession", () => {
-      // Test that only missing blobs are requested
-    });
-
-    it("should handle empty blob request when all blobs are in possession", () => {
-      // Test when indicesInPossession includes all needed blobs
+  describe("fetchBlobsByRoot", () => {
+    let denebBlockWithColumns: ReturnType<typeof generateBlockWithBlobSidecars>;
+    let blockRoot: Uint8Array;
+    let blobMeta: BlobMeta[];
+    beforeAll(() => {
+      denebBlockWithColumns = generateBlockWithBlobSidecars({forkName: ForkName.deneb, count: 6});
+      blockRoot = denebBlockWithColumns.blockRoot;
+      blobMeta = denebBlockWithColumns.blobSidecars.map((_, index) => ({blockRoot, index}) as BlobMeta);
+      network = {
+        sendBlobSidecarsByRoot: vi.fn(() => denebBlockWithColumns.blobSidecars),
+      } as unknown as INetwork;
+    });
+    afterAll(() => {
+      vi.resetAllMocks();
+    });
+
+    it("should fetch missing blobSidecars ByRoot from network", async () => {
+      const response = await fetchBlobsByRoot({
+        network,
+        peerIdStr,
+        blockRoot,
+        blobMeta,
+      });
+      expect(response).toEqual(denebBlockWithColumns.blobSidecars);
+      expect(network.sendBlobSidecarsByRoot).toHaveBeenCalledOnce();
+      expect(network.sendBlobSidecarsByRoot).toHaveBeenCalledWith(peerIdStr, blobMeta);
+    });
+
+    it("should filter out blobs already in possession", async () => {
+      await fetchBlobsByRoot({
+        network,
+        peerIdStr,
+        blockRoot,
+        blobMeta,
+        indicesInPossession: [0, denebBlockWithColumns.blobSidecars.at(-1)?.index],
+      });
+      expect(network.sendBlobSidecarsByRoot).toHaveBeenCalledOnce();
+      expect(network.sendBlobSidecarsByRoot).toHaveBeenCalledWith(peerIdStr, blobMeta.slice(1, -1));
     });

-    it("should handle network request failure", () => {
-      // Test network failure scenarios
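+    // When every requested index is already in possession there is nothing left to fetch,
+    // so no network request should be sent at all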
+    it("should handle empty blob request when all blobs are in possession", async () => {
+      const response = await fetchBlobsByRoot({
+        network,
+        peerIdStr,
+        blockRoot,
+        blobMeta,
+        indicesInPossession: blobMeta.map(({index}) => index),
+      });
+      expect(response).toEqual([]);
+      expect(network.sendBlobSidecarsByRoot).not.toHaveBeenCalled();
     });
   });

   describe("validateBlobs", () => {
-    it("should successfully validate all blob sidecars", () => {
-      // Test successful blob validation
+    let denebBlockWithBlobs: ReturnType<typeof generateBlockWithBlobSidecars>;
+    let blockRoot: Uint8Array;
+    let blobMeta: BlobMeta[];
+    let blobSidecars: deneb.BlobSidecars;
+
+    beforeAll(() => {
+      denebBlockWithBlobs = generateBlockWithBlobSidecars({forkName: ForkName.deneb});
+      blockRoot = denebBlockWithBlobs.blockRoot;
+      blobSidecars = denebBlockWithBlobs.blobSidecars;
+      blobMeta = blobSidecars.map((b) => ({index: b.index}) as BlobMeta);
+    });
+
+    it("should successfully validate all blobSidecars", async () => {
+      await expect(
+        validateBlobs({
+          config,
+          peerIdStr,
+          blockRoot,
+          blobMeta,
+          blobSidecars,
+        })
+      ).resolves.toBeUndefined();
+    });
+
+    it("should throw error for extra un-requested blobSidecar", async () => {
+      try {
+        await validateBlobs({
+          config,
+          peerIdStr,
+          blockRoot,
+          blobMeta: blobMeta.slice(0, -1),
+          blobSidecars,
+        });
+      } catch (err) {
+        expect(err).toBeInstanceOf(DownloadByRootError);
+        expect((err as DownloadByRootError).type.code).toBe(DownloadByRootErrorCode.EXTRA_SIDECAR_RECEIVED);
+        expect((err as DownloadByRootError).type.peer).toBe(prettyPeerIdStr);
+        expect((err as DownloadByRootError).type.blockRoot).toBe(prettyBytes(blockRoot));
+        expect((err as DownloadByRootError).type.invalidIndex).toBe(blobMeta.at(-1)?.index);
+        expect((err as DownloadByRootError).message).toBe("received a blobSidecar that was not requested");
+      }
+    });
+
+    it("should throw error for mismatched block root in blob header", async () => {
+      const requestedBlockRoot = new Uint8Array(ROOT_SIZE).fill(0xac);
+      const headerRoot = config
+        .getForkTypes(blobSidecars[0].signedBlockHeader.message.slot)
+        .BeaconBlockHeader.hashTreeRoot(blobSidecars[0].signedBlockHeader.message);
+      try {
+        await validateBlobs({
+          config,
+          peerIdStr,
+          blockRoot: requestedBlockRoot,
+          blobMeta,
+          blobSidecars,
+        });
+      } catch (err) {
+        expect(err).toBeInstanceOf(DownloadByRootError);
+        expect((err as DownloadByRootError).type.code).toBe(DownloadByRootErrorCode.MISMATCH_BLOCK_ROOT);
+        expect((err as DownloadByRootError).type.peer).toBe(prettyPeerIdStr);
+        expect((err as DownloadByRootError).type.requestedBlockRoot).toBe(prettyBytes(requestedBlockRoot));
+        expect((err as DownloadByRootError).type.receivedBlockRoot).toBe(prettyBytes(headerRoot));
+        expect(err.message).toEqual("blobSidecar header root did not match requested blockRoot for index=0");
+      }
+    });
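+    // The kzgCommitmentInclusionProof is the Merkle witness tying the commitment into the
+    // block body, so corrupting any node of it must fail verification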
+    it("should throw error for invalid inclusion proof", async () => {
+      const invalidBlobSidecar = ssz.deneb.BlobSidecar.clone(denebBlockWithBlobs.blobSidecars[0]);
+      // Corrupt the inclusion proof to make it invalid
+      invalidBlobSidecar.kzgCommitmentInclusionProof[0] = new Uint8Array(32).fill(255);
+
+      try {
+        await validateBlobs({
+          config,
+          peerIdStr,
+          blockRoot,
+          blobMeta: [blobMeta[0]],
+          blobSidecars: [invalidBlobSidecar],
+        });
+      } catch (err) {
+        expect(err).toBeInstanceOf(DownloadByRootError);
+        expect((err as DownloadByRootError).type.code).toBe(DownloadByRootErrorCode.INVALID_INCLUSION_PROOF);
+        expect((err as DownloadByRootError).type.peer).toBe(prettyPeerIdStr);
+        expect((err as DownloadByRootError).type.blockRoot).toBe(prettyBytes(blockRoot));
+        expect((err as DownloadByRootError).type.sidecarIndex).toBe(invalidBlobSidecar.index);
+        expect(err.message).toEqual("invalid inclusion proof for blobSidecar at index=0");
+      }
+    });
+
+    it("should throw error for invalid KZG proof", async () => {
+      const invalidBlobSidecar = ssz.deneb.BlobSidecar.clone(denebBlockWithBlobs.blobSidecars[0]);
+      // Corrupt a single proof in the batch and make sure the whole batch is flagged invalid
+      invalidBlobSidecar.kzgProof = new Uint8Array(48).fill(255);
+
+      try {
+        await validateBlobs({
+          config,
+          peerIdStr,
+          blockRoot,
+          blobMeta,
+          blobSidecars: [invalidBlobSidecar, ...blobSidecars.slice(1)],
+        });
+      } catch (err) {
+        expect(err).toBeInstanceOf(DownloadByRootError);
+        expect((err as DownloadByRootError).type.code).toBe(DownloadByRootErrorCode.INVALID_KZG_PROOF);
+        expect((err as DownloadByRootError).type.peer).toBe(prettyPeerIdStr);
+        expect((err as DownloadByRootError).type.blockRoot).toBe(prettyBytes(blockRoot));
+      }
+    });
+  });

-    it("should throw error for extra unrequested blob sidecar", () => {
-      // Test EXTRA_SIDECAR_RECEIVED error
-    });
+  // describe("fetchAndValidateColumns", () => {
+  //   it("should fetch columns from execution engine and validate", () => {
+  //     // Test successful fetch from execution engine
+  //   });

-    it("should throw error for mismatched block root in blob header", () => {
-      // Test MISMATCH_BLOCK_ROOT error for blob sidecar
-    });
+  //   it("should gracefully handle executionEngine errors", () => {
+  //     // Test graceful handling of execution engine failures
+  //   });

-    it("should throw error for invalid inclusion proof", () => {
-      // Test INVALID_INCLUSION_PROOF error
-    });
+  //   it("should fetch columns from network when execution engine returns empty", () => {
+  //     // Test fallback to network when execution engine fails
+  //   });

-    it("should throw error for invalid KZG proof", () => {
-      // Test INVALID_KZG_PROOF error
-    });
+  //   it("should publish reconstructed columns to network", () => {
+  //     // Test column publishing after reconstruction
+  //   });

-    it("should validate multiple blob sidecars correctly", () => {
-      // Test validation of multiple blobs
-    });
-  });
+  //   it("should filter needed columns from reconstructed set", () => {
+  //     // Test that only needed columns are returned
+  //   });

-  describe("fetchGetBlobsV2AndBuildSidecars", () => {
-    it("should build column sidecars from execution engine blobs", () => {
-      // Test successful column sidecar building
-    });
+  //   it("should handle publishing errors gracefully", () => {
+  //     // Test that publishing errors don't fail the main operation
+  //   });

-    it("should return empty array when execution engine returns no response", () => {
-      // Test when execution engine returns null/undefined
-    });
+  //   it("should validate columns correctly in both scenarios", () => {
+  //     // Test validation works for both execution engine and network paths
+  //   });

-    it("should handle execution engine errors", () => {
-      // Test execution engine failure scenarios
-    });
+  //   it("should determine correct columns to publish based on custody config", () => {
+  //     // Test needToPublish logic with custody configuration
+  //   });

-    it("should correctly process cells and proofs", () => {
-      // Test getCellsAndProofs processing
-    });
-  });
+  // });

-  describe("fetchColumnsByRoot", () => {
-    it("should fetch column sidecars by root from network", () => {
-      // Test successful network column fetch
-    });
+  describe("fetchGetBlobsV2AndBuildSidecars", () => {
+    let fuluBlockWithColumns: ReturnType<typeof generateBlockWithColumnSidecars>;
+    let blobAndProofs: fulu.BlobAndProofV2[];
+    
let versionedHashes: Uint8Array[]; + + beforeEach(() => { + fuluBlockWithColumns = generateBlockWithColumnSidecars({forkName: ForkName.fulu, returnBlobs: true}); + // biome-ignore lint/style/noNonNullAssertion: returnBlobs = true + const blobs = fuluBlockWithColumns.blobs!; + blobAndProofs = blobs + .map((b) => kzg.computeCellsAndKzgProofs(b)) + .map(({proofs}, i) => ({proofs, blob: blobs[i]})); + versionedHashes = fuluBlockWithColumns.block.message.body.blobKzgCommitments.map((c) => + kzgCommitmentToVersionedHash(c) + ); + }); + + afterEach(() => { + vi.resetAllMocks(); + }); + + it("should call getBlobs with the correct arguments", async () => { + const getBlobsMock = vi.fn(() => Promise.resolve(blobAndProofs)); + executionEngine = { + getBlobs: getBlobsMock, + } as unknown as IExecutionEngine; + + const columnMeta = { + missing: fuluBlockWithColumns.columnSidecars.map((c) => c.index), + versionedHashes, + }; - it("should handle network request failure", () => { - // Test network failure scenarios - }); + await fetchGetBlobsV2AndBuildSidecars({ + config, + executionEngine, + forkName: ForkName.fulu, + block: fuluBlockWithColumns.block, + columnMeta, + }); - it("should request correct column indices", () => { - // Test that correct missing columns are requested + expect(getBlobsMock).toHaveBeenCalledOnce(); + expect(getBlobsMock).toHaveBeenCalledWith(ForkName.fulu, versionedHashes); }); - }); - // describe("validateColumnSidecar", () => { - // it("should successfully validate column sidecar", () => { - // const columnSidecar = fuluBlockWithColumns.columnSidecars[0]; - // const testBlockRoot = fuluBlockWithColumns.blockRoot; - - // // This should not throw - // expect(() => { - // validateColumnSidecar({ - // config, - // peerIdStr, - // blockRoot: testBlockRoot, - // columnSidecar, - // }); - // }).not.toThrow(); - // }); - - // it("should throw error for mismatched block root in column header", () => { - // const columnSidecar = fuluBlockWithColumns.columnSidecars[0]; - // const wrongBlockRoot = new Uint8Array(32).fill(1); // Different block root - - // expect(() => { - // validateColumnSidecar({ - // config, - // peerIdStr, - // blockRoot: wrongBlockRoot, - // columnSidecar, - // }); - // }).toThrow(DownloadByRootError); - - // try { - // validateColumnSidecar({ - // config, - // peerIdStr, - // blockRoot: wrongBlockRoot, - // columnSidecar, - // }); - // } catch (error) { - // expect(error).toBeInstanceOf(DownloadByRootError); - // expect((error as DownloadByRootError).type.code).toBe(DownloadByRootErrorCode.MISMATCH_BLOCK_ROOT); - // expect((error as DownloadByRootError).type.peer).toBe(peerIdStr); - // expect((error as DownloadByRootError).type.requestedBlockRoot).toBe(prettyBytes(wrongBlockRoot)); - // } - // }); - - // it("should throw error for invalid inclusion proof", () => { - // const columnSidecar = ssz.fulu.DataColumnSidecar.clone(fuluBlockWithColumns.columnSidecars[0]); - // // Corrupt the inclusion proof to make it invalid - // columnSidecar.kzgCommitmentsInclusionProof[0] = new Uint8Array(32).fill(255); - - // expect(() => { - // validateColumnSidecar({ - // config, - // peerIdStr, - // blockRoot: fuluBlockWithColumns.blockRoot, - // columnSidecar, - // }); - // }).toThrow(DownloadByRootError); - - // try { - // validateColumnSidecar({ - // config, - // peerIdStr, - // blockRoot: fuluBlockWithColumns.blockRoot, - // columnSidecar, - // }); - // } catch (error) { - // expect(error).toBeInstanceOf(DownloadByRootError); - // expect((error as 
DownloadByRootError).type.code).toBe(DownloadByRootErrorCode.INVALID_INCLUSION_PROOF); - // expect((error as DownloadByRootError).type.peer).toBe(peerIdStr); - // expect((error as DownloadByRootError).type.blockRoot).toBe(prettyBytes(fuluBlockWithColumns.blockRoot)); - // expect((error as DownloadByRootError).type.sidecarIndex).toBe(columnSidecar.index); - // } - // }); - // }); + it("should return empty array when execution engine returns no response", async () => { + const getBlobsMock = vi.fn(() => Promise.resolve(null)); + executionEngine = { + getBlobs: getBlobsMock, + } as unknown as IExecutionEngine; - describe("validateColumnSidecars", () => { - it("should successfully validate all needed column sidecars", () => { - // Test successful validation of needed columns - }); + const columnMeta = { + missing: fuluBlockWithColumns.columnSidecars.map((c) => c.index), + versionedHashes, + }; - it("should successfully validate needed and publish columns", () => { - // Test validation with both needed and needToPublish columns - }); + const result = await fetchGetBlobsV2AndBuildSidecars({ + config, + executionEngine, + forkName: ForkName.fulu, + block: fuluBlockWithColumns.block, + columnMeta, + }); - it("should throw error for extra unrequested column sidecar", () => { - // Test EXTRA_SIDECAR_RECEIVED error for columns + expect(getBlobsMock).toHaveBeenCalledOnce(); + expect(result).toEqual([]); }); - it("should throw error for invalid KZG proofs", () => { - // Test INVALID_KZG_PROOF error for columns - }); + it("should build columnSidecars from execution engine blobs", async () => { + const getBlobsMock = vi.fn(() => Promise.resolve(blobAndProofs)); + executionEngine = { + getBlobs: getBlobsMock, + } as unknown as IExecutionEngine; - it("should validate individual column sidecars correctly", () => { - // Test individual column validation within the batch - }); + const columnMeta = { + missing: fuluBlockWithColumns.columnSidecars.map((c) => c.index), + versionedHashes, + }; - it("should handle empty needToPublish array", () => { - // Test when needToPublish is empty or not provided - }); + const result = await fetchGetBlobsV2AndBuildSidecars({ + config, + executionEngine, + forkName: ForkName.fulu, + block: fuluBlockWithColumns.block, + columnMeta, + }); - it("should avoid duplicate validation for columns in both arrays", () => { - // Test that columns present in both needed and needToPublish are not validated twice + expect(getBlobsMock).toHaveBeenCalledOnce(); + expect(result).toBeDefined(); + expect(result).toBeInstanceOf(Array); + expect(result.length).toEqual(NUMBER_OF_COLUMNS); + + // Verify the structure of the returned column sidecars + for (const [index, columnSidecar] of Object.entries(result)) { + expect(columnSidecar).toHaveProperty("column"); + expect(columnSidecar.column).toBeInstanceOf(Array); + columnSidecar.column.map((cell) => expect(cell).toBeInstanceOf(Uint8Array)); + expect(columnSidecar.column.length).toEqual(fuluBlockWithColumns.block.message.body.blobKzgCommitments.length); + + expect(columnSidecar).toHaveProperty("index"); + expect(columnSidecar.index).toBeTypeOf("number"); + expect(columnSidecar.index).toEqual(parseInt(index)); + + expect(columnSidecar).toHaveProperty("kzgCommitments"); + expect(columnSidecar.kzgCommitments).toBeInstanceOf(Array); + columnSidecar.kzgCommitments.map((c) => expect(c).toBeInstanceOf(Uint8Array)); + expect(columnSidecar.kzgCommitments.toString()).toEqual( + fuluBlockWithColumns.block.message.body.blobKzgCommitments.toString() + ); + + 
expect(columnSidecar).toHaveProperty("kzgProofs"); + expect(columnSidecar.kzgProofs).toBeInstanceOf(Array); + columnSidecar.kzgProofs.map((proof) => expect(proof).toBeInstanceOf(Uint8Array)); + expect(columnSidecar.kzgProofs.length).toEqual(columnSidecar.column.length); + + expect(columnSidecar).toHaveProperty("kzgCommitmentsInclusionProof"); + expect(columnSidecar.kzgCommitmentsInclusionProof).toBeInstanceOf(Array); + columnSidecar.kzgCommitmentsInclusionProof.map((proof) => expect(proof).toBeInstanceOf(Uint8Array)); + + // // Verify the signed block header matches the block + expect(columnSidecar).toHaveProperty("signedBlockHeader"); + expect(columnSidecar.signedBlockHeader.message.slot).toBe(fuluBlockWithColumns.block.message.slot); + expect(columnSidecar.signedBlockHeader.message.proposerIndex).toBe( + fuluBlockWithColumns.block.message.proposerIndex + ); + expect(columnSidecar.signedBlockHeader.message.parentRoot).toEqual( + fuluBlockWithColumns.block.message.parentRoot + ); + expect(columnSidecar.signedBlockHeader.message.stateRoot).toEqual(fuluBlockWithColumns.block.message.stateRoot); + } }); }); - describe("fetchAndValidateColumns", () => { - it("should fetch columns from execution engine and validate", () => { - // Test successful fetch from execution engine + describe("fetchColumnsByRoot", () => { + let fuluBlockWithColumns: ReturnType; + beforeAll(() => { + fuluBlockWithColumns = generateBlockWithColumnSidecars({forkName: ForkName.fulu}); + network = { + sendDataColumnSidecarsByRoot: vi.fn(() => fuluBlockWithColumns.columnSidecars), + } as unknown as INetwork; + }); + afterAll(() => { + vi.resetAllMocks(); + }); + it("should fetch missing columnSidecars ByRoot from network", async () => { + const blockRoot = fuluBlockWithColumns.blockRoot; + const missing = fuluBlockWithColumns.columnSidecars.map((c) => c.index); + const response = await fetchColumnsByRoot({ + network, + peerIdStr, + blockRoot, + columnMeta: { + missing, + versionedHashes: [], + }, + }); + expect(response).toEqual(fuluBlockWithColumns.columnSidecars); + expect(network.sendDataColumnSidecarsByRoot).toHaveBeenCalledOnce(); + expect(network.sendDataColumnSidecarsByRoot).toHaveBeenCalledWith(peerIdStr, [{blockRoot, columns: missing}]); }); + }); - it("should fetch columns from network when execution engine returns empty", () => { - // Test fallback to network when execution engine fails + describe("validateColumnSidecar", () => { + let fuluBlockWithColumns: ReturnType; + + beforeAll(() => { + fuluBlockWithColumns = generateBlockWithColumnSidecars({forkName: ForkName.fulu}); + }); + + it("should successfully validate column sidecar", () => { + const columnSidecar = fuluBlockWithColumns.columnSidecars[0]; + const testBlockRoot = fuluBlockWithColumns.blockRoot; + + // This should not throw + expect(() => { + validateColumnSidecar({ + config, + peerIdStr, + blockRoot: testBlockRoot, + columnSidecar, + }); + }).not.toThrow(); + }); + + it("should throw error for mismatched block root in column header", () => { + const columnSidecar = fuluBlockWithColumns.columnSidecars[0]; + const wrongBlockRoot = new Uint8Array(32).fill(1); + + expect(() => { + validateColumnSidecar({ + config, + peerIdStr, + blockRoot: wrongBlockRoot, + columnSidecar, + }); + }).toThrow(DownloadByRootError); + + try { + validateColumnSidecar({ + config, + peerIdStr, + blockRoot: wrongBlockRoot, + columnSidecar, + }); + } catch (error) { + expect(error).toBeInstanceOf(DownloadByRootError); + expect((error as 
DownloadByRootError).type.code).toBe(DownloadByRootErrorCode.MISMATCH_BLOCK_ROOT); + expect((error as DownloadByRootError).type.peer).toBe(prettyPeerIdStr); + expect((error as DownloadByRootError).type.requestedBlockRoot).toBe(prettyBytes(wrongBlockRoot)); + } }); - it("should publish reconstructed columns to network", () => { - // Test column publishing after reconstruction + it("should throw error for invalid inclusion proof", () => { + const columnSidecar = ssz.fulu.DataColumnSidecar.clone(fuluBlockWithColumns.columnSidecars[0]); + // Corrupt the inclusion proof to make it invalid + columnSidecar.kzgCommitmentsInclusionProof[0] = new Uint8Array(32).fill(255); + + expect(() => { + validateColumnSidecar({ + config, + peerIdStr, + blockRoot: fuluBlockWithColumns.blockRoot, + columnSidecar, + }); + }).toThrow(DownloadByRootError); + + try { + validateColumnSidecar({ + config, + peerIdStr, + blockRoot: fuluBlockWithColumns.blockRoot, + columnSidecar, + }); + } catch (error) { + expect(error).toBeInstanceOf(DownloadByRootError); + expect((error as DownloadByRootError).type.code).toBe(DownloadByRootErrorCode.INVALID_INCLUSION_PROOF); + expect((error as DownloadByRootError).type.peer).toBe(prettyPeerIdStr); + expect((error as DownloadByRootError).type.blockRoot).toBe(prettyBytes(fuluBlockWithColumns.blockRoot)); + expect((error as DownloadByRootError).type.sidecarIndex).toBe(columnSidecar.index); + } }); + }); - it("should filter needed columns from reconstructed set", () => { - // Test that only needed columns are returned + describe("validateColumnSidecars", () => { + let fuluBlockWithColumns: ReturnType; + let blockRoot: Uint8Array; + let columnMeta: MissingColumnMeta; + + beforeAll(() => { + fuluBlockWithColumns = generateBlockWithColumnSidecars({forkName: ForkName.fulu}); + blockRoot = fuluBlockWithColumns.blockRoot; + columnMeta = { + missing: fuluBlockWithColumns.columnSidecars.map((c) => c.index), + versionedHashes: [], + }; }); - it("should handle publishing errors gracefully", () => { - // Test that publishing errors don't fail the main operation - }); + it("should successfully validate all needed column sidecars", async () => { + await expect( + validateColumnSidecars({ + config, + peerIdStr, + blockRoot, + columnMeta, + needed: fuluBlockWithColumns.columnSidecars, + }) + ).resolves.toBeUndefined(); + }); + + it("should successfully validate needToPublish columns", async () => { + await expect( + validateColumnSidecars({ + config, + peerIdStr, + blockRoot, + columnMeta, + needToPublish: fuluBlockWithColumns.columnSidecars, + }) + ).resolves.toBeUndefined(); + }); + + it("should throw error for extra un-requested column sidecar", async () => { + const testProps = { + config, + peerIdStr, + blockRoot, + columnMeta: { + ...columnMeta, + missing: Array.from({length: 18}, (_, i) => i), + }, + needed: fuluBlockWithColumns.columnSidecars, + }; + await expect(validateColumnSidecars(testProps)).rejects.toThrow(); + + try { + await validateColumnSidecars(testProps); + } catch (err) { + expect(err).toBeInstanceOf(DownloadByRootError); + expect((err as DownloadByRootError).type.code).toBe(DownloadByRootErrorCode.EXTRA_SIDECAR_RECEIVED); + expect((err as DownloadByRootError).type.peer).toBe(prettyPeerIdStr); + expect((err as DownloadByRootError).type.blockRoot).toBe(prettyBytes(blockRoot)); + expect((err as DownloadByRootError).type.invalidIndex).toBe(18); + expect((err as DownloadByRootError).message).toBe("Received a columnSidecar that was not requested"); + } + }); + + it("should invalidate 
individual needed column sidecar correctly", async () => { + // Create an invalid column with bad inclusion proof to trigger the final validation error + const invalidColumn = ssz.fulu.DataColumnSidecar.clone(fuluBlockWithColumns.columnSidecars[127]); + invalidColumn.kzgCommitmentsInclusionProof[0] = new Uint8Array(32).fill(255); + + const invalidTestProps = { + config, + peerIdStr, + blockRoot, + columnMeta, + needed: [...fuluBlockWithColumns.columnSidecars.slice(0, -1), invalidColumn], + }; + + try { + await validateColumnSidecars(invalidTestProps); + } catch (err) { + expect(err).toBeInstanceOf(DownloadByRootError); + expect((err as DownloadByRootError).type.code).toBe(DownloadByRootErrorCode.INVALID_INCLUSION_PROOF); + expect((err as DownloadByRootError).type.peer).toBe(prettyPeerIdStr); + expect((err as DownloadByRootError).type.blockRoot).toBe(prettyBytes(fuluBlockWithColumns.blockRoot)); + expect((err as DownloadByRootError).type.sidecarIndex).toBe(127); + expect(err.message).toBe( + "Error validating needed columnSidecar index=127. Validation error: DOWNLOAD_BY_ROOT_ERROR_INVALID_INCLUSION_PROOF" + ); + } + }); + + it("should invalidate individual needToPublish column sidecar correctly", async () => { + // Create an invalid column with bad inclusion proof to trigger the final validation error + const invalidColumn = ssz.fulu.DataColumnSidecar.clone(fuluBlockWithColumns.columnSidecars[127]); + invalidColumn.kzgCommitmentsInclusionProof[0] = new Uint8Array(32).fill(255); + + const invalidTestProps = { + config, + peerIdStr, + blockRoot, + columnMeta, + needToPublish: [...fuluBlockWithColumns.columnSidecars.slice(0, -1), invalidColumn], + }; + + try { + await validateColumnSidecars(invalidTestProps); + } catch (err) { + expect(err).toBeInstanceOf(DownloadByRootError); + expect((err as DownloadByRootError).type.code).toBe(DownloadByRootErrorCode.INVALID_INCLUSION_PROOF); + expect((err as DownloadByRootError).type.peer).toBe(prettyPeerIdStr); + expect((err as DownloadByRootError).type.blockRoot).toBe(prettyBytes(fuluBlockWithColumns.blockRoot)); + expect((err as DownloadByRootError).type.sidecarIndex).toBe(127); + expect(err.message).toBe( + "Error validating needToPublish columnSidecar index=127. 
Validation error: DOWNLOAD_BY_ROOT_ERROR_INVALID_INCLUSION_PROOF" + ); + } + }); + + it("should avoid duplicate validation for columns in both arrays", async () => { + // Use valid columns to simplify the test setup + const sharedColumns = fuluBlockWithColumns.columnSidecars.slice(0, 2); + const uniqueNeededColumns = fuluBlockWithColumns.columnSidecars.slice(2, 4); + const uniquePublishColumns = fuluBlockWithColumns.columnSidecars.slice(4, 6); + const validateFn = vi.fn(); + + const testProps: ValidateColumnSidecarsProps = { + config, + peerIdStr, + blockRoot, + columnMeta: { + missing: [...sharedColumns, ...uniqueNeededColumns, ...uniquePublishColumns].map((c) => c.index), + versionedHashes: columnMeta.versionedHashes, + }, + needed: [...sharedColumns, ...uniqueNeededColumns], // 4 columns total (2 shared + 2 unique) + needToPublish: [...sharedColumns, ...uniquePublishColumns], // 4 columns total (2 shared + 2 unique to publish) + validateFn, + }; - it("should validate columns correctly in both scenarios", () => { - // Test validation works for both execution engine and network paths + await expect(validateColumnSidecars(testProps)).resolves.toBeUndefined(); + const validateCommonProps = { + config, + peerIdStr, + blockRoot, + }; + expect(validateFn).toHaveBeenCalledTimes(6); + expect(validateFn).toHaveBeenNthCalledWith(1, { + ...validateCommonProps, + columnSidecar: sharedColumns[0], + }); + expect(validateFn).toHaveBeenNthCalledWith(2, { + ...validateCommonProps, + columnSidecar: sharedColumns[1], + }); + expect(validateFn).toHaveBeenNthCalledWith(3, { + ...validateCommonProps, + columnSidecar: uniqueNeededColumns[0], + }); + expect(validateFn).toHaveBeenNthCalledWith(4, { + ...validateCommonProps, + columnSidecar: uniqueNeededColumns[1], + }); + expect(validateFn).toHaveBeenNthCalledWith(5, { + ...validateCommonProps, + columnSidecar: uniquePublishColumns[0], + }); + expect(validateFn).toHaveBeenNthCalledWith(6, { + ...validateCommonProps, + columnSidecar: uniquePublishColumns[1], + }); }); - it("should determine correct columns to publish based on custody config", () => { - // Test needToPublish logic with custody configuration + it("should throw error for invalid KZG proofs", async () => { + let invalidColumn = ssz.fulu.DataColumnSidecar.clone(fuluBlockWithColumns.columnSidecars[0]); + // Corrupt one of the KZG proofs to make it invalid + invalidColumn.kzgProofs[0] = new Uint8Array(BYTES_PER_PROOF).fill(255); + + let testProps = { + config, + peerIdStr, + blockRoot, + columnMeta, + needed: [invalidColumn, ...fuluBlockWithColumns.columnSidecars.slice(1)], + }; + + try { + await validateColumnSidecars(testProps); + } catch (err) { + expect(err).toBeInstanceOf(DownloadByRootError); + expect((err as DownloadByRootError).type.code).toBe(DownloadByRootErrorCode.INVALID_KZG_PROOF); + expect((err as DownloadByRootError).type.peer).toBe(prettyPeerIdStr); + expect((err as DownloadByRootError).type.blockRoot).toBe(prettyBytes(blockRoot)); + } + + invalidColumn = ssz.fulu.DataColumnSidecar.clone(fuluBlockWithColumns.columnSidecars[0]); + // Corrupt one of the cells to make it invalid + invalidColumn.column[0] = new Uint8Array(BYTES_PER_CELL).fill(255); + + testProps = { + config, + peerIdStr, + blockRoot, + columnMeta, + needed: [invalidColumn, ...fuluBlockWithColumns.columnSidecars.slice(1)], + }; + + try { + await validateColumnSidecars(testProps); + } catch (err) { + expect(err).toBeInstanceOf(DownloadByRootError); + expect((err as 
DownloadByRootError).type.code).toBe(DownloadByRootErrorCode.INVALID_KZG_PROOF); + expect((err as DownloadByRootError).type.peer).toBe(prettyPeerIdStr); + expect((err as DownloadByRootError).type.blockRoot).toBe(prettyBytes(blockRoot)); + } }); }); describe("DownloadByRootError", () => { + const blockRoot = randomBytes(ROOT_SIZE); + it("should create error with MISMATCH_BLOCK_ROOT code", () => { const error = new DownloadByRootError({ code: DownloadByRootErrorCode.MISMATCH_BLOCK_ROOT, diff --git a/packages/beacon-node/test/utils/blocksAndData.ts b/packages/beacon-node/test/utils/blocksAndData.ts index cb6e357b0848..457481d110f3 100644 --- a/packages/beacon-node/test/utils/blocksAndData.ts +++ b/packages/beacon-node/test/utils/blocksAndData.ts @@ -2,7 +2,7 @@ import {randomBytes} from "node:crypto"; import {SIGNATURE_LENGTH_UNCOMPRESSED} from "@chainsafe/blst"; import {BYTES_PER_BLOB, BYTES_PER_FIELD_ELEMENT} from "@crate-crypto/node-eth-kzg"; import {generateKeyPair} from "@libp2p/crypto/keys"; -import {ChainForkConfig, createChainForkConfig, defaultChainConfig} from "@lodestar/config"; +import {createChainForkConfig, defaultChainConfig} from "@lodestar/config"; import { ForkPostCapella, ForkPostDeneb, @@ -14,8 +14,13 @@ import { import {computeStartSlotAtEpoch, signedBlockToSignedHeader} from "@lodestar/state-transition"; import {SignedBeaconBlock, Slot, deneb, fulu, ssz} from "@lodestar/types"; import {toRootHex} from "@lodestar/utils"; +import {VersionedHashes} from "../../src/execution/index.js"; import {computeNodeIdFromPrivateKey} from "../../src/network/subnets/index.js"; -import {computePreFuluKzgCommitmentsInclusionProof} from "../../src/util/blobs.js"; +import { + computePreFuluKzgCommitmentsInclusionProof, + getBlobSidecars, + kzgCommitmentToVersionedHash, +} from "../../src/util/blobs.js"; import { CustodyConfig, computePostFuluKzgCommitmentsInclusionProof, @@ -28,12 +33,14 @@ export const CAPELLA_FORK_EPOCH = 0; export const DENEB_FORK_EPOCH = 10; export const ELECTRA_FORK_EPOCH = 20; export const FULU_FORK_EPOCH = 30; +export const GLOAS_FORK_EPOCH = 40; export const config = createChainForkConfig({ ...defaultChainConfig, CAPELLA_FORK_EPOCH, DENEB_FORK_EPOCH, ELECTRA_FORK_EPOCH, FULU_FORK_EPOCH, + GLOAS_FORK_EPOCH, }); export const privateKey = await generateKeyPair("secp256k1"); export const nodeId = computeNodeIdFromPrivateKey(privateKey); @@ -44,6 +51,7 @@ export const slots: Record = { deneb: computeStartSlotAtEpoch(DENEB_FORK_EPOCH), electra: computeStartSlotAtEpoch(ELECTRA_FORK_EPOCH), fulu: computeStartSlotAtEpoch(FULU_FORK_EPOCH), + gloas: computeStartSlotAtEpoch(GLOAS_FORK_EPOCH), }; /** @@ -109,47 +117,32 @@ function generateRoots( } function generateBlobSidecars( - forkName: ForkPostDeneb, block: SignedBeaconBlock, count: number, oomProtection = false ): { block: SignedBeaconBlock; blobSidecars: deneb.BlobSidecars; - // versionedHashes: VersionedHashes + versionedHashes: VersionedHashes; } { - const blobKzgCommitments: Uint8Array[] = []; - const blobSidecars: deneb.BlobSidecars = []; - const signedBlockHeader = signedBlockToSignedHeader(config, block); + const blobs = Array.from({length: count}, () => generateRandomBlob()); + const commitments = blobs.map((blob) => kzg.blobToKzgCommitment(blob)); + const proofs = blobs.map((blob, i) => kzg.computeBlobKzgProof(blob, commitments[i])); - for (let index = 0; index < count; index++) { - const blobSidecar = ssz[forkName].BlobSidecar.defaultValue(); - blobSidecar.index = index; - blobSidecar.signedBlockHeader = 
signedBlockHeader;
-    blobSidecar.blob = generateRandomBlob();
-    blobSidecar.kzgCommitment = kzg.blobToKzgCommitment(blobSidecar.blob);
-    blobSidecar.kzgCommitmentInclusionProof = computePreFuluKzgCommitmentsInclusionProof(
-      forkName,
-      block.message.body,
-      index
-    );
-    blobSidecar.kzgProof = kzg.computeBlobKzgProof(blobSidecar.blob, blobSidecar.kzgCommitment);
+  block.message.body.blobKzgCommitments = commitments;

-    if (oomProtection) {
-      blobSidecar.blob = new Uint8Array(1);
-    }
+  const blobSidecars = getBlobSidecars(config, block, blobs, proofs);

-    blobSidecars.push(blobSidecar);
-    blobKzgCommitments.push(blobSidecar.kzgCommitment);
+  if (oomProtection) {
+    for (const sidecar of blobSidecars) sidecar.blob = new Uint8Array(1);
   }

-  block.message.body.blobKzgCommitments = blobKzgCommitments;
-  // const versionedHashes = blobKzgCommitments.map((commitment) => kzgCommitmentToVersionedHash(commitment));
+  const versionedHashes = commitments.map((commitment) => kzgCommitmentToVersionedHash(commitment));

   return {
     block,
     blobSidecars,
-    // versionedHashes,
+    versionedHashes,
   };
 }

@@ -157,10 +150,12 @@ function generateColumnSidecars(
   forkName: F,
   block: SignedBeaconBlock,
   numberOfBlobs: number,
-  oomProtection = false
+  oomProtection = false,
+  returnBlobs = false
 ): {
   block: SignedBeaconBlock;
   columnSidecars: fulu.DataColumnSidecars;
+  blobs?: deneb.Blob[];
 } {
   const blobs = Array.from({length: numberOfBlobs}, () => generateRandomBlob());
   const kzgCommitments = blobs.map((blob) => kzg.blobToKzgCommitment(blob));
@@ -191,6 +186,7 @@ function generateColumnSidecars(
   return {
     block,
     columnSidecars,
+    blobs: returnBlobs ? blobs : undefined,
   };
 }

@@ -220,27 +216,32 @@ export function generateChainOfBlocks({
   return blocks;
 }

-export type BlockWithBlobsTestSet = BlockTestSet & {blobSidecars: deneb.BlobSidecars};
+export type BlockWithBlobsTestSet = BlockTestSet & {
+  blobSidecars: deneb.BlobSidecars;
+  versionedHashes: VersionedHashes;
+};
 export type BlockWithColumnsTestSet = BlockTestSet & {
   columnSidecars: fulu.DataColumnSidecars;
+  blobs?: deneb.Blob[];
 };

 export function generateBlockWithBlobSidecars({
   forkName,
   slot,
+  count,
   parentRoot,
   oomProtection = false,
 }: {
   forkName: F;
   parentRoot?: Uint8Array;
+  count?: number;
   slot?: Slot;
   oomProtection?: boolean;
 }): BlockWithBlobsTestSet {
-  const {block, blobSidecars} = generateBlobSidecars(
-    forkName,
+  const {block, blobSidecars, versionedHashes} = generateBlobSidecars(
     generateBeaconBlock({forkName, parentRoot, slot}),
-    generateRandomInt(1, 6),
+    count ? count : generateRandomInt(1, 6),
     oomProtection
   );
   const {blockRoot, rootHex} = generateRoots(forkName, block);
@@ -249,6 +250,7 @@ export function generateBlockWithBlobSidecars({
     blobSidecars,
     blockRoot,
     rootHex,
+    versionedHashes,
   };
 }

@@ -257,24 +259,28 @@ export function generateBlockWithColumnSidecars({
   slot,
   parentRoot,
   oomProtection = false,
+  returnBlobs = false,
 }: {
   forkName: F;
   parentRoot?: Uint8Array;
   slot?: Slot;
   oomProtection?: boolean;
+  returnBlobs?: boolean;
 }): BlockWithColumnsTestSet {
-  const {block, columnSidecars} = generateColumnSidecars(
+  const {block, columnSidecars, blobs} = generateColumnSidecars(
     forkName,
     generateBeaconBlock({forkName, parentRoot, slot}),
     generateRandomInt(1, 6),
-    oomProtection
+    oomProtection,
+    returnBlobs
   );
   const {blockRoot, rootHex} = generateRoots(forkName, block);
   return {
     block,
-    columnSidecars,
     blockRoot,
     rootHex,
+    columnSidecars,
+    blobs: returnBlobs ? 
blobs : undefined, }; } From bff41ab2bf76a82822db98a31181c58996bcd59a Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Tue, 26 Aug 2025 19:21:06 +0700 Subject: [PATCH 061/173] fix: rename dataAvailabilityStatus --- .../unit/sync/utils/downloadByRange.test.ts | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/packages/beacon-node/test/unit/sync/utils/downloadByRange.test.ts b/packages/beacon-node/test/unit/sync/utils/downloadByRange.test.ts index ef9034b86ad7..a1bf9a3d58a3 100644 --- a/packages/beacon-node/test/unit/sync/utils/downloadByRange.test.ts +++ b/packages/beacon-node/test/unit/sync/utils/downloadByRange.test.ts @@ -77,14 +77,14 @@ describe("downloadByRange", () => { expect( typeof validateRequests({ config, - dataAvailabilityStatus: DataAvailabilityStatus.PreData, + daOutOfRange: DataAvailabilityStatus.PreData, blocksRequest: {startSlot: slots.capella, count: 1}, }) === "string" ).toBeTruthy(); expect( typeof validateRequests({ config, - dataAvailabilityStatus: DataAvailabilityStatus.OutOfRange, + daOutOfRange: DataAvailabilityStatus.OutOfRange, blocksRequest: {startSlot: slots.deneb, count: 1}, }) === "string" ).toBeTruthy(); @@ -93,7 +93,7 @@ describe("downloadByRange", () => { expect(() => validateRequests({ config, - dataAvailabilityStatus: DataAvailabilityStatus.OutOfRange, + daOutOfRange: DataAvailabilityStatus.OutOfRange, blocksRequest: {startSlot: slots.deneb, count: 1}, blobsRequest: {startSlot: slots.deneb, count: 1}, }) @@ -103,7 +103,7 @@ describe("downloadByRange", () => { expect(() => validateRequests({ config, - dataAvailabilityStatus: DataAvailabilityStatus.Available, + daOutOfRange: DataAvailabilityStatus.Available, blocksRequest: {startSlot: slots.deneb, count: 1}, }) ).toThrow("Must request data if it is available"); @@ -112,7 +112,7 @@ describe("downloadByRange", () => { expect(() => validateRequests({ config, - dataAvailabilityStatus: DataAvailabilityStatus.Available, + daOutOfRange: DataAvailabilityStatus.Available, blocksRequest: {startSlot: slots.deneb, count: 1}, blobsRequest: {startSlot: slots.deneb, count: 1}, columnsRequest: {startSlot: slots.fulu, count: 1}, @@ -123,7 +123,7 @@ describe("downloadByRange", () => { expect(() => validateRequests({ config, - dataAvailabilityStatus: DataAvailabilityStatus.Available, + daOutOfRange: DataAvailabilityStatus.Available, blocksRequest: {startSlot: slots.capella, count: 1}, columnsRequest: {startSlot: slots.capella, count: 1}, }) @@ -133,7 +133,7 @@ describe("downloadByRange", () => { expect(() => validateRequests({ config, - dataAvailabilityStatus: DataAvailabilityStatus.Available, + daOutOfRange: DataAvailabilityStatus.Available, blocksRequest: {startSlot: slots.deneb, count: 1}, columnsRequest: {startSlot: slots.deneb, count: 1}, }) @@ -143,7 +143,7 @@ describe("downloadByRange", () => { expect(() => validateRequests({ config, - dataAvailabilityStatus: DataAvailabilityStatus.Available, + daOutOfRange: DataAvailabilityStatus.Available, blocksRequest: {startSlot: slots.fulu, count: 1}, blobsRequest: {startSlot: slots.fulu, count: 1}, }) @@ -153,7 +153,7 @@ describe("downloadByRange", () => { expect(() => validateRequests({ config, - dataAvailabilityStatus: DataAvailabilityStatus.Available, + daOutOfRange: DataAvailabilityStatus.Available, blocksRequest: {startSlot: slots.deneb, count: 1}, blobsRequest: {startSlot: slots.deneb + 1, count: 1}, }) @@ -163,7 +163,7 @@ describe("downloadByRange", () => { expect(() => validateRequests({ config, - dataAvailabilityStatus: 
DataAvailabilityStatus.Available, + daOutOfRange: DataAvailabilityStatus.Available, blocksRequest: {startSlot: slots.deneb, count: 1}, blobsRequest: {startSlot: slots.deneb, count: 2}, }) @@ -173,7 +173,7 @@ describe("downloadByRange", () => { expect( typeof validateRequests({ config, - dataAvailabilityStatus: DataAvailabilityStatus.Available, + daOutOfRange: DataAvailabilityStatus.Available, blocksRequest: {startSlot: slots.deneb, count: 1}, blobsRequest: {startSlot: slots.deneb, count: 1}, }) === "string" @@ -183,7 +183,7 @@ describe("downloadByRange", () => { expect( typeof validateRequests({ config, - dataAvailabilityStatus: DataAvailabilityStatus.Available, + daOutOfRange: DataAvailabilityStatus.Available, blocksRequest: {startSlot: slots.fulu, count: 1}, columnsRequest: {startSlot: slots.fulu, count: 1}, }) === "string" From 0da2fc16405ad19991cf20b1caae32ce8fbeec57 Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Tue, 26 Aug 2025 19:26:59 +0700 Subject: [PATCH 062/173] fix: type errors --- packages/beacon-node/src/sync/utils/downloadByRoot.ts | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/packages/beacon-node/src/sync/utils/downloadByRoot.ts b/packages/beacon-node/src/sync/utils/downloadByRoot.ts index 0123a81468df..25c7aaf3086e 100644 --- a/packages/beacon-node/src/sync/utils/downloadByRoot.ts +++ b/packages/beacon-node/src/sync/utils/downloadByRoot.ts @@ -463,7 +463,7 @@ export async function fetchAndValidateColumns({ blockRoot, columnMeta, }: FetchByRootAndValidateColumnsProps): Promise { - let columnSidecars: fulu.DataColumnSidecars; + let columnSidecars: fulu.DataColumnSidecars = []; try { columnSidecars = await fetchGetBlobsV2AndBuildSidecars({ config, @@ -476,7 +476,7 @@ export async function fetchAndValidateColumns({ network.logger.error( `error building columnSidecars for blockRoot=${prettyBytes(blockRoot)} via getBlobsV2`, {}, - err + err as Error ); } @@ -630,7 +630,8 @@ export async function validateColumnSidecars({ columnSidecar, }); } catch (err) { - err.message = `Error validating needed columnSidecar index=${columnSidecar.index}. Validation error: ${err.message}`; + (err as Error).message = + `Error validating needed columnSidecar index=${columnSidecar.index}. Validation error: ${(err as Error).message}`; throw err; } } @@ -647,7 +648,8 @@ export async function validateColumnSidecars({ columnSidecar, }); } catch (err) { - err.message = `Error validating needToPublish columnSidecar index=${columnSidecar.index}. Validation error: ${err.message}`; + (err as Error).message = + `Error validating needToPublish columnSidecar index=${columnSidecar.index}. 
Validation error: ${(err as Error).message}`; throw err; } needToCheckProof.push(columnSidecar); From d45f3e4d854b4e495abf8d25ad76a7e2707d31a2 Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Tue, 26 Aug 2025 19:29:43 +0700 Subject: [PATCH 063/173] chore: lint --- .../src/chain/blocks/blockInput/types.ts | 2 +- .../unit/sync/utils/downloadByRange.test.ts | 26 +++++++++---------- .../unit/sync/utils/downloadByRoot.test.ts | 2 +- 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/packages/beacon-node/src/chain/blocks/blockInput/types.ts b/packages/beacon-node/src/chain/blocks/blockInput/types.ts index 9cb73c9154ca..8157630a619b 100644 --- a/packages/beacon-node/src/chain/blocks/blockInput/types.ts +++ b/packages/beacon-node/src/chain/blocks/blockInput/types.ts @@ -1,6 +1,6 @@ import {ForkName} from "@lodestar/params"; import {ColumnIndex, RootHex, SignedBeaconBlock, Slot, deneb, fulu} from "@lodestar/types"; -import {VersionedHashes} from "../../../execution"; +import {VersionedHashes} from "../../../execution/index.js"; export enum DAType { PreData = "pre-data", diff --git a/packages/beacon-node/test/unit/sync/utils/downloadByRange.test.ts b/packages/beacon-node/test/unit/sync/utils/downloadByRange.test.ts index a1bf9a3d58a3..64064d281498 100644 --- a/packages/beacon-node/test/unit/sync/utils/downloadByRange.test.ts +++ b/packages/beacon-node/test/unit/sync/utils/downloadByRange.test.ts @@ -19,9 +19,9 @@ import {config, custodyConfig, generateChainOfBlockMaybeSidecars, slots} from ". describe("downloadByRange", () => { const peerIdStr = "0x1234567890abcdef"; - let cache: SeenBlockInputCache; + // let cache: SeenBlockInputCache; let network: INetwork; - const logger = getMockedLogger(); + // const logger = getMockedLogger(); const startSlot = slots.deneb; const count = 32; @@ -52,17 +52,17 @@ describe("downloadByRange", () => { }); beforeEach(() => { - const abortController = new AbortController(); - const signal = abortController.signal; - cache = new SeenBlockInputCache({ - config, - custodyConfig, - clock: new Clock({config, signal, genesisTime: Math.floor(Date.now() / 1000)}), - chainEvents: new ChainEventEmitter(), - signal, - metrics: null, - logger, - }); + // const abortController = new AbortController(); + // const signal = abortController.signal; + // cache = new SeenBlockInputCache({ + // config, + // custodyConfig, + // clock: new Clock({config, signal, genesisTime: Math.floor(Date.now() / 1000)}), + // chainEvents: new ChainEventEmitter(), + // signal, + // metrics: null, + // logger, + // }); network = { sendBeaconBlocksByRange: vi.fn(), sendBlobSidecarsByRange: vi.fn(), diff --git a/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts b/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts index 14fa58a4c2b3..22a4d104388a 100644 --- a/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts +++ b/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts @@ -52,7 +52,7 @@ describe("downloadByRoot.ts", () => { let network: INetwork; // let cache: SeenBlockInput; let executionEngine: IExecutionEngine; - const logger = getMockedLogger(); + // const logger = getMockedLogger(); // Test data // let capellaBlock: SignedBeaconBlock; From fa83ae1f143040d77fbadbd7f2224d73c91dac7e Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Tue, 26 Aug 2025 23:11:57 +0700 Subject: [PATCH 064/173] test: rough out MockBlockInput for testing --- packages/beacon-node/test/utils/blockInput.ts | 103 ++++++++++++++++++ 1 file changed, 103 insertions(+) 
 create mode 100644 packages/beacon-node/test/utils/blockInput.ts

diff --git a/packages/beacon-node/test/utils/blockInput.ts b/packages/beacon-node/test/utils/blockInput.ts
new file mode 100644
index 000000000000..bb4d9fec33e0
--- /dev/null
+++ b/packages/beacon-node/test/utils/blockInput.ts
@@ -0,0 +1,103 @@
+import {ForkName} from "@lodestar/params";
+import {SignedBeaconBlock} from "@lodestar/types";
+import {
+  AddBlock,
+  BlockInputSource,
+  DAData,
+  DAType,
+  IBlockInput,
+  LogMetaBasic,
+  SourceMeta,
+} from "../../src/chain/blocks/blockInput/index.js";
+
+export type MockBlockInputProps = {
+  type: DAType;
+  daOutOfRange: boolean;
+  timeCreatedSec: number;
+  forkName: ForkName;
+  slot: number;
+  blockRootHex: string;
+  parentRootHex: string;
+};
+
+export class MockBlockInput implements IBlockInput {
+  type: DAType;
+  daOutOfRange: boolean;
+  timeCreatedSec: number;
+  forkName: ForkName;
+  slot: number;
+  blockRootHex: string;
+  parentRootHex: string;
+
+  _block?: SignedBeaconBlock;
+  _blockSource?: BlockInputSource;
+  _blockSeenTimestampSec?: number;
+  _blockPeerIdStr?: string;
+
+  _timeCompleted?: number;
+
+  constructor({type, daOutOfRange, timeCreatedSec, forkName, slot, blockRootHex, parentRootHex}: MockBlockInputProps) {
+    this.type = type;
+    this.daOutOfRange = daOutOfRange;
+    this.timeCreatedSec = timeCreatedSec;
+    this.forkName = forkName;
+    this.slot = slot;
+    this.blockRootHex = blockRootHex;
+    this.parentRootHex = parentRootHex;
+  }
+
+  addBlock(
+    {block, blockRootHex, seenTimestampSec, source, peerIdStr}: AddBlock,
+    _opts?: {throwOnDuplicateAdd: boolean}
+  ): void {
+    this.blockRootHex = blockRootHex;
+
+    this._block = block;
+    this._blockSeenTimestampSec = seenTimestampSec;
+    this._blockSource = source;
+    this._blockPeerIdStr = peerIdStr;
+  }
+  hasBlock(): boolean {
+    return !!this._block;
+  }
+  getBlock(): SignedBeaconBlock {
+    // biome-ignore lint/style/noNonNullAssertion: test fixture
+    return this._block!;
+  }
+  getBlockSource(): SourceMeta {
+    return {
+      seenTimestampSec: this._blockSeenTimestampSec ?? Date.now(),
+      source: this._blockSource ?? BlockInputSource.gossip,
+      peerIdStr: this._blockPeerIdStr ?? "0xTESTING_PEER_ID_STR",
+    };
+  }
+
+  hasAllData(): boolean {
+    return true;
+  }
+  hasBlockAndAllData(): boolean {
+    return !!this._block;
+  }
+
+  getLogMeta(): LogMetaBasic {
+    return {
+      blockRoot: this.blockRootHex,
+      slot: this.slot,
+      timeCreatedSec: this.timeCreatedSec,
+    };
+  }
+
+  getTimeComplete(): number {
+    return this._timeCompleted ?? 0;
+  }
+
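+  // The waitFor* stubs below resolve immediately with whatever state the mock already
+  // holds, so unit tests never block waiting on data-availability plumbing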
+  waitForAllData(_timeout: number, _signal?: AbortSignal): Promise<DAData | null> {
+    return Promise.resolve(null);
+  }
+  waitForBlock(_timeout: number, _signal?: AbortSignal): Promise<SignedBeaconBlock> {
+    return Promise.resolve(this._block as SignedBeaconBlock);
+  }
+  waitForBlockAndAllData(_timeout: number, _signal?: AbortSignal): Promise<IBlockInput> {
+    return Promise.resolve(this);
+  }
+}

From c3adcf83cab69b6f64022d2024afb233e3571f93 Mon Sep 17 00:00:00 2001
From: matthewkeil
Date: Tue, 26 Aug 2025 23:12:29 +0700
Subject: [PATCH 065/173] chore: fix check-types

---
 .../unit/sync/utils/downloadByRoot.test.ts    | 243 ++++++++----------
 .../beacon-node/test/utils/blocksAndData.ts   |  12 +-
 2 files changed, 112 insertions(+), 143 deletions(-)

diff --git a/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts b/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts
index 22a4d104388a..3dbceb455e28 100644
--- a/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts
+++ b/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts
@@ -1,49 +1,48 @@
 import {randomBytes} from "node:crypto";
-import {BYTES_PER_CELL, BYTES_PER_COMMITMENT, BYTES_PER_PROOF} from "@crate-crypto/node-eth-kzg";
+import {BYTES_PER_CELL, BYTES_PER_PROOF} from "@crate-crypto/node-eth-kzg";
 import {ForkName, NUMBER_OF_COLUMNS} from "@lodestar/params";
-import {SignedBeaconBlock, deneb, fulu, ssz} from "@lodestar/types";
-import {fromHex, prettyBytes, toRootHex} from "@lodestar/utils";
-import {Mock, afterAll, afterEach, beforeAll, beforeEach, describe, expect, it, vi} from "vitest";
+import {deneb, fulu, ssz} from "@lodestar/types";
+import {prettyBytes} from "@lodestar/utils";
+import {afterAll, afterEach, beforeAll, beforeEach, describe, expect, it, vi} from "vitest";
 import {
   BlobMeta,
-  BlockInputSource,
-  IBlockInput,
+  // IBlockInput,
   MissingColumnMeta,
 } from "../../../../src/chain/blocks/blockInput/types.js";
-import {ChainEventEmitter} from "../../../../src/chain/index.js";
-import {SeenBlockInput} from "../../../../src/chain/seenCache/seenGossipBlockInput.js";
+// import {ChainEventEmitter} from "../../../../src/chain/index.js";
+// import {SeenBlockInput} from "../../../../src/chain/seenCache/seenGossipBlockInput.js";
 import {IExecutionEngine} from "../../../../src/execution/index.js";
 import {INetwork, prettyPrintPeerIdStr} from "../../../../src/network/index.js";
-import {BlockInputSyncCacheItem, PendingBlockInput, PendingBlockInputStatus} from "../../../../src/sync/types.js";
+// import {BlockInputSyncCacheItem, PendingBlockInput, PendingBlockInputStatus} from "../../../../src/sync/types.js";
 import {
   DownloadByRootError,
   DownloadByRootErrorCode,
   ValidateColumnSidecarsProps,
-  downloadByRoot,
-  fetchAndValidateBlobs,
-  fetchAndValidateBlock,
-  fetchAndValidateColumns,
+  // downloadByRoot,
+  // fetchAndValidateBlobs,
+  // fetchAndValidateBlock,
+  // fetchAndValidateColumns,
   fetchBlobsByRoot,
-  fetchByRoot,
+  // fetchByRoot,
   fetchColumnsByRoot,
-  fetchGetBlobsV1AndBuildSidecars,
+  // fetchGetBlobsV1AndBuildSidecars,
   fetchGetBlobsV2AndBuildSidecars,
   validateBlobs,
   validateColumnSidecar,
   validateColumnSidecars,
 } from "../../../../src/sync/utils/downloadByRoot.js";
 import {kzgCommitmentToVersionedHash} from "../../../../src/util/blobs.js";
-import {Clock} from "../../../../src/util/clock.js";
+// import {Clock} from "../../../../src/util/clock.js";
 import {kzg} from "../../../../src/util/kzg.js";
 import {ROOT_SIZE} from "../../../../src/util/sszBytes.js";
-import {getMockedLogger} from 
"../../../../test/mocks/loggerMock.js"; +// import {getMockedLogger} from "../../../../test/mocks/loggerMock.js"; import { config, - custodyConfig, + // custodyConfig, generateBlockWithBlobSidecars, generateBlockWithColumnSidecars, - generateChainOfBlocks, - slots, + // generateChainOfBlocks, + // slots, } from "../../../utils/blocksAndData.js"; describe("downloadByRoot.ts", () => { @@ -258,7 +257,6 @@ describe("downloadByRoot.ts", () => { const response = await fetchBlobsByRoot({ network, peerIdStr, - blockRoot, blobMeta, }); expect(response).toEqual(denebBlockWithColumns.blobSidecars); @@ -270,9 +268,9 @@ describe("downloadByRoot.ts", () => { await fetchBlobsByRoot({ network, peerIdStr, - blockRoot, blobMeta, - indicesInPossession: [0, denebBlockWithColumns.blobSidecars.at(-1)?.index], + // biome-ignore lint/style/noNonNullAssertion: its there + indicesInPossession: [0, denebBlockWithColumns.blobSidecars.at(-1)?.index!], }); expect(network.sendBlobSidecarsByRoot).toHaveBeenCalledOnce(); expect(network.sendBlobSidecarsByRoot).toHaveBeenCalledWith(peerIdStr, blobMeta.slice(1, -1)); @@ -282,7 +280,6 @@ describe("downloadByRoot.ts", () => { const response = await fetchBlobsByRoot({ network, peerIdStr, - blockRoot, blobMeta, indicesInPossession: blobMeta.map(({index}) => index), }); @@ -327,11 +324,11 @@ describe("downloadByRoot.ts", () => { }); } catch (err) { expect(err).toBeInstanceOf(DownloadByRootError); - expect((err as DownloadByRootError).type.code).toBe(DownloadByRootErrorCode.EXTRA_SIDECAR_RECEIVED); - expect((err as DownloadByRootError).type.peer).toBe(prettyPeerIdStr); - expect((err as DownloadByRootError).type.blockRoot).toBe(prettyBytes(blockRoot)); - expect((err as DownloadByRootError).type.invalidIndex).toBe(blobMeta.at(-1)?.index); - expect((err as DownloadByRootError).message).toBe("received a blobSidecar that was not requested"); + expect((err as any).type.code).toBe(DownloadByRootErrorCode.EXTRA_SIDECAR_RECEIVED); + expect((err as any).type.peer).toBe(prettyPeerIdStr); + expect((err as any).type.blockRoot).toBe(prettyBytes(blockRoot)); + expect((err as any).type.invalidIndex).toBe(blobMeta.at(-1)?.index); + expect((err as any).message).toBe("received a blobSidecar that was not requested"); } }); @@ -350,11 +347,11 @@ describe("downloadByRoot.ts", () => { }); } catch (err) { expect(err).toBeInstanceOf(DownloadByRootError); - expect((err as DownloadByRootError).type.code).toBe(DownloadByRootErrorCode.MISMATCH_BLOCK_ROOT); - expect((err as DownloadByRootError).type.peer).toBe(prettyPeerIdStr); - expect((err as DownloadByRootError).type.requestedBlockRoot).toBe(prettyBytes(requestedBlockRoot)); - expect((err as DownloadByRootError).type.receivedBlockRoot).toBe(prettyBytes(headerRoot)); - expect(err.message).toEqual("blobSidecar header root did not match requested blockRoot for index=0"); + expect((err as any).type.code).toBe(DownloadByRootErrorCode.MISMATCH_BLOCK_ROOT); + expect((err as any).type.peer).toBe(prettyPeerIdStr); + expect((err as any).type.requestedBlockRoot).toBe(prettyBytes(requestedBlockRoot)); + expect((err as any).type.receivedBlockRoot).toBe(prettyBytes(headerRoot)); + expect((err as any).message).toEqual("blobSidecar header root did not match requested blockRoot for index=0"); } }); @@ -373,11 +370,11 @@ describe("downloadByRoot.ts", () => { }); } catch (err) { expect(err).toBeInstanceOf(DownloadByRootError); - expect((err as DownloadByRootError).type.code).toBe(DownloadByRootErrorCode.INVALID_INCLUSION_PROOF); - expect((err as 
DownloadByRootError).type.peer).toBe(prettyPeerIdStr); - expect((err as DownloadByRootError).type.blockRoot).toBe(prettyBytes(blockRoot)); - expect((err as DownloadByRootError).type.sidecarIndex).toBe(invalidBlobSidecar.index); - expect(err.message).toEqual("invalid inclusion proof for blobSidecar at index=0"); + expect((err as any).type.code).toBe(DownloadByRootErrorCode.INVALID_INCLUSION_PROOF); + expect((err as any).type.peer).toBe(prettyPeerIdStr); + expect((err as any).type.blockRoot).toBe(prettyBytes(blockRoot)); + expect((err as any).type.sidecarIndex).toBe(invalidBlobSidecar.index); + expect((err as any).message).toEqual("invalid inclusion proof for blobSidecar at index=0"); } }); @@ -396,9 +393,9 @@ describe("downloadByRoot.ts", () => { }); } catch (err) { expect(err).toBeInstanceOf(DownloadByRootError); - expect((err as DownloadByRootError).type.code).toBe(DownloadByRootErrorCode.INVALID_KZG_PROOF); - expect((err as DownloadByRootError).type.peer).toBe(prettyPeerIdStr); - expect((err as DownloadByRootError).type.blockRoot).toBe(prettyBytes(blockRoot)); + expect((err as any).type.code).toBe(DownloadByRootErrorCode.INVALID_KZG_PROOF); + expect((err as any).type.peer).toBe(prettyPeerIdStr); + expect((err as any).type.blockRoot).toBe(prettyBytes(blockRoot)); } }); }); @@ -624,16 +621,6 @@ describe("downloadByRoot.ts", () => { it("should throw error for mismatched block root in column header", () => { const columnSidecar = fuluBlockWithColumns.columnSidecars[0]; const wrongBlockRoot = new Uint8Array(32).fill(1); - - expect(() => { - validateColumnSidecar({ - config, - peerIdStr, - blockRoot: wrongBlockRoot, - columnSidecar, - }); - }).toThrow(DownloadByRootError); - try { validateColumnSidecar({ config, @@ -643,9 +630,9 @@ describe("downloadByRoot.ts", () => { }); } catch (error) { expect(error).toBeInstanceOf(DownloadByRootError); - expect((error as DownloadByRootError).type.code).toBe(DownloadByRootErrorCode.MISMATCH_BLOCK_ROOT); - expect((error as DownloadByRootError).type.peer).toBe(prettyPeerIdStr); - expect((error as DownloadByRootError).type.requestedBlockRoot).toBe(prettyBytes(wrongBlockRoot)); + expect((error as any).type.code).toBe(DownloadByRootErrorCode.MISMATCH_BLOCK_ROOT); + expect((error as any).type.peer).toBe(prettyPeerIdStr); + expect((error as any).type.requestedBlockRoot).toBe(prettyBytes(wrongBlockRoot)); } }); @@ -653,16 +640,6 @@ describe("downloadByRoot.ts", () => { const columnSidecar = ssz.fulu.DataColumnSidecar.clone(fuluBlockWithColumns.columnSidecars[0]); // Corrupt the inclusion proof to make it invalid columnSidecar.kzgCommitmentsInclusionProof[0] = new Uint8Array(32).fill(255); - - expect(() => { - validateColumnSidecar({ - config, - peerIdStr, - blockRoot: fuluBlockWithColumns.blockRoot, - columnSidecar, - }); - }).toThrow(DownloadByRootError); - try { validateColumnSidecar({ config, @@ -670,12 +647,12 @@ describe("downloadByRoot.ts", () => { blockRoot: fuluBlockWithColumns.blockRoot, columnSidecar, }); - } catch (error) { - expect(error).toBeInstanceOf(DownloadByRootError); - expect((error as DownloadByRootError).type.code).toBe(DownloadByRootErrorCode.INVALID_INCLUSION_PROOF); - expect((error as DownloadByRootError).type.peer).toBe(prettyPeerIdStr); - expect((error as DownloadByRootError).type.blockRoot).toBe(prettyBytes(fuluBlockWithColumns.blockRoot)); - expect((error as DownloadByRootError).type.sidecarIndex).toBe(columnSidecar.index); + } catch (err) { + expect(err).toBeInstanceOf(DownloadByRootError); + expect((err as 
any).type.code).toBe(DownloadByRootErrorCode.INVALID_INCLUSION_PROOF); + expect((err as any).type.peer).toBe(prettyPeerIdStr); + expect((err as any).type.blockRoot).toBe(prettyBytes(fuluBlockWithColumns.blockRoot)); + expect((err as any).type.sidecarIndex).toBe(columnSidecar.index); } }); }); @@ -735,11 +712,11 @@ describe("downloadByRoot.ts", () => { await validateColumnSidecars(testProps); } catch (err) { expect(err).toBeInstanceOf(DownloadByRootError); - expect((err as DownloadByRootError).type.code).toBe(DownloadByRootErrorCode.EXTRA_SIDECAR_RECEIVED); - expect((err as DownloadByRootError).type.peer).toBe(prettyPeerIdStr); - expect((err as DownloadByRootError).type.blockRoot).toBe(prettyBytes(blockRoot)); - expect((err as DownloadByRootError).type.invalidIndex).toBe(18); - expect((err as DownloadByRootError).message).toBe("Received a columnSidecar that was not requested"); + expect((err as any).type.code).toBe(DownloadByRootErrorCode.EXTRA_SIDECAR_RECEIVED); + expect((err as any).type.peer).toBe(prettyPeerIdStr); + expect((err as any).type.blockRoot).toBe(prettyBytes(blockRoot)); + expect((err as any).type.invalidIndex).toBe(18); + expect((err as any).message).toBe("Received a columnSidecar that was not requested"); } }); @@ -760,11 +737,11 @@ describe("downloadByRoot.ts", () => { await validateColumnSidecars(invalidTestProps); } catch (err) { expect(err).toBeInstanceOf(DownloadByRootError); - expect((err as DownloadByRootError).type.code).toBe(DownloadByRootErrorCode.INVALID_INCLUSION_PROOF); - expect((err as DownloadByRootError).type.peer).toBe(prettyPeerIdStr); - expect((err as DownloadByRootError).type.blockRoot).toBe(prettyBytes(fuluBlockWithColumns.blockRoot)); - expect((err as DownloadByRootError).type.sidecarIndex).toBe(127); - expect(err.message).toBe( + expect((err as any).type.code).toBe(DownloadByRootErrorCode.INVALID_INCLUSION_PROOF); + expect((err as any).type.peer).toBe(prettyPeerIdStr); + expect((err as any).type.blockRoot).toBe(prettyBytes(fuluBlockWithColumns.blockRoot)); + expect((err as any).type.sidecarIndex).toBe(127); + expect((err as any).message).toBe( "Error validating needed columnSidecar index=127. Validation error: DOWNLOAD_BY_ROOT_ERROR_INVALID_INCLUSION_PROOF" ); } @@ -787,11 +764,11 @@ describe("downloadByRoot.ts", () => { await validateColumnSidecars(invalidTestProps); } catch (err) { expect(err).toBeInstanceOf(DownloadByRootError); - expect((err as DownloadByRootError).type.code).toBe(DownloadByRootErrorCode.INVALID_INCLUSION_PROOF); - expect((err as DownloadByRootError).type.peer).toBe(prettyPeerIdStr); - expect((err as DownloadByRootError).type.blockRoot).toBe(prettyBytes(fuluBlockWithColumns.blockRoot)); - expect((err as DownloadByRootError).type.sidecarIndex).toBe(127); - expect(err.message).toBe( + expect((err as any).type.code).toBe(DownloadByRootErrorCode.INVALID_INCLUSION_PROOF); + expect((err as any).type.peer).toBe(prettyPeerIdStr); + expect((err as any).type.blockRoot).toBe(prettyBytes(fuluBlockWithColumns.blockRoot)); + expect((err as any).type.sidecarIndex).toBe(127); + expect((err as any).message).toBe( "Error validating needToPublish columnSidecar index=127. 
Validation error: DOWNLOAD_BY_ROOT_ERROR_INVALID_INCLUSION_PROOF" ); } @@ -867,9 +844,9 @@ describe("downloadByRoot.ts", () => { await validateColumnSidecars(testProps); } catch (err) { expect(err).toBeInstanceOf(DownloadByRootError); - expect((err as DownloadByRootError).type.code).toBe(DownloadByRootErrorCode.INVALID_KZG_PROOF); - expect((err as DownloadByRootError).type.peer).toBe(prettyPeerIdStr); - expect((err as DownloadByRootError).type.blockRoot).toBe(prettyBytes(blockRoot)); + expect((err as any).type.code).toBe(DownloadByRootErrorCode.INVALID_KZG_PROOF); + expect((err as any).type.peer).toBe(prettyPeerIdStr); + expect((err as any).type.blockRoot).toBe(prettyBytes(blockRoot)); } invalidColumn = ssz.fulu.DataColumnSidecar.clone(fuluBlockWithColumns.columnSidecars[0]); @@ -888,9 +865,9 @@ describe("downloadByRoot.ts", () => { await validateColumnSidecars(testProps); } catch (err) { expect(err).toBeInstanceOf(DownloadByRootError); - expect((err as DownloadByRootError).type.code).toBe(DownloadByRootErrorCode.INVALID_KZG_PROOF); - expect((err as DownloadByRootError).type.peer).toBe(prettyPeerIdStr); - expect((err as DownloadByRootError).type.blockRoot).toBe(prettyBytes(blockRoot)); + expect((err as any).type.code).toBe(DownloadByRootErrorCode.INVALID_KZG_PROOF); + expect((err as any).type.peer).toBe(prettyPeerIdStr); + expect((err as any).type.blockRoot).toBe(prettyBytes(blockRoot)); } }); }); @@ -899,100 +876,100 @@ describe("downloadByRoot.ts", () => { const blockRoot = randomBytes(ROOT_SIZE); it("should create error with MISMATCH_BLOCK_ROOT code", () => { - const error = new DownloadByRootError({ + const err = new DownloadByRootError({ code: DownloadByRootErrorCode.MISMATCH_BLOCK_ROOT, peer: peerIdStr, requestedBlockRoot: prettyBytes(blockRoot), receivedBlockRoot: prettyBytes(new Uint8Array(32).fill(1)), }); - expect(error).toBeInstanceOf(DownloadByRootError); - expect(error.type.code).toBe(DownloadByRootErrorCode.MISMATCH_BLOCK_ROOT); - expect(error.type.peer).toBe(peerIdStr); - expect(error.type.requestedBlockRoot).toBe(prettyBytes(blockRoot)); - expect(error.type.receivedBlockRoot).toBe(prettyBytes(new Uint8Array(32).fill(1))); + expect(err as any).toBeInstanceOf(DownloadByRootError); + expect((err as any).type.code).toBe(DownloadByRootErrorCode.MISMATCH_BLOCK_ROOT); + expect((err as any).type.peer).toBe(peerIdStr); + expect((err as any).type.requestedBlockRoot).toBe(prettyBytes(blockRoot)); + expect((err as any).type.receivedBlockRoot).toBe(prettyBytes(new Uint8Array(32).fill(1))); }); it("should create error with EXTRA_SIDECAR_RECEIVED code", () => { - const error = new DownloadByRootError({ + const err = new DownloadByRootError({ code: DownloadByRootErrorCode.EXTRA_SIDECAR_RECEIVED, peer: peerIdStr, blockRoot: prettyBytes(blockRoot), invalidIndex: 5, }); - expect(error).toBeInstanceOf(DownloadByRootError); - expect(error.type.code).toBe(DownloadByRootErrorCode.EXTRA_SIDECAR_RECEIVED); - expect(error.type.peer).toBe(peerIdStr); - expect(error.type.blockRoot).toBe(prettyBytes(blockRoot)); - expect(error.type.invalidIndex).toBe(5); + expect(err as any).toBeInstanceOf(DownloadByRootError); + expect((err as any).type.code).toBe(DownloadByRootErrorCode.EXTRA_SIDECAR_RECEIVED); + expect((err as any).type.peer).toBe(peerIdStr); + expect((err as any).type.blockRoot).toBe(prettyBytes(blockRoot)); + expect((err as any).type.invalidIndex).toBe(5); }); it("should create error with INVALID_INCLUSION_PROOF code", () => { - const error = new DownloadByRootError({ + const err = new 
DownloadByRootError({ code: DownloadByRootErrorCode.INVALID_INCLUSION_PROOF, peer: peerIdStr, blockRoot: prettyBytes(blockRoot), sidecarIndex: 2, }); - expect(error).toBeInstanceOf(DownloadByRootError); - expect(error.type.code).toBe(DownloadByRootErrorCode.INVALID_INCLUSION_PROOF); - expect(error.type.peer).toBe(peerIdStr); - expect(error.type.blockRoot).toBe(prettyBytes(blockRoot)); - expect(error.type.sidecarIndex).toBe(2); + expect(err as any).toBeInstanceOf(DownloadByRootError); + expect((err as any).type.code).toBe(DownloadByRootErrorCode.INVALID_INCLUSION_PROOF); + expect((err as any).type.peer).toBe(peerIdStr); + expect((err as any).type.blockRoot).toBe(prettyBytes(blockRoot)); + expect((err as any).type.sidecarIndex).toBe(2); }); it("should create error with INVALID_KZG_PROOF code", () => { - const error = new DownloadByRootError({ + const err = new DownloadByRootError({ code: DownloadByRootErrorCode.INVALID_KZG_PROOF, peer: peerIdStr, blockRoot: prettyBytes(blockRoot), }); - expect(error).toBeInstanceOf(DownloadByRootError); - expect(error.type.code).toBe(DownloadByRootErrorCode.INVALID_KZG_PROOF); - expect(error.type.peer).toBe(peerIdStr); - expect(error.type.blockRoot).toBe(prettyBytes(blockRoot)); + expect(err as any).toBeInstanceOf(DownloadByRootError); + expect((err as any).type.code).toBe(DownloadByRootErrorCode.INVALID_KZG_PROOF); + expect((err as any).type.peer).toBe(peerIdStr); + expect((err as any).type.blockRoot).toBe(prettyBytes(blockRoot)); }); it("should create error with MISSING_BLOCK_RESPONSE code", () => { - const error = new DownloadByRootError({ + const err = new DownloadByRootError({ code: DownloadByRootErrorCode.MISSING_BLOCK_RESPONSE, peer: peerIdStr, blockRoot: prettyBytes(blockRoot), }); - expect(error).toBeInstanceOf(DownloadByRootError); - expect(error.type.code).toBe(DownloadByRootErrorCode.MISSING_BLOCK_RESPONSE); - expect(error.type.peer).toBe(peerIdStr); - expect(error.type.blockRoot).toBe(prettyBytes(blockRoot)); + expect(err as any).toBeInstanceOf(DownloadByRootError); + expect((err as any).type.code).toBe(DownloadByRootErrorCode.MISSING_BLOCK_RESPONSE); + expect((err as any).type.peer).toBe(peerIdStr); + expect((err as any).type.blockRoot).toBe(prettyBytes(blockRoot)); }); it("should create error with MISSING_BLOB_RESPONSE code", () => { - const error = new DownloadByRootError({ + const err = new DownloadByRootError({ code: DownloadByRootErrorCode.MISSING_BLOB_RESPONSE, peer: peerIdStr, blockRoot: prettyBytes(blockRoot), }); - expect(error).toBeInstanceOf(DownloadByRootError); - expect(error.type.code).toBe(DownloadByRootErrorCode.MISSING_BLOB_RESPONSE); - expect(error.type.peer).toBe(peerIdStr); - expect(error.type.blockRoot).toBe(prettyBytes(blockRoot)); + expect(err as any).toBeInstanceOf(DownloadByRootError); + expect((err as any).type.code).toBe(DownloadByRootErrorCode.MISSING_BLOB_RESPONSE); + expect((err as any).type.peer).toBe(peerIdStr); + expect((err as any).type.blockRoot).toBe(prettyBytes(blockRoot)); }); it("should create error with MISSING_COLUMN_RESPONSE code", () => { - const error = new DownloadByRootError({ + const err = new DownloadByRootError({ code: DownloadByRootErrorCode.MISSING_COLUMN_RESPONSE, peer: peerIdStr, blockRoot: prettyBytes(blockRoot), }); - expect(error).toBeInstanceOf(DownloadByRootError); - expect(error.type.code).toBe(DownloadByRootErrorCode.MISSING_COLUMN_RESPONSE); - expect(error.type.peer).toBe(peerIdStr); - expect(error.type.blockRoot).toBe(prettyBytes(blockRoot)); + expect(err as 
any).toBeInstanceOf(DownloadByRootError); + expect((err as any).type.code).toBe(DownloadByRootErrorCode.MISSING_COLUMN_RESPONSE); + expect((err as any).type.peer).toBe(peerIdStr); + expect((err as any).type.blockRoot).toBe(prettyBytes(blockRoot)); }); it("should include correct error details in error object", () => { @@ -1002,10 +979,10 @@ describe("downloadByRoot.ts", () => { requestedBlockRoot: prettyBytes(blockRoot), receivedBlockRoot: prettyBytes(new Uint8Array(32).fill(1)), }; - const error = new DownloadByRootError(errorData); + const err = new DownloadByRootError(errorData as any); - expect(error.type).toEqual(errorData); - expect(Object.keys(error.type)).toEqual(Object.keys(errorData)); + expect(err.type).toEqual(errorData); + expect(Object.keys(err.type)).toEqual(Object.keys(errorData)); }); }); }); diff --git a/packages/beacon-node/test/utils/blocksAndData.ts b/packages/beacon-node/test/utils/blocksAndData.ts index 457481d110f3..7939a46f3a31 100644 --- a/packages/beacon-node/test/utils/blocksAndData.ts +++ b/packages/beacon-node/test/utils/blocksAndData.ts @@ -16,16 +16,8 @@ import {SignedBeaconBlock, Slot, deneb, fulu, ssz} from "@lodestar/types"; import {toRootHex} from "@lodestar/utils"; import {VersionedHashes} from "../../src/execution/index.js"; import {computeNodeIdFromPrivateKey} from "../../src/network/subnets/index.js"; -import { - computePreFuluKzgCommitmentsInclusionProof, - getBlobSidecars, - kzgCommitmentToVersionedHash, -} from "../../src/util/blobs.js"; -import { - CustodyConfig, - computePostFuluKzgCommitmentsInclusionProof, - getDataColumnSidecarsFromBlock, -} from "../../src/util/dataColumns.js"; +import {getBlobSidecars, kzgCommitmentToVersionedHash} from "../../src/util/blobs.js"; +import {CustodyConfig, computePostFuluKzgCommitmentsInclusionProof} from "../../src/util/dataColumns.js"; import {kzg} from "../../src/util/kzg.js"; import {ROOT_SIZE} from "../../src/util/sszBytes.js"; From efcdaa2ce004647a768fcf645d5fa2323be409da Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Wed, 27 Aug 2025 19:06:42 +0700 Subject: [PATCH 066/173] fix: update pendingBlocksTree.test for new BlockInput --- .../unit/sync/utils/pendingBlocksTree.test.ts | 35 ++++++++++++------- packages/beacon-node/test/utils/blockInput.ts | 28 +++++++-------- 2 files changed, 37 insertions(+), 26 deletions(-) diff --git a/packages/beacon-node/test/unit/sync/utils/pendingBlocksTree.test.ts b/packages/beacon-node/test/unit/sync/utils/pendingBlocksTree.test.ts index 9251c1159c64..b8d21e8dea49 100644 --- a/packages/beacon-node/test/unit/sync/utils/pendingBlocksTree.test.ts +++ b/packages/beacon-node/test/unit/sync/utils/pendingBlocksTree.test.ts @@ -1,11 +1,20 @@ -import {RootHex} from "@lodestar/types"; +import {ForkName} from "@lodestar/params"; +import {RootHex, ssz} from "@lodestar/types"; import {describe, expect, it} from "vitest"; -import {PendingBlock, PendingBlockStatus, UnknownAndAncestorBlocks} from "../../../../src/sync/index.js"; +import {BlockInputPreData} from "../../../../src/chain/blocks/blockInput/blockInput.js"; import { + BlockInputSyncCacheItem, + PendingBlockInput, + PendingBlockInputStatus, + getBlockInputSyncCacheItemRootHex, +} from "../../../../src/sync/types.js"; +import { + UnknownAndAncestorBlocks, getAllDescendantBlocks, getDescendantBlocks, getUnknownAndAncestorBlocks, } from "../../../../src/sync/utils/pendingBlocksTree.js"; +import {MockBlockInput} from "../../../utils/blockInput.js"; describe("sync / pendingBlocksTree", () => { const testCases: { @@ -49,13 +58,15 @@ 
describe("sync / pendingBlocksTree", () => { ]; for (const testCase of testCases) { - const blocks = new Map(); + const blocks = new Map(); for (const block of testCase.blocks) { - blocks.set(block.block, { - blockRootHex: block.block, - parentBlockRootHex: block.parent, - status: block.parent == null ? PendingBlockStatus.pending : PendingBlockStatus.downloaded, - } as PendingBlock); + const pending: PendingBlockInput = { + status: block.parent === null ? PendingBlockInputStatus.pending : PendingBlockInputStatus.downloaded, + blockInput: new MockBlockInput({blockRootHex: block.block, parentRootHex: block.parent}), + peerIdStrings: new Set(), + timeAddedSec: 0, + }; + blocks.set(pending.blockInput.blockRootHex, pending); } describe(testCase.id, () => { @@ -78,13 +89,13 @@ describe("sync / pendingBlocksTree", () => { } }); -function toRes(blocks: PendingBlock[]): string[] { - return blocks.map((block) => block.blockRootHex); +function toRes(blocks: BlockInputSyncCacheItem[]): string[] { + return blocks.map((block) => getBlockInputSyncCacheItemRootHex(block)); } function toRes2(blocks: UnknownAndAncestorBlocks): {unknowns: string[]; ancestors: string[]} { return { - unknowns: blocks.unknowns.map((block) => block.blockRootHex), - ancestors: blocks.ancestors.map((block) => block.blockRootHex), + unknowns: blocks.unknowns.map((block) => getBlockInputSyncCacheItemRootHex(block)), + ancestors: blocks.ancestors.map((block) => getBlockInputSyncCacheItemRootHex(block)), }; } diff --git a/packages/beacon-node/test/utils/blockInput.ts b/packages/beacon-node/test/utils/blockInput.ts index bb4d9fec33e0..34b9d0747f07 100644 --- a/packages/beacon-node/test/utils/blockInput.ts +++ b/packages/beacon-node/test/utils/blockInput.ts @@ -11,13 +11,13 @@ import { } from "../../src/chain/blocks/blockInput/index.js"; export type MockBlockInputProps = { - type: DAType; - daOutOfRange: boolean; - timeCreatedSec: number; - forkName: ForkName; - slot: number; - blockRootHex: string; - parentRootHex: string; + type?: DAType; + daOutOfRange?: boolean; + timeCreatedSec?: number; + forkName?: ForkName; + slot?: number; + blockRootHex?: string; + parentRootHex?: string | null; }; export class MockBlockInput implements IBlockInput { @@ -37,13 +37,13 @@ export class MockBlockInput implements IBlockInput { _timeCompleted?: number; constructor({type, daOutOfRange, timeCreatedSec, forkName, slot, blockRootHex, parentRootHex}: MockBlockInputProps) { - this.type = type; - this.daOutOfRange = daOutOfRange; - this.timeCreatedSec = timeCreatedSec; - this.forkName = forkName; - this.slot = slot; - this.blockRootHex = blockRootHex; - this.parentRootHex = parentRootHex; + this.type = type ?? DAType.PreData; + this.daOutOfRange = daOutOfRange ?? true; + this.timeCreatedSec = timeCreatedSec ?? 0; + this.forkName = forkName ?? ForkName.capella; + this.slot = slot ?? 0; + this.blockRootHex = blockRootHex ?? "0x0000000000000000000000000000000000000000000000000000000000000000"; + this.parentRootHex = parentRootHex ?? 
"0x0000000000000000000000000000000000000000000000000000000000000000"; } addBlock( From 1087c82bb6c32d9b56ab3a552702ca68030adf95 Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Wed, 27 Aug 2025 19:39:12 +0700 Subject: [PATCH 067/173] fix: check-types in blocksAndData test util --- .../beacon-node/test/utils/blocksAndData.ts | 41 +++++++++++-------- 1 file changed, 23 insertions(+), 18 deletions(-) diff --git a/packages/beacon-node/test/utils/blocksAndData.ts b/packages/beacon-node/test/utils/blocksAndData.ts index 7939a46f3a31..86290fd9fb0f 100644 --- a/packages/beacon-node/test/utils/blocksAndData.ts +++ b/packages/beacon-node/test/utils/blocksAndData.ts @@ -100,7 +100,7 @@ function generateRoots( blockRoot: Uint8Array; rootHex: string; } { - const blockRoot = ssz[forkName].BeaconBlock.hashTreeRoot(block.message); + const blockRoot = ssz[forkName].BeaconBlock.hashTreeRoot(block.message as any); const rootHex = toRootHex(blockRoot); return { blockRoot, @@ -147,7 +147,7 @@ function generateColumnSidecars( ): { block: SignedBeaconBlock; columnSidecars: fulu.DataColumnSidecars; - blobs?: deneb.BlobSidecars; + blobs?: deneb.Blob[]; } { const blobs = Array.from({length: numberOfBlobs}, () => generateRandomBlob()); const kzgCommitments = blobs.map((blob) => kzg.blobToKzgCommitment(blob)); @@ -210,7 +210,7 @@ export function generateChainOfBlocks({ export type BlockWithBlobsTestSet = BlockTestSet & { blobSidecars: deneb.BlobSidecars; - versionedHashed: VersionedHashes; + versionedHashes: VersionedHashes; }; export type BlockWithColumnsTestSet = BlockTestSet & { @@ -276,9 +276,9 @@ export function generateBlockWithColumnSidecars({ }; } -export type BlocksWithSidecars = F extends ForkPostFulu - ? BlockWithColumnsTestSet[] - : BlockWithBlobsTestSet[]; +export type BlockWithSidecars = F extends ForkPostFulu + ? BlockWithColumnsTestSet + : BlockWithBlobsTestSet; export function generateChainOfBlocksWithBlobs({ forkName, @@ -288,25 +288,30 @@ export function generateChainOfBlocksWithBlobs({ forkName: F; count: number; oomProtection?: boolean; -}): BlocksWithSidecars { +}): BlockWithSidecars[] { let parentRoot = Uint8Array.from(randomBytes(ROOT_SIZE)); let slot = slots[forkName]; - const blocks: BlocksWithSidecars = []; + const blocks: BlockWithSidecars[] = []; for (; slot < slot + count; slot++) { - const blockWithSidecars = isForkPostFulu(forkName) - ? generateBlockWithColumnSidecars({forkName, parentRoot, slot, oomProtection}) - : generateBlockWithBlobSidecars({forkName, parentRoot, slot, oomProtection}); + const blockWithSidecars = ( + isForkPostFulu(forkName) + ? generateBlockWithColumnSidecars({forkName, parentRoot, slot, oomProtection}) + : generateBlockWithBlobSidecars({ + forkName, + parentRoot, + slot, + oomProtection, + }) + ) as BlockWithSidecars; parentRoot = blockWithSidecars.blockRoot; blocks.push(blockWithSidecars); } return blocks; } -export type ChainOfBlockMaybeSidecars = F extends ForkPostFulu - ? BlockWithColumnsTestSet[] - : F extends ForkPostDeneb - ? BlockWithBlobsTestSet[] - : BlockTestSet[]; +export type ChainOfBlockMaybeSidecars = F extends ForkPostDeneb + ? 
BlockWithSidecars[] + : BlockTestSet[]; export function generateChainOfBlockMaybeSidecars( forkName: F, @@ -314,7 +319,7 @@ export function generateChainOfBlockMaybeSidecars( oomProtection = false ): ChainOfBlockMaybeSidecars { if (isForkPostDeneb(forkName)) { - return generateChainOfBlocksWithBlobs({forkName, count, oomProtection}); + return generateChainOfBlocksWithBlobs({forkName, count, oomProtection}) as ChainOfBlockMaybeSidecars; } - return generateChainOfBlocks({forkName, count}); + return generateChainOfBlocks({forkName, count}) as ChainOfBlockMaybeSidecars; } From 35883bae04a09b39095874a07d648af8fe7eb2f9 Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Wed, 27 Aug 2025 19:39:43 +0700 Subject: [PATCH 068/173] chore: lint --- .../test/unit/sync/utils/pendingBlocksTree.test.ts | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/packages/beacon-node/test/unit/sync/utils/pendingBlocksTree.test.ts b/packages/beacon-node/test/unit/sync/utils/pendingBlocksTree.test.ts index b8d21e8dea49..5e76f31cc276 100644 --- a/packages/beacon-node/test/unit/sync/utils/pendingBlocksTree.test.ts +++ b/packages/beacon-node/test/unit/sync/utils/pendingBlocksTree.test.ts @@ -1,7 +1,5 @@ -import {ForkName} from "@lodestar/params"; -import {RootHex, ssz} from "@lodestar/types"; +import {RootHex} from "@lodestar/types"; import {describe, expect, it} from "vitest"; -import {BlockInputPreData} from "../../../../src/chain/blocks/blockInput/blockInput.js"; import { BlockInputSyncCacheItem, PendingBlockInput, From 8576a2b5ff06f62460d958f98119c46912586422 Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Wed, 27 Aug 2025 21:03:28 +0700 Subject: [PATCH 069/173] test: unit testing downloadByRoot --- .../src/sync/utils/downloadByRoot.ts | 2 +- .../unit/sync/utils/downloadByRoot.test.ts | 140 ++++++++++++++++-- 2 files changed, 131 insertions(+), 11 deletions(-) diff --git a/packages/beacon-node/src/sync/utils/downloadByRoot.ts b/packages/beacon-node/src/sync/utils/downloadByRoot.ts index 25c7aaf3086e..3035535cba8a 100644 --- a/packages/beacon-node/src/sync/utils/downloadByRoot.ts +++ b/packages/beacon-node/src/sync/utils/downloadByRoot.ts @@ -341,7 +341,7 @@ export async function fetchGetBlobsV1AndBuildSidecars({ const enginedResponse = await executionEngine.getBlobs( forkName, - blobMeta.map(({versionedHash: versionHash}) => versionHash) + blobMeta.map(({versionedHash}) => versionedHash) ); if (!enginedResponse.length) { diff --git a/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts b/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts index 3dbceb455e28..17c71940c573 100644 --- a/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts +++ b/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts @@ -1,5 +1,5 @@ import {randomBytes} from "node:crypto"; -import {BYTES_PER_CELL, BYTES_PER_PROOF} from "@crate-crypto/node-eth-kzg"; +import {BYTES_PER_BLOB, BYTES_PER_CELL, BYTES_PER_COMMITMENT, BYTES_PER_PROOF} from "@crate-crypto/node-eth-kzg"; import {ForkName, NUMBER_OF_COLUMNS} from "@lodestar/params"; import {deneb, fulu, ssz} from "@lodestar/types"; import {prettyBytes} from "@lodestar/utils"; @@ -25,6 +25,7 @@ import { fetchBlobsByRoot, // fetchByRoot, fetchColumnsByRoot, + fetchGetBlobsV1AndBuildSidecars, // fetchGetBlobsV1AndBuildSidecars, fetchGetBlobsV2AndBuildSidecars, validateBlobs, @@ -220,20 +221,135 @@ describe("downloadByRoot.ts", () => { // }); describe("fetchGetBlobsV1AndBuildSidecars", () => { - it("should build blob sidecars from execution 
engine response", () => { - // Test successful sidecar building from execution engine blobs + let denebBlockWithColumns: ReturnType; + let blobsAndProofs: deneb.BlobAndProof[]; + let blobMeta: BlobMeta[]; + const forkName = ForkName.deneb; + + beforeEach(() => { + denebBlockWithColumns = generateBlockWithBlobSidecars({forkName: ForkName.fulu, count: 6}); + blobsAndProofs = denebBlockWithColumns.blobSidecars.map(({blob, kzgProof}) => ({blob, proof: kzgProof})); + blobMeta = denebBlockWithColumns.versionedHashes.map((versionedHash, index) => ({index, versionedHash})); }); - it("should return empty array when execution engine returns no blobs", () => { - // Test when execution engine returns empty response + afterEach(() => { + vi.resetAllMocks(); }); - it("should handle partial blob response from execution engine", () => { - // Test when execution engine returns some but not all requested blobs + it("should call getBlobs with the correct arguments", async () => { + const getBlobsMock = vi.fn(() => Promise.resolve(blobsAndProofs)); + executionEngine = { + getBlobs: getBlobsMock, + } as unknown as IExecutionEngine; + + await fetchGetBlobsV1AndBuildSidecars({ + config, + forkName, + executionEngine, + block: denebBlockWithColumns.block, + blobMeta: blobMeta, + }); + + expect(getBlobsMock).toHaveBeenCalledOnce(); + expect(getBlobsMock).toHaveBeenCalledWith(forkName, denebBlockWithColumns.versionedHashes); }); - it("should correctly compute inclusion proofs for blob sidecars", () => { - // Test inclusion proof computation + it("should return empty array when execution engine returns no blobs", async () => { + const getBlobsMock = vi.fn(() => Promise.resolve([])); + executionEngine = { + getBlobs: getBlobsMock, + } as unknown as IExecutionEngine; + + const response = await fetchGetBlobsV1AndBuildSidecars({ + config, + forkName, + executionEngine, + block: denebBlockWithColumns.block, + blobMeta: blobMeta, + }); + expect(response).toEqual([]); + }); + + it("should build valid blob sidecars from execution engine response", async () => { + const getBlobsMock = vi.fn(() => Promise.resolve(blobsAndProofs)); + executionEngine = { + getBlobs: getBlobsMock, + } as unknown as IExecutionEngine; + + const response = await fetchGetBlobsV1AndBuildSidecars({ + config, + forkName, + executionEngine, + block: denebBlockWithColumns.block, + blobMeta: blobMeta, + }); + + expect(getBlobsMock).toHaveBeenCalledOnce(); + expect(response).toBeDefined(); + expect(response).toBeInstanceOf(Array); + expect(response.length).toEqual(blobsAndProofs.length); + for (const blobSidecar of response) { + blobSidecar.kzgCommitmentInclusionProof; + expect(blobSidecar).toHaveProperty("index"); + expect(blobSidecar.index).toBeTypeOf("number"); + + expect(blobSidecar).toHaveProperty("blob"); + expect(blobSidecar.blob).toBeInstanceOf(Uint8Array); + expect(blobSidecar.blob.length).toEqual(BYTES_PER_BLOB); + + expect(blobSidecar).toHaveProperty("kzgProof"); + expect(blobSidecar.kzgProof).toBeInstanceOf(Uint8Array); + expect(blobSidecar.kzgProof.length).toEqual(BYTES_PER_PROOF); + + expect(blobSidecar).toHaveProperty("kzgCommitment"); + expect(blobSidecar.kzgCommitment).toBeInstanceOf(Uint8Array); + expect(blobSidecar.kzgCommitment.length).toEqual(BYTES_PER_COMMITMENT); + + expect(blobSidecar).toHaveProperty("kzgCommitmentInclusionProof"); + expect(blobSidecar.kzgCommitmentInclusionProof).toBeInstanceOf(Array); + blobSidecar.kzgCommitmentInclusionProof.map((proof) => expect(proof).toBeInstanceOf(Uint8Array)); + + 
expect(blobSidecar).toHaveProperty("signedBlockHeader"); + expect(blobSidecar.signedBlockHeader.message.slot).toBe(denebBlockWithColumns.block.message.slot); + expect(blobSidecar.signedBlockHeader.message.proposerIndex).toBe( + denebBlockWithColumns.block.message.proposerIndex + ); + expect(blobSidecar.signedBlockHeader.message.parentRoot).toEqual( + denebBlockWithColumns.block.message.parentRoot + ); + expect(blobSidecar.signedBlockHeader.message.stateRoot).toEqual(denebBlockWithColumns.block.message.stateRoot); + } + + await expect( + validateBlobs({ + config, + peerIdStr, + blockRoot: denebBlockWithColumns.blockRoot, + blobSidecars: response, + blobMeta, + }) + ).resolves.toBeUndefined(); + }); + + it("should handle partial blob response from execution engine", async () => { + const engineResponse = [...blobsAndProofs]; + engineResponse[2] = null; + engineResponse[4] = null; + const getBlobsMock = vi.fn(() => Promise.resolve(engineResponse)); + executionEngine = { + getBlobs: getBlobsMock, + } as unknown as IExecutionEngine; + + const response = await fetchGetBlobsV1AndBuildSidecars({ + config, + forkName, + executionEngine, + block: denebBlockWithColumns.block, + blobMeta: blobMeta, + }); + + expect(response.length).toEqual(4); + expect(response.map(({index}) => index)).toEqual([0, 1, 3, 5]); }); }); @@ -502,7 +618,7 @@ describe("downloadByRoot.ts", () => { expect(result).toEqual([]); }); - it("should build columnSidecars from execution engine blobs", async () => { + it("should build valid columnSidecars from execution engine blobs", async () => { const getBlobsMock = vi.fn(() => Promise.resolve(blobAndProofs)); executionEngine = { getBlobs: getBlobsMock, @@ -563,6 +679,10 @@ describe("downloadByRoot.ts", () => { fuluBlockWithColumns.block.message.parentRoot ); expect(columnSidecar.signedBlockHeader.message.stateRoot).toEqual(fuluBlockWithColumns.block.message.stateRoot); + + expect( + validateColumnSidecar({config, peerIdStr, blockRoot: fuluBlockWithColumns.blockRoot, columnSidecar}) + ).toBeUndefined(); } }); }); From 9cf380b666550d3d131f1b6f31e561a3006baae9 Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Thu, 28 Aug 2025 01:50:47 +0700 Subject: [PATCH 070/173] test: testing fetchAndValidateBlobs --- .../src/sync/utils/downloadByRoot.ts | 3 +- .../unit/sync/utils/downloadByRoot.test.ts | 210 ++++++++++++++---- .../beacon-node/test/utils/blocksAndData.ts | 27 ++- 3 files changed, 191 insertions(+), 49 deletions(-) diff --git a/packages/beacon-node/src/sync/utils/downloadByRoot.ts b/packages/beacon-node/src/sync/utils/downloadByRoot.ts index 3035535cba8a..2112244649c1 100644 --- a/packages/beacon-node/src/sync/utils/downloadByRoot.ts +++ b/packages/beacon-node/src/sync/utils/downloadByRoot.ts @@ -354,12 +354,13 @@ export async function fetchGetBlobsV1AndBuildSidecars({ if (blobAndProof) { const {blob, proof} = blobAndProof; const index = blobMeta[i].index; - const kzgCommitment = block.message.body.blobKzgCommitments[i]; + const kzgCommitment = block.message.body.blobKzgCommitments[index]; const sidecar: deneb.BlobSidecar = { index, blob, kzgProof: proof, kzgCommitment, + // TODO(fulu): refactor this to only calculate the root inside these following two functions once kzgCommitmentInclusionProof: computePreFuluKzgCommitmentsInclusionProof(forkName, block.message.body, index), signedBlockHeader: signedBlockToSignedHeader(config, block), }; diff --git a/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts b/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts 
index 17c71940c573..082097eadc3d 100644 --- a/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts +++ b/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts @@ -18,6 +18,8 @@ import { DownloadByRootError, DownloadByRootErrorCode, ValidateColumnSidecarsProps, + fetchAndValidateBlobs, + fetchAndValidateBlock, // downloadByRoot, // fetchAndValidateBlobs, // fetchAndValidateBlock, @@ -39,6 +41,8 @@ import {ROOT_SIZE} from "../../../../src/util/sszBytes.js"; // import {getMockedLogger} from "../../../../test/mocks/loggerMock.js"; import { config, + gen, + generateBlock, // custodyConfig, generateBlockWithBlobSidecars, generateBlockWithColumnSidecars, @@ -180,56 +184,170 @@ describe("downloadByRoot.ts", () => { // }); // }); - // describe("fetchAndValidateBlock", () => { - // it("should successfully fetch and validate block with matching root", () => { - // // Test successful block fetch and validation - // }); + describe("fetchAndValidateBlock", () => { + let capellaBlock: ReturnType; + beforeAll(() => { + capellaBlock = generateBlock({forkName: ForkName.capella}); + }); + afterAll(() => { + vi.resetAllMocks(); + }); - // it("should throw error when no block is returned from network", () => { - // // Test MISSING_BLOCK_RESPONSE error - // }); + it("should successfully fetch and validate block with matching root", async () => { + network = { + sendBeaconBlocksByRoot: vi.fn(() => [{data: capellaBlock.block}]), + } as unknown as INetwork; - // it("should throw error when block root doesn't match requested root", () => { - // // Test MISMATCH_BLOCK_ROOT error - // }); + const response = await fetchAndValidateBlock({ + config, + network, + peerIdStr, + blockRoot: capellaBlock.blockRoot, + }); - // it("should handle network request failure", () => { - // // Test network failure scenarios - // }); - // }); + expect(response).toBe(capellaBlock.block); + }); - // describe("fetchAndValidateBlobs", () => { - // it("should successfully fetch blobs from execution engine only", () => { - // // Test when all blobs are available from execution engine - // }); + it("should throw error when no block is returned from network", async () => { + network = { + sendBeaconBlocksByRoot: vi.fn(() => []), + } as unknown as INetwork; - // it("should fetch remaining blobs from network when execution engine is incomplete", () => { - // // Test when some blobs are from execution engine, others from network - // }); + try { + await fetchAndValidateBlock({ + config, + network, + peerIdStr, + blockRoot: capellaBlock.blockRoot, + }); + } catch (err) { + expect(err).toBeInstanceOf(DownloadByRootError); + expect((err as any).type.code).toEqual(DownloadByRootErrorCode.MISSING_BLOCK_RESPONSE); + expect((err as any).type.peer).toEqual(prettyPeerIdStr); + expect((err as any).type.blockRoot).toEqual(prettyBytes(capellaBlock.blockRoot)); + } + }); - // it("should fetch all blobs from network when execution engine returns none", () => { - // // Test when execution engine returns no blobs - // }); + it("should throw error when block root doesn't match requested root", async () => { + network = { + sendBeaconBlocksByRoot: vi.fn(() => [{data: capellaBlock.block}]), + } as unknown as INetwork; - // it("should validate all fetched blobs successfully", () => { - // // Test successful blob validation - // }); + const invalidRoot = randomBytes(ROOT_SIZE); + try { + await fetchAndValidateBlock({ + config, + network, + peerIdStr, + blockRoot: invalidRoot, + }); + } catch (err) { + 
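+        // The assertions below assume fetchAndValidateBlock re-hashes the returned
+        // block and surfaces a mismatch as a DownloadByRootError tagged
+        // MISMATCH_BLOCK_ROOT. Since the try block has no expect.fail(), a compact
+        // vitest alternative that also fails when nothing throws would be:
+        //   await expect(
+        //     fetchAndValidateBlock({config, network, peerIdStr, blockRoot: invalidRoot})
+        //   ).rejects.toThrow(DownloadByRootError);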
expect(err).toBeInstanceOf(DownloadByRootError); + expect((err as any).message).toEqual("block does not match requested root"); + expect((err as any).type.code).toEqual(DownloadByRootErrorCode.MISMATCH_BLOCK_ROOT); + expect((err as any).type.peer).toEqual(prettyPeerIdStr); + expect((err as any).type.requestedBlockRoot).toEqual(prettyBytes(invalidRoot)); + expect((err as any).type.receivedBlockRoot).toEqual(prettyBytes(capellaBlock.blockRoot)); + } + }); + }); - // it("should throw error when blob validation fails", () => { - // // Test blob validation failure scenarios - // }); - // }); + describe("fetchAndValidateBlobs", () => { + const forkName = ForkName.deneb; + let denebBlockWithBlobs: ReturnType; + let blobsAndProofs: deneb.BlobAndProof[]; + let blobMeta: BlobMeta[]; + + beforeEach(() => { + denebBlockWithBlobs = generateBlockWithBlobSidecars({forkName, count: 6}); + blobsAndProofs = denebBlockWithBlobs.blobSidecars.map(({blob, kzgProof}) => ({blob, proof: kzgProof})); + blobMeta = denebBlockWithBlobs.versionedHashes.map((versionedHash, index) => ({index, versionedHash})); + }); + + afterEach(() => { + vi.resetAllMocks(); + }); + + it("should successfully fetch blobs from execution engine only", async () => { + const sendBlobSidecarsByRootMock = vi.fn(() => Promise.resolve([])); + network = { + sendBlobSidecarsByRoot: sendBlobSidecarsByRootMock, + } as unknown as INetwork; + + const getBlobsMock = vi.fn(() => Promise.resolve(blobsAndProofs.slice(0, 1))); + executionEngine = { + getBlobs: getBlobsMock, + } as unknown as IExecutionEngine; + + const response = await fetchAndValidateBlobs({ + config, + network, + executionEngine, + forkName, + peerIdStr, + blockRoot: denebBlockWithBlobs.blockRoot, + block: denebBlockWithBlobs.block, + blobMeta: blobMeta.slice(0, 1), + }); + + expect(response).toBe(denebBlockWithBlobs.blobSidecars.slice(0, 1)); + }); + + // it("should successfully fetch blobs from network only", async () => { + // const response = await fetchAndValidateBlobs({ + // config, + // network, + // executionEngine, + // forkName, + // peerIdStr, + // blockRoot, + // block, + // blobMeta, + // }); + // }); + + // it("should fetch remaining blobs from network when execution engine is incomplete", async () => { + // const response = await fetchAndValidateBlobs({ + // config, + // network, + // executionEngine, + // forkName, + // peerIdStr, + // blockRoot, + // block, + // blobMeta, + // }); + // }); + + // it("should throw error if blob validation fails", async () => { + // try { + // await fetchAndValidateBlobs({ + // config, + // network, + // executionEngine, + // forkName, + // peerIdStr, + // blockRoot, + // block, + // blobMeta, + // }); + // expect.fail("should have errored"); + // } catch (err) { + // expect(err).toBeInstanceOf(DownloadByRootError); + // } + // }); + }); describe("fetchGetBlobsV1AndBuildSidecars", () => { - let denebBlockWithColumns: ReturnType; + let denebBlockWithBlobs: ReturnType; let blobsAndProofs: deneb.BlobAndProof[]; let blobMeta: BlobMeta[]; const forkName = ForkName.deneb; beforeEach(() => { - denebBlockWithColumns = generateBlockWithBlobSidecars({forkName: ForkName.fulu, count: 6}); - blobsAndProofs = denebBlockWithColumns.blobSidecars.map(({blob, kzgProof}) => ({blob, proof: kzgProof})); - blobMeta = denebBlockWithColumns.versionedHashes.map((versionedHash, index) => ({index, versionedHash})); + denebBlockWithBlobs = generateBlockWithBlobSidecars({forkName, count: 6}); + blobsAndProofs = denebBlockWithBlobs.blobSidecars.map(({blob, kzgProof}) 
=> ({blob, proof: kzgProof})); + blobMeta = denebBlockWithBlobs.versionedHashes.map((versionedHash, index) => ({index, versionedHash})); }); afterEach(() => { @@ -246,12 +364,12 @@ describe("downloadByRoot.ts", () => { config, forkName, executionEngine, - block: denebBlockWithColumns.block, + block: denebBlockWithBlobs.block, blobMeta: blobMeta, }); expect(getBlobsMock).toHaveBeenCalledOnce(); - expect(getBlobsMock).toHaveBeenCalledWith(forkName, denebBlockWithColumns.versionedHashes); + expect(getBlobsMock).toHaveBeenCalledWith(forkName, denebBlockWithBlobs.versionedHashes); }); it("should return empty array when execution engine returns no blobs", async () => { @@ -264,7 +382,7 @@ describe("downloadByRoot.ts", () => { config, forkName, executionEngine, - block: denebBlockWithColumns.block, + block: denebBlockWithBlobs.block, blobMeta: blobMeta, }); expect(response).toEqual([]); @@ -280,7 +398,7 @@ describe("downloadByRoot.ts", () => { config, forkName, executionEngine, - block: denebBlockWithColumns.block, + block: denebBlockWithBlobs.block, blobMeta: blobMeta, }); @@ -310,21 +428,19 @@ describe("downloadByRoot.ts", () => { blobSidecar.kzgCommitmentInclusionProof.map((proof) => expect(proof).toBeInstanceOf(Uint8Array)); expect(blobSidecar).toHaveProperty("signedBlockHeader"); - expect(blobSidecar.signedBlockHeader.message.slot).toBe(denebBlockWithColumns.block.message.slot); + expect(blobSidecar.signedBlockHeader.message.slot).toBe(denebBlockWithBlobs.block.message.slot); expect(blobSidecar.signedBlockHeader.message.proposerIndex).toBe( - denebBlockWithColumns.block.message.proposerIndex - ); - expect(blobSidecar.signedBlockHeader.message.parentRoot).toEqual( - denebBlockWithColumns.block.message.parentRoot + denebBlockWithBlobs.block.message.proposerIndex ); - expect(blobSidecar.signedBlockHeader.message.stateRoot).toEqual(denebBlockWithColumns.block.message.stateRoot); + expect(blobSidecar.signedBlockHeader.message.parentRoot).toEqual(denebBlockWithBlobs.block.message.parentRoot); + expect(blobSidecar.signedBlockHeader.message.stateRoot).toEqual(denebBlockWithBlobs.block.message.stateRoot); } await expect( validateBlobs({ config, peerIdStr, - blockRoot: denebBlockWithColumns.blockRoot, + blockRoot: denebBlockWithBlobs.blockRoot, blobSidecars: response, blobMeta, }) @@ -344,7 +460,7 @@ describe("downloadByRoot.ts", () => { config, forkName, executionEngine, - block: denebBlockWithColumns.block, + block: denebBlockWithBlobs.block, blobMeta: blobMeta, }); diff --git a/packages/beacon-node/test/utils/blocksAndData.ts b/packages/beacon-node/test/utils/blocksAndData.ts index 86290fd9fb0f..723775131c42 100644 --- a/packages/beacon-node/test/utils/blocksAndData.ts +++ b/packages/beacon-node/test/utils/blocksAndData.ts @@ -78,11 +78,17 @@ function generateProposerIndex(min = 0, max = 100_000): number { return generateRandomInt(min, max); } +export type GenerateBlockProps = { + forkName: F; + slot?: Slot; + parentRoot?: Uint8Array; +}; + function generateBeaconBlock({ forkName, slot, parentRoot, -}: {forkName: F; slot?: Slot; parentRoot?: Uint8Array}): SignedBeaconBlock { +}: GenerateBlockProps): SignedBeaconBlock { const block = ssz[forkName].SignedBeaconBlock.defaultValue(); block.message.slot = slot ? slot : slots[forkName]; block.message.parentRoot = parentRoot ? 
parentRoot : Uint8Array.from(randomBytes(ROOT_SIZE)); @@ -188,6 +194,25 @@ export type BlockTestSet = { rootHex: string; }; +export function generateBlock({ + forkName, + parentRoot, + slot, +}: GenerateBlockProps): BlockTestSet { + const block = generateBeaconBlock({ + forkName, + slot, + parentRoot, + }); + const {blockRoot, rootHex} = generateRoots(forkName, block); + + return { + block, + rootHex, + blockRoot, + }; +} + export function generateChainOfBlocks({ forkName, count, From 4b7a24417141f518afddf8c5ada8f48584125b3b Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Thu, 28 Aug 2025 02:20:07 +0700 Subject: [PATCH 071/173] test: fetchAndValidateBlobs --- .../unit/sync/utils/downloadByRoot.test.ts | 160 ++++++++++++------ 1 file changed, 111 insertions(+), 49 deletions(-) diff --git a/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts b/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts index 082097eadc3d..d60648fec791 100644 --- a/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts +++ b/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts @@ -261,7 +261,11 @@ describe("downloadByRoot.ts", () => { beforeEach(() => { denebBlockWithBlobs = generateBlockWithBlobSidecars({forkName, count: 6}); blobsAndProofs = denebBlockWithBlobs.blobSidecars.map(({blob, kzgProof}) => ({blob, proof: kzgProof})); - blobMeta = denebBlockWithBlobs.versionedHashes.map((versionedHash, index) => ({index, versionedHash})); + blobMeta = denebBlockWithBlobs.versionedHashes.map((versionedHash, index) => ({ + index, + blockRoot: denebBlockWithBlobs.blockRoot, + versionedHash, + })); }); afterEach(() => { @@ -274,7 +278,7 @@ describe("downloadByRoot.ts", () => { sendBlobSidecarsByRoot: sendBlobSidecarsByRootMock, } as unknown as INetwork; - const getBlobsMock = vi.fn(() => Promise.resolve(blobsAndProofs.slice(0, 1))); + const getBlobsMock = vi.fn(() => Promise.resolve(blobsAndProofs)); executionEngine = { getBlobs: getBlobsMock, } as unknown as IExecutionEngine; @@ -287,55 +291,116 @@ describe("downloadByRoot.ts", () => { peerIdStr, blockRoot: denebBlockWithBlobs.blockRoot, block: denebBlockWithBlobs.block, - blobMeta: blobMeta.slice(0, 1), + blobMeta, }); - expect(response).toBe(denebBlockWithBlobs.blobSidecars.slice(0, 1)); + expect(response.map((b) => b.index)).toEqual(denebBlockWithBlobs.blobSidecars.map((b) => b.index)); }); - // it("should successfully fetch blobs from network only", async () => { - // const response = await fetchAndValidateBlobs({ - // config, - // network, - // executionEngine, - // forkName, - // peerIdStr, - // blockRoot, - // block, - // blobMeta, - // }); - // }); + it("should successfully fetch blobs from network only", async () => { + const sendBlobSidecarsByRootMock = vi.fn(() => Promise.resolve(denebBlockWithBlobs.blobSidecars)); + network = { + sendBlobSidecarsByRoot: sendBlobSidecarsByRootMock, + } as unknown as INetwork; - // it("should fetch remaining blobs from network when execution engine is incomplete", async () => { - // const response = await fetchAndValidateBlobs({ - // config, - // network, - // executionEngine, - // forkName, - // peerIdStr, - // blockRoot, - // block, - // blobMeta, - // }); - // }); + const getBlobsMock = vi.fn(() => Promise.resolve([])); + executionEngine = { + getBlobs: getBlobsMock, + } as unknown as IExecutionEngine; - // it("should throw error if blob validation fails", async () => { - // try { - // await fetchAndValidateBlobs({ - // config, - // network, - // executionEngine, - // forkName, - 
// peerIdStr, - // blockRoot, - // block, - // blobMeta, - // }); - // expect.fail("should have errored"); - // } catch (err) { - // expect(err).toBeInstanceOf(DownloadByRootError); - // } - // }); + const response = await fetchAndValidateBlobs({ + config, + network, + executionEngine, + forkName, + peerIdStr, + blockRoot: denebBlockWithBlobs.blockRoot, + block: denebBlockWithBlobs.block, + blobMeta, + }); + + expect(response).toEqual(denebBlockWithBlobs.blobSidecars); + }); + + it("should fetch remaining blobs from network when execution engine is incomplete", async () => { + const getBlobsMock = vi.fn(() => + Promise.resolve([blobsAndProofs[0], null, blobsAndProofs[2], null, blobsAndProofs[4], null]) + ); + executionEngine = { + getBlobs: getBlobsMock, + } as unknown as IExecutionEngine; + + const sendBlobSidecarsByRootMock = vi.fn(() => + Promise.resolve([ + denebBlockWithBlobs.blobSidecars[1], + denebBlockWithBlobs.blobSidecars[3], + denebBlockWithBlobs.blobSidecars[5], + ]) + ); + network = { + sendBlobSidecarsByRoot: sendBlobSidecarsByRootMock, + } as unknown as INetwork; + + const response = await fetchAndValidateBlobs({ + config, + network, + executionEngine, + forkName, + peerIdStr, + blockRoot: denebBlockWithBlobs.blockRoot, + block: denebBlockWithBlobs.block, + blobMeta, + }); + + expect(getBlobsMock).toHaveBeenCalledExactlyOnceWith( + forkName, + blobMeta.map(({versionedHash}) => versionedHash) + ); + expect(sendBlobSidecarsByRootMock).toHaveBeenCalledExactlyOnceWith(peerIdStr, [ + {blockRoot: denebBlockWithBlobs.blockRoot, index: 1}, + {blockRoot: denebBlockWithBlobs.blockRoot, index: 3}, + {blockRoot: denebBlockWithBlobs.blockRoot, index: 5}, + ]); + + const returnedIndices = response.map((b) => b.index); + expect(returnedIndices).toEqual(returnedIndices.sort()); + expect(returnedIndices).toEqual(denebBlockWithBlobs.blobSidecars.map((b) => b.index)); + }); + + it("should throw error if blob validation fails", async () => { + const sendBlobSidecarsByRootMock = vi.fn(() => Promise.resolve([])); + network = { + sendBlobSidecarsByRoot: sendBlobSidecarsByRootMock, + } as unknown as INetwork; + + const getBlobsMock = vi.fn(() => Promise.resolve(blobsAndProofs)); + executionEngine = { + getBlobs: getBlobsMock, + } as unknown as IExecutionEngine; + + const requestedBlockRoot = randomBytes(ROOT_SIZE); + + try { + await fetchAndValidateBlobs({ + config, + network, + executionEngine, + forkName, + peerIdStr, + blockRoot: requestedBlockRoot, + block: denebBlockWithBlobs.block, + blobMeta, + }); + expect.fail("should have errored"); + } catch (err) { + expect(err).toBeInstanceOf(DownloadByRootError); + expect((err as any).type.code).toBe(DownloadByRootErrorCode.MISMATCH_BLOCK_ROOT); + expect((err as any).type.peer).toBe(prettyPeerIdStr); + expect((err as any).type.requestedBlockRoot).toBe(prettyBytes(requestedBlockRoot)); + expect((err as any).type.receivedBlockRoot).toBe(prettyBytes(denebBlockWithBlobs.blockRoot)); + expect((err as any).message).toEqual("blobSidecar header root did not match requested blockRoot for index=0"); + } + }); }); describe("fetchGetBlobsV1AndBuildSidecars", () => { @@ -566,9 +631,6 @@ describe("downloadByRoot.ts", () => { it("should throw error for mismatched block root in blob header", async () => { const requestedBlockRoot = new Uint8Array(ROOT_SIZE).fill(0xac); - const headerRoot = config - .getForkTypes(blobSidecars[0].signedBlockHeader.message.slot) - .BeaconBlockHeader.hashTreeRoot(blobSidecars[0].signedBlockHeader.message); try { await validateBlobs({ 
config, @@ -582,7 +644,7 @@ describe("downloadByRoot.ts", () => { expect((err as any).type.code).toBe(DownloadByRootErrorCode.MISMATCH_BLOCK_ROOT); expect((err as any).type.peer).toBe(prettyPeerIdStr); expect((err as any).type.requestedBlockRoot).toBe(prettyBytes(requestedBlockRoot)); - expect((err as any).type.receivedBlockRoot).toBe(prettyBytes(headerRoot)); + expect((err as any).type.receivedBlockRoot).toBe(prettyBytes(denebBlockWithBlobs.blockRoot)); expect((err as any).message).toEqual("blobSidecar header root did not match requested blockRoot for index=0"); } }); From 2fa46f1987ec27deaa23f4ab8a5f12712dd08d31 Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Thu, 28 Aug 2025 02:41:04 +0700 Subject: [PATCH 072/173] test: fetchAndValidateBlobs --- .../src/sync/utils/downloadByRoot.ts | 23 +++++++--- .../unit/sync/utils/downloadByRoot.test.ts | 45 +++++++++++++++++++ 2 files changed, 61 insertions(+), 7 deletions(-) diff --git a/packages/beacon-node/src/sync/utils/downloadByRoot.ts b/packages/beacon-node/src/sync/utils/downloadByRoot.ts index 2112244649c1..4127b7a01c23 100644 --- a/packages/beacon-node/src/sync/utils/downloadByRoot.ts +++ b/packages/beacon-node/src/sync/utils/downloadByRoot.ts @@ -300,13 +300,22 @@ export async function fetchAndValidateBlobs({ block, blobMeta, }: FetchByRootAndValidateBlobsProps): Promise { - const blobSidecars = await fetchGetBlobsV1AndBuildSidecars({ - config, - executionEngine, - forkName, - block, - blobMeta, - }); + let blobSidecars: deneb.BlobSidecars = []; + try { + blobSidecars = await fetchGetBlobsV1AndBuildSidecars({ + config, + executionEngine, + forkName, + block, + blobMeta, + }); + } catch (err) { + network.logger.error( + `error fetching/building blobSidecars for blockRoot=${prettyBytes(blockRoot)} via getBlobsV1`, + {}, + err as Error + ); + } // not all needed blobs were fetched via getBlobs, need to use ReqResp if (blobSidecars.length !== blobMeta.length) { diff --git a/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts b/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts index d60648fec791..44526ac38868 100644 --- a/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts +++ b/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts @@ -367,6 +367,51 @@ describe("downloadByRoot.ts", () => { expect(returnedIndices).toEqual(denebBlockWithBlobs.blobSidecars.map((b) => b.index)); }); + it("should gracefully handle getBlobsV1 failure", async () => { + const rejectedError = new Error("TESTING_ERROR"); + const getBlobsMock = vi.fn(() => Promise.reject(rejectedError)); + executionEngine = { + getBlobs: getBlobsMock, + } as unknown as IExecutionEngine; + + const sendBlobSidecarsByRootMock = vi.fn(() => Promise.resolve(denebBlockWithBlobs.blobSidecars)); + const loggerMock = { + error: vi.fn(), + }; + network = { + logger: loggerMock, + sendBlobSidecarsByRoot: sendBlobSidecarsByRootMock, + } as unknown as INetwork; + + const response = await fetchAndValidateBlobs({ + config, + network, + executionEngine, + forkName, + peerIdStr, + blockRoot: denebBlockWithBlobs.blockRoot, + block: denebBlockWithBlobs.block, + blobMeta, + }); + expect(getBlobsMock).toHaveBeenCalledExactlyOnceWith( + forkName, + blobMeta.map(({versionedHash}) => versionedHash) + ); + expect(loggerMock.error).toHaveBeenCalledExactlyOnceWith( + `error fetching/building blobSidecars for blockRoot=${prettyBytes(denebBlockWithBlobs.blockRoot)} via getBlobsV1`, + {}, + rejectedError + ); + 
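+      // Expected fallback under test: the getBlobsV1 rejection is logged and swallowed,
+      // after which every blob is requested over ReqResp as {blockRoot, index} pairs,
+      // one per entry in blobMeta (mirrored by the assertion below).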
expect(sendBlobSidecarsByRootMock).toHaveBeenCalledExactlyOnceWith( + peerIdStr, + denebBlockWithBlobs.blobSidecars.map((b) => ({ + blockRoot: denebBlockWithBlobs.blockRoot, + index: b.index, + })) + ); + expect(response).toEqual(denebBlockWithBlobs.blobSidecars); + }); + it("should throw error if blob validation fails", async () => { const sendBlobSidecarsByRootMock = vi.fn(() => Promise.resolve([])); network = { From 29bc78b113d1eb411a14713cbeaf90540b5d52f0 Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Thu, 28 Aug 2025 03:25:07 +0700 Subject: [PATCH 073/173] test: fetchAndValidateColumns --- .../src/sync/utils/downloadByRoot.ts | 4 +- .../unit/sync/utils/downloadByRoot.test.ts | 391 ++++++++++++++++-- 2 files changed, 364 insertions(+), 31 deletions(-) diff --git a/packages/beacon-node/src/sync/utils/downloadByRoot.ts b/packages/beacon-node/src/sync/utils/downloadByRoot.ts index 4127b7a01c23..fca92fb67142 100644 --- a/packages/beacon-node/src/sync/utils/downloadByRoot.ts +++ b/packages/beacon-node/src/sync/utils/downloadByRoot.ts @@ -473,7 +473,7 @@ export async function fetchAndValidateColumns({ blockRoot, columnMeta, }: FetchByRootAndValidateColumnsProps): Promise { - let columnSidecars: fulu.DataColumnSidecars = []; + let columnSidecars: fulu.DataColumnSidecars | null = []; try { columnSidecars = await fetchGetBlobsV2AndBuildSidecars({ config, @@ -490,7 +490,7 @@ export async function fetchAndValidateColumns({ ); } - if (columnSidecars.length) { + if (columnSidecars?.length) { // limit reconstructed to only the ones we need const needed = columnSidecars.filter((c) => columnMeta.missing.includes(c.index)); // spec states that reconstructed sidecars need to be published to the network, but only requires diff --git a/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts b/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts index 44526ac38868..202cffa64fc5 100644 --- a/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts +++ b/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts @@ -2,6 +2,7 @@ import {randomBytes} from "node:crypto"; import {BYTES_PER_BLOB, BYTES_PER_CELL, BYTES_PER_COMMITMENT, BYTES_PER_PROOF} from "@crate-crypto/node-eth-kzg"; import {ForkName, NUMBER_OF_COLUMNS} from "@lodestar/params"; import {deneb, fulu, ssz} from "@lodestar/types"; +import {BlobAndProof} from "@lodestar/types/lib/deneb/types.js"; import {prettyBytes} from "@lodestar/utils"; import {afterAll, afterEach, beforeAll, beforeEach, describe, expect, it, vi} from "vitest"; import { @@ -20,6 +21,7 @@ import { ValidateColumnSidecarsProps, fetchAndValidateBlobs, fetchAndValidateBlock, + fetchAndValidateColumns, // downloadByRoot, // fetchAndValidateBlobs, // fetchAndValidateBlock, @@ -35,13 +37,13 @@ import { validateColumnSidecars, } from "../../../../src/sync/utils/downloadByRoot.js"; import {kzgCommitmentToVersionedHash} from "../../../../src/util/blobs.js"; +import {CustodyConfig} from "../../../../src/util/dataColumns.js"; // import {Clock} from "../../../../src/util/clock.js"; import {kzg} from "../../../../src/util/kzg.js"; import {ROOT_SIZE} from "../../../../src/util/sszBytes.js"; // import {getMockedLogger} from "../../../../test/mocks/loggerMock.js"; import { config, - gen, generateBlock, // custodyConfig, generateBlockWithBlobSidecars, @@ -457,7 +459,9 @@ describe("downloadByRoot.ts", () => { beforeEach(() => { denebBlockWithBlobs = generateBlockWithBlobSidecars({forkName, count: 6}); blobsAndProofs = denebBlockWithBlobs.blobSidecars.map(({blob, 
kzgProof}) => ({blob, proof: kzgProof})); - blobMeta = denebBlockWithBlobs.versionedHashes.map((versionedHash, index) => ({index, versionedHash})); + blobMeta = denebBlockWithBlobs.versionedHashes.map( + (versionedHash, index) => ({index, versionedHash}) as BlobMeta + ); }); afterEach(() => { @@ -558,7 +562,7 @@ describe("downloadByRoot.ts", () => { }); it("should handle partial blob response from execution engine", async () => { - const engineResponse = [...blobsAndProofs]; + const engineResponse: (BlobAndProof | null)[] = [...blobsAndProofs]; engineResponse[2] = null; engineResponse[4] = null; const getBlobsMock = vi.fn(() => Promise.resolve(engineResponse)); @@ -739,40 +743,369 @@ describe("downloadByRoot.ts", () => { }); }); - // describe("fetchAndValidateColumns", () => { - // it("should fetch columns from execution engine and validate", () => { - // // Test successful fetch from execution engine - // }); + describe("fetchAndValidateColumns", () => { + const forkName = ForkName.fulu; + let fuluBlockWithColumns: ReturnType; + let blobAndProofs: fulu.BlobAndProofV2[]; + let columnMeta: MissingColumnMeta; + let versionedHashes: Uint8Array[]; + let custodyConfig: CustodyConfig; - // it("should gracefully handle executionEngine errors", () => { - // // Test needToPublish logic with custody configuration - // }); + beforeEach(() => { + fuluBlockWithColumns = generateBlockWithColumnSidecars({forkName, returnBlobs: true}); + // biome-ignore lint/style/noNonNullAssertion: returnBlobs = true + const blobs = fuluBlockWithColumns.blobs!; + blobAndProofs = blobs + .map((b) => kzg.computeCellsAndKzgProofs(b)) + .map(({proofs}, i) => ({proofs, blob: blobs[i]})); + versionedHashes = fuluBlockWithColumns.block.message.body.blobKzgCommitments.map((c) => + kzgCommitmentToVersionedHash(c) + ); + columnMeta = { + missing: [0, 1, 2, 3, 4, 5, 6, 7], // Sample a subset of columns + versionedHashes, + }; + custodyConfig = { + custodyColumns: [0, 1, 2, 3], + sampledColumns: [0, 1, 2, 3, 4, 5, 6, 7], + } as CustodyConfig; + }); - // it("should fetch columns from network when execution engine returns empty", () => { - // // Test fallback to network when execution engine fails - // }); + afterEach(() => { + vi.resetAllMocks(); + }); - // it("should publish reconstructed columns to network", () => { - // // Test column publishing after reconstruction - // }); + it("should successfully fetch columns from execution engine only", async () => { + const sendDataColumnSidecarsByRootMock = vi.fn(() => Promise.resolve([])); + const publishDataColumnSidecarMock = vi.fn(() => Promise.resolve()); + network = { + sendDataColumnSidecarsByRoot: sendDataColumnSidecarsByRootMock, + publishDataColumnSidecar: publishDataColumnSidecarMock, + custodyConfig, + logger: { + error: vi.fn(), + }, + } as unknown as INetwork; - // it("should filter needed columns from reconstructed set", () => { - // // Test that only needed columns are returned - // }); + const getBlobsMock = vi.fn(() => Promise.resolve(blobAndProofs)); + executionEngine = { + getBlobs: getBlobsMock, + } as unknown as IExecutionEngine; - // it("should handle publishing errors gracefully", () => { - // // Test that publishing errors don't fail the main operation - // }); + const response = await fetchAndValidateColumns({ + config, + network, + executionEngine, + forkName, + peerIdStr, + blockRoot: fuluBlockWithColumns.blockRoot, + block: fuluBlockWithColumns.block, + columnMeta, + }); - // it("should validate columns correctly in both scenarios", () => { - // // Test 
validation works for both execution engine and network paths
-  //   });
+      expect(getBlobsMock).toHaveBeenCalledExactlyOnceWith(forkName, versionedHashes);
+      expect(sendDataColumnSidecarsByRootMock).not.toHaveBeenCalled();
+      // Should only return the columns we need (missing)
+      expect(response.map((c) => c.index)).toEqual(columnMeta.missing);
+      // Should publish columns we custody that weren't already published
+      expect(publishDataColumnSidecarMock).toHaveBeenCalled();
+    });

-  //   it("should determine correct columns to publish based on custody config", () => {
-  //     // Test needToPublish logic with custody configuration
-  //   });
+    it("should only publish columns that have not already been published", async () => {
+      const publishDataColumnSidecarMock = vi.fn(() => Promise.resolve());
+      network = {
+        sendDataColumnSidecarsByRoot: vi.fn(() => Promise.resolve([])),
+        publishDataColumnSidecar: publishDataColumnSidecarMock,
+        custodyConfig,
+        logger: {
+          error: vi.fn(),
+        },
+      } as unknown as INetwork;

-  // });
+      const getBlobsMock = vi.fn(() => Promise.resolve(blobAndProofs));
+      executionEngine = {
+        getBlobs: getBlobsMock,
+      } as unknown as IExecutionEngine;
+
+      // Columns 0, 1 are already published (not in missing)
+      // Columns 2, 3, 4, 5, 6, 7 of our sampled columns are missing and need to be fetched
+      // After reconstruction, we should publish columns 2, 3 (we custody them and they weren't published)
+      // Columns 4, 5, 6, 7 we sample but do not custody, so we don't need to publish them
+      const testColumnMeta = {
+        missing: [2, 3, 4, 5, 6, 7],
+        versionedHashes,
+      };
+
+      await fetchAndValidateColumns({
+        config,
+        network,
+        executionEngine,
+        forkName,
+        peerIdStr,
+        blockRoot: fuluBlockWithColumns.blockRoot,
+        block: fuluBlockWithColumns.block,
+        columnMeta: testColumnMeta,
+      });
+
+      // Should publish columns 2, 3 (custodied and previously missing)
+      const publishedIndices = publishDataColumnSidecarMock.mock.calls.map((call) => (call as any)[0]?.index);
+      expect(publishedIndices).toEqual([2, 3]);
+    });
+
+    it("should only return columns that are needed from reconstruction", async () => {
+      network = {
+        sendDataColumnSidecarsByRoot: vi.fn(() => Promise.resolve([])),
+        publishDataColumnSidecar: vi.fn(() => Promise.resolve()),
+        custodyConfig: {
+          custodyColumns: [0, 2, 4, 6],
+          sampledColumns: [0, 2, 4, 6, 8, 10, 12],
+        },
+        logger: {
+          error: vi.fn(),
+        },
+      } as unknown as INetwork;
+
+      const getBlobsMock = vi.fn(() => Promise.resolve(blobAndProofs));
+      executionEngine = {
+        getBlobs: getBlobsMock,
+      } as unknown as IExecutionEngine;
+
+      const missing = [0, 4, 6, 10, 12];
+      const testColumnMeta = {
+        missing, // Only need these columns
+        versionedHashes,
+      };
+
+      const response = await fetchAndValidateColumns({
+        config,
+        network,
+        executionEngine,
+        forkName,
+        peerIdStr,
+        blockRoot: fuluBlockWithColumns.blockRoot,
+        block: fuluBlockWithColumns.block,
+        columnMeta: testColumnMeta,
+      });
+
+      // Even though reconstruction produces all columns, we should only return what we need
+      expect(response.length).toBe(5);
+      expect(response.map((c) => c.index)).toEqual(missing);
+    });
+
+
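    // Illustrative sketch only (not exercising fetchAndValidateColumns itself): after a getBlobsV2
+    // reconstruction, the set of columns to publish is expected to be the intersection of the
+    // columns this node custodies with the columns that were still missing. The `custody` and
+    // `missing` arrays below are hypothetical inputs mirroring the mocks above.
+    it("sketch: publish set is the intersection of custody and missing columns", () => {
+      const custody = [0, 1, 2, 3];
+      const missing = [2, 3, 4, 5, 6, 7];
+      const toPublish = custody.filter((index) => missing.includes(index));
+      expect(toPublish).toEqual([2, 3]);
+    });
+
+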
    it("should successfully fetch columns from network only", async () => {
+      const neededColumns = fuluBlockWithColumns.columnSidecars.filter((c) => columnMeta.missing.includes(c.index));
+      const sendDataColumnSidecarsByRootMock = vi.fn(() => Promise.resolve(neededColumns));
+      network = {
+        sendDataColumnSidecarsByRoot: sendDataColumnSidecarsByRootMock,
+        publishDataColumnSidecar: vi.fn(() => Promise.resolve()),
+        custodyConfig: {
+          custodyColumns: [0, 1, 2, 3, 4, 5],
+          sampledColumns: columnMeta.missing,
+        },
+        logger: {
+          error: vi.fn(),
+        },
+      } as unknown as INetwork;
+
+      const getBlobsMock = vi.fn(() => Promise.resolve(null)); // No blobs from execution engine
+      executionEngine = {
+        getBlobs: getBlobsMock,
+      } as unknown as IExecutionEngine;
+
+      const response = await fetchAndValidateColumns({
+        config,
+        network,
+        executionEngine,
+        forkName,
+        peerIdStr,
+        blockRoot: fuluBlockWithColumns.blockRoot,
+        block: fuluBlockWithColumns.block,
+        columnMeta,
+      });
+
+      expect(getBlobsMock).toHaveBeenCalledExactlyOnceWith(forkName, versionedHashes);
+      expect(sendDataColumnSidecarsByRootMock).toHaveBeenCalledExactlyOnceWith(peerIdStr, [
+        {blockRoot: fuluBlockWithColumns.blockRoot, columns: columnMeta.missing},
+      ]);
+      expect(response.map((c) => c.index)).toEqual(columnMeta.missing);
+    });
+
+    it("should gracefully handle getBlobsV2 failure", async () => {
+      const rejectedError = new Error("TESTING_ERROR");
+      const getBlobsMock = vi.fn(() => Promise.reject(rejectedError));
+      executionEngine = {
+        getBlobs: getBlobsMock,
+      } as unknown as IExecutionEngine;
+
+      const neededColumns = fuluBlockWithColumns.columnSidecars.filter((c) => columnMeta.missing.includes(c.index));
+      const sendDataColumnSidecarsByRootMock = vi.fn(() => Promise.resolve(neededColumns));
+      const loggerMock = {
+        error: vi.fn(),
+      };
+      network = {
+        logger: loggerMock,
+        sendDataColumnSidecarsByRoot: sendDataColumnSidecarsByRootMock,
+        publishDataColumnSidecar: vi.fn(() => Promise.resolve()),
+        custodyConfig: {
+          custodyColumns: [0, 1, 2, 3, 4, 5],
+          sampledColumns: columnMeta.missing,
+        },
+      } as unknown as INetwork;
+
+      const response = await fetchAndValidateColumns({
+        config,
+        network,
+        executionEngine,
+        forkName,
+        peerIdStr,
+        blockRoot: fuluBlockWithColumns.blockRoot,
+        block: fuluBlockWithColumns.block,
+        columnMeta,
+      });
+
+      expect(getBlobsMock).toHaveBeenCalledExactlyOnceWith(forkName, versionedHashes);
+      expect(loggerMock.error).toHaveBeenCalledExactlyOnceWith(
+        `error building columnSidecars for blockRoot=${prettyBytes(fuluBlockWithColumns.blockRoot)} via getBlobsV2`,
+        {},
+        rejectedError
+      );
+      expect(sendDataColumnSidecarsByRootMock).toHaveBeenCalledExactlyOnceWith(peerIdStr, [
+        {blockRoot: fuluBlockWithColumns.blockRoot, columns: columnMeta.missing},
+      ]);
+      expect(response.map((c) => c.index)).toEqual(columnMeta.missing);
+    });
+
+    it("should throw error if column validation fails", async () => {
+      // biome-ignore lint/style/noNonNullAssertion: exists
+      const invalidColumn = ssz.fulu.DataColumnSidecar.clone(fuluBlockWithColumns.columnSidecars.at(1)!);
+      // Corrupt the inclusion proof to make validation fail
+      invalidColumn.kzgCommitmentsInclusionProof[0] = new Uint8Array(32).fill(255);
+
+      const sendDataColumnSidecarsByRootMock = vi.fn(() =>
+        Promise.resolve([
+          fuluBlockWithColumns.columnSidecars[0],
+          invalidColumn,
+          ...fuluBlockWithColumns.columnSidecars.slice(2, 6),
+        ])
+      );
+      network = {
+        sendDataColumnSidecarsByRoot: sendDataColumnSidecarsByRootMock,
+        publishDataColumnSidecar: vi.fn(() => Promise.resolve()),
+        custodyConfig: {
+          custodyColumns: [0, 1, 2, 3, 4, 5],
+          sampledColumns: [0, 1, 2, 3, 4, 5],
+        },
+        logger: {
+          error: vi.fn(),
+        },
+      } as unknown as INetwork;
+
+      const getBlobsMock = vi.fn(() => Promise.resolve([]));
+      executionEngine = {
+        getBlobs: getBlobsMock,
+      } as unknown as IExecutionEngine;
+
+      try {
+        await fetchAndValidateColumns({
+          config,
+          network,
+          executionEngine,
+          forkName,
+          peerIdStr,
+          
blockRoot: fuluBlockWithColumns.blockRoot, + block: fuluBlockWithColumns.block, + columnMeta: { + missing: [0, 1, 2, 3, 4, 5], + versionedHashes, + }, + }); + expect.fail("should have thrown error"); + } catch (err) { + expect(err).toBeInstanceOf(DownloadByRootError); + expect((err as any).type.code).toBe(DownloadByRootErrorCode.INVALID_INCLUSION_PROOF); + expect((err as any).type.peer).toBe(prettyPrintPeerIdStr(peerIdStr)); + expect((err as any).type.blockRoot).toBe(prettyBytes(fuluBlockWithColumns.blockRoot)); + } + }); + + it("should handle error when publishing reconstructed columns", async () => { + const publishError = new Error("PUBLISH_ERROR"); + const publishDataColumnSidecarMock = vi.fn(() => Promise.reject(publishError)); + const loggerMock = { + error: vi.fn(), + }; + network = { + sendDataColumnSidecarsByRoot: vi.fn(() => Promise.resolve([])), + publishDataColumnSidecar: publishDataColumnSidecarMock, + custodyConfig: { + custodyColumns: [0, 1, 2, 3], + sampledColumns: [0, 1, 2, 3, 4, 5, 6, 7], + }, + logger: loggerMock, + } as unknown as INetwork; + + const getBlobsMock = vi.fn(() => Promise.resolve(blobAndProofs)); + executionEngine = { + getBlobs: getBlobsMock, + } as unknown as IExecutionEngine; + + const response = await fetchAndValidateColumns({ + config, + network, + executionEngine, + forkName, + peerIdStr, + blockRoot: fuluBlockWithColumns.blockRoot, + block: fuluBlockWithColumns.block, + columnMeta: { + missing: [0, 1, 2, 3, 4, 5, 6, 7], + versionedHashes, + }, + }); + + // Should still return the columns even if publishing fails + expect(response.map((c) => c.index)).toEqual([0, 1, 2, 3, 4, 5, 6, 7]); + + // Should log the publishing error + expect(loggerMock.error).toHaveBeenCalledTimes(4); + expect(loggerMock.error).toHaveBeenNthCalledWith( + 1, + "Error publishing column after getBlobsV2 reconstruct", + { + index: 0, + blockRoot: prettyBytes(fuluBlockWithColumns.blockRoot), + }, + publishError + ); + expect(loggerMock.error).toHaveBeenNthCalledWith( + 2, + "Error publishing column after getBlobsV2 reconstruct", + { + index: 1, + blockRoot: prettyBytes(fuluBlockWithColumns.blockRoot), + }, + publishError + ); + expect(loggerMock.error).toHaveBeenNthCalledWith( + 3, + "Error publishing column after getBlobsV2 reconstruct", + { + index: 2, + blockRoot: prettyBytes(fuluBlockWithColumns.blockRoot), + }, + publishError + ); + expect(loggerMock.error).toHaveBeenNthCalledWith( + 4, + "Error publishing column after getBlobsV2 reconstruct", + { + index: 3, + blockRoot: prettyBytes(fuluBlockWithColumns.blockRoot), + }, + publishError + ); + }); + }); describe("fetchGetBlobsV2AndBuildSidecars", () => { let fuluBlockWithColumns: ReturnType; From 46cfe6f9aaf6bb46e4f4240cd666eb78be9536d1 Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Thu, 28 Aug 2025 03:46:32 +0700 Subject: [PATCH 074/173] fix: clean up unused comments --- .../unit/sync/utils/downloadByRoot.test.ts | 148 +----------------- 1 file changed, 1 insertion(+), 147 deletions(-) diff --git a/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts b/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts index 202cffa64fc5..a51494fa1664 100644 --- a/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts +++ b/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts @@ -5,16 +5,9 @@ import {deneb, fulu, ssz} from "@lodestar/types"; import {BlobAndProof} from "@lodestar/types/lib/deneb/types.js"; import {prettyBytes} from "@lodestar/utils"; import {afterAll, afterEach, beforeAll, 
beforeEach, describe, expect, it, vi} from "vitest"; -import { - BlobMeta, - // IBlockInput, - MissingColumnMeta, -} from "../../../../src/chain/blocks/blockInput/types.js"; -// import {ChainEventEmitter} from "../../../../src/chain/index.js"; -// import {SeenBlockInput} from "../../../../src/chain/seenCache/seenGossipBlockInput.js"; +import {BlobMeta, MissingColumnMeta} from "../../../../src/chain/blocks/blockInput/types.js"; import {IExecutionEngine} from "../../../../src/execution/index.js"; import {INetwork, prettyPrintPeerIdStr} from "../../../../src/network/index.js"; -// import {BlockInputSyncCacheItem, PendingBlockInput, PendingBlockInputStatus} from "../../../../src/sync/types.js"; import { DownloadByRootError, DownloadByRootErrorCode, @@ -22,15 +15,9 @@ import { fetchAndValidateBlobs, fetchAndValidateBlock, fetchAndValidateColumns, - // downloadByRoot, - // fetchAndValidateBlobs, - // fetchAndValidateBlock, - // fetchAndValidateColumns, fetchBlobsByRoot, - // fetchByRoot, fetchColumnsByRoot, fetchGetBlobsV1AndBuildSidecars, - // fetchGetBlobsV1AndBuildSidecars, fetchGetBlobsV2AndBuildSidecars, validateBlobs, validateColumnSidecar, @@ -38,153 +25,20 @@ import { } from "../../../../src/sync/utils/downloadByRoot.js"; import {kzgCommitmentToVersionedHash} from "../../../../src/util/blobs.js"; import {CustodyConfig} from "../../../../src/util/dataColumns.js"; -// import {Clock} from "../../../../src/util/clock.js"; import {kzg} from "../../../../src/util/kzg.js"; import {ROOT_SIZE} from "../../../../src/util/sszBytes.js"; -// import {getMockedLogger} from "../../../../test/mocks/loggerMock.js"; import { config, generateBlock, - // custodyConfig, generateBlockWithBlobSidecars, generateBlockWithColumnSidecars, - // generateChainOfBlocks, - // slots, } from "../../../utils/blocksAndData.js"; describe("downloadByRoot.ts", () => { const peerIdStr = "1234567890abcdef1234567890abcdef"; const prettyPeerIdStr = prettyPrintPeerIdStr(peerIdStr); let network: INetwork; - // let cache: SeenBlockInput; let executionEngine: IExecutionEngine; - // const logger = getMockedLogger(); - - // Test data - // let capellaBlock: SignedBeaconBlock; - // let denebBlockWithBlobs: ReturnType; - // let fuluBlockWithColumns: ReturnType; - // let blockRoot: Uint8Array; - // let rootHex: string; - - beforeAll(() => { - // Generate test blocks - // const capellaBlocks = generateChainOfBlocks({forkName: ForkName.capella, count: 1}); - // capellaBlock = capellaBlocks[0].block; - // denebBlockWithBlobs = generateBlockWithBlobSidecars({forkName: ForkName.deneb}); - // fuluBlockWithColumns = generateBlockWithColumnSidecars({forkName: ForkName.fulu}); - // blockRoot = denebBlockWithBlobs.blockRoot; - // rootHex = denebBlockWithBlobs.rootHex; - }); - - beforeEach(() => { - // const abortController = new AbortController(); - // const signal = abortController.signal; - // cache = new SeenBlockInput({ - // config, - // custodyConfig, - // clock: new Clock({config, signal, genesisTime: Math.floor(Date.now() / 1000)}), - // chainEvents: new ChainEventEmitter(), - // signal, - // metrics: null, - // logger, - // }); - // network = { - // sendBeaconBlocksByRoot: vi.fn(), - // sendBlobSidecarsByRoot: vi.fn(), - // sendDataColumnSidecarsByRoot: vi.fn(), - // publishDataColumnSidecar: vi.fn(), - // custodyConfig, - // logger, - // } as unknown as INetwork; - // executionEngine = { - // getBlobs: vi.fn(), - // } as unknown as IExecutionEngine; - }); - - // describe("downloadByRoot", () => { - // it("should successfully download block 
with blobs for post-Deneb fork", () => { - // // Test downloading a block with blob sidecars in post-Deneb fork - // }); - - // it("should successfully download block with columns for post-Fulu fork", () => { - // // Test downloading a block with column sidecars in post-Fulu fork - // }); - - // it("should successfully download block without additional data for pre-Deneb fork", () => { - // // Test downloading a simple block in pre-Deneb fork - // }); - - // it("should handle pending block input that already has block", () => { - // // Test case where cacheItem is PendingBlockInput and already has the block - // }); - - // it("should handle pending block input that needs block and data", () => { - // // Test case where cacheItem is PendingBlockInput but missing block and data - // }); - - // it("should handle non-pending cache item", () => { - // // Test case where cacheItem is not PendingBlockInput - // }); - - // it("should throw error when blob sidecars are missing for blob input", () => { - // // Test MISSING_BLOB_RESPONSE error - // }); - - // it("should throw error when column sidecars are missing for column input", () => { - // // Test MISSING_COLUMN_RESPONSE error - // }); - - // it("should return downloaded status when block has all data", () => { - // // Test status is set to downloaded when blockInput.hasBlockAndAllData() returns true - // }); - - // it("should return pending status when block is missing data", () => { - // // Test status is set to pending when blockInput.hasBlockAndAllData() returns false - // }); - // }); - - // describe("fetchByRoot", () => { - // it("should fetch block and blobs for pending block input in post-Deneb fork", () => { - // // Test fetching when cacheItem is PendingBlockInput and fork is post-Deneb - // }); - - // it("should fetch block and columns for pending block input in post-Fulu fork", () => { - // // Test fetching when cacheItem is PendingBlockInput and fork is post-Fulu - // }); - - // it("should use existing block from pending block input", () => { - // // Test when cacheItem.blockInput.hasBlock() returns true - // }); - - // it("should fetch new block when pending block input doesn't have block", () => { - // // Test when cacheItem.blockInput.hasBlock() returns false - // }); - - // it("should skip data fetching when pending block input has all data", () => { - // // Test when cacheItem.blockInput.hasAllData() returns true - // }); - - // it("should fetch blobs when pending block input is missing blob data", () => { - // // Test blob fetching for incomplete blob input - // }); - - // it("should fetch columns when pending block input is missing column data", () => { - // // Test column fetching for incomplete column input - // }); - - // it("should fetch block and blobs for non-pending cache item in post-Deneb fork", () => { - // // Test fetching for non-PendingBlockInput in post-Deneb - // }); - - // it("should fetch block and columns for non-pending cache item in post-Fulu fork", () => { - // // Test fetching for non-PendingBlockInput in post-Fulu - // }); - - // it("should fetch only block for non-pending cache item in pre-Deneb fork", () => { - // // Test fetching for non-PendingBlockInput in pre-Deneb - // }); - // }); describe("fetchAndValidateBlock", () => { let capellaBlock: ReturnType; From 7247a428721c261736686a5e9a2e379ae74a18ea Mon Sep 17 00:00:00 2001 From: Cayman Date: Wed, 27 Aug 2025 17:02:59 -0400 Subject: [PATCH 075/173] chore: fix up download by range --- .../src/sync/utils/downloadByRange.ts | 164 +++++------ 
.../unit/sync/utils/downloadByRange.test.ts | 274 ++++-------------- 2 files changed, 133 insertions(+), 305 deletions(-) diff --git a/packages/beacon-node/src/sync/utils/downloadByRange.ts b/packages/beacon-node/src/sync/utils/downloadByRange.ts index 8c931c2ebb75..3bb3d7efb280 100644 --- a/packages/beacon-node/src/sync/utils/downloadByRange.ts +++ b/packages/beacon-node/src/sync/utils/downloadByRange.ts @@ -27,6 +27,10 @@ export type DownloadByRangeResponses = { columnSidecars?: fulu.DataColumnSidecars; }; +export type ValidatedDownloadByRangeResponses = DownloadByRangeResponses & { + blockRoots?: Uint8Array[]; +}; + export type DownloadAndCacheByRangeProps = DownloadByRangeRequests & { config: ChainForkConfig; cache: SeenBlockInput; @@ -34,6 +38,7 @@ export type DownloadAndCacheByRangeProps = DownloadByRangeRequests & { logger: Logger; peerIdStr: string; daOutOfRange: boolean; + batchBlocks?: IBlockInput[]; }; export type DownloadAndCacheByRangeResults = { @@ -48,7 +53,7 @@ export type CacheByRangeResponsesProps = { cache: SeenBlockInput; syncType: RangeSyncType; peerIdStr: PeerIdStr; - responses: DownloadByRangeResponses; + responses: ValidatedDownloadByRangeResponses; batchBlocks: IBlockInput[]; }; @@ -64,9 +69,12 @@ export function cacheByRangeResponses({ const seenTimestampSec = Date.now() / 1000; const updatedBatchBlocks = [...batchBlocks]; - for (const block of responses.blocks ?? []) { + const blocks = responses.blocks ?? []; + const blockRoots = responses.blockRoots ?? []; + for (let i = 0; i < blocks.length; i++) { + const block = blocks[i]; const existing = updatedBatchBlocks.find((b) => b.slot === block.message.slot); - const blockRoot = config.getForkTypes(block.message.slot).BeaconBlock.hashTreeRoot(block.message); + const blockRoot = blockRoots[i] ?? config.getForkTypes(block.message.slot).BeaconBlock.hashTreeRoot(block.message); const blockRootHex = toRootHex(blockRoot); if (existing) { // will throw if root hex does not match (meaning we are following the wrong chain) @@ -181,18 +189,14 @@ export async function downloadByRange({ network, logger, peerIdStr, - daOutOfRange, + batchBlocks, blocksRequest, blobsRequest, columnsRequest, -}: Omit): Promise { - const slotRangeString = validateRequests({ - config, - daOutOfRange, - blocksRequest, - blobsRequest, - columnsRequest, - }); +}: Omit): Promise { + const startSlot = (blocksRequest?.startSlot ?? blobsRequest?.startSlot ?? columnsRequest?.startSlot) as number; + const count = (blocksRequest?.count ?? blobsRequest?.count ?? columnsRequest?.count) as number; + const slotRangeString = `${startSlot} - ${startSlot + count}`; let response: DownloadByRangeResponses; try { @@ -212,16 +216,18 @@ export async function downloadByRange({ }); } - validateResponses({ + const blockRoots = validateResponses({ + config, peerIdStr, slotRangeString, blocksRequest, blobsRequest, columnsRequest, + batchBlocks, ...response, }); - return response; + return {...response, blockRoots}; } /** @@ -386,6 +392,7 @@ export async function requestByRange({ * Should not be called directly. 
Only exported for unit testing purposes */ export function validateResponses({ + config, peerIdStr, slotRangeString, blocksRequest, @@ -394,17 +401,27 @@ export function validateResponses({ blocks, blobSidecars, columnSidecars, -}: DownloadByRangeRequests & DownloadByRangeResponses & {peerIdStr: string; slotRangeString: string}): void { - if (!blocks) { + batchBlocks, +}: DownloadByRangeRequests & + DownloadByRangeResponses & { + config: ChainForkConfig; + peerIdStr: string; + slotRangeString: string; + batchBlocks?: IBlockInput[]; + }): Uint8Array[] | undefined { + // Blocks are always required for blob/column validation + // If a blocksRequest is provided, blocks have just been downloaded + // If no blocksRequest is provided, batchBlocks must have been provided from cache + if (blocksRequest && !blocks) { throw new DownloadByRangeError( { code: DownloadByRangeErrorCode.MISSING_BLOCKS_RESPONSE, slotRange: slotRangeString, }, - "No blocks to validate requests against" + "No blocks request to validate requests against" ); } - if (!blocksRequest) { + if (!blocksRequest && !batchBlocks) { throw new DownloadByRangeError( { code: DownloadByRangeErrorCode.MISSING_BLOCKS_RESPONSE, @@ -414,39 +431,10 @@ export function validateResponses({ ); } - const {missingSlots, extraSlots} = compareBlockByRangeRequestAndResponse(blocksRequest, blocks); - if (missingSlots) { - throw new DownloadByRangeError( - { - code: DownloadByRangeErrorCode.MISSING_BLOCKS, - peerId: prettyPrintPeerIdStr(peerIdStr), - missingSlots: prettyPrintIndices(missingSlots), - }, - "Not all blocks included in BeaconBlocksByRange response" - ); - } - if (extraSlots) { - // extra slots array is allocated when checking requested length against returned array length. If there are no - // extras found that means there are duplicates - if (extraSlots.length === 0) { - throw new DownloadByRangeError( - { - code: DownloadByRangeErrorCode.DUPLICATE_BLOCKS, - peerId: prettyPrintPeerIdStr(peerIdStr), - }, - "Duplicate blocks in BeaconBlocksByRange response" - ); - } + // Set blocks for validation below + blocks = blocks ?? batchBlocks?.map((blockInput) => blockInput.getBlock()) ?? []; - throw new DownloadByRangeError( - { - code: DownloadByRangeErrorCode.EXTRA_BLOCKS, - peerId: prettyPrintPeerIdStr(peerIdStr), - extraSlots: prettyPrintIndices(extraSlots), - }, - "Extra blocks outside of requested range in BeaconBlocksByRange response" - ); - } + const blockRoots = blocksRequest ? validateBlockByRangeResponse(config, blocksRequest, blocks) : undefined; if (blobsRequest) { if (!blobSidecars) { @@ -458,6 +446,7 @@ export function validateResponses({ "No blobSidecars to validate against blobsRequest" ); } + const { expectedBlobCount, missingBlobCount, @@ -574,45 +563,51 @@ export function validateResponses({ }); } } + return blockRoots; } /** * Should not be called directly. 
Only exported for unit testing purposes */ -export function compareBlockByRangeRequestAndResponse( +export function validateBlockByRangeResponse( + config: ChainForkConfig, blocksRequest: phase0.BeaconBlocksByRangeRequest, blocks: SignedBeaconBlock[] -): {missingSlots?: number[]; extraSlots?: number[]} { +): Uint8Array[] { const {startSlot, count} = blocksRequest; - const slotsReceived = blocks.map((block) => block.message.slot); - - const extraSlots: number[] = []; - if (slotsReceived.length > count) { - for (const slot of slotsReceived) { - if (slot < startSlot || slot >= startSlot + count) { - extraSlots.push(slot); - } - } - return { - extraSlots, - }; + if (blocks.length > count) { + throw new DownloadByRangeError( + { + code: DownloadByRangeErrorCode.EXTRA_BLOCKS, + }, + "Extra blocks received in BeaconBlocksByRange response" + ); } - const missingSlots: number[] = []; - for (let slot = startSlot; slot < startSlot + count; slot++) { - if (!slotsReceived.includes(slot)) { - missingSlots.push(slot); - } - } + const lastValidSlot = startSlot + count; + for (let i = 0; i < blocks.length; i++) { + const slot = blocks[i].message.slot; - if (missingSlots.length) { - return { - missingSlots, - }; + if (slot > lastValidSlot) { + throw new DownloadByRangeError( + { + code: DownloadByRangeErrorCode.OUT_OF_RANGE_BLOCKS, + }, + "Blocks with slots outside of requested range in BeaconBlocksByRange response" + ); + } + if (i < blocks.length - 1 && slot >= blocks[i + 1].message.slot) { + throw new DownloadByRangeError( + { + code: DownloadByRangeErrorCode.OUT_OF_ORDER_BLOCKS, + }, + "Blocks out of order in BeaconBlocksByRange response" + ); + } } - return {}; + return blocks.map((block) => config.getForkTypes(block.message.slot).BeaconBlock.hashTreeRoot(block.message)); } type BlobComparisonResponse = { @@ -768,7 +763,8 @@ export enum DownloadByRangeErrorCode { REQ_RESP_ERROR = "DOWNLOAD_BY_RANGE_ERROR_REQ_RESP_ERROR", MISSING_BLOCKS = "DOWNLOAD_BY_RANGE_ERROR_MISSING_BLOCKS", EXTRA_BLOCKS = "DOWNLOAD_BY_RANGE_ERROR_EXTRA_BLOCKS", - DUPLICATE_BLOCKS = "DOWNLOAD_BY_RANGE_ERROR_DUPLICATE_BLOCKS", + OUT_OF_RANGE_BLOCKS = "DOWNLOAD_BY_RANGE_OUT_OF_RANGE_BLOCKS", + OUT_OF_ORDER_BLOCKS = "DOWNLOAD_BY_RANGE_OUT_OF_ORDER_BLOCKS", MISSING_BLOBS = "DOWNLOAD_BY_RANGE_ERROR_MISSING_BLOBS", EXTRA_BLOBS = "DOWNLOAD_BY_RANGE_ERROR_EXTRA_BLOBS", DUPLICATE_BLOBS = "DOWNLOAD_BY_RANGE_ERROR_DUPLICATE_BLOBS", @@ -803,6 +799,12 @@ export type DownloadByRangeErrorType = blockCount: number; dataCount: number; } + | { + code: DownloadByRangeErrorCode.OUT_OF_RANGE_BLOCKS; + } + | { + code: DownloadByRangeErrorCode.OUT_OF_ORDER_BLOCKS; + } | { code: DownloadByRangeErrorCode.REQ_RESP_ERROR; peerId: string; @@ -815,22 +817,12 @@ export type DownloadByRangeErrorType = } | { code: DownloadByRangeErrorCode.MISSING_BLOCKS; - peerId: string; - missingSlots: string; } | { code: DownloadByRangeErrorCode.EXTRA_BLOCKS; - peerId: string; - extraSlots: string; - } - | { - code: DownloadByRangeErrorCode.DUPLICATE_BLOCKS; - peerId: string; } | { code: DownloadByRangeErrorCode.EXTRA_BLOCKS; - peerId: string; - extraSlots: string; } | { code: DownloadByRangeErrorCode.MISSING_BLOBS; diff --git a/packages/beacon-node/test/unit/sync/utils/downloadByRange.test.ts b/packages/beacon-node/test/unit/sync/utils/downloadByRange.test.ts index 64064d281498..f376ca295f03 100644 --- a/packages/beacon-node/test/unit/sync/utils/downloadByRange.test.ts +++ b/packages/beacon-node/test/unit/sync/utils/downloadByRange.test.ts @@ -1,21 +1,16 @@ import {ForkName} 
from "@lodestar/params"; -import {DataAvailabilityStatus} from "@lodestar/state-transition"; import {SignedBeaconBlock, WithBytes, deneb, ssz} from "@lodestar/types"; import {Mock, beforeAll, beforeEach, describe, expect, it, vi} from "vitest"; -import {ChainEventEmitter} from "../../../../src/chain/index.js"; -import {SeenBlockInputCache} from "../../../../src/chain/seenCache/seenBlockInput.js"; import {INetwork} from "../../../../src/network/index.js"; import { + DownloadByRangeError, DownloadByRangeRequests, DownloadByRangeResponses, compareBlobsByRangeRequestAndResponse, - compareBlockByRangeRequestAndResponse, requestByRange, - validateRequests, + validateBlockByRangeResponse, } from "../../../../src/sync/utils/downloadByRange.js"; -import {Clock} from "../../../../src/util/clock.js"; -import {getMockedLogger} from "../../../../test/mocks/loggerMock.js"; -import {config, custodyConfig, generateChainOfBlockMaybeSidecars, slots} from "../../../utils/blocksAndData.js"; +import {config, generateChainOfBlockMaybeSidecars, slots} from "../../../utils/blocksAndData.js"; describe("downloadByRange", () => { const peerIdStr = "0x1234567890abcdef"; @@ -25,8 +20,6 @@ describe("downloadByRange", () => { const startSlot = slots.deneb; const count = 32; - const minBlobs = 2; - const maxBlobs = 2; let requests!: DownloadByRangeRequests; let networkResponse!: { blocks: WithBytes[]; @@ -36,10 +29,10 @@ describe("downloadByRange", () => { beforeAll(() => { // expectedBlobCount = count * minBlobs; requests = { - blocksRequest: [{startSlot, count, step: 1}], - blobsRequest: [{count, startSlot}], + blocksRequest: {startSlot, count, step: 1}, + blobsRequest: {count, startSlot}, }; - const blockAndBlobs = generateChainOfBlockMaybeSidecars(ForkName.deneb, startSlot, count, minBlobs, maxBlobs); + const blockAndBlobs = generateChainOfBlockMaybeSidecars(ForkName.deneb, count); const blobSidecars = blockAndBlobs.flatMap(({blobSidecars}) => blobSidecars); networkResponse = { blocks: blockAndBlobs.map(({block}) => ({bytes: new Uint8Array(), data: block})), @@ -72,124 +65,6 @@ describe("downloadByRange", () => { // describe("downloadAndCacheByRange", () => {}); // describe("downloadByRange", () => {}); - describe("validateRequests", () => { - it("should return a slot-range string for unavailable data", () => { - expect( - typeof validateRequests({ - config, - daOutOfRange: DataAvailabilityStatus.PreData, - blocksRequest: {startSlot: slots.capella, count: 1}, - }) === "string" - ).toBeTruthy(); - expect( - typeof validateRequests({ - config, - daOutOfRange: DataAvailabilityStatus.OutOfRange, - blocksRequest: {startSlot: slots.deneb, count: 1}, - }) === "string" - ).toBeTruthy(); - }); - it("should throw for data requests outside of the data availability window", () => { - expect(() => - validateRequests({ - config, - daOutOfRange: DataAvailabilityStatus.OutOfRange, - blocksRequest: {startSlot: slots.deneb, count: 1}, - blobsRequest: {startSlot: slots.deneb, count: 1}, - }) - ).toThrow("Cannot request data if it is not available"); - }); - it("should throw for missing data request within data availability window", () => { - expect(() => - validateRequests({ - config, - daOutOfRange: DataAvailabilityStatus.Available, - blocksRequest: {startSlot: slots.deneb, count: 1}, - }) - ).toThrow("Must request data if it is available"); - }); - it("should throw if requesting blobs and columns", () => { - expect(() => - validateRequests({ - config, - daOutOfRange: DataAvailabilityStatus.Available, - blocksRequest: {startSlot: 
slots.deneb, count: 1}, - blobsRequest: {startSlot: slots.deneb, count: 1}, - columnsRequest: {startSlot: slots.fulu, count: 1}, - }) - ).toThrow(); - }); - it("should throw for data request pre-deneb", () => { - expect(() => - validateRequests({ - config, - daOutOfRange: DataAvailabilityStatus.Available, - blocksRequest: {startSlot: slots.capella, count: 1}, - columnsRequest: {startSlot: slots.capella, count: 1}, - }) - ).toThrow("Cannot request data pre-deneb"); - }); - it("should throw for missing blobsRequest on blob-fork when data is available", () => { - expect(() => - validateRequests({ - config, - daOutOfRange: DataAvailabilityStatus.Available, - blocksRequest: {startSlot: slots.deneb, count: 1}, - columnsRequest: {startSlot: slots.deneb, count: 1}, - }) - ).toThrow("Must request blobs for blob-only forks"); - }); - it("should throw for missing columnsRequest on column-fork when data is available", () => { - expect(() => - validateRequests({ - config, - daOutOfRange: DataAvailabilityStatus.Available, - blocksRequest: {startSlot: slots.fulu, count: 1}, - blobsRequest: {startSlot: slots.fulu, count: 1}, - }) - ).toThrow("Must request columns for forks with columns"); - }); - it("should throw for mismatch block/data startSlot", () => { - expect(() => - validateRequests({ - config, - daOutOfRange: DataAvailabilityStatus.Available, - blocksRequest: {startSlot: slots.deneb, count: 1}, - blobsRequest: {startSlot: slots.deneb + 1, count: 1}, - }) - ).toThrow(); - }); - it("should throw for mismatch block/data count", () => { - expect(() => - validateRequests({ - config, - daOutOfRange: DataAvailabilityStatus.Available, - blocksRequest: {startSlot: slots.deneb, count: 1}, - blobsRequest: {startSlot: slots.deneb, count: 2}, - }) - ).toThrow(); - }); - it("should return a slot-range string for properly formatted blob-fork requests", () => { - expect( - typeof validateRequests({ - config, - daOutOfRange: DataAvailabilityStatus.Available, - blocksRequest: {startSlot: slots.deneb, count: 1}, - blobsRequest: {startSlot: slots.deneb, count: 1}, - }) === "string" - ).toBeTruthy(); - }); - it("should return a slot-range string for properly formatted column-fork requests", () => { - expect( - typeof validateRequests({ - config, - daOutOfRange: DataAvailabilityStatus.Available, - blocksRequest: {startSlot: slots.fulu, count: 1}, - columnsRequest: {startSlot: slots.fulu, count: 1}, - }) === "string" - ).toBeTruthy(); - }); - }); describe("requestByRange", () => { it("should make block requests", async () => { (network.sendBeaconBlocksByRange as Mock).mockResolvedValueOnce(networkResponse.blocks); @@ -258,117 +133,78 @@ describe("downloadByRange", () => { } }); }); - describe("compareBlockByRangeRequestAndResponse", () => { + describe("validateBlockByRangeRequest", () => { const block1 = ssz.capella.SignedBeaconBlock.defaultValue(); block1.message.slot = slots.capella; const block2 = ssz.capella.SignedBeaconBlock.defaultValue(); block2.message.slot = slots.capella + 1; + block2.message.parentRoot = config.getForkTypes(block1.message.slot).BeaconBlock.hashTreeRoot(block1.message); const block3 = ssz.capella.SignedBeaconBlock.defaultValue(); block3.message.slot = slots.capella + 2; + block3.message.parentRoot = config.getForkTypes(block2.message.slot).BeaconBlock.hashTreeRoot(block2.message); const block4 = ssz.capella.SignedBeaconBlock.defaultValue(); block4.message.slot = slots.capella + 3; + block4.message.parentRoot = config.getForkTypes(block3.message.slot).BeaconBlock.hashTreeRoot(block3.message); 
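// Link each block to its predecessor's root so these five fixture blocks form a
     // coherent chain segment rather than unrelated default-valued blocks.
     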
const block5 = ssz.capella.SignedBeaconBlock.defaultValue(); block5.message.slot = slots.capella + 4; - it("should always return an object", () => { - const response = compareBlockByRangeRequestAndResponse( - { - startSlot: slots.capella, - count: 0, - }, - [] - ); - expect(response).toBeInstanceOf(Object); - expect(Object.keys(response).length).toEqual(0); - }); + block5.message.parentRoot = config.getForkTypes(block4.message.slot).BeaconBlock.hashTreeRoot(block4.message); + it("should correctly match request with response", () => { - const response = compareBlockByRangeRequestAndResponse( + const blockRoots = validateBlockByRangeResponse( + config, { startSlot: slots.capella, count: 5, + step: 1, }, [block1, block2, block3, block4, block5] ); - expect(response).toBeInstanceOf(Object); - expect(Object.keys(response).length).toEqual(0); - }); - it("should return an empty extraSlots array for duplicates within the given range", () => { - const response = compareBlockByRangeRequestAndResponse( - { - startSlot: slots.capella, - count: 4, - }, - [block1, block2, block3, block4, block4] - ); - expect(response).toBeInstanceOf(Object); - expect(Object.keys(response).length).toEqual(1); - expect("extraSlots" in response).toBeTruthy(); - expect(response.extraSlots).toBeInstanceOf(Array); - expect(response.extraSlots.length).toEqual(0); + expect(blockRoots).toBeInstanceOf(Array); + expect(blockRoots.length).toEqual(5); }); - it("should return the extra slots if more blocks than were requested", () => { - const response = compareBlockByRangeRequestAndResponse( - { - startSlot: slots.capella, - count: 4, - }, - [block1, block2, block3, block4, block5] - ); - expect(response).toBeInstanceOf(Object); - expect(Object.keys(response).length).toEqual(1); - expect("extraSlots" in response).toBeTruthy(); - expect(response.extraSlots).toBeInstanceOf(Array); - expect(response.extraSlots.length).toEqual(1); - expect(response.extraSlots[0]).toEqual(block5.message.slot); - }); - describe("should return the missing slots if less blocks than were requested", () => { - it("beginning of range", () => { - const response = compareBlockByRangeRequestAndResponse( + it("should throw if there are duplicates within the given range", () => { + expect(() => + validateBlockByRangeResponse( + config, { startSlot: slots.capella, - count: 5, + count: 4, + step: 1, }, - [block2, block3, block4, block5] - ); - expect(response).toBeInstanceOf(Object); - expect(Object.keys(response).length).toEqual(1); - expect("missingSlots" in response).toBeTruthy(); - expect(response.missingSlots).toBeInstanceOf(Array); - expect(response.missingSlots.length).toEqual(1); - expect(response.missingSlots[0]).toEqual(block1.message.slot); - }); - it("middle of range", () => { - const response = compareBlockByRangeRequestAndResponse( + [block1, block2, block3, block4, block4] + ) + ).toThrow(DownloadByRangeError); + }); + it("should throw if more blocks than were requested", () => { + expect(() => + validateBlockByRangeResponse( + config, { startSlot: slots.capella, - count: 5, + count: 4, + step: 1, }, - [block1, block2, block4, block5] - ); - expect(response).toBeInstanceOf(Object); - expect(Object.keys(response).length).toEqual(1); - expect("missingSlots" in response).toBeTruthy(); - expect(response.missingSlots).toBeInstanceOf(Array); - expect(response.missingSlots.length).toEqual(1); - expect(response.missingSlots[0]).toEqual(block3.message.slot); - }); - it("end of range", () => { - const response = compareBlockByRangeRequestAndResponse( + [block1, 
block2, block3, block4, block5] + ) + ).toThrow(DownloadByRangeError); + }); + it("should throw if blocks are returned out of order", () => { + expect(() => + validateBlockByRangeResponse( + config, { startSlot: slots.capella, count: 5, + step: 1, }, - [block1, block2, block3, block4] - ); - expect(response).toBeInstanceOf(Object); - expect(Object.keys(response).length).toEqual(1); - expect("missingSlots" in response).toBeTruthy(); - expect(response.missingSlots).toBeInstanceOf(Array); - expect(response.missingSlots.length).toEqual(1); - expect(response.missingSlots[0]).toEqual(block5.message.slot); - }); + [block1, block3, block2, block4, block5] + ) + ).toThrow(DownloadByRangeError); }); }); describe("compareBlobsByRangeRequestAndResponse", () => { + const expectedBlocks = expected.blocks as SignedBeaconBlock[]; + const expectedBlobSidecars = expected.blobSidecars as deneb.BlobSidecars; it("should return a properly formatted object", () => { const response = compareBlobsByRangeRequestAndResponse([], []); expect(response).instanceOf(Object); @@ -385,7 +221,7 @@ describe("downloadByRange", () => { expect(response.duplicateBlobsDescription.length).toEqual(0); }); it("should identify requested blobs missing from response", () => { - const response = compareBlobsByRangeRequestAndResponse(expected.blocks, expected.blobSidecars?.slice(0, -4)); + const response = compareBlobsByRangeRequestAndResponse(expectedBlocks, expectedBlobSidecars.slice(0, -4)); expect(response.missingBlobCount).toEqual(4); expect(response.missingBlobsDescription.length).toEqual(2); const lastSlot = startSlot + count - 1; @@ -398,7 +234,7 @@ describe("downloadByRange", () => { const badBlob = ssz.deneb.BlobSidecar.clone(blob3); badBlob.signedBlockHeader.message.slot = blob2.signedBlockHeader.message.slot; badBlob.index = 3; - const response = compareBlobsByRangeRequestAndResponse(expected.blocks?.slice(0, 1), [ + const response = compareBlobsByRangeRequestAndResponse(expectedBlocks.slice(0, 1), [ blob0, blob1, blob2, @@ -406,13 +242,13 @@ describe("downloadByRange", () => { ]); expect(response.extraBlobCount).toEqual(1); expect(response.extraBlobsDescription.length).toEqual(1); - expect(response.extraBlobsDescription[0]).toEqual(`${expected.blocks[0].message.slot}[3]`); + expect(response.extraBlobsDescription[0]).toEqual(`${expectedBlocks[0].message.slot}[3]`); }); it("should identify extra blobs from blocks that were requested", () => { // biome-ignore lint/style/noNonNullAssertion: const [blob0, blob1, blob2] = expected.blobSidecars!; const badBlob = ssz.deneb.BlobSidecar.clone(blob2); - const response = compareBlobsByRangeRequestAndResponse(expected.blocks?.slice(0, 1), [ + const response = compareBlobsByRangeRequestAndResponse(expectedBlocks.slice(0, 1), [ blob0, blob1, blob2, @@ -420,16 +256,16 @@ describe("downloadByRange", () => { ]); expect(response.duplicateBlobCount).toEqual(1); expect(response.duplicateBlobsDescription.length).toEqual(1); - expect(response.duplicateBlobsDescription[0]).toEqual(`${expected.blocks[0].message.slot}[2]`); + expect(response.duplicateBlobsDescription[0]).toEqual(`${expectedBlocks[0].message.slot}[2]`); }); it("should identify extra blobs from blocks that were not requested", () => { const response = compareBlobsByRangeRequestAndResponse( - expected.blocks?.slice(0, 1), - expected.blobSidecars?.slice(0, 6) + expectedBlocks.slice(0, 1), + expectedBlobSidecars.slice(0, 6) ); expect(response.extraBlobCount).toEqual(3); expect(response.extraBlobsDescription.length).toEqual(1); - 
expect(response.extraBlobsDescription[0]).toEqual(`${expected.blocks[1].message.slot}[0,1,2]`); + expect(response.extraBlobsDescription[0]).toEqual(`${expectedBlocks[1].message.slot}[0,1,2]`); }); }); describe("validateResponse", () => { From 56742b1a8029a981545d6e355de4c31e617936e5 Mon Sep 17 00:00:00 2001 From: Cayman Date: Wed, 27 Aug 2025 17:12:47 -0400 Subject: [PATCH 076/173] chore: touch up batch states --- packages/beacon-node/src/sync/range/batch.ts | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/packages/beacon-node/src/sync/range/batch.ts b/packages/beacon-node/src/sync/range/batch.ts index b990079edbee..82077d687996 100644 --- a/packages/beacon-node/src/sync/range/batch.ts +++ b/packages/beacon-node/src/sync/range/batch.ts @@ -40,21 +40,20 @@ export type Attempt = { hash: RootHex; }; -export type BatchStateAwaitingDownload = { +export type AwaitingDownloadState = { status: BatchStatus.AwaitingDownload; blocks: IBlockInput[]; }; -export type DownloadSuccessState = - | BatchStateAwaitingDownload - | { - status: BatchStatus.AwaitingProcessing; - blocks: IBlockInput[]; - }; +export type DownloadSuccessState = { + status: BatchStatus.AwaitingProcessing; + blocks: IBlockInput[]; +}; export type BatchState = - | DownloadSuccessState + | AwaitingDownloadState | {status: BatchStatus.Downloading; peer: PeerIdStr; blocks: IBlockInput[]} + | DownloadSuccessState | {status: BatchStatus.Processing; attempt: Attempt} | {status: BatchStatus.AwaitingValidation; attempt: Attempt}; @@ -221,7 +220,6 @@ export class Batch { getBlocks(): IBlockInput[] { switch (this.state.status) { - case BatchStatus.Downloading: case BatchStatus.AwaitingValidation: case BatchStatus.Processing: throw new BatchError(this.wrongStatusErrorType(BatchStatus.AwaitingDownload)); From 4ba34a72fb90ea791d122eb80d06647711fff7a6 Mon Sep 17 00:00:00 2001 From: Cayman Date: Wed, 27 Aug 2025 18:14:35 -0400 Subject: [PATCH 077/173] chore: more work on by range --- packages/beacon-node/src/sync/range/range.ts | 1 + .../src/sync/utils/downloadByRange.ts | 239 +++++++----------- .../unit/sync/utils/downloadByRange.test.ts | 73 +----- 3 files changed, 110 insertions(+), 203 deletions(-) diff --git a/packages/beacon-node/src/sync/range/range.ts b/packages/beacon-node/src/sync/range/range.ts index 0335720bed07..55a3ea4136f6 100644 --- a/packages/beacon-node/src/sync/range/range.ts +++ b/packages/beacon-node/src/sync/range/range.ts @@ -207,6 +207,7 @@ export class RangeSync extends (EventEmitter as {new (): RangeSyncEmitter}) { logger: this.logger, peerIdStr: peer.peerId, daOutOfRange: isDaOutOfRange(this.config, batch.forkName, batch.startSlot, this.chain.clock.currentEpoch), + batchBlocks: batch.getBlocks(), ...batch.requests, }); const cached = cacheByRangeResponses({ diff --git a/packages/beacon-node/src/sync/utils/downloadByRange.ts b/packages/beacon-node/src/sync/utils/downloadByRange.ts index 3bb3d7efb280..b072d68c32c1 100644 --- a/packages/beacon-node/src/sync/utils/downloadByRange.ts +++ b/packages/beacon-node/src/sync/utils/downloadByRange.ts @@ -11,7 +11,6 @@ import { } from "../../chain/blocks/blockInput/index.js"; import {SeenBlockInput} from "../../chain/seenCache/seenGossipBlockInput.js"; import {INetwork, prettyPrintPeerIdStr} from "../../network/index.js"; -import {linspace} from "../../util/numpy.js"; import {PeerIdStr} from "../../util/peerId.js"; import {RangeSyncType} from "./remoteSyncType.js"; @@ -447,45 +446,7 @@ export function validateResponses({ ); } - const { - 
expectedBlobCount, - missingBlobCount, - missingBlobsDescription, - extraBlobCount, - extraBlobsDescription, - duplicateBlobCount, - duplicateBlobsDescription, - } = compareBlobsByRangeRequestAndResponse(blocks, blobSidecars); - - if (duplicateBlobCount > 0) { - throw new DownloadByRangeError({ - code: DownloadByRangeErrorCode.DUPLICATE_BLOBS, - peerId: prettyPrintPeerIdStr(peerIdStr), - expectedBlobCount, - duplicateBlobCount, - slotsWithIndices: duplicateBlobsDescription.join(","), - }); - } - - if (extraBlobCount > 0) { - throw new DownloadByRangeError({ - code: DownloadByRangeErrorCode.EXTRA_BLOBS, - peerId: prettyPrintPeerIdStr(peerIdStr), - expectedBlobCount, - extraBlobCount, - slotsWithIndices: extraBlobsDescription.join(","), - }); - } - - if (missingBlobCount > 0) { - throw new DownloadByRangeError({ - code: DownloadByRangeErrorCode.MISSING_BLOBS, - peerId: prettyPrintPeerIdStr(peerIdStr), - expectedBlobCount, - missingBlobCount, - slotsWithIndices: missingBlobsDescription.join(","), - }); - } + validateBlobsByRangeResponse(blocks, blobSidecars); } if (columnsRequest) { @@ -580,6 +541,8 @@ export function validateBlockByRangeResponse( throw new DownloadByRangeError( { code: DownloadByRangeErrorCode.EXTRA_BLOCKS, + expected: count, + actual: blocks.length - count, }, "Extra blocks received in BeaconBlocksByRange response" ); @@ -607,102 +570,86 @@ export function validateBlockByRangeResponse( } } - return blocks.map((block) => config.getForkTypes(block.message.slot).BeaconBlock.hashTreeRoot(block.message)); + const blockRoots = blocks.map((block) => + config.getForkTypes(block.message.slot).BeaconBlock.hashTreeRoot(block.message) + ); + for (let i = 0; i < blocks.length - 1; i++) { + // compare the block root against the next block's parent root + const blockRoot = blockRoots[i]; + const parentRoot = blocks[i + 1].message.parentRoot; + if (Buffer.compare(blockRoot, parentRoot) !== 0) { + throw new DownloadByRangeError( + { + code: DownloadByRangeErrorCode.PARENT_ROOT_MISMATCH, + parentSlot: blocks[i].message.slot, + expected: toRootHex(blockRoot), + actual: toRootHex(parentRoot), + }, + `Block parent root does not match the previous block's root in BeaconBlocksByRange response` + ); + } + } + return blockRoots; } -type BlobComparisonResponse = { - expectedBlobCount: number; - missingBlobCount: number; - extraBlobCount: number; - duplicateBlobCount: number; - missingBlobsDescription: string[]; - extraBlobsDescription: string[]; - duplicateBlobsDescription: string[]; -}; /** * Should not be called directly. 
Only exported for unit testing purposes */ -export function compareBlobsByRangeRequestAndResponse( - blocks: SignedBeaconBlock[], - blobSidecars: deneb.BlobSidecars -): BlobComparisonResponse { - let expectedBlobCount = 0; - let missingBlobCount = 0; - let extraBlobCount = 0; - let duplicateBlobCount = 0; - const missingBlobsDescription: string[] = []; - const extraBlobsDescription: string[] = []; - const duplicateBlobsDescription: string[] = []; - for (const block of blocks) { - const slot = block.message.slot; - const expectedBlobs = (block as SignedBeaconBlock).message.body.blobKzgCommitments.length; - const expectedIndices = linspace(0, expectedBlobs - 1); - expectedBlobCount += expectedBlobs; - const receivedBlobs = blobSidecars - .filter((blobSidecar) => { - return blobSidecar.signedBlockHeader.message.slot === slot; - }) - .map((blobSidecar) => blobSidecar.index); - - const missingIndices: number[] = []; - const duplicateIndices: number[] = []; - for (const index of expectedIndices) { - if (!receivedBlobs.includes(index)) { - missingIndices.push(index); - } - if (receivedBlobs.filter((blobIndex) => blobIndex === index).length > 1) { - duplicateIndices.push(index); - } - } - if (missingIndices.length > 0) { - missingBlobCount += missingIndices.length; - missingBlobsDescription.push(`${slot}${prettyPrintIndices(missingIndices)}`); - } - if (duplicateIndices.length > 0) { - duplicateBlobCount += duplicateIndices.length; - duplicateBlobsDescription.push(`${slot}${prettyPrintIndices(duplicateIndices)}`); - } - - const extraIndices: number[] = []; - for (const index of receivedBlobs) { - if (!expectedIndices.includes(index)) { - extraIndices.push(index); - } - } - if (extraIndices.length > 0) { - extraBlobCount += extraIndices.length; - extraBlobsDescription.push(`${slot}${prettyPrintIndices(extraIndices)}`); - } +export function validateBlobsByRangeResponse(blocks: SignedBeaconBlock[], blobSidecars: deneb.BlobSidecars): void { + const expectedBlobCount = blocks.reduce( + (acc, block) => (block as SignedBeaconBlock).message.body.blobKzgCommitments.length + acc, + 0 + ); + if (blobSidecars.length > expectedBlobCount) { + throw new DownloadByRangeError( + { + code: DownloadByRangeErrorCode.EXTRA_BLOBS, + expected: expectedBlobCount, + actual: blobSidecars.length - expectedBlobCount, + }, + "Extra blobs received in BlobSidecarsByRange response" + ); } - - if (expectedBlobCount !== blobSidecars.length) { - const expectedSlots = blocks.map((block) => block.message.slot); - const extraBlocks = new Map(); - for (const blobSidecar of blobSidecars) { - const blobSlot = blobSidecar.signedBlockHeader.message.slot; - if (!expectedSlots.includes(blobSlot)) { - const extra = extraBlocks.get(blobSlot) ?? 
[]; - extra.push(blobSidecar.index); - extraBlocks.set(blobSlot, extra); - extraBlobCount++; + if (blobSidecars.length < expectedBlobCount) { + throw new DownloadByRangeError( + { + code: DownloadByRangeErrorCode.MISSING_BLOBS, + expected: expectedBlobCount, + actual: expectedBlobCount - blobSidecars.length, + }, + "Missing blobs in BlobSidecarsByRange response" + ); + } + // cheap sanity checks (proper validation is done in the caching step) + for (let blockIndex = 0, blobSidecarIndex = 0; blockIndex < blocks.length; blockIndex++) { + const block = blocks[blockIndex]; + const expectedBlobs = (block as SignedBeaconBlock).message.body.blobKzgCommitments.length; + for (let i = 0; i < expectedBlobs; i++, blobSidecarIndex++) { + const blobSidecar = blobSidecars[blobSidecarIndex]; + const slot = block.message.slot; + if (blobSidecar.signedBlockHeader.message.slot !== slot) { + throw new DownloadByRangeError( + { + code: DownloadByRangeErrorCode.WRONG_SLOT_BLOBS, + expected: slot, + actual: blobSidecar.signedBlockHeader.message.slot, + }, + "BlobSidecar doesn't match corresponding block in BlobSidecarsByRange response" + ); } - } - if (extraBlocks.size) { - for (const [slot, extraIndices] of extraBlocks) { - extraBlobsDescription.push(`${slot}${prettyPrintIndices(extraIndices)}`); + if (blobSidecar.index !== i) { + throw new DownloadByRangeError( + { + code: DownloadByRangeErrorCode.WRONG_INDEX_BLOBS, + slot, + expected: i, + actual: blobSidecar.index, + }, + "BlobSidecar out of order in BlobSidecarsByRange response" + ); } } } - - return { - expectedBlobCount, - missingBlobCount, - extraBlobCount, - duplicateBlobCount, - missingBlobsDescription, - extraBlobsDescription, - duplicateBlobsDescription, - }; } type ColumnComparisonResponse = { @@ -761,12 +708,14 @@ export enum DownloadByRangeErrorCode { START_SLOT_MISMATCH = "DOWNLOAD_BY_RANGE_ERROR_START_SLOT_MISMATCH", COUNT_MISMATCH = "DOWNLOAD_BY_RANGE_ERROR_COUNT_MISMATCH", REQ_RESP_ERROR = "DOWNLOAD_BY_RANGE_ERROR_REQ_RESP_ERROR", - MISSING_BLOCKS = "DOWNLOAD_BY_RANGE_ERROR_MISSING_BLOCKS", + PARENT_ROOT_MISMATCH = "DOWNLOAD_BY_RANGE_ERROR_PARENT_ROOT_MISMATCH", EXTRA_BLOCKS = "DOWNLOAD_BY_RANGE_ERROR_EXTRA_BLOCKS", OUT_OF_RANGE_BLOCKS = "DOWNLOAD_BY_RANGE_OUT_OF_RANGE_BLOCKS", OUT_OF_ORDER_BLOCKS = "DOWNLOAD_BY_RANGE_OUT_OF_ORDER_BLOCKS", MISSING_BLOBS = "DOWNLOAD_BY_RANGE_ERROR_MISSING_BLOBS", EXTRA_BLOBS = "DOWNLOAD_BY_RANGE_ERROR_EXTRA_BLOBS", + WRONG_SLOT_BLOBS = "DOWNLOAD_BY_RANGE_ERROR_WRONG_SLOT_BLOBS", + WRONG_INDEX_BLOBS = "DOWNLOAD_BY_RANGE_ERROR_WRONG_INDEX_BLOBS", DUPLICATE_BLOBS = "DOWNLOAD_BY_RANGE_ERROR_DUPLICATE_BLOBS", MISSING_COLUMNS = "DOWNLOAD_BY_RANGE_ERROR_MISSING_COLUMNS", EXTRA_COLUMNS_ALL_SLOTS = "DOWNLOAD_BY_RANGE_ERROR_EXTRA_COLUMNS_ALL_SLOTS", @@ -816,34 +765,36 @@ export type DownloadByRangeErrorType = message: string; } | { - code: DownloadByRangeErrorCode.MISSING_BLOCKS; - } - | { - code: DownloadByRangeErrorCode.EXTRA_BLOCKS; + code: DownloadByRangeErrorCode.PARENT_ROOT_MISMATCH; + parentSlot: number; + expected: string; + actual: string; } | { code: DownloadByRangeErrorCode.EXTRA_BLOCKS; + expected: number; + actual: number; } | { code: DownloadByRangeErrorCode.MISSING_BLOBS; - peerId: string; - expectedBlobCount: number; - missingBlobCount: number; - slotsWithIndices: string; + expected: number; + actual: number; } | { code: DownloadByRangeErrorCode.EXTRA_BLOBS; - peerId: string; - expectedBlobCount: number; - extraBlobCount: number; - slotsWithIndices: string; + expected: number; + actual: number; } | { - 
code: DownloadByRangeErrorCode.DUPLICATE_BLOBS; - peerId: string; - expectedBlobCount: number; - duplicateBlobCount: number; - slotsWithIndices: string; + code: DownloadByRangeErrorCode.WRONG_SLOT_BLOBS; + expected: number; + actual: number; + } + | { + code: DownloadByRangeErrorCode.WRONG_INDEX_BLOBS; + slot: number; + expected: number; + actual: number; } | { code: DownloadByRangeErrorCode.MISSING_COLUMNS; diff --git a/packages/beacon-node/test/unit/sync/utils/downloadByRange.test.ts b/packages/beacon-node/test/unit/sync/utils/downloadByRange.test.ts index f376ca295f03..f7843aa2ac7b 100644 --- a/packages/beacon-node/test/unit/sync/utils/downloadByRange.test.ts +++ b/packages/beacon-node/test/unit/sync/utils/downloadByRange.test.ts @@ -6,8 +6,8 @@ import { DownloadByRangeError, DownloadByRangeRequests, DownloadByRangeResponses, - compareBlobsByRangeRequestAndResponse, requestByRange, + validateBlobsByRangeResponse, validateBlockByRangeResponse, } from "../../../../src/sync/utils/downloadByRange.js"; import {config, generateChainOfBlockMaybeSidecars, slots} from "../../../utils/blocksAndData.js"; @@ -205,67 +205,22 @@ describe("downloadByRange", () => { describe("compareBlobsByRangeRequestAndResponse", () => { const expectedBlocks = expected.blocks as SignedBeaconBlock[]; const expectedBlobSidecars = expected.blobSidecars as deneb.BlobSidecars; - it("should return a properly formatted object", () => { - const response = compareBlobsByRangeRequestAndResponse([], []); - expect(response).instanceOf(Object); - expect(Object.keys(response).length).toEqual(7); - expect(response.expectedBlobCount).toEqual(0); - expect(response.missingBlobCount).toEqual(0); - expect(response.extraBlobCount).toEqual(0); - expect(response.duplicateBlobCount).toEqual(0); - expect(response.missingBlobsDescription).toBeInstanceOf(Array); - expect(response.missingBlobsDescription.length).toEqual(0); - expect(response.extraBlobsDescription).toBeInstanceOf(Array); - expect(response.extraBlobsDescription.length).toEqual(0); - expect(response.duplicateBlobsDescription).toBeInstanceOf(Array); - expect(response.duplicateBlobsDescription.length).toEqual(0); + it("should not throw when all blobs are present in response", () => { + expect(() => validateBlobsByRangeResponse(expectedBlocks, expectedBlobSidecars)).not.toThrow(); }); - it("should identify requested blobs missing from response", () => { - const response = compareBlobsByRangeRequestAndResponse(expectedBlocks, expectedBlobSidecars.slice(0, -4)); - expect(response.missingBlobCount).toEqual(4); - expect(response.missingBlobsDescription.length).toEqual(2); - const lastSlot = startSlot + count - 1; - expect(response.missingBlobsDescription[0]).toEqual(`${lastSlot - 1}[2]`); - expect(response.missingBlobsDescription[1]).toEqual(`${lastSlot}[0,1,2]`); - }); - it("should identify extra blobs from blocks that were requested", () => { - // biome-ignore lint/style/noNonNullAssertion: - const [blob0, blob1, blob2, blob3] = expected.blobSidecars!; - const badBlob = ssz.deneb.BlobSidecar.clone(blob3); - badBlob.signedBlockHeader.message.slot = blob2.signedBlockHeader.message.slot; - badBlob.index = 3; - const response = compareBlobsByRangeRequestAndResponse(expectedBlocks.slice(0, 1), [ - blob0, - blob1, - blob2, - badBlob, - ]); - expect(response.extraBlobCount).toEqual(1); - expect(response.extraBlobsDescription.length).toEqual(1); - expect(response.extraBlobsDescription[0]).toEqual(`${expectedBlocks[0].message.slot}[3]`); + it("should throw when blobs are missing from response", 
() => { + expect(() => validateBlobsByRangeResponse(expectedBlocks, expectedBlobSidecars.slice(0, -4))).toThrow( + DownloadByRangeError + ); }); - it("should identify extra blobs from blocks that were requested", () => { - // biome-ignore lint/style/noNonNullAssertion: - const [blob0, blob1, blob2] = expected.blobSidecars!; - const badBlob = ssz.deneb.BlobSidecar.clone(blob2); - const response = compareBlobsByRangeRequestAndResponse(expectedBlocks.slice(0, 1), [ - blob0, - blob1, - blob2, - badBlob, - ]); - expect(response.duplicateBlobCount).toEqual(1); - expect(response.duplicateBlobsDescription.length).toEqual(1); - expect(response.duplicateBlobsDescription[0]).toEqual(`${expectedBlocks[0].message.slot}[2]`); + it("should throw when extra blobs are in response", () => { + expect(() => + validateBlobsByRangeResponse(expectedBlocks.slice(0, 1), expectedBlobSidecars.concat(expectedBlobSidecars)) + ).toThrow(DownloadByRangeError); }); - it("should identify extra blobs from blocks that were not requested", () => { - const response = compareBlobsByRangeRequestAndResponse( - expectedBlocks.slice(0, 1), - expectedBlobSidecars.slice(0, 6) - ); - expect(response.extraBlobCount).toEqual(3); - expect(response.extraBlobsDescription.length).toEqual(1); - expect(response.extraBlobsDescription[0]).toEqual(`${expectedBlocks[1].message.slot}[0,1,2]`); + it("should throw when blobs are not in order", () => { + const blobSidecars = expectedBlobSidecars.slice().reverse(); + expect(() => validateBlobsByRangeResponse(expectedBlocks, blobSidecars)).toThrow(DownloadByRangeError); }); }); describe("validateResponse", () => { From d440a6acc0b475876f46958ca1df8954591b215b Mon Sep 17 00:00:00 2001 From: Cayman Date: Wed, 27 Aug 2025 18:29:51 -0400 Subject: [PATCH 078/173] chore: temporarily disable extra blobs check --- .../src/sync/utils/downloadByRange.ts | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/packages/beacon-node/src/sync/utils/downloadByRange.ts b/packages/beacon-node/src/sync/utils/downloadByRange.ts index b072d68c32c1..dc92496dc86d 100644 --- a/packages/beacon-node/src/sync/utils/downloadByRange.ts +++ b/packages/beacon-node/src/sync/utils/downloadByRange.ts @@ -600,22 +600,22 @@ export function validateBlobsByRangeResponse(blocks: SignedBeaconBlock[], blobSi (acc, block) => (block as SignedBeaconBlock).message.body.blobKzgCommitments.length + acc, 0 ); - if (blobSidecars.length > expectedBlobCount) { - throw new DownloadByRangeError( - { - code: DownloadByRangeErrorCode.EXTRA_BLOBS, - expected: expectedBlobCount, - actual: blobSidecars.length - expectedBlobCount, - }, - "Extra blobs received in BlobSidecarsByRange response" - ); - } + // if (blobSidecars.length > expectedBlobCount) { + // throw new DownloadByRangeError( + // { + // code: DownloadByRangeErrorCode.EXTRA_BLOBS, + // expected: expectedBlobCount, + // actual: blobSidecars.length, + // }, + // "Extra blobs received in BlobSidecarsByRange response" + // ); + // } if (blobSidecars.length < expectedBlobCount) { throw new DownloadByRangeError( { code: DownloadByRangeErrorCode.MISSING_BLOBS, expected: expectedBlobCount, - actual: expectedBlobCount - blobSidecars.length, + actual: blobSidecars.length, }, "Missing blobs in BlobSidecarsByRange response" ); From 8ac7c8053d8d9ffecbdc67882b1a31863cb10d52 Mon Sep 17 00:00:00 2001 From: Cayman Date: Wed, 27 Aug 2025 18:52:05 -0400 Subject: [PATCH 079/173] chore: update validate blobs input --- packages/beacon-node/src/sync/utils/downloadByRange.ts | 9 ++++++--- 
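For reference, the ordering rule exercised by the tests above is the spec requirement that blob sidecars in a ByRange response arrive in consecutive (slot, index) order, with skip slots allowed. A minimal sketch of that check, assuming a simplified BlobSidecarLike shape and a hypothetical assert function rather than Lodestar's actual types and API:

type BlobSidecarLike = {slot: number; index: number};

// Throws if sidecars are not strictly ascending by (slot, index).
// Skip slots are naturally tolerated since only relative order is checked.
function assertConsecutiveSlotIndexOrder(sidecars: BlobSidecarLike[]): void {
  for (let i = 1; i < sidecars.length; i++) {
    const prev = sidecars[i - 1];
    const curr = sidecars[i];
    const ascending = curr.slot > prev.slot || (curr.slot === prev.slot && curr.index > prev.index);
    if (!ascending) {
      throw new Error(`Blob sidecar out of order at position ${i}`);
    }
  }
}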
1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/packages/beacon-node/src/sync/utils/downloadByRange.ts b/packages/beacon-node/src/sync/utils/downloadByRange.ts index dc92496dc86d..6ac83abf7953 100644 --- a/packages/beacon-node/src/sync/utils/downloadByRange.ts +++ b/packages/beacon-node/src/sync/utils/downloadByRange.ts @@ -431,9 +431,9 @@ export function validateResponses({ } // Set blocks for validation below - blocks = blocks ?? batchBlocks?.map((blockInput) => blockInput.getBlock()) ?? []; + // blocks = blocks ?? batchBlocks?.map((blockInput) => blockInput.getBlock()) ?? []; - const blockRoots = blocksRequest ? validateBlockByRangeResponse(config, blocksRequest, blocks) : undefined; + const blockRoots = blocksRequest ? validateBlockByRangeResponse(config, blocksRequest, blocks ?? []) : undefined; if (blobsRequest) { if (!blobSidecars) { @@ -446,7 +446,10 @@ export function validateResponses({ ); } - validateBlobsByRangeResponse(blocks, blobSidecars); + validateBlobsByRangeResponse( + [...(blocks ?? []), ...(batchBlocks?.map((blockInput) => blockInput.getBlock()) ?? [])], + blobSidecars + ); } if (columnsRequest) { From 92e97c85f6f8e7c4fe3350a6b074e2571d07fadd Mon Sep 17 00:00:00 2001 From: Cayman Date: Wed, 27 Aug 2025 18:59:13 -0400 Subject: [PATCH 080/173] chore: update validate blobs input --- packages/beacon-node/src/sync/utils/downloadByRange.ts | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/packages/beacon-node/src/sync/utils/downloadByRange.ts b/packages/beacon-node/src/sync/utils/downloadByRange.ts index 6ac83abf7953..cc1da645ecd0 100644 --- a/packages/beacon-node/src/sync/utils/downloadByRange.ts +++ b/packages/beacon-node/src/sync/utils/downloadByRange.ts @@ -447,6 +447,7 @@ export function validateResponses({ } validateBlobsByRangeResponse( + blobsRequest, [...(blocks ?? []), ...(batchBlocks?.map((blockInput) => blockInput.getBlock()) ?? [])], blobSidecars ); } if (columnsRequest) { @@ -598,7 +599,14 @@ export function validateBlockByRangeResponse( /** * Should not be called directly.
Only exported for unit testing purposes */ -export function validateBlobsByRangeResponse(blocks: SignedBeaconBlock[], blobSidecars: deneb.BlobSidecars): void { +export function validateBlobsByRangeResponse( + request: deneb.BlobSidecarsByRangeRequest, + blocks: SignedBeaconBlock[], + blobSidecars: deneb.BlobSidecars +): void { + const startSlot = request.startSlot; + const endSlot = startSlot + request.count; + blocks = blocks.filter((block) => block.message.slot >= startSlot && block.message.slot <= endSlot); const expectedBlobCount = blocks.reduce( (acc, block) => (block as SignedBeaconBlock).message.body.blobKzgCommitments.length + acc, 0 From 836186e089b6f46738e973798bea0f86228523bb Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Thu, 28 Aug 2025 12:59:57 +0700 Subject: [PATCH 081/173] fix: address spacing for @nflaig --- .../test/unit/sync/utils/downloadByRange.test.ts | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/packages/beacon-node/test/unit/sync/utils/downloadByRange.test.ts b/packages/beacon-node/test/unit/sync/utils/downloadByRange.test.ts index f7843aa2ac7b..d01ad5874b33 100644 --- a/packages/beacon-node/test/unit/sync/utils/downloadByRange.test.ts +++ b/packages/beacon-node/test/unit/sync/utils/downloadByRange.test.ts @@ -76,6 +76,7 @@ describe("downloadByRange", () => { expect(network.sendBeaconBlocksByRange).toHaveBeenCalledWith(peerIdStr, requests.blocksRequest); expect(response.blocks).toEqual(expected.blocks); }); + it("should make blob requests", async () => { (network.sendBlobSidecarsByRange as Mock).mockResolvedValueOnce(networkResponse.blobSidecars); const response = await requestByRange({ @@ -86,6 +87,7 @@ describe("downloadByRange", () => { expect(network.sendBlobSidecarsByRange).toHaveBeenCalledWith(peerIdStr, requests.blobsRequest); expect(response.blobSidecars).toEqual(expected.blobSidecars); }); + // it("should make column requests", async () => { // const response = await requestByRange({ // network, @@ -95,6 +97,7 @@ describe("downloadByRange", () => { // expect(network.sendColumnSidecarsByRange).toHaveBeenCalledWith(peerIdStr, requests.columnsRequest); // expect(response.columnSidecars).toBe(expected.columnSidecars); // }); + it("should make concurrent block/blob/column requests from the same peer", async () => { (network.sendBeaconBlocksByRange as Mock).mockResolvedValueOnce(networkResponse.blocks); (network.sendBlobSidecarsByRange as Mock).mockResolvedValueOnce(networkResponse.blobSidecars); @@ -112,6 +115,7 @@ describe("downloadByRange", () => { expect(response.blobSidecars).toEqual(expected.blobSidecars); // expect(response.columnSidecars).toBe(expected.columnSidecars); }); + it("should throw if one of the calls fails", async () => { (network.sendBeaconBlocksByRange as Mock).mockResolvedValueOnce(networkResponse.blocks); const rejectionError = new Error("TEST_ERROR_MESSAGE"); @@ -162,6 +166,7 @@ describe("downloadByRange", () => { expect(blockRoots).toBeInstanceOf(Array); expect(blockRoots.length).toEqual(5); }); + it("should throw if there are duplicates within the given range", () => { expect(() => validateBlockByRangeResponse( @@ -175,6 +180,7 @@ describe("downloadByRange", () => { ) ).toThrow(DownloadByRangeError); }); + it("should throw if more blocks than were requested", () => { expect(() => validateBlockByRangeResponse( @@ -188,6 +194,7 @@ describe("downloadByRange", () => { ) ).toThrow(DownloadByRangeError); }); + it("should throw if blocks are returned out of order", () => { expect(() => validateBlockByRangeResponse( @@ -205,19 
+212,23 @@ describe("downloadByRange", () => { describe("compareBlobsByRangeRequestAndResponse", () => { const expectedBlocks = expected.blocks as SignedBeaconBlock[]; const expectedBlobSidecars = expected.blobSidecars as deneb.BlobSidecars; + it("should not throw when all blobs are present in response", () => { expect(() => validateBlobsByRangeResponse(expectedBlocks, expectedBlobSidecars)).not.toThrow(); }); + it("should throw when blobs are missing from response", () => { expect(() => validateBlobsByRangeResponse(expectedBlocks, expectedBlobSidecars.slice(0, -4))).toThrow( DownloadByRangeError ); }); + it("should throw when extra blobs are in response", () => { expect(() => validateBlobsByRangeResponse(expectedBlocks.slice(0, 1), expectedBlobSidecars.concat(expectedBlobSidecars)) ).toThrow(DownloadByRangeError); }); + it("should throw when blobs are not in order", () => { const blobSidecars = expectedBlobSidecars.slice().reverse(); expect(() => validateBlobsByRangeResponse(expectedBlocks, blobSidecars)).toThrow(DownloadByRangeError); From 11582b33ab02d90988e826d78d76985a092bf2ad Mon Sep 17 00:00:00 2001 From: Cayman Date: Thu, 28 Aug 2025 10:50:16 -0400 Subject: [PATCH 082/173] chore: fix some bugs --- .../src/chain/blocks/blockInput/blockInput.ts | 19 +++----- .../src/sync/utils/downloadByRange.ts | 47 ++++++++++--------- 2 files changed, 30 insertions(+), 36 deletions(-) diff --git a/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts b/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts index cf454a8c4724..d380d5e62998 100644 --- a/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts +++ b/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts @@ -428,19 +428,8 @@ export class BlockInputBlobs extends AbstractBlockInput blockInput.getBlock()) ?? [])], - blobSidecars - ); + const startSlot = blobsRequest.startSlot; + const endSlot = startSlot + blobsRequest.count; + // Prepend batch blocks (pre-fetched blocks) to the blocks received in this response + // This is safe because blocks are always downloaded from first to last in the range + const blobsRequestBlocks = [ + ...(batchBlocks?.map((blockInput) => blockInput.getBlock()) ?? []), + ...(blocks ?? []), + ].filter((block) => block.message.slot >= startSlot && block.message.slot <= endSlot); + + validateBlobsByRangeResponse(blobsRequestBlocks, blobSidecars); } if (columnsRequest) { @@ -600,27 +605,23 @@ export function validateBlockByRangeResponse( * Should not be called directly. 
Only exported for unit testing purposes */ export function validateBlobsByRangeResponse( - request: deneb.BlobSidecarsByRangeRequest, - blocks: SignedBeaconBlock[], + requestBlocks: SignedBeaconBlock[], blobSidecars: deneb.BlobSidecars ): void { - const startSlot = request.startSlot; - const endSlot = startSlot + request.count; - blocks = blocks.filter((block) => block.message.slot >= startSlot && block.message.slot <= endSlot); - const expectedBlobCount = blocks.reduce( + const expectedBlobCount = requestBlocks.reduce( (acc, block) => (block as SignedBeaconBlock).message.body.blobKzgCommitments.length + acc, 0 ); - // if (blobSidecars.length > expectedBlobCount) { - // throw new DownloadByRangeError( - // { - // code: DownloadByRangeErrorCode.EXTRA_BLOBS, - // expected: expectedBlobCount, - // actual: blobSidecars.length, - // }, - // "Extra blobs received in BlobSidecarsByRange response" - // ); - // } + if (blobSidecars.length > expectedBlobCount) { + throw new DownloadByRangeError( + { + code: DownloadByRangeErrorCode.EXTRA_BLOBS, + expected: expectedBlobCount, + actual: blobSidecars.length, + }, + "Extra blobs received in BlobSidecarsByRange response" + ); + } if (blobSidecars.length < expectedBlobCount) { throw new DownloadByRangeError( { @@ -632,8 +633,8 @@ export function validateBlobsByRangeResponse( ); } // cheap sanity checks (proper validation is done in the caching step) - for (let blockIndex = 0, blobSidecarIndex = 0; blockIndex < blocks.length; blockIndex++) { - const block = blocks[blockIndex]; + for (let blockIndex = 0, blobSidecarIndex = 0; blockIndex < requestBlocks.length; blockIndex++) { + const block = requestBlocks[blockIndex]; const expectedBlobs = (block as SignedBeaconBlock).message.body.blobKzgCommitments.length; for (let i = 0; i < expectedBlobs; i++, blobSidecarIndex++) { const blobSidecar = blobSidecars[blobSidecarIndex]; From cd68138762ef1891ab73dde39bc343f3db9cb18a Mon Sep 17 00:00:00 2001 From: Cayman Date: Thu, 28 Aug 2025 11:51:45 -0400 Subject: [PATCH 083/173] chore: dedupe blobsRequestBlocks --- .../beacon-node/src/sync/utils/downloadByRange.ts | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/packages/beacon-node/src/sync/utils/downloadByRange.ts b/packages/beacon-node/src/sync/utils/downloadByRange.ts index 81e69955d268..f6f5d64a3944 100644 --- a/packages/beacon-node/src/sync/utils/downloadByRange.ts +++ b/packages/beacon-node/src/sync/utils/downloadByRange.ts @@ -448,12 +448,15 @@ export function validateResponses({ const startSlot = blobsRequest.startSlot; const endSlot = startSlot + blobsRequest.count; - // Prepend batch blocks (pre-fetched blocks) to the blocks received in this response - // This is safe because blocks are always downloaded from first to last in the range - const blobsRequestBlocks = [ - ...(batchBlocks?.map((blockInput) => blockInput.getBlock()) ?? []), - ...(blocks ?? []), - ].filter((block) => block.message.slot >= startSlot && block.message.slot <= endSlot); + // Organize pre-fetched blocks plus the blocks received in this response + const blobsRequestBlocks: SignedBeaconBlock[] = []; + let lastSlot = startSlot - 1; + for (const block of [...(batchBlocks?.map((blockInput) => blockInput.getBlock()) ?? []), ...(blocks ?? 
[])]) { + if (block.message.slot >= startSlot && block.message.slot <= endSlot && block.message.slot > lastSlot) { + blobsRequestBlocks.push(block); + lastSlot = block.message.slot; + } + } validateBlobsByRangeResponse(blobsRequestBlocks, blobSidecars); } From 6073afc78aa7b95461a5702fdd51d398e1fd5652 Mon Sep 17 00:00:00 2001 From: Cayman Date: Thu, 28 Aug 2025 12:08:29 -0400 Subject: [PATCH 084/173] chore: improve log --- packages/beacon-node/src/sync/range/chain.ts | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/packages/beacon-node/src/sync/range/chain.ts b/packages/beacon-node/src/sync/range/chain.ts index 78f7a65d5b47..ab2000e7b5c0 100644 --- a/packages/beacon-node/src/sync/range/chain.ts +++ b/packages/beacon-node/src/sync/range/chain.ts @@ -460,7 +460,10 @@ export class SyncChain { }; for (const block of downloadSuccessOutput.blocks) { if (isBlockInputBlobs(block)) { - logMeta.blobCount = (logMeta.blobCount ?? 0) + block.getLogMeta().receivedBlobs; + const blockLogMeta = block.getLogMeta(); + const expectedBlobs = typeof blockLogMeta.expectedBlobs === "number" ? blockLogMeta.expectedBlobs : 0; + logMeta.expectedBlobCount = (logMeta.expecteeBlobCount ?? 0) + expectedBlobs; + logMeta.receivedBlobCount = (logMeta.receivedBlobCount ?? 0) + blockLogMeta.receivedBlobs; } else if (isBlockInputColumns(block)) { logMeta.columnCount = (logMeta.columnCount ?? 0) + block.getLogMeta().receivedColumns; } From fbe6919d28e1bce37c289d58c418f6f55e07ffd4 Mon Sep 17 00:00:00 2001 From: Cayman Date: Thu, 28 Aug 2025 12:14:56 -0400 Subject: [PATCH 085/173] chore: fix typo --- packages/beacon-node/src/sync/range/chain.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/beacon-node/src/sync/range/chain.ts b/packages/beacon-node/src/sync/range/chain.ts index ab2000e7b5c0..13a88229c8a5 100644 --- a/packages/beacon-node/src/sync/range/chain.ts +++ b/packages/beacon-node/src/sync/range/chain.ts @@ -462,7 +462,7 @@ export class SyncChain { if (isBlockInputBlobs(block)) { const blockLogMeta = block.getLogMeta(); const expectedBlobs = typeof blockLogMeta.expectedBlobs === "number" ? blockLogMeta.expectedBlobs : 0; - logMeta.expectedBlobCount = (logMeta.expecteeBlobCount ?? 0) + expectedBlobs; + logMeta.expectedBlobCount = (logMeta.expectedBlobCount ?? 0) + expectedBlobs; logMeta.receivedBlobCount = (logMeta.receivedBlobCount ?? 0) + blockLogMeta.receivedBlobs; } else if (isBlockInputColumns(block)) { logMeta.columnCount = (logMeta.columnCount ?? 
0) + block.getLogMeta().receivedColumns; From d50165508fcc321a4ffa65dd1a31d2427f26cc5b Mon Sep 17 00:00:00 2001 From: Cayman Date: Thu, 28 Aug 2025 12:49:20 -0400 Subject: [PATCH 086/173] chore: fix batch update when there are skip slots --- packages/beacon-node/src/sync/range/batch.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/beacon-node/src/sync/range/batch.ts b/packages/beacon-node/src/sync/range/batch.ts index 82077d687996..c0bafc15cde5 100644 --- a/packages/beacon-node/src/sync/range/batch.ts +++ b/packages/beacon-node/src/sync/range/batch.ts @@ -267,7 +267,7 @@ export class Batch { status: this.state.status, }); } - if (slots.size === this.count && allComplete) { + if (allComplete) { this.state = {status: BatchStatus.AwaitingProcessing, blocks}; } else { this.requests = this.getRequests(blocks); From 218d997f3c98d9ec7eae95512070a88c2ab75aa9 Mon Sep 17 00:00:00 2001 From: Cayman Date: Thu, 28 Aug 2025 17:40:16 -0400 Subject: [PATCH 087/173] chore: wip fix blob/column reqresp validation --- .../src/sync/utils/downloadByRange.ts | 316 +++++++++++------- 1 file changed, 190 insertions(+), 126 deletions(-) diff --git a/packages/beacon-node/src/sync/utils/downloadByRange.ts b/packages/beacon-node/src/sync/utils/downloadByRange.ts index f6f5d64a3944..eb6a9898ec2f 100644 --- a/packages/beacon-node/src/sync/utils/downloadByRange.ts +++ b/packages/beacon-node/src/sync/utils/downloadByRange.ts @@ -1,7 +1,7 @@ import {ChainForkConfig} from "@lodestar/config"; import {ForkPostDeneb, isForkPostDeneb, isForkPostFulu} from "@lodestar/params"; import {SignedBeaconBlock, Slot, deneb, fulu, phase0} from "@lodestar/types"; -import {LodestarError, Logger, prettyBytes, prettyPrintIndices, toRootHex} from "@lodestar/utils"; +import {LodestarError, Logger, fromHex, prettyBytes, toRootHex} from "@lodestar/utils"; import { BlockInputSource, DAType, @@ -10,7 +10,12 @@ import { isBlockInputColumns, } from "../../chain/blocks/blockInput/index.js"; import {SeenBlockInput} from "../../chain/seenCache/seenGossipBlockInput.js"; -import {INetwork, prettyPrintPeerIdStr} from "../../network/index.js"; +import {validateBlobSidecarInclusionProof, validateBlobsAndBlobProofs} from "../../chain/validation/blobSidecar.js"; +import { + verifyDataColumnSidecar, + verifyDataColumnSidecarInclusionProof, +} from "../../chain/validation/dataColumnSidecar.js"; +import {INetwork} from "../../network/index.js"; import {PeerIdStr} from "../../util/peerId.js"; import {RangeSyncType} from "./remoteSyncType.js"; @@ -215,7 +220,7 @@ export async function downloadByRange({ }); } - const blockRoots = validateResponses({ + const blockRoots = await validateResponses({ config, peerIdStr, slotRangeString, @@ -390,9 +395,8 @@ export async function requestByRange({ /** * Should not be called directly. Only exported for unit testing purposes */ -export function validateResponses({ +export async function validateResponses({ config, - peerIdStr, slotRangeString, blocksRequest, blobsRequest, @@ -407,7 +411,7 @@ export function validateResponses({ peerIdStr: string; slotRangeString: string; batchBlocks?: IBlockInput[]; - }): Uint8Array[] | undefined { + }): Promise { // Blocks are always required for blob/column validation // If a blocksRequest is provided, blocks have just been downloaded // If no blocksRequest is provided, batchBlocks must have been provided from cache @@ -433,7 +437,7 @@ export function validateResponses({ // Set blocks for validation below // blocks = blocks ?? 
batchBlocks?.map((blockInput) => blockInput.getBlock()) ?? []; - const blockRoots = blocksRequest ? validateBlockByRangeResponse(config, blocksRequest, blocks ?? []) : undefined; + const blockRoots = blocksRequest ? validateBlockByRangeResponse(config, blocksRequest, blocks ?? []) : []; if (blobsRequest) { if (!blobSidecars) { @@ -448,17 +452,33 @@ export function validateResponses({ const startSlot = blobsRequest.startSlot; const endSlot = startSlot + blobsRequest.count; - // Organize pre-fetched blocks plus the blocks received in this response + + // Organize pre-fetched blocks and the blocks received in this response, only including those in the requested slot range const blobsRequestBlocks: SignedBeaconBlock[] = []; + const blobsRequestBlockRoots: Uint8Array[] = []; let lastSlot = startSlot - 1; - for (const block of [...(batchBlocks?.map((blockInput) => blockInput.getBlock()) ?? []), ...(blocks ?? [])]) { - if (block.message.slot >= startSlot && block.message.slot <= endSlot && block.message.slot > lastSlot) { - blobsRequestBlocks.push(block); - lastSlot = block.message.slot; + if (batchBlocks) { + for (let i = 0; i < batchBlocks.length; i++) { + const blockInput = batchBlocks[i]; + if (blockInput.slot >= startSlot && blockInput.slot < endSlot && blockInput.slot > lastSlot) { + blobsRequestBlocks.push(blockInput.getBlock()); + blobsRequestBlockRoots.push(fromHex(blockInput.blockRootHex)); + lastSlot = blockInput.slot; + } + } + } + if (blocks) { + for (let i = 0; i < blocks.length; i++) { + const block = blocks[i]; + if (block.message.slot >= startSlot && block.message.slot < endSlot && block.message.slot > lastSlot) { + blobsRequestBlocks.push(block); + blobsRequestBlockRoots.push(blockRoots[i]); + lastSlot = block.message.slot; + } } } - validateBlobsByRangeResponse(blobsRequestBlocks, blobSidecars); + await validateBlobsByRangeResponse(config, blobsRequestBlocks, blobsRequestBlockRoots, blobSidecars); } if (columnsRequest) { @@ -472,69 +492,42 @@ export function validateResponses({ ); } - const {missingByIndex, extraByIndex} = compareColumnsByRangeRequestAndResponse(columnsRequest, columnSidecars); + const startSlot = columnsRequest.startSlot; + const endSlot = startSlot + columnsRequest.count; - if (extraByIndex.size > 0) { - const fullExtraColumns: number[] = []; - let extraColumnCount = 0; - const partialExtraColumns: string[] = []; - for (const [index, extraSlots] of extraByIndex) { - if (extraSlots.length === columnsRequest.count) { - fullExtraColumns.push(index); - } else { - extraColumnCount += extraSlots.length; - partialExtraColumns.push(`${index}${prettyPrintIndices(extraSlots)}`); + // Organize pre-fetched blocks and the blocks received in this response, only including those in the requested slot range + // (logic copy pasted from blobsRequest validation above) + const columnsRequestBlocks: SignedBeaconBlock[] = []; + const columnsRequestBlockRoots: Uint8Array[] = []; + let lastSlot = startSlot - 1; + if (batchBlocks) { + for (let i = 0; i < batchBlocks.length; i++) { + const blockInput = batchBlocks[i]; + if (blockInput.slot >= startSlot && blockInput.slot < endSlot && blockInput.slot > lastSlot) { + columnsRequestBlocks.push(blockInput.getBlock()); + columnsRequestBlockRoots.push(fromHex(blockInput.blockRootHex)); + lastSlot = blockInput.slot; } } - - if (fullExtraColumns.length) { - // this should be severe peer infraction - throw new DownloadByRangeError({ - code: DownloadByRangeErrorCode.EXTRA_COLUMNS_ALL_SLOTS, - peerId: prettyPrintPeerIdStr(peerIdStr), - 
extraColumns: prettyPrintIndices(fullExtraColumns), - }); - } - - // this should be a minor peer infraction? What do you think @twoeths @g11tech? - throw new DownloadByRangeError({ - code: DownloadByRangeErrorCode.EXTRA_COLUMNS_SOME_SLOTS, - peerId: prettyPrintPeerIdStr(peerIdStr), - extraColumnCount, - indicesWithSlots: partialExtraColumns.join(", "), - }); - } - - if (missingByIndex.size > 0) { - const missingPeerCustody = []; - let missingColumnCount = 0; - const indicesWithSlots = []; - for (const [index, missingSlots] of missingByIndex) { - if (missingSlots.length === columnsRequest.count) { - missingPeerCustody.push(index); - } else { - missingColumnCount += missingSlots.length; - indicesWithSlots.push(`${index}${prettyPrintIndices(missingSlots)}`); + } + if (blocks) { + for (let i = 0; i < blocks.length; i++) { + const block = blocks[i]; + if (block.message.slot >= startSlot && block.message.slot < endSlot && block.message.slot > lastSlot) { + columnsRequestBlocks.push(block); + columnsRequestBlockRoots.push(blockRoots[i]); + lastSlot = block.message.slot; } } - - if (missingPeerCustody.length) { - // this should be a severe peer infraction - throw new DownloadByRangeError({ - code: DownloadByRangeErrorCode.PEER_CUSTODY_FAILURE, - peerId: prettyPrintPeerIdStr(peerIdStr), - missingColumns: prettyPrintIndices(missingPeerCustody), - }); - } - - // this should be a minor peer infraction? What do you think @twoeths @g11tech? - throw new DownloadByRangeError({ - code: DownloadByRangeErrorCode.MISSING_COLUMNS, - peerId: prettyPrintPeerIdStr(peerIdStr), - missingColumnCount, - indicesWithSlots: indicesWithSlots.join(", "), - }); } + + await validateColumnsByRangeResponse( + config, + columnsRequest, + columnsRequestBlocks, + columnsRequestBlockRoots, + columnSidecars + ); } return blockRoots; } @@ -607,10 +600,12 @@ export function validateBlockByRangeResponse( /** * Should not be called directly. 
Only exported for unit testing purposes */ -export function validateBlobsByRangeResponse( +export async function validateBlobsByRangeResponse( + config: ChainForkConfig, requestBlocks: SignedBeaconBlock[], + requestBlockRoots: Uint8Array[], blobSidecars: deneb.BlobSidecars -): void { +): Promise { const expectedBlobCount = requestBlocks.reduce( (acc, block) => (block as SignedBeaconBlock).message.body.blobKzgCommitments.length + acc, 0 @@ -635,19 +630,24 @@ export function validateBlobsByRangeResponse( "Missing blobs in BlobSidecarsByRange response" ); } - // cheap sanity checks (proper validation is done in the caching step) + + // First loop to do cheap validation before expensive proof and blob validation below + // Check block roots, indices match expected blocks for (let blockIndex = 0, blobSidecarIndex = 0; blockIndex < requestBlocks.length; blockIndex++) { const block = requestBlocks[blockIndex]; const expectedBlobs = (block as SignedBeaconBlock).message.body.blobKzgCommitments.length; for (let i = 0; i < expectedBlobs; i++, blobSidecarIndex++) { const blobSidecar = blobSidecars[blobSidecarIndex]; + const blockRoot = config + .getForkTypes(block.message.slot) + .BeaconBlockHeader.hashTreeRoot(blobSidecar.signedBlockHeader.message); const slot = block.message.slot; - if (blobSidecar.signedBlockHeader.message.slot !== slot) { + if (Buffer.compare(requestBlockRoots[blockIndex], blockRoot) !== 0) { throw new DownloadByRangeError( { - code: DownloadByRangeErrorCode.WRONG_SLOT_BLOBS, - expected: slot, - actual: blobSidecar.signedBlockHeader.message.slot, + code: DownloadByRangeErrorCode.WRONG_BLOCK_BLOBS, + expected: toRootHex(requestBlockRoots[blockIndex]), + actual: toRootHex(blockRoot), }, "BlobSidecar doesn't match corresponding block in BlobSidecarsByRange response" ); @@ -665,50 +665,109 @@ export function validateBlobsByRangeResponse( } } } + + // Second loop to do more expensive validation after cheap checks above + for (let blockIndex = 0, blobSidecarIndex = 0; blockIndex < requestBlocks.length; blockIndex++) { + const block = requestBlocks[blockIndex]; + const expectedKzgCommitments = (block as SignedBeaconBlock).message.body.blobKzgCommitments; + const blobs = []; + const proofs = []; + for (let i = 0; i < expectedKzgCommitments.length; i++, blobSidecarIndex++) { + const blobSidecar = blobSidecars[blobSidecarIndex]; + validateBlobSidecarInclusionProof(blobSidecar); + blobs.push(blobSidecar.blob); + proofs.push(blobSidecar.kzgProof); + } + await validateBlobsAndBlobProofs(expectedKzgCommitments, blobs, proofs); + } } -type ColumnComparisonResponse = { - missingByIndex: Map; - extraByIndex: Map; -}; /** * Should not be called directly. 
Only exported for unit testing purposes */ -export function compareColumnsByRangeRequestAndResponse( - columnRequest: fulu.DataColumnSidecarsByRangeRequest, +export async function validateColumnsByRangeResponse( + config: ChainForkConfig, + request: fulu.DataColumnSidecarsByRangeRequest, + requestBlocks: SignedBeaconBlock[], + requestBlockRoots: Uint8Array[], columnSidecars: fulu.DataColumnSidecars -): ColumnComparisonResponse { - const {startSlot, count, columns: expectedColumns} = columnRequest; - - const missingByIndex = new Map(); - const extraByIndex = new Map(); - - for (let slot = startSlot; slot < startSlot + count; slot++) { - const receivedIndices = columnSidecars - .filter((columnSidecar) => columnSidecar.signedBlockHeader.message.slot === slot) - .map((columnSidecar) => columnSidecar.index); - - for (const index of receivedIndices) { - if (!expectedColumns.includes(index)) { - const extraSlots = extraByIndex.get(index) ?? []; - extraSlots.push(slot); - extraByIndex.set(index, extraSlots); +): Promise { + const expectedColumnCount = requestBlocks.reduce((acc, block) => { + return (block as SignedBeaconBlock).message.body.blobKzgCommitments.length > 0 + ? request.columns.length + acc + : acc; + }, 0); + if (columnSidecars.length > expectedColumnCount) { + throw new DownloadByRangeError( + { + code: DownloadByRangeErrorCode.EXTRA_COLUMNS, + expected: expectedColumnCount, + actual: columnSidecars.length, + }, + "Extra data columns received in DataColumnSidecarsByRange response" + ); + } + if (columnSidecars.length < expectedColumnCount) { + throw new DownloadByRangeError( + { + code: DownloadByRangeErrorCode.MISSING_COLUMNS, + expected: expectedColumnCount, + actual: columnSidecars.length, + }, + "Missing data columns in DataColumnSidecarsByRange response" + ); + } + // First loop to do cheap validation before expensive proof validation below + // Check block roots, indices match expected blocks + for (let blockIndex = 0, columnSidecarIndex = 0; blockIndex < requestBlocks.length; blockIndex++) { + const block = requestBlocks[blockIndex]; + const expectedColumns = (block as SignedBeaconBlock).message.body.blobKzgCommitments.length + ? request.columns.length + : 0; + for (let i = 0; i < expectedColumns; i++, columnSidecarIndex++) { + const columnIndex = request.columns[i]; + const columnSidecar = columnSidecars[columnSidecarIndex]; + const blockRoot = config + .getForkTypes(block.message.slot) + .BeaconBlockHeader.hashTreeRoot(columnSidecar.signedBlockHeader.message); + const slot = block.message.slot; + if (Buffer.compare(requestBlockRoots[blockIndex], blockRoot) !== 0) { + throw new DownloadByRangeError( + { + code: DownloadByRangeErrorCode.WRONG_BLOCK_COLUMNS, + expected: toRootHex(requestBlockRoots[blockIndex]), + actual: toRootHex(blockRoot), + }, + "DataColumnSidecar doesn't match corresponding block in DataColumnSidecarsByRange response" + ); } - } - - for (const index of expectedColumns) { - if (!receivedIndices.includes(index)) { - const missingSlots = missingByIndex.get(index) ?? 
[]; - missingSlots.push(slot); - missingByIndex.set(index, missingSlots); + if (columnSidecar.index !== columnIndex) { + throw new DownloadByRangeError( + { + code: DownloadByRangeErrorCode.WRONG_INDEX_COLUMNS, + slot, + expected: columnIndex, + actual: columnSidecar.index, + }, + "DataColumnSidecar out of order in DataColumnSidecarsByRange response" + ); } } } - return { - missingByIndex, - extraByIndex, - }; + // Second loop to do more expensive validation after cheap checks above + for (let blockIndex = 0, columnSidecarIndex = 0; blockIndex < requestBlocks.length; blockIndex++) { + const block = requestBlocks[blockIndex]; + const expectedColumns = (block as SignedBeaconBlock).message.body.blobKzgCommitments.length + ? request.columns.length + : 0; + for (let i = 0; i < expectedColumns; i++, columnSidecarIndex++) { + const columnSidecar = columnSidecars[columnSidecarIndex]; + verifyDataColumnSidecar(columnSidecar); + // await verifyDataColumnSidecarKzgProofs(...); + verifyDataColumnSidecarInclusionProof(columnSidecar); + } + } } export enum DownloadByRangeErrorCode { @@ -729,12 +788,13 @@ export enum DownloadByRangeErrorCode { OUT_OF_ORDER_BLOCKS = "DOWNLOAD_BY_RANGE_OUT_OF_ORDER_BLOCKS", MISSING_BLOBS = "DOWNLOAD_BY_RANGE_ERROR_MISSING_BLOBS", EXTRA_BLOBS = "DOWNLOAD_BY_RANGE_ERROR_EXTRA_BLOBS", - WRONG_SLOT_BLOBS = "DOWNLOAD_BY_RANGE_ERROR_WRONG_SLOT_BLOBS", + WRONG_BLOCK_BLOBS = "DOWNLOAD_BY_RANGE_ERROR_WRONG_BLOCK_BLOBS", WRONG_INDEX_BLOBS = "DOWNLOAD_BY_RANGE_ERROR_WRONG_INDEX_BLOBS", DUPLICATE_BLOBS = "DOWNLOAD_BY_RANGE_ERROR_DUPLICATE_BLOBS", MISSING_COLUMNS = "DOWNLOAD_BY_RANGE_ERROR_MISSING_COLUMNS", - EXTRA_COLUMNS_ALL_SLOTS = "DOWNLOAD_BY_RANGE_ERROR_EXTRA_COLUMNS_ALL_SLOTS", - EXTRA_COLUMNS_SOME_SLOTS = "DOWNLOAD_BY_RANGE_ERROR_EXTRA_COLUMNS_SOME_SLOTS", + EXTRA_COLUMNS = "DOWNLOAD_BY_RANGE_ERROR_EXTRA_COLUMNS", + WRONG_BLOCK_COLUMNS = "DOWNLOAD_BY_RANGE_ERROR_WRONG_BLOCK_COLUMNS", + WRONG_INDEX_COLUMNS = "DOWNLOAD_BY_RANGE_ERROR_WRONG_INDEX_COLUMNS", PEER_CUSTODY_FAILURE = "DOWNLOAD_BY_RANGE_ERROR_PEER_CUSTODY_FAILURE", CACHING_ERROR = "DOWNLOAD_BY_RANGE_CACHING_ERROR", MISMATCH_BLOCK_INPUT_TYPE = "DOWNLOAD_BY_RANGE_MISMATCH_BLOCK_INPUT_TYPE", @@ -801,9 +861,9 @@ export type DownloadByRangeErrorType = actual: number; } | { - code: DownloadByRangeErrorCode.WRONG_SLOT_BLOBS; - expected: number; - actual: number; + code: DownloadByRangeErrorCode.WRONG_BLOCK_BLOBS; + expected: string; + actual: string; } | { code: DownloadByRangeErrorCode.WRONG_INDEX_BLOBS; @@ -813,20 +873,24 @@ export type DownloadByRangeErrorType = } | { code: DownloadByRangeErrorCode.MISSING_COLUMNS; - peerId: string; - missingColumnCount: number; - indicesWithSlots: string; + expected: number; + actual: number; } | { - code: DownloadByRangeErrorCode.EXTRA_COLUMNS_ALL_SLOTS; - peerId: string; - extraColumns: string; + code: DownloadByRangeErrorCode.EXTRA_COLUMNS; + expected: number; + actual: number; } | { - code: DownloadByRangeErrorCode.EXTRA_COLUMNS_SOME_SLOTS; - peerId: string; - extraColumnCount: number; - indicesWithSlots: string; + code: DownloadByRangeErrorCode.WRONG_BLOCK_COLUMNS; + expected: string; + actual: string; + } + | { + code: DownloadByRangeErrorCode.WRONG_INDEX_COLUMNS; + slot: number; + expected: number; + actual: number; } | { code: DownloadByRangeErrorCode.PEER_CUSTODY_FAILURE; From 97b9426df79da557b8909ed64e1a78b7e2ff5817 Mon Sep 17 00:00:00 2001 From: twoeths <10568965+twoeths@users.noreply.github.com> Date: Fri, 29 Aug 2025 18:04:42 +0700 Subject: [PATCH 088/173] fix: wait for data 
before writing to db (#8291) **Motivation** - await for data to be available before writing to db **Description** - a fix for #8200 Closes #8290 --------- Co-authored-by: Tuyen Nguyen --- .../src/chain/blocks/verifyBlocksDataAvailability.ts | 2 +- packages/beacon-node/src/chain/blocks/writeBlockInputToDb.ts | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/packages/beacon-node/src/chain/blocks/verifyBlocksDataAvailability.ts b/packages/beacon-node/src/chain/blocks/verifyBlocksDataAvailability.ts index 004fc64036ba..31f89e6f8362 100644 --- a/packages/beacon-node/src/chain/blocks/verifyBlocksDataAvailability.ts +++ b/packages/beacon-node/src/chain/blocks/verifyBlocksDataAvailability.ts @@ -3,7 +3,7 @@ import {DAType, IBlockInput} from "./blockInput/index.js"; // we can now wait for full 12 seconds because unavailable block sync will try pulling // the blobs from the network anyway after 500ms of seeing the block -const BLOB_AVAILABILITY_TIMEOUT = 12_000; +export const BLOB_AVAILABILITY_TIMEOUT = 12_000; /** * Verifies that all block inputs have data available. diff --git a/packages/beacon-node/src/chain/blocks/writeBlockInputToDb.ts b/packages/beacon-node/src/chain/blocks/writeBlockInputToDb.ts index 781d9c2619f8..fe411e46a16c 100644 --- a/packages/beacon-node/src/chain/blocks/writeBlockInputToDb.ts +++ b/packages/beacon-node/src/chain/blocks/writeBlockInputToDb.ts @@ -2,6 +2,7 @@ import {fulu} from "@lodestar/types"; import {prettyPrintIndices, toRootHex} from "@lodestar/utils"; import {BeaconChain} from "../chain.js"; import {IBlockInput, isBlockInputBlobs, isBlockInputColumns} from "./blockInput/index.js"; +import {BLOB_AVAILABILITY_TIMEOUT} from "./verifyBlocksDataAvailability.js"; /** * Persists block input data to DB. This operation must be eventually completed if a block is imported to the fork-choice. 
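The hunk below gates the DB write on data availability by awaiting waitForAllData with BLOB_AVAILABILITY_TIMEOUT. A minimal sketch of the underlying race-against-timeout pattern, assuming a hypothetical withTimeout helper and allDataPromise field (Lodestar's real API is the blockInput.waitForAllData(timeoutMs) call shown in the diff):

// Race a promise against a timeout so a DB write cannot block forever
// while waiting for outstanding blobs/columns to arrive.
function withTimeout<T>(promise: Promise<T>, timeoutMs: number): Promise<T> {
  return new Promise<T>((resolve, reject) => {
    const timer = setTimeout(() => reject(new Error("Timed out waiting for block data")), timeoutMs);
    promise.then(
      (value) => {
        clearTimeout(timer);
        resolve(value);
      },
      (err) => {
        clearTimeout(timer);
        reject(err);
      }
    );
  });
}

// Sketch of the guard added below: only write once all sidecars are present.
async function ensureAllDataBeforeWrite(blockInput: {hasAllData(): boolean; allDataPromise: Promise<void>}): Promise<void> {
  if (!blockInput.hasAllData()) {
    await withTimeout(blockInput.allDataPromise, 12_000); // BLOB_AVAILABILITY_TIMEOUT
  }
}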
@@ -37,6 +38,10 @@ export async function writeBlockInputToDb(this: BeaconChain, blocksInputs: IBloc inputType: blockInput.type, }); + if (!blockInput.hasAllData()) { + await blockInput.waitForAllData(BLOB_AVAILABILITY_TIMEOUT); + } + // NOTE: Old data is pruned on archive if (isBlockInputColumns(blockInput)) { const {custodyColumns} = this.custodyConfig; From e66e36a292f9cc95569306232c92265ff408d351 Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Fri, 29 Aug 2025 19:47:38 +0700 Subject: [PATCH 089/173] docs: add checklist docstring to downloadByRange --- .../src/sync/utils/downloadByRange.ts | 147 +++++------------- 1 file changed, 41 insertions(+), 106 deletions(-) diff --git a/packages/beacon-node/src/sync/utils/downloadByRange.ts b/packages/beacon-node/src/sync/utils/downloadByRange.ts index eb6a9898ec2f..495606c75a74 100644 --- a/packages/beacon-node/src/sync/utils/downloadByRange.ts +++ b/packages/beacon-node/src/sync/utils/downloadByRange.ts @@ -19,6 +19,47 @@ import {INetwork} from "../../network/index.js"; import {PeerIdStr} from "../../util/peerId.js"; import {RangeSyncType} from "./remoteSyncType.js"; +/** + * + * blocks + * - check all slots are within range of startSlot (inclusive) through startSlot + count (exclusive) + * - don't have more than count number of blocks + * - slots are in ascending order + * - must allow for skip slots + * - check is a chain of blocks where via parentRoot matches hashTreeRoot of block before + * + * blobs + * - check that expected sidecar count matches the returned count + * - slots are in ascending order + * - allows for skip slots in validation + * - indices are in ascending order + * - check that the number of blobCount for a slot matches block.message.body.blobKzgCommitments.length + * - check that blobSidecar.kzgCommitment matches block.message.body.blobKzgCommitments[blobSidecar.index] + * - hashTreeRoot(block.message) equals the hashTreeRoot(blobSidecar.signedBlockHeader.message) + * - verify_blob_sidecar, verify_kzg_inclusion_proof, verify_kzg_proof (spec verification) + * + * + * Clients MUST respond with at least the blob sidecars of the first blob-carrying block that exists + * in the range, if they have it, and no more than MAX_REQUEST_BLOB_SIDECARS sidecars. + * + * Clients MUST include all blob sidecars of each block from which they include blob sidecars. + * + * The following blob sidecars, where they exist, MUST be sent in consecutive (slot, index) order. + * + * + * + * + * + * + * + * columns + * - check that expected sidecar count matches the returned count (discount slots with 0 blobKzgCommitment.length) + * - slots are in ascending order + * - indices are in ascending order + * - check that blobCount = 0 in a slot (come back to this) + * - verify_blob_sidecar, verify_kzg_inclusion_proof, verify_kzg_proof + */ + export type DownloadByRangeRequests = { blocksRequest?: phase0.BeaconBlocksByRangeRequest; blobsRequest?: deneb.BlobSidecarsByRangeRequest; @@ -234,112 +275,6 @@ export async function downloadByRange({ return {...response, blockRoots}; } -/** - * Should not be called directly. Only exported for unit testing purposes - */ -export function validateRequests({ - config, - daOutOfRange, - blocksRequest, - blobsRequest, - columnsRequest, -}: DownloadByRangeRequests & Pick): string { - const startSlot = (blocksRequest?.startSlot ?? blobsRequest?.startSlot ?? columnsRequest?.startSlot) as number; - const count = (blocksRequest?.count ?? blobsRequest?.count ?? 
columnsRequest?.count) as number; - const slotRange = `${startSlot} - ${startSlot + count}`; - const dataRequest = blobsRequest ?? columnsRequest; - - if (!blocksRequest) { - throw new DownloadByRangeError({ - code: DownloadByRangeErrorCode.MISSING_BLOCKS_REQUEST, - slotRange, - }); - } - - if (daOutOfRange) { - if (dataRequest) { - throw new DownloadByRangeError( - { - code: DownloadByRangeErrorCode.INVALID_DATA_REQUEST, - slotRange, - }, - "Cannot request data if it is outside of the availability range" - ); - } - - return slotRange; - } - - if (!dataRequest) { - throw new DownloadByRangeError( - { - code: DownloadByRangeErrorCode.MISSING_DATA_REQUEST, - slotRange, - }, - "Must request data if it is available" - ); - } - - if (blobsRequest && columnsRequest) { - throw new DownloadByRangeError( - { - code: DownloadByRangeErrorCode.INVALID_DATA_REQUEST, - slotRange, - }, - "Cannot request both blob and column data in the same slot range" - ); - } - - const forkName = config.getForkName(startSlot); - if (!isForkPostDeneb(forkName)) { - throw new DownloadByRangeError( - { - code: DownloadByRangeErrorCode.INVALID_DATA_REQUEST, - slotRange, - }, - "Cannot request data pre-deneb" - ); - } - - if (isForkPostDeneb(forkName) && !isForkPostFulu(forkName) && !blobsRequest) { - throw new DownloadByRangeError( - { - code: DownloadByRangeErrorCode.MISSING_BLOBS_REQUEST, - slotRange, - }, - "Must request blobs for blob-only forks" - ); - } - - if (isForkPostFulu(forkName) && !columnsRequest) { - throw new DownloadByRangeError( - { - code: DownloadByRangeErrorCode.MISSING_COLUMNS_REQUEST, - slotRange, - }, - "Must request columns for forks with columns" - ); - } - - if (blocksRequest.startSlot !== dataRequest.startSlot) { - throw new DownloadByRangeError({ - code: DownloadByRangeErrorCode.START_SLOT_MISMATCH, - blockStartSlot: blocksRequest.startSlot, - dataStartSlot: dataRequest.startSlot, - }); - } - - if (blocksRequest.count !== dataRequest.count) { - throw new DownloadByRangeError({ - code: DownloadByRangeErrorCode.COUNT_MISMATCH, - blockCount: blocksRequest.count, - dataCount: dataRequest.count, - }); - } - - return slotRange; -} - /** * Should not be called directly. 
Only exported for unit testing purposes */ From 0cfc9e4128d0d4688b135258cafe8bb651ed5569 Mon Sep 17 00:00:00 2001 From: Cayman Date: Fri, 29 Aug 2025 10:43:08 -0400 Subject: [PATCH 090/173] chore: refactor sidecar validation in by-range --- .../src/chain/errors/blobSidecarError.ts | 26 ++ .../chain/errors/dataColumnSidecarError.ts | 48 +++ .../src/chain/validation/blobSidecar.ts | 121 +++++++- .../src/chain/validation/dataColumnSidecar.ts | 137 ++++++++- .../src/sync/utils/downloadByRange.ts | 276 ++++++------------ 5 files changed, 416 insertions(+), 192 deletions(-) diff --git a/packages/beacon-node/src/chain/errors/blobSidecarError.ts b/packages/beacon-node/src/chain/errors/blobSidecarError.ts index bf7628b27881..85db43f4d352 100644 --- a/packages/beacon-node/src/chain/errors/blobSidecarError.ts +++ b/packages/beacon-node/src/chain/errors/blobSidecarError.ts @@ -1,4 +1,5 @@ import {RootHex, Slot, SubnetID, ValidatorIndex} from "@lodestar/types"; +import {LodestarError} from "@lodestar/utils"; import {GossipActionError} from "./gossipValidation.js"; export enum BlobSidecarErrorCode { @@ -15,6 +16,19 @@ export enum BlobSidecarErrorCode { /** !bls.KeyValidate(blobs_sidecar.kzg_aggregated_proof) */ INVALID_KZG_PROOF = "BLOBS_SIDECAR_ERROR_INVALID_KZG_PROOF", + // Validation errors when validating against an existing block + + /** Block and sidecars blob count mismatch */ + INCORRECT_SIDECAR_COUNT = "BLOBS_SIDECAR_ERROR_INCORRECT_SIDECAR_COUNT", + /** Sidecar doesn't match block */ + INCORRECT_BLOCK = "BLOBS_SIDECAR_ERROR_INCORRECT_BLOCK", + /** Sidecar index is not as expected */ + INCORRECT_INDEX = "BLOBS_SIDECAR_ERROR_INCORRECT_INDEX", + /** Sidecar kzg commitment is not as expected */ + INCORRECT_KZG_COMMITMENT = "BLOBS_SIDECAR_ERROR_INCORRECT_KZG_COMMITMENT", + /** Sidecars proofs not valid */ + INVALID_KZG_PROOF_BATCH = "BLOBS_SIDECAR_ERROR_INVALID_KZG_PROOF_BATCH", + // following errors are adapted from the block errors FUTURE_SLOT = "BLOB_SIDECAR_ERROR_FUTURE_SLOT", WOULD_REVERT_FINALIZED_SLOT = "BLOB_SIDECAR_ERROR_WOULD_REVERT_FINALIZED_SLOT", @@ -34,6 +48,17 @@ export type BlobSidecarErrorType = | {code: BlobSidecarErrorCode.INCORRECT_SLOT; blockSlot: Slot; blobSlot: Slot; blobIdx: number} | {code: BlobSidecarErrorCode.INVALID_BLOB; blobIdx: number} | {code: BlobSidecarErrorCode.INVALID_KZG_PROOF; blobIdx: number} + | {code: BlobSidecarErrorCode.INCORRECT_SIDECAR_COUNT; slot: number; expected: number; actual: number} + | {code: BlobSidecarErrorCode.INCORRECT_BLOCK; slot: number; blobIdx: number; expected: string; actual: string} + | {code: BlobSidecarErrorCode.INCORRECT_INDEX; slot: number; expected: number; actual: number} + | { + code: BlobSidecarErrorCode.INCORRECT_KZG_COMMITMENT; + slot: number; + blobIdx: number; + expected: string; + actual: string; + } + | {code: BlobSidecarErrorCode.INVALID_KZG_PROOF_BATCH; slot: number; reason: string} | {code: BlobSidecarErrorCode.FUTURE_SLOT; blockSlot: Slot; currentSlot: Slot} | {code: BlobSidecarErrorCode.WOULD_REVERT_FINALIZED_SLOT; blockSlot: Slot; finalizedSlot: Slot} | {code: BlobSidecarErrorCode.ALREADY_KNOWN; root: RootHex} @@ -44,3 +69,4 @@ export type BlobSidecarErrorType = | {code: BlobSidecarErrorCode.INCORRECT_PROPOSER; proposerIndex: ValidatorIndex}; export class BlobSidecarGossipError extends GossipActionError {} +export class BlobSidecarValidationError extends LodestarError {} diff --git a/packages/beacon-node/src/chain/errors/dataColumnSidecarError.ts b/packages/beacon-node/src/chain/errors/dataColumnSidecarError.ts 
b/packages/beacon-node/src/chain/errors/dataColumnSidecarError.ts index a8715156bde8..92a575afc470 100644 --- a/packages/beacon-node/src/chain/errors/dataColumnSidecarError.ts +++ b/packages/beacon-node/src/chain/errors/dataColumnSidecarError.ts @@ -1,4 +1,5 @@ import {RootHex, Slot, SubnetID} from "@lodestar/types"; +import {LodestarError} from "@lodestar/utils"; import {GossipActionError} from "./gossipValidation.js"; export enum DataColumnSidecarErrorCode { @@ -8,6 +9,23 @@ export enum DataColumnSidecarErrorCode { INVALID_SUBNET = "DATA_COLUMN_SIDECAR_ERROR_INVALID_SUBNET", INVALID_KZG_PROOF = "DATA_COLUMN_SIDECAR_ERROR_INVALID_KZG_PROOF", + // Validation errors when validating against an existing block + + /** Block and sidecars data column count mismatch */ + INCORRECT_SIDECAR_COUNT = "DATA_COLUMN_SIDECAR_ERROR_INCORRECT_SIDECAR_COUNT", + /** Sidecar doesn't match block */ + INCORRECT_BLOCK = "DATA_COLUMN_SIDECAR_ERROR_INCORRECT_BLOCK", + /** Sidecar index is not as expected */ + INCORRECT_INDEX = "DATA_COLUMN_SIDECAR_ERROR_INCORRECT_INDEX", + /** Sidecar kzg commitments count not as expected */ + INCORRECT_KZG_COMMITMENTS_COUNT = "DATA_COLUMN_SIDECAR_ERROR_INCORRECT_KZG_COMMITMENTS_COUNT", + /** Sidecar kzg commitments are not as expected */ + INCORRECT_KZG_COMMITMENTS = "DATA_COLUMN_SIDECAR_ERROR_INCORRECT_KZG_COMMITMENTS", + /** Sidecar kzg proof count not as expected */ + INCORRECT_KZG_PROOF_COUNT = "DATA_COLUMN_SIDECAR_ERROR_INCORRECT_KZG_PROOF_COUNT", + /** Sidecars proofs not valid */ + INVALID_KZG_PROOF_BATCH = "DATA_COLUMN_SIDECAR_ERROR_INVALID_KZG_PROOF_BATCH", + // following errors are adapted from the block errors ALREADY_KNOWN = "DATA_COLUMN_SIDECAR_ERROR_ALREADY_KNOWN", FUTURE_SLOT = "DATA_COLUMN_SIDECAR_ERROR_FUTURE_SLOT", @@ -37,6 +55,36 @@ export type DataColumnSidecarErrorType = | {code: DataColumnSidecarErrorCode.NOT_LATER_THAN_PARENT; parentSlot: Slot; slot: Slot} | {code: DataColumnSidecarErrorCode.INCLUSION_PROOF_INVALID; slot: Slot; columnIdx: number} | {code: DataColumnSidecarErrorCode.INVALID_KZG_PROOF; slot: Slot; columnIdx: number} + | {code: DataColumnSidecarErrorCode.INCORRECT_SIDECAR_COUNT; slot: number; expected: number; actual: number} + | { + code: DataColumnSidecarErrorCode.INCORRECT_BLOCK; + slot: number; + columnIdx: number; + expected: string; + actual: string; + } + | {code: DataColumnSidecarErrorCode.INCORRECT_INDEX; slot: number; expected: number; actual: number} + | { + code: DataColumnSidecarErrorCode.INCORRECT_KZG_COMMITMENTS_COUNT; + slot: number; + columnIdx: number; + expected: number; + actual: number; + } + | { + code: DataColumnSidecarErrorCode.INCORRECT_KZG_COMMITMENTS; + slot: number; + columnIdx: number; + } + | { + code: DataColumnSidecarErrorCode.INCORRECT_KZG_PROOF_COUNT; + slot: number; + columnIdx: number; + expected: number; + actual: number; + } + | {code: DataColumnSidecarErrorCode.INVALID_KZG_PROOF_BATCH; slot: number; reason: string} | {code: DataColumnSidecarErrorCode.INCORRECT_PROPOSER; actualProposerIndex: number; expectedProposerIndex: number}; export class DataColumnSidecarGossipError extends GossipActionError<DataColumnSidecarErrorType> {} +export class DataColumnSidecarValidationError extends LodestarError<DataColumnSidecarErrorType> {} diff --git a/packages/beacon-node/src/chain/validation/blobSidecar.ts b/packages/beacon-node/src/chain/validation/blobSidecar.ts index 5d31d6a948c8..75c09cfc8354 100644 --- a/packages/beacon-node/src/chain/validation/blobSidecar.ts +++ b/packages/beacon-node/src/chain/validation/blobSidecar.ts @@ -11,11 +11,11 @@ import
{BlobIndex, Root, Slot, SubnetID, deneb, ssz} from "@lodestar/types"; -import {toRootHex, verifyMerkleBranch} from "@lodestar/utils"; +import {toHex, toRootHex, verifyMerkleBranch} from "@lodestar/utils"; import {byteArrayEquals} from "../../util/bytes.js"; import {kzg} from "../../util/kzg.js"; -import {BlobSidecarErrorCode, BlobSidecarGossipError} from "../errors/blobSidecarError.js"; +import {BlobSidecarErrorCode, BlobSidecarGossipError, BlobSidecarValidationError} from "../errors/blobSidecarError.js"; import {GossipAction} from "../errors/gossipValidation.js"; import {IBeaconChain} from "../interface.js"; import {RegenCaller} from "../regen/index.js"; @@ -219,6 +219,123 @@ export async function validateBlobSidecars( } } +/** + * Validate all blob sidecars in a block + * + * Requires the block to be known to the node + */ +export async function validateBlockBlobSidecars( + blockSlot: Slot, + blockRoot: Root, + blockKzgCommitments: deneb.BlobKzgCommitments, + blobSidecars: deneb.BlobSidecars +): Promise { + if (blockKzgCommitments.length !== blobSidecars.length) { + throw new BlobSidecarValidationError({ + code: BlobSidecarErrorCode.INCORRECT_SIDECAR_COUNT, + slot: blockSlot, + expected: blockKzgCommitments.length, + actual: blobSidecars.length, + }); + } + + if (blobSidecars.length === 0) { + return; + } + + // Hash the first sidecar block header and compare the rest via (cheaper) equality + const firstSidecarBlockHeader = blobSidecars[0].signedBlockHeader.message; + const firstBlockRoot = ssz.phase0.BeaconBlockHeader.hashTreeRoot(firstSidecarBlockHeader); + if (Buffer.compare(blockRoot, firstBlockRoot) !== 0) { + throw new BlobSidecarValidationError( + { + code: BlobSidecarErrorCode.INCORRECT_BLOCK, + slot: blockSlot, + blobIdx: 0, + expected: toRootHex(blockRoot), + actual: toRootHex(firstBlockRoot), + }, + "BlobSidecar doesn't match corresponding block" + ); + } + + const blobs = []; + const proofs = []; + for (let i = 0; i < blobSidecars.length; i++) { + const blobSidecar = blobSidecars[i]; + const blobKzgCommitment = blockKzgCommitments[i]; + if (blobSidecar.index !== i) { + throw new BlobSidecarValidationError( + { + code: BlobSidecarErrorCode.INCORRECT_INDEX, + slot: blockSlot, + expected: i, + actual: blobSidecar.index, + }, + "BlobSidecar index out of order" + ); + } + if (Buffer.compare(blobSidecar.kzgCommitment, blobKzgCommitment) !== 0) { + throw new BlobSidecarValidationError( + { + code: BlobSidecarErrorCode.INCORRECT_KZG_COMMITMENT, + slot: blockSlot, + blobIdx: i, + expected: toHex(blobKzgCommitment), + actual: toHex(blobSidecar.kzgCommitment), + }, + "BlobSidecar KZG commitment doesn't match corresponding block commitment" + ); + } + if (!ssz.phase0.BeaconBlockHeader.equals(blobSidecar.signedBlockHeader.message, firstSidecarBlockHeader)) { + throw new BlobSidecarValidationError( + { + code: BlobSidecarErrorCode.INCORRECT_BLOCK, + slot: blockSlot, + blobIdx: i, + expected: toRootHex(blockRoot), + actual: "unknown - compared via equality", + }, + "BlobSidecar doesn't match corresponding block" + ); + } + + if (!validateBlobSidecarInclusionProof(blobSidecar)) { + throw new BlobSidecarValidationError( + { + code: BlobSidecarErrorCode.INCLUSION_PROOF_INVALID, + slot: blockSlot, + blobIdx: i, + }, + "BlobSidecar inclusion proof invalid" + ); + } + + blobs.push(blobSidecar.blob); + proofs.push(blobSidecar.kzgProof); + } + + // Final batch KZG proof verification + let reason: string | undefined = undefined; + try { + if (!(await kzg.asyncVerifyBlobKzgProofBatch(blobs, 
blockKzgCommitments, proofs))) { + reason = "Invalid verifyBlobKzgProofBatch"; + } + } catch (e) { + reason = (e as Error).message; + } + if (reason !== undefined) { + throw new BlobSidecarValidationError( + { + code: BlobSidecarErrorCode.INVALID_KZG_PROOF_BATCH, + slot: blockSlot, + reason, + }, + "BlobSidecar has invalid KZG proof batch" + ); + } +} + export async function validateBlobsAndBlobProofs( expectedKzgCommitments: deneb.BlobKzgCommitments, blobs: deneb.Blobs, diff --git a/packages/beacon-node/src/chain/validation/dataColumnSidecar.ts b/packages/beacon-node/src/chain/validation/dataColumnSidecar.ts index 6218a2dbd320..eb0ea88c7b5d 100644 --- a/packages/beacon-node/src/chain/validation/dataColumnSidecar.ts +++ b/packages/beacon-node/src/chain/validation/dataColumnSidecar.ts @@ -4,14 +4,18 @@ import { KZG_COMMITMENTS_SUBTREE_INDEX, NUMBER_OF_COLUMNS, } from "@lodestar/params"; -import {Root, Slot, SubnetID, deneb, fulu, ssz} from "@lodestar/types"; +import {ColumnIndex, Root, Slot, SubnetID, deneb, fulu, ssz} from "@lodestar/types"; import {toRootHex, verifyMerkleBranch} from "@lodestar/utils"; import {computeStartSlotAtEpoch, getBlockHeaderProposerSignatureSet} from "@lodestar/state-transition"; import {Metrics} from "../../metrics/metrics.js"; import {byteArrayEquals} from "../../util/bytes.js"; import {kzg} from "../../util/kzg.js"; -import {DataColumnSidecarErrorCode, DataColumnSidecarGossipError} from "../errors/dataColumnSidecarError.js"; +import { + DataColumnSidecarErrorCode, + DataColumnSidecarGossipError, + DataColumnSidecarValidationError, +} from "../errors/dataColumnSidecarError.js"; import {GossipAction} from "../errors/gossipValidation.js"; import {IBeaconChain} from "../interface.js"; import {RegenCaller} from "../regen/interface.js"; @@ -313,6 +317,135 @@ export function verifyDataColumnSidecarInclusionProof(dataColumnSidecar: fulu.Da ); } +/** + * Validate a subset of data column sidecars in a block + * + * Requires the block to be known to the node + */ +export async function validateBlockDataColumnSidecars( + blockSlot: Slot, + blockRoot: Root, + blockKzgCommitments: deneb.BlobKzgCommitments, + expectedColumnIndices: ColumnIndex[], + dataColumnSidecars: fulu.DataColumnSidecars +): Promise { + if (dataColumnSidecars.length !== expectedColumnIndices.length) { + throw new DataColumnSidecarValidationError({ + code: DataColumnSidecarErrorCode.INCORRECT_SIDECAR_COUNT, + slot: blockSlot, + expected: expectedColumnIndices.length, + actual: dataColumnSidecars.length, + }); + } + + if (dataColumnSidecars.length === 0) { + return; + } + + // Hash the first sidecar block header and compare the rest via (cheaper) equality + const firstSidecarBlockHeader = dataColumnSidecars[0].signedBlockHeader.message; + const firstBlockRoot = ssz.phase0.BeaconBlockHeader.hashTreeRoot(firstSidecarBlockHeader); + if (Buffer.compare(blockRoot, firstBlockRoot) !== 0) { + throw new DataColumnSidecarValidationError( + { + code: DataColumnSidecarErrorCode.INCORRECT_BLOCK, + slot: blockSlot, + columnIdx: 0, + expected: toRootHex(blockRoot), + actual: toRootHex(firstBlockRoot), + }, + "DataColumnSidecar doesn't match corresponding block" + ); + } + + const cellIndices: number[] = []; + const cells: Uint8Array[] = []; + const proofs: Uint8Array[] = []; + for (let i = 0; i < dataColumnSidecars.length; i++) { + const columnSidecar = dataColumnSidecars[i]; + const expectedIndex = expectedColumnIndices[i]; + if (columnSidecar.index !== expectedIndex) { + throw new DataColumnSidecarValidationError( + { 
+ code: DataColumnSidecarErrorCode.INCORRECT_INDEX, + slot: blockSlot, + expected: expectedIndex, + actual: columnSidecar.index, + }, + "DataColumnSidecar has unexpected index" + ); + } + + if (columnSidecar.kzgCommitments.length !== blockKzgCommitments.length) { + throw new DataColumnSidecarValidationError({ + code: DataColumnSidecarErrorCode.INCORRECT_KZG_COMMITMENTS_COUNT, + slot: blockSlot, + columnIdx: columnSidecar.index, + expected: blockKzgCommitments.length, + actual: columnSidecar.kzgCommitments.length, + }); + } + + if (columnSidecar.kzgProofs.length !== columnSidecar.kzgCommitments.length) { + throw new DataColumnSidecarValidationError({ + code: DataColumnSidecarErrorCode.INCORRECT_KZG_PROOF_COUNT, + slot: blockSlot, + columnIdx: columnSidecar.index, + expected: columnSidecar.kzgCommitments.length, + actual: columnSidecar.kzgProofs.length, + }); + } + + if ( + columnSidecar.kzgCommitments.some((commitment, cIx) => Buffer.compare(commitment, blockKzgCommitments[cIx]) !== 0) + ) { + throw new DataColumnSidecarValidationError( + { + code: DataColumnSidecarErrorCode.INCORRECT_KZG_COMMITMENTS, + slot: blockSlot, + columnIdx: columnSidecar.index, + }, + "DataColumnSidecar has unexpected KZG commitments" + ); + } + + if (!verifyDataColumnSidecarInclusionProof(columnSidecar)) { + throw new DataColumnSidecarValidationError( + { + code: DataColumnSidecarErrorCode.INCLUSION_PROOF_INVALID, + slot: blockSlot, + columnIdx: columnSidecar.index, + }, + "DataColumnSidecar has invalid inclusion proof" + ); + } + + cellIndices.push(...Array.from({length: columnSidecar.column.length}, () => columnSidecar.index)); + cells.push(...columnSidecar.column); + proofs.push(...columnSidecar.kzgProofs); + } + + let reason: string | undefined; + try { + const valid = await kzg.verifyCellKzgProofBatch(blockKzgCommitments, cellIndices, cells, proofs); + if (!valid) { + reason = "Invalid KZG proof batch"; + } + } catch (e) { + reason = (e as Error).message; + } + if (reason !== undefined) { + throw new DataColumnSidecarValidationError( + { + code: DataColumnSidecarErrorCode.INVALID_KZG_PROOF_BATCH, + slot: blockSlot, + reason, + }, + "DataColumnSidecar has invalid KZG proof batch" + ); + } +} + /** * SPEC FUNCTION * https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.4/specs/fulu/p2p-interface.md#compute_subnet_for_data_column_sidecar diff --git a/packages/beacon-node/src/sync/utils/downloadByRange.ts b/packages/beacon-node/src/sync/utils/downloadByRange.ts index 495606c75a74..ce04161eb02d 100644 --- a/packages/beacon-node/src/sync/utils/downloadByRange.ts +++ b/packages/beacon-node/src/sync/utils/downloadByRange.ts @@ -1,5 +1,5 @@ import {ChainForkConfig} from "@lodestar/config"; -import {ForkPostDeneb, isForkPostDeneb, isForkPostFulu} from "@lodestar/params"; +import {ForkPostDeneb, ForkPostFulu, isForkPostDeneb, isForkPostFulu} from "@lodestar/params"; import {SignedBeaconBlock, Slot, deneb, fulu, phase0} from "@lodestar/types"; import {LodestarError, Logger, fromHex, prettyBytes, toRootHex} from "@lodestar/utils"; import { @@ -10,11 +10,8 @@ import { isBlockInputColumns, } from "../../chain/blocks/blockInput/index.js"; import {SeenBlockInput} from "../../chain/seenCache/seenGossipBlockInput.js"; -import {validateBlobSidecarInclusionProof, validateBlobsAndBlobProofs} from "../../chain/validation/blobSidecar.js"; -import { - verifyDataColumnSidecar, - verifyDataColumnSidecarInclusionProof, -} from "../../chain/validation/dataColumnSidecar.js"; +import {validateBlockBlobSidecars} from 
"../../chain/validation/blobSidecar.js"; +import {validateBlockDataColumnSidecars} from "../../chain/validation/dataColumnSidecar.js"; import {INetwork} from "../../network/index.js"; import {PeerIdStr} from "../../util/peerId.js"; import {RangeSyncType} from "./remoteSyncType.js"; @@ -385,35 +382,8 @@ export async function validateResponses({ ); } - const startSlot = blobsRequest.startSlot; - const endSlot = startSlot + blobsRequest.count; - - // Organize pre-fetched blocks and the blocks received in this response, only including those in the requested slot range - const blobsRequestBlocks: SignedBeaconBlock[] = []; - const blobsRequestBlockRoots: Uint8Array[] = []; - let lastSlot = startSlot - 1; - if (batchBlocks) { - for (let i = 0; i < batchBlocks.length; i++) { - const blockInput = batchBlocks[i]; - if (blockInput.slot >= startSlot && blockInput.slot < endSlot && blockInput.slot > lastSlot) { - blobsRequestBlocks.push(blockInput.getBlock()); - blobsRequestBlockRoots.push(fromHex(blockInput.blockRootHex)); - lastSlot = blockInput.slot; - } - } - } - if (blocks) { - for (let i = 0; i < blocks.length; i++) { - const block = blocks[i]; - if (block.message.slot >= startSlot && block.message.slot < endSlot && block.message.slot > lastSlot) { - blobsRequestBlocks.push(block); - blobsRequestBlockRoots.push(blockRoots[i]); - lastSlot = block.message.slot; - } - } - } - - await validateBlobsByRangeResponse(config, blobsRequestBlocks, blobsRequestBlockRoots, blobSidecars); + const requested = getDataRequestBlocks(blobsRequest, batchBlocks, blocks ? {blocks, blockRoots} : undefined); + await validateBlobsByRangeResponse(requested.blocks, requested.blockRoots, blobSidecars); } if (columnsRequest) { @@ -427,42 +397,8 @@ export async function validateResponses({ ); } - const startSlot = columnsRequest.startSlot; - const endSlot = startSlot + columnsRequest.count; - - // Organize pre-fetched blocks and the blocks received in this response, only including those in the requested slot range - // (logic copy pasted from blobsRequest validation above) - const columnsRequestBlocks: SignedBeaconBlock[] = []; - const columnsRequestBlockRoots: Uint8Array[] = []; - let lastSlot = startSlot - 1; - if (batchBlocks) { - for (let i = 0; i < batchBlocks.length; i++) { - const blockInput = batchBlocks[i]; - if (blockInput.slot >= startSlot && blockInput.slot < endSlot && blockInput.slot > lastSlot) { - columnsRequestBlocks.push(blockInput.getBlock()); - columnsRequestBlockRoots.push(fromHex(blockInput.blockRootHex)); - lastSlot = blockInput.slot; - } - } - } - if (blocks) { - for (let i = 0; i < blocks.length; i++) { - const block = blocks[i]; - if (block.message.slot >= startSlot && block.message.slot < endSlot && block.message.slot > lastSlot) { - columnsRequestBlocks.push(block); - columnsRequestBlockRoots.push(blockRoots[i]); - lastSlot = block.message.slot; - } - } - } - - await validateColumnsByRangeResponse( - config, - columnsRequest, - columnsRequestBlocks, - columnsRequestBlockRoots, - columnSidecars - ); + const requested = getDataRequestBlocks(columnsRequest, batchBlocks, blocks ? {blocks, blockRoots} : undefined); + await validateColumnsByRangeResponse(columnsRequest, requested.blocks, requested.blockRoots, columnSidecars); } return blockRoots; } @@ -536,7 +472,6 @@ export function validateBlockByRangeResponse( * Should not be called directly. 
Only exported for unit testing purposes */ export async function validateBlobsByRangeResponse( - config: ChainForkConfig, requestBlocks: SignedBeaconBlock[], requestBlockRoots: Uint8Array[], blobSidecars: deneb.BlobSidecars @@ -566,62 +501,31 @@ export async function validateBlobsByRangeResponse( ); } - // First loop to do cheap validation before expensive proof and blob validation below - // Check block roots, indices match expected blocks + const validateSidecarsPromises: Promise[] = []; for (let blockIndex = 0, blobSidecarIndex = 0; blockIndex < requestBlocks.length; blockIndex++) { const block = requestBlocks[blockIndex]; - const expectedBlobs = (block as SignedBeaconBlock).message.body.blobKzgCommitments.length; - for (let i = 0; i < expectedBlobs; i++, blobSidecarIndex++) { - const blobSidecar = blobSidecars[blobSidecarIndex]; - const blockRoot = config - .getForkTypes(block.message.slot) - .BeaconBlockHeader.hashTreeRoot(blobSidecar.signedBlockHeader.message); - const slot = block.message.slot; - if (Buffer.compare(requestBlockRoots[blockIndex], blockRoot) !== 0) { - throw new DownloadByRangeError( - { - code: DownloadByRangeErrorCode.WRONG_BLOCK_BLOBS, - expected: toRootHex(requestBlockRoots[blockIndex]), - actual: toRootHex(blockRoot), - }, - "BlobSidecar doesn't match corresponding block in BlobSidecarsByRange response" - ); - } - if (blobSidecar.index !== i) { - throw new DownloadByRangeError( - { - code: DownloadByRangeErrorCode.WRONG_INDEX_BLOBS, - slot, - expected: i, - actual: blobSidecar.index, - }, - "BlobSidecar out of order in BlobSidecarsByRange response" - ); - } + const blockRoot = requestBlockRoots[blockIndex]; + const blockKzgCommitments = (block as SignedBeaconBlock).message.body.blobKzgCommitments; + if (blockKzgCommitments.length === 0) { + continue; } - } - // Second loop to do more expensive validation after cheap checks above - for (let blockIndex = 0, blobSidecarIndex = 0; blockIndex < requestBlocks.length; blockIndex++) { - const block = requestBlocks[blockIndex]; - const expectedKzgCommitments = (block as SignedBeaconBlock).message.body.blobKzgCommitments; - const blobs = []; - const proofs = []; - for (let i = 0; i < expectedKzgCommitments.length; i++, blobSidecarIndex++) { - const blobSidecar = blobSidecars[blobSidecarIndex]; - validateBlobSidecarInclusionProof(blobSidecar); - blobs.push(blobSidecar.blob); - proofs.push(blobSidecar.kzgProof); - } - await validateBlobsAndBlobProofs(expectedKzgCommitments, blobs, proofs); + const blockBlobSidecars = blobSidecars.slice(blobSidecarIndex, blobSidecarIndex + blockKzgCommitments.length); + blobSidecarIndex += blockKzgCommitments.length; + + validateSidecarsPromises.push( + validateBlockBlobSidecars(block.message.slot, blockRoot, blockKzgCommitments, blockBlobSidecars) + ); } + + // Await all sidecar validations in parallel + await Promise.all(validateSidecarsPromises); } /** * Should not be called directly. 
Only exported for unit testing purposes */ export async function validateColumnsByRangeResponse( - config: ChainForkConfig, request: fulu.DataColumnSidecarsByRangeRequest, requestBlocks: SignedBeaconBlock[], requestBlockRoots: Uint8Array[], @@ -652,57 +556,72 @@ export async function validateColumnsByRangeResponse( "Missing data columns in DataColumnSidecarsByRange response" ); } - // First loop to do cheap validation before expensive proof validation below - // Check block roots, indices match expected blocks + + const validateSidecarsPromises: Promise[] = []; for (let blockIndex = 0, columnSidecarIndex = 0; blockIndex < requestBlocks.length; blockIndex++) { const block = requestBlocks[blockIndex]; - const expectedColumns = (block as SignedBeaconBlock).message.body.blobKzgCommitments.length - ? request.columns.length - : 0; - for (let i = 0; i < expectedColumns; i++, columnSidecarIndex++) { - const columnIndex = request.columns[i]; - const columnSidecar = columnSidecars[columnSidecarIndex]; - const blockRoot = config - .getForkTypes(block.message.slot) - .BeaconBlockHeader.hashTreeRoot(columnSidecar.signedBlockHeader.message); - const slot = block.message.slot; - if (Buffer.compare(requestBlockRoots[blockIndex], blockRoot) !== 0) { - throw new DownloadByRangeError( - { - code: DownloadByRangeErrorCode.WRONG_BLOCK_COLUMNS, - expected: toRootHex(requestBlockRoots[blockIndex]), - actual: toRootHex(blockRoot), - }, - "DataColumnSidecar doesn't match corresponding block in DataColumnSidecarsByRange response" - ); - } - if (columnSidecar.index !== columnIndex) { - throw new DownloadByRangeError( - { - code: DownloadByRangeErrorCode.WRONG_INDEX_COLUMNS, - slot, - expected: columnIndex, - actual: columnSidecar.index, - }, - "DataColumnSidecar out of order in DataColumnSidecarsByRange response" - ); + const blockRoot = requestBlockRoots[blockIndex]; + const blockKzgCommitments = (block as SignedBeaconBlock).message.body.blobKzgCommitments; + const expectedColumns = blockKzgCommitments.length ? 
request.columns.length : 0; + + if (expectedColumns === 0) { + continue; + } + const blockColumnSidecars = columnSidecars.slice(columnSidecarIndex, columnSidecarIndex + expectedColumns); + columnSidecarIndex += expectedColumns; + + validateSidecarsPromises.push( + validateBlockDataColumnSidecars( + block.message.slot, + blockRoot, + blockKzgCommitments, + request.columns, + blockColumnSidecars + ) + ); + } + + // Await all sidecar validations in parallel + await Promise.all(validateSidecarsPromises); +} + +/** + * Given a data request, return only the blocks and roots that correspond to the data request (sorted) + */ +export function getDataRequestBlocks( + dataRequest: {startSlot: Slot; count: number}, + cached: IBlockInput[] | undefined, + current: {blocks: SignedBeaconBlock[]; blockRoots: Uint8Array[]} | undefined +): {blocks: SignedBeaconBlock[]; blockRoots: Uint8Array[]} { + const startSlot = dataRequest.startSlot; + const endSlot = startSlot + dataRequest.count; + + // Organize cached blocks and current blocks, only including those in the requested slot range + const dataRequestBlocks: SignedBeaconBlock[] = []; + const dataRequestBlockRoots: Uint8Array[] = []; + let lastSlot = startSlot - 1; + if (cached) { + for (let i = 0; i < cached.length; i++) { + const blockInput = cached[i]; + if (blockInput.slot >= startSlot && blockInput.slot < endSlot && blockInput.slot > lastSlot) { + dataRequestBlocks.push(blockInput.getBlock()); + dataRequestBlockRoots.push(fromHex(blockInput.blockRootHex)); + lastSlot = blockInput.slot; } } } - - // Second loop to do more expensive validation after cheap checks above - for (let blockIndex = 0, columnSidecarIndex = 0; blockIndex < requestBlocks.length; blockIndex++) { - const block = requestBlocks[blockIndex]; - const expectedColumns = (block as SignedBeaconBlock).message.body.blobKzgCommitments.length - ? 
request.columns.length - : 0; - for (let i = 0; i < expectedColumns; i++, columnSidecarIndex++) { - const columnSidecar = columnSidecars[columnSidecarIndex]; - verifyDataColumnSidecar(columnSidecar); - // await verifyDataColumnSidecarKzgProofs(...); - verifyDataColumnSidecarInclusionProof(columnSidecar); + if (current) { + const {blocks, blockRoots} = current; + for (let i = 0; i < blocks.length; i++) { + const block = blocks[i]; + if (block.message.slot >= startSlot && block.message.slot < endSlot && block.message.slot > lastSlot) { + dataRequestBlocks.push(block); + dataRequestBlockRoots.push(blockRoots[i]); + lastSlot = block.message.slot; + } } } + return {blocks: dataRequestBlocks, blockRoots: dataRequestBlockRoots}; } export enum DownloadByRangeErrorCode { @@ -716,20 +635,23 @@ export enum DownloadByRangeErrorCode { MISSING_DATA_REQUEST = "DOWNLOAD_BY_RANGE_ERROR_MISSING_DATA_REQUEST", START_SLOT_MISMATCH = "DOWNLOAD_BY_RANGE_ERROR_START_SLOT_MISMATCH", COUNT_MISMATCH = "DOWNLOAD_BY_RANGE_ERROR_COUNT_MISMATCH", + + /** Error at the reqresp layer */ REQ_RESP_ERROR = "DOWNLOAD_BY_RANGE_ERROR_REQ_RESP_ERROR", + + // Errors validating a chain of blocks (not considering associated data) + PARENT_ROOT_MISMATCH = "DOWNLOAD_BY_RANGE_ERROR_PARENT_ROOT_MISMATCH", EXTRA_BLOCKS = "DOWNLOAD_BY_RANGE_ERROR_EXTRA_BLOCKS", OUT_OF_RANGE_BLOCKS = "DOWNLOAD_BY_RANGE_OUT_OF_RANGE_BLOCKS", OUT_OF_ORDER_BLOCKS = "DOWNLOAD_BY_RANGE_OUT_OF_ORDER_BLOCKS", + MISSING_BLOBS = "DOWNLOAD_BY_RANGE_ERROR_MISSING_BLOBS", EXTRA_BLOBS = "DOWNLOAD_BY_RANGE_ERROR_EXTRA_BLOBS", - WRONG_BLOCK_BLOBS = "DOWNLOAD_BY_RANGE_ERROR_WRONG_BLOCK_BLOBS", - WRONG_INDEX_BLOBS = "DOWNLOAD_BY_RANGE_ERROR_WRONG_INDEX_BLOBS", - DUPLICATE_BLOBS = "DOWNLOAD_BY_RANGE_ERROR_DUPLICATE_BLOBS", + MISSING_COLUMNS = "DOWNLOAD_BY_RANGE_ERROR_MISSING_COLUMNS", EXTRA_COLUMNS = "DOWNLOAD_BY_RANGE_ERROR_EXTRA_COLUMNS", - WRONG_BLOCK_COLUMNS = "DOWNLOAD_BY_RANGE_ERROR_WRONG_BLOCK_COLUMNS", - WRONG_INDEX_COLUMNS = "DOWNLOAD_BY_RANGE_ERROR_WRONG_INDEX_COLUMNS", + PEER_CUSTODY_FAILURE = "DOWNLOAD_BY_RANGE_ERROR_PEER_CUSTODY_FAILURE", CACHING_ERROR = "DOWNLOAD_BY_RANGE_CACHING_ERROR", MISMATCH_BLOCK_INPUT_TYPE = "DOWNLOAD_BY_RANGE_MISMATCH_BLOCK_INPUT_TYPE", @@ -795,17 +717,6 @@ export type DownloadByRangeErrorType = expected: number; actual: number; } - | { - code: DownloadByRangeErrorCode.WRONG_BLOCK_BLOBS; - expected: string; - actual: string; - } - | { - code: DownloadByRangeErrorCode.WRONG_INDEX_BLOBS; - slot: number; - expected: number; - actual: number; - } | { code: DownloadByRangeErrorCode.MISSING_COLUMNS; expected: number; @@ -816,17 +727,6 @@ export type DownloadByRangeErrorType = expected: number; actual: number; } - | { - code: DownloadByRangeErrorCode.WRONG_BLOCK_COLUMNS; - expected: string; - actual: string; - } - | { - code: DownloadByRangeErrorCode.WRONG_INDEX_COLUMNS; - slot: number; - expected: number; - actual: number; - } | { code: DownloadByRangeErrorCode.PEER_CUSTODY_FAILURE; peerId: string; From 01e6174845815e559ec8371364cc1439ff06b47f Mon Sep 17 00:00:00 2001 From: Cayman Date: Fri, 29 Aug 2025 11:01:47 -0400 Subject: [PATCH 091/173] chore: fix column proof verification --- .../beacon-node/src/chain/validation/dataColumnSidecar.ts | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/packages/beacon-node/src/chain/validation/dataColumnSidecar.ts b/packages/beacon-node/src/chain/validation/dataColumnSidecar.ts index eb0ea88c7b5d..6fdd086a5664 100644 --- a/packages/beacon-node/src/chain/validation/dataColumnSidecar.ts 
+++ b/packages/beacon-node/src/chain/validation/dataColumnSidecar.ts @@ -358,6 +358,7 @@ export async function validateBlockDataColumnSidecars( ); } + const commitments: Uint8Array[] = []; const cellIndices: number[] = []; const cells: Uint8Array[] = []; const proofs: Uint8Array[] = []; @@ -420,6 +421,7 @@ export async function validateBlockDataColumnSidecars( ); } + commitments.push(...columnSidecar.kzgCommitments); cellIndices.push(...Array.from({length: columnSidecar.column.length}, () => columnSidecar.index)); cells.push(...columnSidecar.column); proofs.push(...columnSidecar.kzgProofs); @@ -427,7 +429,7 @@ export async function validateBlockDataColumnSidecars( let reason: string | undefined; try { - const valid = await kzg.verifyCellKzgProofBatch(blockKzgCommitments, cellIndices, cells, proofs); + const valid = await kzg.asyncVerifyCellKzgProofBatch(commitments, cellIndices, cells, proofs); if (!valid) { reason = "Invalid KZG proof batch"; } From 1b7e37a9180a86b0761936cc9c2548d42f91e91b Mon Sep 17 00:00:00 2001 From: Cayman Date: Fri, 29 Aug 2025 11:06:45 -0400 Subject: [PATCH 092/173] chore: fix up columns block input --- .../src/chain/blocks/blockInput/blockInput.ts | 56 ++----------------- 1 file changed, 4 insertions(+), 52 deletions(-) diff --git a/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts b/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts index d380d5e62998..73dcaab3e27c 100644 --- a/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts +++ b/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts @@ -708,13 +708,6 @@ export class BlockInputColumns extends AbstractBlockInput, - columnSidecar: fulu.DataColumnSidecar -): boolean { - return ( - block.message.body.blobKzgCommitments.length === columnSidecar.kzgCommitments.length && - block.message.body.blobKzgCommitments.every((commitment, index) => - Buffer.compare(commitment, columnSidecar.kzgCommitments[index]) - ) - ); -} - -function assertBlockAndColumnArePaired( - blockRootHex: string, - block: SignedBeaconBlock, - columnSidecar: fulu.DataColumnSidecar -): void { - if (!blockAndColumnArePaired(block, columnSidecar)) { - throw new BlockInputError( - { - code: BlockInputErrorCode.MISMATCHED_KZG_COMMITMENT, - blockRoot: blockRootHex, - slot: block.message.slot, - sidecarIndex: columnSidecar.index, - }, - "DataColumnsSidecar kzgCommitment does not match block kzgCommitment" - ); - } -} From 6d225a259be38c30a3a2a79564448d3463df4f00 Mon Sep 17 00:00:00 2001 From: Cayman Date: Sun, 31 Aug 2025 14:00:12 -0400 Subject: [PATCH 093/173] chore: remove unnecessary sidecar validation step --- .../src/chain/errors/blobSidecarError.ts | 9 ------- .../chain/errors/dataColumnSidecarError.ts | 7 ------ .../src/chain/validation/blobSidecar.ts | 25 ++++++------------- .../src/chain/validation/dataColumnSidecar.ts | 19 +++----------- .../src/sync/utils/downloadByRange.ts | 4 +-- 5 files changed, 12 insertions(+), 52 deletions(-) diff --git a/packages/beacon-node/src/chain/errors/blobSidecarError.ts b/packages/beacon-node/src/chain/errors/blobSidecarError.ts index 85db43f4d352..b1e5e7d644db 100644 --- a/packages/beacon-node/src/chain/errors/blobSidecarError.ts +++ b/packages/beacon-node/src/chain/errors/blobSidecarError.ts @@ -24,8 +24,6 @@ export enum BlobSidecarErrorCode { INCORRECT_BLOCK = "BLOBS_SIDECAR_ERROR_INCORRECT_BLOCK", /** Sidecar index is not as expected */ INCORRECT_INDEX = "BLOBS_SIDECAR_ERROR_INCORRECT_INDEX", - /** Sidecar kzg commitment is not as expected */ - 
INCORRECT_KZG_COMMITMENT = "BLOBS_SIDECAR_ERROR_INCORRECT_KZG_COMMITMENT", /** Sidecars proofs not valid */ INVALID_KZG_PROOF_BATCH = "BLOBS_SIDECAR_ERROR_INVALID_KZG_PROOF_BATCH", @@ -51,13 +49,6 @@ export type BlobSidecarErrorType = | {code: BlobSidecarErrorCode.INCORRECT_SIDECAR_COUNT; slot: number; expected: number; actual: number} | {code: BlobSidecarErrorCode.INCORRECT_BLOCK; slot: number; blobIdx: number; expected: string; actual: string} | {code: BlobSidecarErrorCode.INCORRECT_INDEX; slot: number; expected: number; actual: number} - | { - code: BlobSidecarErrorCode.INCORRECT_KZG_COMMITMENT; - slot: number; - blobIdx: number; - expected: string; - actual: string; - } | {code: BlobSidecarErrorCode.INVALID_KZG_PROOF_BATCH; slot: number; reason: string} | {code: BlobSidecarErrorCode.FUTURE_SLOT; blockSlot: Slot; currentSlot: Slot} | {code: BlobSidecarErrorCode.WOULD_REVERT_FINALIZED_SLOT; blockSlot: Slot; finalizedSlot: Slot} diff --git a/packages/beacon-node/src/chain/errors/dataColumnSidecarError.ts b/packages/beacon-node/src/chain/errors/dataColumnSidecarError.ts index 92a575afc470..4eaa77bacb96 100644 --- a/packages/beacon-node/src/chain/errors/dataColumnSidecarError.ts +++ b/packages/beacon-node/src/chain/errors/dataColumnSidecarError.ts @@ -19,8 +19,6 @@ export enum DataColumnSidecarErrorCode { INCORRECT_INDEX = "DATA_COLUMN_SIDECAR_ERROR_INCORRECT_INDEX", /** Sidecar kzg proof count not as expected */ INCORRECT_KZG_COMMITMENTS_COUNT = "DATA_COLUMN_SIDECAR_ERROR_INCORRECT_KZG_COMMITMENTS_COUNT", - /** Sidecar kzg commitments are not as expected */ - INCORRECT_KZG_COMMITMENTS = "DATA_COLUMN_SIDECAR_ERROR_INCORRECT_KZG_COMMITMENTS", /** Sidecar kzg proof count not as expected */ INCORRECT_KZG_PROOF_COUNT = "DATA_COLUMN_SIDECAR_ERROR_INCORRECT_KZG_PROOF_COUNT", /** Sidecars proofs not valid */ @@ -71,11 +69,6 @@ export type DataColumnSidecarErrorType = expected: number; actual: number; } - | { - code: DataColumnSidecarErrorCode.INCORRECT_KZG_COMMITMENTS; - slot: number; - columnIdx: number; - } | { code: DataColumnSidecarErrorCode.INCORRECT_KZG_PROOF_COUNT; slot: number; diff --git a/packages/beacon-node/src/chain/validation/blobSidecar.ts b/packages/beacon-node/src/chain/validation/blobSidecar.ts index 75c09cfc8354..68cebb8da5d0 100644 --- a/packages/beacon-node/src/chain/validation/blobSidecar.ts +++ b/packages/beacon-node/src/chain/validation/blobSidecar.ts @@ -11,7 +11,7 @@ import { getBlockHeaderProposerSignatureSet, } from "@lodestar/state-transition"; import {BlobIndex, Root, Slot, SubnetID, deneb, ssz} from "@lodestar/types"; -import {toHex, toRootHex, verifyMerkleBranch} from "@lodestar/utils"; +import {toRootHex, verifyMerkleBranch} from "@lodestar/utils"; import {byteArrayEquals} from "../../util/bytes.js"; import {kzg} from "../../util/kzg.js"; @@ -227,14 +227,14 @@ export async function validateBlobSidecars( export async function validateBlockBlobSidecars( blockSlot: Slot, blockRoot: Root, - blockKzgCommitments: deneb.BlobKzgCommitments, + blockBlobCount: number, blobSidecars: deneb.BlobSidecars ): Promise { - if (blockKzgCommitments.length !== blobSidecars.length) { + if (blockBlobCount !== blobSidecars.length) { throw new BlobSidecarValidationError({ code: BlobSidecarErrorCode.INCORRECT_SIDECAR_COUNT, slot: blockSlot, - expected: blockKzgCommitments.length, + expected: blockBlobCount, actual: blobSidecars.length, }); } @@ -259,11 +259,11 @@ export async function validateBlockBlobSidecars( ); } + const commitments = []; const blobs = []; const proofs = []; for (let i = 
0; i < blobSidecars.length; i++) { const blobSidecar = blobSidecars[i]; - const blobKzgCommitment = blockKzgCommitments[i]; if (blobSidecar.index !== i) { throw new BlobSidecarValidationError( { @@ -275,18 +275,6 @@ export async function validateBlockBlobSidecars( "BlobSidecar index out of order" ); } - if (Buffer.compare(blobSidecar.kzgCommitment, blobKzgCommitment) !== 0) { - throw new BlobSidecarValidationError( - { - code: BlobSidecarErrorCode.INCORRECT_KZG_COMMITMENT, - slot: blockSlot, - blobIdx: i, - expected: toHex(blobKzgCommitment), - actual: toHex(blobSidecar.kzgCommitment), - }, - "BlobSidecar KZG commitment doesn't match corresponding block commitment" - ); - } if (!ssz.phase0.BeaconBlockHeader.equals(blobSidecar.signedBlockHeader.message, firstSidecarBlockHeader)) { throw new BlobSidecarValidationError( { @@ -311,6 +299,7 @@ export async function validateBlockBlobSidecars( ); } + commitments.push(blobSidecar.kzgCommitment); blobs.push(blobSidecar.blob); proofs.push(blobSidecar.kzgProof); } @@ -318,7 +307,7 @@ export async function validateBlockBlobSidecars( // Final batch KZG proof verification let reason: string | undefined = undefined; try { - if (!(await kzg.asyncVerifyBlobKzgProofBatch(blobs, blockKzgCommitments, proofs))) { + if (!(await kzg.asyncVerifyBlobKzgProofBatch(blobs, commitments, proofs))) { reason = "Invalid verifyBlobKzgProofBatch"; } } catch (e) { diff --git a/packages/beacon-node/src/chain/validation/dataColumnSidecar.ts b/packages/beacon-node/src/chain/validation/dataColumnSidecar.ts index 6fdd086a5664..e9bee15b78dc 100644 --- a/packages/beacon-node/src/chain/validation/dataColumnSidecar.ts +++ b/packages/beacon-node/src/chain/validation/dataColumnSidecar.ts @@ -325,7 +325,7 @@ export function verifyDataColumnSidecarInclusionProof(dataColumnSidecar: fulu.Da export async function validateBlockDataColumnSidecars( blockSlot: Slot, blockRoot: Root, - blockKzgCommitments: deneb.BlobKzgCommitments, + blockBlobCount: number, expectedColumnIndices: ColumnIndex[], dataColumnSidecars: fulu.DataColumnSidecars ): Promise { @@ -377,12 +377,12 @@ export async function validateBlockDataColumnSidecars( ); } - if (columnSidecar.kzgCommitments.length !== blockKzgCommitments.length) { + if (columnSidecar.kzgCommitments.length !== blockBlobCount) { throw new DataColumnSidecarValidationError({ code: DataColumnSidecarErrorCode.INCORRECT_KZG_COMMITMENTS_COUNT, slot: blockSlot, columnIdx: columnSidecar.index, - expected: blockKzgCommitments.length, + expected: blockBlobCount, actual: columnSidecar.kzgCommitments.length, }); } @@ -397,19 +397,6 @@ export async function validateBlockDataColumnSidecars( }); } - if ( - columnSidecar.kzgCommitments.some((commitment, cIx) => Buffer.compare(commitment, blockKzgCommitments[cIx]) !== 0) - ) { - throw new DataColumnSidecarValidationError( - { - code: DataColumnSidecarErrorCode.INCORRECT_KZG_COMMITMENTS, - slot: blockSlot, - columnIdx: columnSidecar.index, - }, - "DataColumnSidecar has unexpected KZG commitments" - ); - } - if (!verifyDataColumnSidecarInclusionProof(columnSidecar)) { throw new DataColumnSidecarValidationError( { diff --git a/packages/beacon-node/src/sync/utils/downloadByRange.ts b/packages/beacon-node/src/sync/utils/downloadByRange.ts index ce04161eb02d..a79530af2a0a 100644 --- a/packages/beacon-node/src/sync/utils/downloadByRange.ts +++ b/packages/beacon-node/src/sync/utils/downloadByRange.ts @@ -514,7 +514,7 @@ export async function validateBlobsByRangeResponse( blobSidecarIndex += blockKzgCommitments.length; 
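
For orientation, the slice-and-advance pattern in this hunk leans on the ByRange ordering guarantee: blob sidecars arrive sorted ascending by slot and then by index, so each block consumes exactly one contiguous run of the response. A minimal standalone sketch of that invariant, using hypothetical BlockMeta/SidecarMeta shapes in place of the SSZ types from the patch:

```ts
// Sketch only: pairs each block with its contiguous run of sidecars.
// BlockMeta/SidecarMeta are stand-ins for SignedBeaconBlock and deneb.BlobSidecar.
type BlockMeta = {slot: number; blobCount: number};
type SidecarMeta = {slot: number; index: number};

function pairSidecarsWithBlocks(blocks: BlockMeta[], sidecars: SidecarMeta[]): Map<number, SidecarMeta[]> {
  const bySlot = new Map<number, SidecarMeta[]>();
  let cursor = 0;
  for (const block of blocks) {
    // Blocks without blob commitments consume nothing from the response
    if (block.blobCount === 0) continue;
    const run = sidecars.slice(cursor, cursor + block.blobCount);
    cursor += block.blobCount;
    // Sanity: a well-formed response yields indices 0..blobCount-1, all at the block's slot
    if (run.length !== block.blobCount || !run.every((s, i) => s.slot === block.slot && s.index === i)) {
      throw new Error(`sidecar run does not match block at slot ${block.slot}`);
    }
    bySlot.set(block.slot, run);
  }
  return bySlot;
}
```

Slicing by commitment count, rather than matching indices one by one, is what lets the per-block KZG batches be kicked off and awaited in parallel afterwards.
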
validateSidecarsPromises.push( - validateBlockBlobSidecars(block.message.slot, blockRoot, blockKzgCommitments, blockBlobSidecars) + validateBlockBlobSidecars(block.message.slot, blockRoot, blockKzgCommitments.length, blockBlobSidecars) ); } @@ -574,7 +574,7 @@ export async function validateColumnsByRangeResponse( validateBlockDataColumnSidecars( block.message.slot, blockRoot, - blockKzgCommitments, + blockKzgCommitments.length, request.columns, blockColumnSidecars ) From f5b2f680604907e5dd29902c4fbf9c86f6a4830f Mon Sep 17 00:00:00 2001 From: Cayman Date: Mon, 1 Sep 2025 12:02:11 -0400 Subject: [PATCH 094/173] chore: remove unused validation fn, consolidate validation fn usage --- .../src/chain/validation/blobSidecar.ts | 47 ------------------- .../test/spec/presets/fork_choice.test.ts | 23 ++++----- .../beacon-node/test/unit/util/kzg.test.ts | 4 +- 3 files changed, 10 insertions(+), 64 deletions(-) diff --git a/packages/beacon-node/src/chain/validation/blobSidecar.ts b/packages/beacon-node/src/chain/validation/blobSidecar.ts index 68cebb8da5d0..89f31200ad4a 100644 --- a/packages/beacon-node/src/chain/validation/blobSidecar.ts +++ b/packages/beacon-node/src/chain/validation/blobSidecar.ts @@ -13,7 +13,6 @@ import { import {BlobIndex, Root, Slot, SubnetID, deneb, ssz} from "@lodestar/types"; import {toRootHex, verifyMerkleBranch} from "@lodestar/utils"; -import {byteArrayEquals} from "../../util/bytes.js"; import {kzg} from "../../util/kzg.js"; import {BlobSidecarErrorCode, BlobSidecarGossipError, BlobSidecarValidationError} from "../errors/blobSidecarError.js"; import {GossipAction} from "../errors/gossipValidation.js"; @@ -173,52 +172,6 @@ export async function validateGossipBlobSidecar( } } -// https://github.com/ethereum/consensus-specs/blob/dev/specs/eip4844/beacon-chain.md#validate_blobs_sidecar -export async function validateBlobSidecars( - blockSlot: Slot, - blockRoot: Root, - expectedKzgCommitments: deneb.BlobKzgCommitments, - blobSidecars: deneb.BlobSidecars, - opts: {skipProofsCheck: boolean} = {skipProofsCheck: false} -): Promise { - // assert len(expected_kzg_commitments) == len(blobs) - if (expectedKzgCommitments.length !== blobSidecars.length) { - throw new Error( - `blobSidecars length to commitments length mismatch. 
Blob length: ${blobSidecars.length}, Expected commitments length ${expectedKzgCommitments.length}` - ); - } - - // No need to verify the aggregate proof of zero blobs - if (blobSidecars.length > 0) { - // Verify the blob slot and root matches - const blobs = []; - const proofs = []; - for (let index = 0; index < blobSidecars.length; index++) { - const blobSidecar = blobSidecars[index]; - const blobBlockHeader = blobSidecar.signedBlockHeader.message; - const blobBlockRoot = ssz.phase0.BeaconBlockHeader.hashTreeRoot(blobBlockHeader); - if ( - blobBlockHeader.slot !== blockSlot || - !byteArrayEquals(blobBlockRoot, blockRoot) || - blobSidecar.index !== index || - !byteArrayEquals(expectedKzgCommitments[index], blobSidecar.kzgCommitment) - ) { - throw new Error( - `Invalid blob with slot=${blobBlockHeader.slot} blobBlockRoot=${toRootHex(blobBlockRoot)} index=${ - blobSidecar.index - } for the block blockRoot=${toRootHex(blockRoot)} slot=${blockSlot} index=${index}` - ); - } - blobs.push(blobSidecar.blob); - proofs.push(blobSidecar.kzgProof); - } - - if (!opts.skipProofsCheck) { - await validateBlobsAndBlobProofs(expectedKzgCommitments, blobs, proofs); - } - } -} - /** * Validate all blob sidecars in a block * diff --git a/packages/beacon-node/test/spec/presets/fork_choice.test.ts b/packages/beacon-node/test/spec/presets/fork_choice.test.ts index 7d4a16d04efc..11912f037654 100644 --- a/packages/beacon-node/test/spec/presets/fork_choice.test.ts +++ b/packages/beacon-node/test/spec/presets/fork_choice.test.ts @@ -29,11 +29,7 @@ import { import {AttestationImportOpt, BlobSidecarValidation} from "../../../src/chain/blocks/types.js"; import {BeaconChain, ChainEvent} from "../../../src/chain/index.js"; import {defaultChainOptions} from "../../../src/chain/options.js"; -import { - verifyDataColumnSidecar, - verifyDataColumnSidecarInclusionProof, - verifyDataColumnSidecarKzgProofs, -} from "../../../src/chain/validation/dataColumnSidecar.js"; +import {validateBlockDataColumnSidecars} from "../../../src/chain/validation/dataColumnSidecar.js"; import {ZERO_HASH_HEX} from "../../../src/constants/constants.js"; import {Eth1ForBlockProductionDisabled} from "../../../src/eth1/index.js"; import {PowMergeBlock} from "../../../src/eth1/interface.js"; @@ -221,16 +217,13 @@ const forkChoiceTest = columns = []; } - for (const column of columns) { - verifyDataColumnSidecar(column); - verifyDataColumnSidecarInclusionProof(column); - await verifyDataColumnSidecarKzgProofs( - column.kzgCommitments, - Array.from({length: column.column.length}, () => column.index), - column.column, - column.kzgProofs - ); - } + await validateBlockDataColumnSidecars( + slot, + blockRoot, + (signedBlock as SignedBeaconBlock).message.body.blobKzgCommitments.length, + columns.map((c) => c.index), + columns + ); blockImport = BlockInputColumns.createFromBlock({ forkName: fork, diff --git a/packages/beacon-node/test/unit/util/kzg.test.ts b/packages/beacon-node/test/unit/util/kzg.test.ts index 3e2c46b8d69c..ac242ee03819 100644 --- a/packages/beacon-node/test/unit/util/kzg.test.ts +++ b/packages/beacon-node/test/unit/util/kzg.test.ts @@ -3,7 +3,7 @@ import {NUMBER_OF_COLUMNS} from "@lodestar/params"; import {signedBlockToSignedHeader} from "@lodestar/state-transition"; import {deneb, fulu, ssz} from "@lodestar/types"; import {afterEach, describe, expect, it} from "vitest"; -import {validateBlobSidecars, validateGossipBlobSidecar} from "../../../src/chain/validation/blobSidecar.js"; +import {validateBlockBlobSidecars, validateGossipBlobSidecar} 
from "../../../src/chain/validation/blobSidecar.js"; import {getBlobSidecars, recoverDataColumnSidecars} from "../../../src/util/blobs.js"; import {getDataColumnSidecarsFromBlock} from "../../../src/util/dataColumns.js"; import {kzg} from "../../../src/util/kzg.js"; @@ -62,7 +62,7 @@ describe("KZG", () => { expect(blobSidecars.length).toBe(2); // Full validation - await validateBlobSidecars(slot, blockRoot, kzgCommitments, blobSidecars); + await validateBlockBlobSidecars(slot, blockRoot, kzgCommitments.length, blobSidecars); for (const blobSidecar of blobSidecars) { try { From 43590a701c198e846fa1495a198c59ebdd16e454 Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Tue, 2 Sep 2025 23:50:12 +0700 Subject: [PATCH 095/173] feat: downscore peers for sending range batches on wrong chain and use set instead of array for cacheByRangeResponses --- packages/beacon-node/src/sync/range/range.ts | 1 + .../src/sync/utils/downloadByRange.ts | 157 ++++++++++-------- 2 files changed, 91 insertions(+), 67 deletions(-) diff --git a/packages/beacon-node/src/sync/range/range.ts b/packages/beacon-node/src/sync/range/range.ts index 55a3ea4136f6..9ff2459ba1a6 100644 --- a/packages/beacon-node/src/sync/range/range.ts +++ b/packages/beacon-node/src/sync/range/range.ts @@ -212,6 +212,7 @@ export class RangeSync extends (EventEmitter as {new (): RangeSyncEmitter}) { }); const cached = cacheByRangeResponses({ config: this.config, + network: this.network, cache: this.chain.seenBlockInputCache, syncType, peerIdStr: peer.peerId, diff --git a/packages/beacon-node/src/sync/utils/downloadByRange.ts b/packages/beacon-node/src/sync/utils/downloadByRange.ts index a79530af2a0a..e35d0e26265c 100644 --- a/packages/beacon-node/src/sync/utils/downloadByRange.ts +++ b/packages/beacon-node/src/sync/utils/downloadByRange.ts @@ -12,7 +12,7 @@ import { import {SeenBlockInput} from "../../chain/seenCache/seenGossipBlockInput.js"; import {validateBlockBlobSidecars} from "../../chain/validation/blobSidecar.js"; import {validateBlockDataColumnSidecars} from "../../chain/validation/dataColumnSidecar.js"; -import {INetwork} from "../../network/index.js"; +import {INetwork, PeerAction} from "../../network/index.js"; import {PeerIdStr} from "../../util/peerId.js"; import {RangeSyncType} from "./remoteSyncType.js"; @@ -92,6 +92,7 @@ export type DownloadAndCacheByRangeResults = { export type CacheByRangeResponsesProps = { config: ChainForkConfig; + network: INetwork; cache: SeenBlockInput; syncType: RangeSyncType; peerIdStr: PeerIdStr; @@ -101,45 +102,53 @@ export type CacheByRangeResponsesProps = { export function cacheByRangeResponses({ config, + network, cache, - // syncType, + syncType, peerIdStr, responses, batchBlocks, }: CacheByRangeResponsesProps): IBlockInput[] { const source = BlockInputSource.byRange; const seenTimestampSec = Date.now() / 1000; - const updatedBatchBlocks = [...batchBlocks]; + const updatedBatchBlocks = new Map(batchBlocks.map((block) => [block.slot, block])); const blocks = responses.blocks ?? []; const blockRoots = responses.blockRoots ?? []; for (let i = 0; i < blocks.length; i++) { const block = blocks[i]; - const existing = updatedBatchBlocks.find((b) => b.slot === block.message.slot); + const existing = updatedBatchBlocks.get(block.message.slot); const blockRoot = blockRoots[i] ?? 
config.getForkTypes(block.message.slot).BeaconBlock.hashTreeRoot(block.message);
     const blockRootHex = toRootHex(blockRoot);
     if (existing) {
-      // will throw if root hex does not match (meaning we are following the wrong chain)
-      existing.addBlock(
-        {
-          block,
-          blockRootHex,
-          source,
-          peerIdStr,
-          seenTimestampSec,
-        },
-        {throwOnDuplicateAdd: false}
-      );
+      try {
+        // will throw if root hex does not match (meaning we are following the wrong chain)
+        existing.addBlock(
+          {
+            block,
+            blockRootHex,
+            source,
+            peerIdStr,
+            seenTimestampSec,
+          },
+          {throwOnDuplicateAdd: false}
+        );
+      } catch (err) {
+        network.logger.debug("Following wrong chain for ByRange request", {}, err as Error);
+        if (syncType === RangeSyncType.Finalized) {
+          network.reportPeer(peerIdStr, PeerAction.LowToleranceError, "Missing or mismatching blocks");
+        }
+        break;
+      }
     } else {
-      updatedBatchBlocks.push(
-        cache.getByBlock({
-          block,
-          blockRootHex,
-          source,
-          peerIdStr,
-          seenTimestampSec,
-        })
-      );
+      const blockInput = cache.getByBlock({
+        block,
+        blockRootHex,
+        source,
+        peerIdStr,
+        seenTimestampSec,
+      });
+      updatedBatchBlocks.set(blockInput.slot, blockInput);
     }
   }
 
@@ -148,7 +157,7 @@ export function cacheByRangeResponses({
       .getForkTypes(blobSidecar.signedBlockHeader.message.slot)
       .BeaconBlockHeader.hashTreeRoot(blobSidecar.signedBlockHeader.message);
     const blockRootHex = toRootHex(blockRoot);
-    const existing = updatedBatchBlocks.find((b) => b.slot === blobSidecar.signedBlockHeader.message.slot);
+    const existing = updatedBatchBlocks.get(blobSidecar.signedBlockHeader.message.slot);
     if (existing) {
       if (!isBlockInputBlobs(existing)) {
         throw new DownloadByRangeError({
@@ -159,27 +168,34 @@ export function cacheByRangeResponses({
           blockRoot: prettyBytes(existing.blockRootHex),
         });
       }
-      // will throw if root hex does not match (meaning we are following the wrong chain)
-      existing.addBlob(
-        {
-          blobSidecar,
-          blockRootHex,
-          seenTimestampSec,
-          peerIdStr,
-          source,
-        },
-        {throwOnDuplicateAdd: false}
-      );
+      try {
+        // will throw if root hex does not match (meaning we are following the wrong chain)
+        existing.addBlob(
+          {
+            blobSidecar,
+            blockRootHex,
+            seenTimestampSec,
+            peerIdStr,
+            source,
+          },
+          {throwOnDuplicateAdd: false}
+        );
+      } catch (err) {
+        network.logger.debug("Following wrong chain for ByRange request", {}, err as Error);
+        if (syncType === RangeSyncType.Finalized) {
+          network.reportPeer(peerIdStr, PeerAction.LowToleranceError, "Missing or mismatching blobSidecars");
+        }
+        break;
+      }
     } else {
-      updatedBatchBlocks.push(
-        cache.getByBlob({
-          blockRootHex,
-          blobSidecar,
-          source,
-          peerIdStr,
-          seenTimestampSec,
-        })
-      );
+      const blockInput = cache.getByBlob({
+        blockRootHex,
+        blobSidecar,
+        source,
+        peerIdStr,
+        seenTimestampSec,
+      });
+      updatedBatchBlocks.set(blockInput.slot, blockInput);
     }
   }
 
@@ -188,7 +204,7 @@ export function cacheByRangeResponses({
       .getForkTypes(columnSidecar.signedBlockHeader.message.slot)
      .BeaconBlockHeader.hashTreeRoot(columnSidecar.signedBlockHeader.message);
     const blockRootHex = toRootHex(blockRoot);
-    const existing = updatedBatchBlocks.find((b) => b.slot === columnSidecar.signedBlockHeader.message.slot);
+    const existing = updatedBatchBlocks.get(columnSidecar.signedBlockHeader.message.slot);
     if (existing) {
       if (!isBlockInputColumns(existing)) {
         throw new DownloadByRangeError({
@@ -199,31 +215,38 @@ export function cacheByRangeResponses({
           blockRoot: prettyBytes(existing.blockRootHex),
         });
       }
-      // will throw if root hex does not match (meaning we are following
the wrong chain) - existing.addColumn( - { - columnSidecar, - blockRootHex, - seenTimestampSec, - peerIdStr, - source, - }, - {throwOnDuplicateAdd: false} - ); + try { + // will throw if root hex does not match (meaning we are following the wrong chain) + existing.addColumn( + { + columnSidecar, + blockRootHex, + seenTimestampSec, + peerIdStr, + source, + }, + {throwOnDuplicateAdd: false} + ); + } catch (err) { + network.logger.debug("Following wrong chain for ByRange request", {}, err as Error); + if (syncType === RangeSyncType.Finalized) { + network.reportPeer(peerIdStr, PeerAction.LowToleranceError, "Missing or mismatching dataColumnSidecars"); + } + break; + } } else { - updatedBatchBlocks.push( - cache.getByColumn({ - blockRootHex, - columnSidecar, - source, - peerIdStr, - seenTimestampSec, - }) - ); + const blockInput = cache.getByColumn({ + blockRootHex, + columnSidecar, + source, + peerIdStr, + seenTimestampSec, + }); + updatedBatchBlocks.set(blockInput.slot, blockInput); } } - return updatedBatchBlocks; + return Array.from(updatedBatchBlocks.values()); } export async function downloadByRange({ From 136e07639474dbf981b761c81036f429446d84cb Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Tue, 2 Sep 2025 23:50:54 +0700 Subject: [PATCH 096/173] fix: put sorting of block in batch.downloadingSuccess --- packages/beacon-node/src/sync/range/batch.ts | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/packages/beacon-node/src/sync/range/batch.ts b/packages/beacon-node/src/sync/range/batch.ts index c0bafc15cde5..03ced70290c0 100644 --- a/packages/beacon-node/src/sync/range/batch.ts +++ b/packages/beacon-node/src/sync/range/batch.ts @@ -148,7 +148,7 @@ export class Batch { const neededColumns = new Set(); // ensure blocks are in slot-wise order - for (const blockInput of blocks.sort((a, b) => a.slot - b.slot)) { + for (const blockInput of blocks) { const blockSlot = blockInput.slot; // check if block/data is present (hasBlock/hasAllData). If present then check if startSlot is the same as // blockSlot. If it is then do not need to pull that slot so increment startSlot by 1. 
check will fail
@@ -247,6 +247,9 @@
       throw new BatchError(this.wrongStatusErrorType(BatchStatus.Downloading));
     }
 
+    // ensure that blocks are always sorted before getting stored on the batch.state or being used to getRequests
+    blocks.sort((a, b) => a.slot - b.slot);
+
     this.goodPeers.push(peer);
 
     let allComplete = true;
 
From 805a201902f02dc1217e8d72928a2f85111a342c Mon Sep 17 00:00:00 2001
From: Cayman
Date: Tue, 2 Sep 2025 15:57:48 -0400
Subject: [PATCH 097/173] chore: simplify validation (use same validation
 functions everywhere)

---
 .../chain/errors/dataColumnSidecarError.ts    |   7 +-
 .../src/chain/validation/dataColumnSidecar.ts | 116 +----
 .../src/sync/utils/downloadByRange.ts         |  34 +-
 .../src/sync/utils/downloadByRoot.ts          | 236 ++--------
 .../test/spec/presets/fork_choice.test.ts     |   1 -
 .../unit/chain/validation/blobSidecar.test.ts |  82 ++++
 .../validation/dataColumnSidecar.test.ts      |  88 ++++
 .../unit/sync/utils/downloadByRoot.test.ts    | 413 +----------------
 .../test/unit/util/dataColumn.test.ts         |   8 +-
 9 files changed, 282 insertions(+), 703 deletions(-)
 create mode 100644 packages/beacon-node/test/unit/chain/validation/blobSidecar.test.ts
 create mode 100644 packages/beacon-node/test/unit/chain/validation/dataColumnSidecar.test.ts

diff --git a/packages/beacon-node/src/chain/errors/dataColumnSidecarError.ts b/packages/beacon-node/src/chain/errors/dataColumnSidecarError.ts
index 4eaa77bacb96..937a56d507d7 100644
--- a/packages/beacon-node/src/chain/errors/dataColumnSidecarError.ts
+++ b/packages/beacon-node/src/chain/errors/dataColumnSidecarError.ts
@@ -15,8 +15,6 @@ export enum DataColumnSidecarErrorCode {
   INCORRECT_SIDECAR_COUNT = "DATA_COLUMN_SIDECAR_ERROR_INCORRECT_SIDECAR_COUNT",
   /** Sidecar doesn't match block */
   INCORRECT_BLOCK = "DATA_COLUMN_SIDECAR_ERROR_INCORRECT_BLOCK",
-  /** Sidecar index is not as expected */
-  INCORRECT_INDEX = "DATA_COLUMN_SIDECAR_ERROR_INCORRECT_INDEX",
   /** Sidecar kzg proof count not as expected */
   INCORRECT_KZG_COMMITMENTS_COUNT = "DATA_COLUMN_SIDECAR_ERROR_INCORRECT_KZG_COMMITMENTS_COUNT",
   /** Sidecar kzg proof count not as expected */
   INCORRECT_KZG_PROOF_COUNT = "DATA_COLUMN_SIDECAR_ERROR_INCORRECT_KZG_PROOF_COUNT",
   /** Sidecars proofs not valid */
@@ -36,8 +34,8 @@ export enum DataColumnSidecarErrorCode {
 }
 
 export type DataColumnSidecarErrorType =
-  | {code: DataColumnSidecarErrorCode.INVALID_INDEX; columnIdx: number}
-  | {code: DataColumnSidecarErrorCode.NO_COMMITMENTS; columnIdx: number}
+  | {code: DataColumnSidecarErrorCode.INVALID_INDEX; slot: Slot; columnIdx: number}
+  | {code: DataColumnSidecarErrorCode.NO_COMMITMENTS; slot: Slot; columnIdx: number}
   | {
       code: DataColumnSidecarErrorCode.MISMATCHED_LENGTHS;
       columnLength: number;
@@ -61,7 +59,6 @@ export type DataColumnSidecarErrorType =
       expected: string;
       actual: string;
     }
-  | {code: DataColumnSidecarErrorCode.INCORRECT_INDEX; slot: number; expected: number; actual: number}
   | {
       code: DataColumnSidecarErrorCode.INCORRECT_KZG_COMMITMENTS_COUNT;
       slot: number;
diff --git a/packages/beacon-node/src/chain/validation/dataColumnSidecar.ts b/packages/beacon-node/src/chain/validation/dataColumnSidecar.ts
index 37efdf17adc6..123e348c23d6 100644
--- a/packages/beacon-node/src/chain/validation/dataColumnSidecar.ts
+++ b/packages/beacon-node/src/chain/validation/dataColumnSidecar.ts
@@ -4,12 +4,11 @@ import {
   KZG_COMMITMENTS_SUBTREE_INDEX,
   NUMBER_OF_COLUMNS,
 } from "@lodestar/params";
-import {ColumnIndex, Root, Slot, SubnetID, deneb, fulu, ssz} from "@lodestar/types";
+import {Root, Slot, SubnetID, fulu, ssz} from "@lodestar/types";
 import {toRootHex, verifyMerkleBranch} from "@lodestar/utils";
 import
{computeStartSlotAtEpoch, getBlockHeaderProposerSignatureSet} from "@lodestar/state-transition"; import {Metrics} from "../../metrics/metrics.js"; -import {byteArrayEquals} from "../../util/bytes.js"; import {kzg} from "../../util/kzg.js"; import { DataColumnSidecarErrorCode, @@ -182,90 +181,15 @@ export async function validateGossipDataColumnSidecar( // -- Handled in seenGossipBlockInput } -export async function validateDataColumnsSidecars( - blockSlot: Slot, - blockRoot: Root, - blockKzgCommitments: deneb.BlobKzgCommitments, - dataColumnSidecars: fulu.DataColumnSidecars, - metrics: Metrics | null, - opts: {skipProofsCheck: boolean} = {skipProofsCheck: false} -): Promise { - // Skip verification if there are no data columns - if (dataColumnSidecars.length === 0) { - return; - } - - const commitmentBytes: Uint8Array[] = []; - const cellIndices: number[] = []; - const cells: Uint8Array[] = []; - const proofBytes: Uint8Array[] = []; - - for (let sidecarsIndex = 0; sidecarsIndex < dataColumnSidecars.length; sidecarsIndex++) { - const columnSidecar = dataColumnSidecars[sidecarsIndex]; - const {index: columnIndex, column, kzgCommitments, kzgProofs} = columnSidecar; - const columnBlockHeader = columnSidecar.signedBlockHeader.message; - const columnBlockRoot = ssz.phase0.BeaconBlockHeader.hashTreeRoot(columnBlockHeader); - if ( - columnBlockHeader.slot !== blockSlot || - !byteArrayEquals(columnBlockRoot, blockRoot) || - kzgCommitments.length === 0 || - blockKzgCommitments.length === 0 || - blockKzgCommitments.length !== kzgCommitments.length || - blockKzgCommitments - .map((commitment, i) => byteArrayEquals(commitment, kzgCommitments[i])) - .filter((result) => result === false).length - ) { - throw new Error( - `Invalid data column sidecar slot=${columnBlockHeader.slot} columnBlockRoot=${toRootHex(columnBlockRoot)} columnIndex=${columnIndex} for the block blockRoot=${toRootHex(blockRoot)} slot=${blockSlot} sidecarsIndex=${sidecarsIndex} kzgCommitments=${kzgCommitments.length} blockKzgCommitments=${blockKzgCommitments.length}` - ); - } - - if (columnIndex >= NUMBER_OF_COLUMNS) { - throw new Error( - `Invalid data sidecar columnIndex=${columnIndex} in slot=${blockSlot} blockRoot=${toRootHex(blockRoot)} sidecarsIndex=${sidecarsIndex}` - ); - } - - if (column.length !== kzgCommitments.length || column.length !== kzgProofs.length) { - throw new Error( - `Invalid data sidecar array lengths for columnIndex=${columnIndex} in slot=${blockSlot} blockRoot=${toRootHex(blockRoot)}` - ); - } - - commitmentBytes.push(...kzgCommitments); - cellIndices.push(...Array.from({length: column.length}, () => columnIndex)); - cells.push(...column); - proofBytes.push(...kzgProofs); - } - - if (opts.skipProofsCheck) { - return; - } - - let valid: boolean; - try { - const timer = metrics?.peerDas.kzgVerificationDataColumnBatchTime.startTimer(); - valid = await kzg.asyncVerifyCellKzgProofBatch(commitmentBytes, cellIndices, cells, proofBytes); - timer?.(); - } catch (err) { - (err as Error).message = - `Error in verifyCellKzgProofBatch for slot=${blockSlot} blockRoot=${toRootHex(blockRoot)} commitmentBytes=${commitmentBytes.length} cellIndices=${cellIndices.length} cells=${cells.length} proofBytes=${proofBytes.length}`; - throw err; - } - - if (!valid) { - throw new Error(`Invalid data column sidecars in slot=${blockSlot} blockRoot=${toRootHex(blockRoot)}`); - } -} - /** * SPEC FUNCTION * https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.4/specs/fulu/p2p-interface.md#verify_data_column_sidecar */ -export function 
verifyDataColumnSidecar(dataColumnSidecar: fulu.DataColumnSidecar): void { +function verifyDataColumnSidecar(dataColumnSidecar: fulu.DataColumnSidecar): void { if (dataColumnSidecar.index >= NUMBER_OF_COLUMNS) { throw new DataColumnSidecarGossipError(GossipAction.REJECT, { code: DataColumnSidecarErrorCode.INVALID_INDEX, + slot: dataColumnSidecar.signedBlockHeader.message.slot, columnIdx: dataColumnSidecar.index, }); } @@ -273,6 +197,7 @@ export function verifyDataColumnSidecar(dataColumnSidecar: fulu.DataColumnSideca if (dataColumnSidecar.kzgCommitments.length === 0) { throw new DataColumnSidecarGossipError(GossipAction.REJECT, { code: DataColumnSidecarErrorCode.NO_COMMITMENTS, + slot: dataColumnSidecar.signedBlockHeader.message.slot, columnIdx: dataColumnSidecar.index, }); } @@ -335,22 +260,24 @@ export async function validateBlockDataColumnSidecars( blockSlot: Slot, blockRoot: Root, blockBlobCount: number, - expectedColumnIndices: ColumnIndex[], dataColumnSidecars: fulu.DataColumnSidecars ): Promise { - if (dataColumnSidecars.length !== expectedColumnIndices.length) { - throw new DataColumnSidecarValidationError({ - code: DataColumnSidecarErrorCode.INCORRECT_SIDECAR_COUNT, - slot: blockSlot, - expected: expectedColumnIndices.length, - actual: dataColumnSidecars.length, - }); - } - if (dataColumnSidecars.length === 0) { return; } + if (blockBlobCount === 0) { + throw new DataColumnSidecarValidationError( + { + code: DataColumnSidecarErrorCode.INCORRECT_SIDECAR_COUNT, + slot: blockSlot, + expected: 0, + actual: dataColumnSidecars.length, + }, + "Block has no blob commitments but data column sidecars were provided" + ); + } + // Hash the first sidecar block header and compare the rest via (cheaper) equality const firstSidecarBlockHeader = dataColumnSidecars[0].signedBlockHeader.message; const firstBlockRoot = ssz.phase0.BeaconBlockHeader.hashTreeRoot(firstSidecarBlockHeader); @@ -373,16 +300,15 @@ export async function validateBlockDataColumnSidecars( const proofs: Uint8Array[] = []; for (let i = 0; i < dataColumnSidecars.length; i++) { const columnSidecar = dataColumnSidecars[i]; - const expectedIndex = expectedColumnIndices[i]; - if (columnSidecar.index !== expectedIndex) { + + if (columnSidecar.index >= NUMBER_OF_COLUMNS) { throw new DataColumnSidecarValidationError( { - code: DataColumnSidecarErrorCode.INCORRECT_INDEX, + code: DataColumnSidecarErrorCode.INVALID_INDEX, slot: blockSlot, - expected: expectedIndex, - actual: columnSidecar.index, + columnIdx: columnSidecar.index, }, - "DataColumnSidecar has unexpected index" + "DataColumnSidecar has invalid index" ); } diff --git a/packages/beacon-node/src/sync/utils/downloadByRange.ts b/packages/beacon-node/src/sync/utils/downloadByRange.ts index e35d0e26265c..d6e9d3741321 100644 --- a/packages/beacon-node/src/sync/utils/downloadByRange.ts +++ b/packages/beacon-node/src/sync/utils/downloadByRange.ts @@ -1,5 +1,5 @@ import {ChainForkConfig} from "@lodestar/config"; -import {ForkPostDeneb, ForkPostFulu, isForkPostDeneb, isForkPostFulu} from "@lodestar/params"; +import {ForkPostDeneb, ForkPostFulu} from "@lodestar/params"; import {SignedBeaconBlock, Slot, deneb, fulu, phase0} from "@lodestar/types"; import {LodestarError, Logger, fromHex, prettyBytes, toRootHex} from "@lodestar/utils"; import { @@ -593,14 +593,32 @@ export async function validateColumnsByRangeResponse( const blockColumnSidecars = columnSidecars.slice(columnSidecarIndex, columnSidecarIndex + expectedColumns); columnSidecarIndex += expectedColumns; + // Validate that all 
requested columns are present and in order + if (blockColumnSidecars.length !== expectedColumns) { + throw new DownloadByRangeError( + { + code: DownloadByRangeErrorCode.MISSING_COLUMNS, + expected: expectedColumns, + actual: blockColumnSidecars.length, + }, + "Missing data columns in DataColumnSidecarsByRange response" + ); + } + for (let i = 0; i < blockColumnSidecars.length; i++) { + if (blockColumnSidecars[i].index !== request.columns[i]) { + throw new DownloadByRangeError( + { + code: DownloadByRangeErrorCode.MISSING_COLUMNS, + expected: expectedColumns, + actual: blockColumnSidecars.length, + }, + "Data columns not in order or do not match requested columns in DataColumnSidecarsByRange response" + ); + } + } + validateSidecarsPromises.push( - validateBlockDataColumnSidecars( - block.message.slot, - blockRoot, - blockKzgCommitments.length, - request.columns, - blockColumnSidecars - ) + validateBlockDataColumnSidecars(block.message.slot, blockRoot, blockKzgCommitments.length, blockColumnSidecars) ); } diff --git a/packages/beacon-node/src/sync/utils/downloadByRoot.ts b/packages/beacon-node/src/sync/utils/downloadByRoot.ts index fca92fb67142..a2e47051265e 100644 --- a/packages/beacon-node/src/sync/utils/downloadByRoot.ts +++ b/packages/beacon-node/src/sync/utils/downloadByRoot.ts @@ -6,11 +6,8 @@ import {LodestarError, fromHex, prettyBytes, toRootHex} from "@lodestar/utils"; import {isBlockInputBlobs, isBlockInputColumns} from "../../chain/blocks/blockInput/blockInput.js"; import {BlobMeta, BlockInputSource, IBlockInput, MissingColumnMeta} from "../../chain/blocks/blockInput/types.js"; import {SeenBlockInput} from "../../chain/seenCache/seenGossipBlockInput.js"; -import {validateBlobSidecarInclusionProof, validateBlobsAndBlobProofs} from "../../chain/validation/blobSidecar.js"; -import { - verifyDataColumnSidecarInclusionProof, - verifyDataColumnSidecarKzgProofs, -} from "../../chain/validation/dataColumnSidecar.js"; +import {validateBlockBlobSidecars} from "../../chain/validation/blobSidecar.js"; +import {validateBlockDataColumnSidecars} from "../../chain/validation/dataColumnSidecar.js"; import {IExecutionEngine} from "../../execution/index.js"; import {INetwork} from "../../network/interface.js"; import {prettyPrintPeerIdStr} from "../../network/util.js"; @@ -331,7 +328,7 @@ export async function fetchAndValidateBlobs({ // responses can be sparse for both types of requests to sort to make sure its in sequential order blobSidecars.sort((a, b) => a.index - b.index); - await validateBlobs({config, peerIdStr, blockRoot, blobMeta, blobSidecars}); + await validateBlockBlobSidecars(block.message.slot, blockRoot, blobMeta.length, blobSidecars); return blobSidecars; } @@ -397,72 +394,6 @@ export async function fetchBlobsByRoot({ return await network.sendBlobSidecarsByRoot(peerIdStr, blobsRequest); } -export async function validateBlobs({ - config, - blockRoot, - peerIdStr, - blobMeta, - blobSidecars, -}: Pick & { - blobSidecars: deneb.BlobSidecars; -}): Promise { - const requestedIndices = blobMeta.map((b) => b.index); - for (const blobSidecar of blobSidecars) { - if (!requestedIndices.includes(blobSidecar.index)) { - throw new DownloadByRootError( - { - code: DownloadByRootErrorCode.EXTRA_SIDECAR_RECEIVED, - peer: prettyPrintPeerIdStr(peerIdStr), - blockRoot: prettyBytes(blockRoot), - invalidIndex: blobSidecar.index, - }, - "received a blobSidecar that was not requested" - ); - } - - const headerRoot = config - .getForkTypes(blobSidecar.signedBlockHeader.message.slot) - 
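
The two comments above describe plain set arithmetic over column indices; a standalone sketch of the same selection under the same assumptions, with number[] standing in for ColumnIndex lists and the helper name purely hypothetical:

```ts
// Sketch only: split reconstructed sidecars into the ones this node still
// needs and the custody columns it must publish because they never arrived.
function selectReconstructedColumns<T extends {index: number}>(
  custodyColumns: number[],
  missingColumns: number[],
  reconstructed: T[]
): {needed: T[]; needToPublish: T[]} {
  const missing = new Set(missingColumns);
  // Custody columns that already arrived were already published via gossip
  const alreadyPublished = new Set(custodyColumns.filter((index) => !missing.has(index)));
  // Reconstructed sidecars still needed for sampling/import
  const needed = reconstructed.filter((sidecar) => missing.has(sidecar.index));
  // Custody sidecars recovered locally that the network has not seen yet
  const needToPublish = reconstructed.filter(
    (sidecar) => custodyColumns.includes(sidecar.index) && !alreadyPublished.has(sidecar.index)
  );
  return {needed, needToPublish};
}
```

Publishing only the custody columns that never arrived keeps gossip traffic bounded while still satisfying the spec requirement to share reconstructed sidecars with the network.
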
.BeaconBlockHeader.hashTreeRoot(blobSidecar.signedBlockHeader.message); - if (!byteArrayEquals(blockRoot, headerRoot)) { - throw new DownloadByRootError( - { - code: DownloadByRootErrorCode.MISMATCH_BLOCK_ROOT, - peer: prettyPrintPeerIdStr(peerIdStr), - requestedBlockRoot: prettyBytes(blockRoot), - receivedBlockRoot: prettyBytes(headerRoot), - }, - `blobSidecar header root did not match requested blockRoot for index=${blobSidecar.index}` - ); - } - - if (!validateBlobSidecarInclusionProof(blobSidecar)) { - throw new DownloadByRootError( - { - code: DownloadByRootErrorCode.INVALID_INCLUSION_PROOF, - peer: prettyPrintPeerIdStr(peerIdStr), - blockRoot: prettyBytes(blockRoot), - sidecarIndex: blobSidecar.index, - }, - `invalid inclusion proof for blobSidecar at index=${blobSidecar.index}` - ); - } - } - - try { - await validateBlobsAndBlobProofs( - blobSidecars.map((b) => b.kzgCommitment), - blobSidecars.map((b) => b.blob), - blobSidecars.map((b) => b.kzgProof) - ); - } catch { - throw new DownloadByRootError({ - code: DownloadByRootErrorCode.INVALID_KZG_PROOF, - peer: prettyPrintPeerIdStr(peerIdStr), - blockRoot: prettyBytes(blockRoot), - }); - } -} - export async function fetchAndValidateColumns({ config, network, @@ -473,43 +404,35 @@ export async function fetchAndValidateColumns({ blockRoot, columnMeta, }: FetchByRootAndValidateColumnsProps): Promise { - let columnSidecars: fulu.DataColumnSidecars | null = []; - try { - columnSidecars = await fetchGetBlobsV2AndBuildSidecars({ - config, - executionEngine, - forkName, - block, - columnMeta, - }); - } catch (err) { + const slot = block.message.slot; + const blobCount = block.message.body.blobKzgCommitments.length; + const blobsV2ColumnSidecars = await fetchGetBlobsV2AndBuildSidecars({ + config, + executionEngine, + forkName, + block, + columnMeta, + }).catch((err) => { network.logger.error( - `error building columnSidecars for blockRoot=${prettyBytes(blockRoot)} via getBlobsV2`, - {}, + "error building columnSidecars via getBlobsV2", + {slot, blockRoot: prettyBytes(blockRoot)}, err as Error ); - } - - if (columnSidecars?.length) { + return null; + }); + if (blobsV2ColumnSidecars?.length) { // limit reconstructed to only the ones we need - const needed = columnSidecars.filter((c) => columnMeta.missing.includes(c.index)); + const needed = blobsV2ColumnSidecars.filter((c) => columnMeta.missing.includes(c.index)); // spec states that reconstructed sidecars need to be published to the network, but only requires // publishing the ones that we custody and have not already been published. 
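// custody columns that are not in columnMeta.missing were already received, and therefore already published, via gossip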
const alreadyPublished = network.custodyConfig.custodyColumns.filter( (index) => !columnMeta.missing.includes(index) ); - const needToPublish = columnSidecars.filter( + const needToPublish = blobsV2ColumnSidecars.filter( (c) => network.custodyConfig.custodyColumns.includes(c.index) && !alreadyPublished.includes(c.index) ); - // need to validate the ones we sample and will process - await validateColumnSidecars({ - config, - peerIdStr, - blockRoot, - columnMeta, - needed, - needToPublish, - }); + // need to validate both the ones we sample AND ones we will publish + await validateBlockDataColumnSidecars(slot, blockRoot, blobCount, [...needed, ...needToPublish]); needToPublish.map((column) => network.publishDataColumnSidecar(column).catch((err) => network.logger.error( @@ -525,14 +448,24 @@ export async function fetchAndValidateColumns({ return needed; } - columnSidecars = await network.sendDataColumnSidecarsByRoot(peerIdStr, [{blockRoot, columns: columnMeta.missing}]); - await validateColumnSidecars({ - config, - peerIdStr, - blockRoot, - columnMeta, - needed: columnSidecars, - }); + const columnSidecars = await network.sendDataColumnSidecarsByRoot(peerIdStr, [ + {blockRoot, columns: columnMeta.missing}, + ]); + for (let i = 0; i < columnMeta.missing.length; i++) { + const columnSidecar = columnSidecars[i]; + if (columnSidecar.index !== columnMeta.missing[i]) { + throw new DownloadByRootError( + { + code: DownloadByRootErrorCode.EXTRA_SIDECAR_RECEIVED, + peer: prettyPrintPeerIdStr(peerIdStr), + blockRoot: prettyBytes(blockRoot), + invalidIndex: columnSidecar.index, + }, + "Received a columnSidecar that was not requested" + ); + } + } + await validateBlockDataColumnSidecars(slot, blockRoot, blobCount, columnSidecars); return columnSidecars; } @@ -568,55 +501,23 @@ export async function fetchColumnsByRoot({ return await network.sendDataColumnSidecarsByRoot(peerIdStr, [{blockRoot, columns: columnMeta.missing}]); } -export type ValidateColumnSidecarProps = Pick< - FetchByRootAndValidateColumnsProps, - "config" | "peerIdStr" | "blockRoot" -> & { - columnSidecar: fulu.DataColumnSidecar; -}; -export function validateColumnSidecar({config, peerIdStr, blockRoot, columnSidecar}: ValidateColumnSidecarProps): void { - const headerRoot = config - .getForkTypes(columnSidecar.signedBlockHeader.message.slot) - .BeaconBlockHeader.hashTreeRoot(columnSidecar.signedBlockHeader.message); - if (!byteArrayEquals(blockRoot, headerRoot)) { - throw new DownloadByRootError( - { - code: DownloadByRootErrorCode.MISMATCH_BLOCK_ROOT, - peer: prettyPrintPeerIdStr(peerIdStr), - requestedBlockRoot: prettyBytes(blockRoot), - receivedBlockRoot: prettyBytes(toRootHex(headerRoot)), - }, - `columnSidecar.signedBlockHeader not match requested blockRoot for index=${columnSidecar.index}` - ); - } - - if (!verifyDataColumnSidecarInclusionProof(columnSidecar)) { - throw new DownloadByRootError({ - code: DownloadByRootErrorCode.INVALID_INCLUSION_PROOF, - peer: prettyPrintPeerIdStr(peerIdStr), - blockRoot: prettyBytes(blockRoot), - sidecarIndex: columnSidecar.index, - }); - } -} - export type ValidateColumnSidecarsProps = Pick< FetchByRootAndValidateColumnsProps, "config" | "peerIdStr" | "blockRoot" | "columnMeta" > & { + slot: number; + blobCount: number; needed?: fulu.DataColumnSidecars; needToPublish?: fulu.DataColumnSidecars; - /* should only be used for testing purposes */ - validateFn?: (props: ValidateColumnSidecarProps) => void; }; export async function validateColumnSidecars({ - config, peerIdStr, + slot, blockRoot, + 
blobCount, columnMeta, needed = [], needToPublish = [], - validateFn = validateColumnSidecar, }: ValidateColumnSidecarsProps): Promise<void> { const requestedIndices = columnMeta.missing; for (const columnSidecar of needed) { @@ -631,57 +532,8 @@ export async function validateColumnSidecars({ "Received a columnSidecar that was not requested" ); } - - try { - validateFn({ - config, - peerIdStr, - blockRoot, - columnSidecar, - }); - } catch (err) { - (err as Error).message = - `Error validating needed columnSidecar index=${columnSidecar.index}. Validation error: ${(err as Error).message}`; - throw err; - } - } - - const checkedIndices = needed.map((c) => c.index); - const needToCheckProof: fulu.DataColumnSidecars = []; - for (const columnSidecar of needToPublish) { - if (!checkedIndices.includes(columnSidecar.index)) { - try { - validateFn({ - config, - peerIdStr, - blockRoot, - columnSidecar, - }); - } catch (err) { - (err as Error).message = - `Error validating needToPublish columnSidecar index=${columnSidecar.index}. Validation error: ${(err as Error).message}`; - throw err; - } - needToCheckProof.push(columnSidecar); - } - } - - const columnSidecars = [...needed, ...needToCheckProof]; - try { - // TODO(fulu): need to double check that the construction of these arrays is correct - await verifyDataColumnSidecarKzgProofs( - columnSidecars.flatMap((c) => c.kzgCommitments), - columnSidecars.flatMap((c) => Array.from({length: c.column.length}, () => c.index)), - columnSidecars.flatMap((c) => c.column), - columnSidecars.flatMap((c) => c.kzgProofs) - ); - } catch { - throw new DownloadByRootError({ - code: DownloadByRootErrorCode.INVALID_KZG_PROOF, - peer: prettyPrintPeerIdStr(peerIdStr), - blockRoot: prettyBytes(blockRoot), - }); - } + } + await validateBlockDataColumnSidecars(slot, blockRoot, blobCount, [...needed, ...needToPublish]); } export enum DownloadByRootErrorCode { diff --git a/packages/beacon-node/test/spec/presets/fork_choice.test.ts b/packages/beacon-node/test/spec/presets/fork_choice.test.ts index 11912f037654..8aa890bf1cee 100644 --- a/packages/beacon-node/test/spec/presets/fork_choice.test.ts +++ b/packages/beacon-node/test/spec/presets/fork_choice.test.ts @@ -221,7 +221,6 @@ const forkChoiceTest = slot, blockRoot, (signedBlock as SignedBeaconBlock).message.body.blobKzgCommitments.length, - columns.map((c) => c.index), columns ); diff --git a/packages/beacon-node/test/unit/chain/validation/blobSidecar.test.ts b/packages/beacon-node/test/unit/chain/validation/blobSidecar.test.ts new file mode 100644 index 000000000000..9e092cb3d5b7 --- /dev/null +++ b/packages/beacon-node/test/unit/chain/validation/blobSidecar.test.ts @@ -0,0 +1,82 @@ +import {ForkName} from "@lodestar/params"; +import {ssz} from "@lodestar/types"; +import {describe, expect, it} from "vitest"; +import {BlobSidecarValidationError} from "../../../../src/chain/errors/blobSidecarError.js"; +import {validateBlockBlobSidecars} from "../../../../src/chain/validation/blobSidecar.js"; +import {generateBlockWithBlobSidecars} from "../../../utils/blocksAndData.js"; + +describe("validateBlockBlobSidecars", () => { + const {block, blockRoot, blobSidecars} = generateBlockWithBlobSidecars({forkName: ForkName.deneb}); + + it("should validate correct blob sidecars", async () => { + await expect( + validateBlockBlobSidecars( + block.message.slot, + blockRoot, + block.message.body.blobKzgCommitments.length, + blobSidecars + ) + ).resolves.toBeUndefined(); + }); + + it("should error on no blobs in block", async () => { + await
expect(validateBlockBlobSidecars(block.message.slot, blockRoot, 0, blobSidecars)).rejects.toThrow( + BlobSidecarValidationError + ); + }); + + it("should error if sidecar block header doesn't match block", async () => { + const invalidSidecar = ssz.deneb.BlobSidecar.clone(blobSidecars[0]); + invalidSidecar.signedBlockHeader.message.slot += 1; // invalid slot (will change the root) + + await expect( + validateBlockBlobSidecars(block.message.slot, blockRoot, block.message.body.blobKzgCommitments.length, [ + invalidSidecar, + ]) + ).rejects.toThrow(BlobSidecarValidationError); + }); + + it("should error on invalid index", async () => { + const invalidSidecar = ssz.deneb.BlobSidecar.clone(blobSidecars[0]); + invalidSidecar.index = block.message.body.blobKzgCommitments.length; // invalid index + + await expect( + validateBlockBlobSidecars(block.message.slot, blockRoot, block.message.body.blobKzgCommitments.length, [ + invalidSidecar, + ]) + ).rejects.toThrow(BlobSidecarValidationError); + }); + + it("should error on invalid kzg commitment", async () => { + const invalidSidecar = ssz.deneb.BlobSidecar.clone(blobSidecars[0]); + invalidSidecar.kzgCommitment = invalidSidecar.kzgCommitment.map((b) => b ^ 1); // invalid commitment + + await expect( + validateBlockBlobSidecars(block.message.slot, blockRoot, block.message.body.blobKzgCommitments.length, [ + invalidSidecar, + ]) + ).rejects.toThrow(BlobSidecarValidationError); + }); + + it("should error on invalid kzg commitment inclusion proof", async () => { + const invalidSidecar = ssz.deneb.BlobSidecar.clone(blobSidecars[0]); + invalidSidecar.kzgCommitmentInclusionProof[0][0] ^= 1; // invalid inclusion proof + + await expect( + validateBlockBlobSidecars(block.message.slot, blockRoot, block.message.body.blobKzgCommitments.length, [ + invalidSidecar, + ]) + ).rejects.toThrow(BlobSidecarValidationError); + }); + + it("should error on invalid kzg proof", async () => { + const invalidSidecar = ssz.deneb.BlobSidecar.clone(blobSidecars[0]); + invalidSidecar.kzgProof = invalidSidecar.kzgProof.map((b) => b ^ 1); // invalid proof + + await expect( + validateBlockBlobSidecars(block.message.slot, blockRoot, block.message.body.blobKzgCommitments.length, [ + invalidSidecar, + ]) + ).rejects.toThrow(BlobSidecarValidationError); + }); +}); diff --git a/packages/beacon-node/test/unit/chain/validation/dataColumnSidecar.test.ts b/packages/beacon-node/test/unit/chain/validation/dataColumnSidecar.test.ts new file mode 100644 index 000000000000..e01d719ffca3 --- /dev/null +++ b/packages/beacon-node/test/unit/chain/validation/dataColumnSidecar.test.ts @@ -0,0 +1,88 @@ +import {ForkName, NUMBER_OF_COLUMNS} from "@lodestar/params"; +import {ssz} from "@lodestar/types"; +import {describe, expect, it} from "vitest"; +import {DataColumnSidecarValidationError} from "../../../../src/chain/errors/dataColumnSidecarError.js"; +import {validateBlockDataColumnSidecars} from "../../../../src/chain/validation/dataColumnSidecar.js"; +import {generateBlockWithColumnSidecars} from "../../../utils/blocksAndData.js"; + +describe("validateBlockDataColumnSidecars", () => { + const {block, blockRoot, columnSidecars} = generateBlockWithColumnSidecars({forkName: ForkName.fulu}); + + it("should validate correct column sidecars", async () => { + await expect( + validateBlockDataColumnSidecars( + block.message.slot, + blockRoot, + block.message.body.blobKzgCommitments.length, + columnSidecars + ) + ).resolves.toBeUndefined(); + }); + + it("should validate empty sidecars array", async () => { + 
await expect( + validateBlockDataColumnSidecars(block.message.slot, blockRoot, block.message.body.blobKzgCommitments.length, []) + ).resolves.toBeUndefined(); + }); + + it("should error on no blobs in block", async () => { + await expect(validateBlockDataColumnSidecars(block.message.slot, blockRoot, 0, columnSidecars)).rejects.toThrow( + DataColumnSidecarValidationError + ); + }); + + it("should error if sidecar block header doesn't match block", async () => { + const invalidSidecar = ssz.fulu.DataColumnSidecar.clone(columnSidecars[0]); + invalidSidecar.signedBlockHeader.message.slot += 1; // invalid slot (will change the root) + + await expect( + validateBlockDataColumnSidecars(block.message.slot, blockRoot, block.message.body.blobKzgCommitments.length, [ + invalidSidecar, + ]) + ).rejects.toThrow(DataColumnSidecarValidationError); + }); + + it("should error on invalid column index", async () => { + const invalidSidecar = ssz.fulu.DataColumnSidecar.clone(columnSidecars[0]); + invalidSidecar.index = NUMBER_OF_COLUMNS; // invalid index + + await expect( + validateBlockDataColumnSidecars(block.message.slot, blockRoot, block.message.body.blobKzgCommitments.length, [ + invalidSidecar, + ]) + ).rejects.toThrow(DataColumnSidecarValidationError); + }); + + it("should error on invalid kzg commitments", async () => { + const invalidSidecar = ssz.fulu.DataColumnSidecar.clone(columnSidecars[0]); + invalidSidecar.kzgCommitments = columnSidecars[0].kzgCommitments.map((commitment) => commitment.map((b) => b ^ 1)); // invalid commitments + + await expect( + validateBlockDataColumnSidecars(block.message.slot, blockRoot, block.message.body.blobKzgCommitments.length, [ + invalidSidecar, + ]) + ).rejects.toThrow(DataColumnSidecarValidationError); + }); + + it("should error on invalid kzg commitments inclusion proofs", async () => { + const invalidSidecar = ssz.fulu.DataColumnSidecar.clone(columnSidecars[0]); + invalidSidecar.kzgCommitmentsInclusionProof[0][0] ^= 1; // invalid inclusion proof + + await expect( + validateBlockDataColumnSidecars(block.message.slot, blockRoot, block.message.body.blobKzgCommitments.length, [ + invalidSidecar, + ]) + ).rejects.toThrow(DataColumnSidecarValidationError); + }); + + it("should error on invalid kzg proof", async () => { + const invalidSidecar = ssz.fulu.DataColumnSidecar.clone(columnSidecars[0]); + invalidSidecar.kzgProofs = columnSidecars[0].kzgProofs.map((proof) => proof.map((b) => b ^ 1)); // invalid proofs + + await expect( + validateBlockDataColumnSidecars(block.message.slot, blockRoot, block.message.body.blobKzgCommitments.length, [ + invalidSidecar, + ]) + ).rejects.toThrow(DataColumnSidecarValidationError); + }); +}); diff --git a/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts b/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts index a51494fa1664..c6e454bc39b1 100644 --- a/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts +++ b/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts @@ -1,17 +1,18 @@ import {randomBytes} from "node:crypto"; -import {BYTES_PER_BLOB, BYTES_PER_CELL, BYTES_PER_COMMITMENT, BYTES_PER_PROOF} from "@crate-crypto/node-eth-kzg"; +import {BYTES_PER_BLOB, BYTES_PER_COMMITMENT, BYTES_PER_PROOF} from "@crate-crypto/node-eth-kzg"; import {ForkName, NUMBER_OF_COLUMNS} from "@lodestar/params"; import {deneb, fulu, ssz} from "@lodestar/types"; import {BlobAndProof} from "@lodestar/types/lib/deneb/types.js"; import {prettyBytes} from "@lodestar/utils"; import {afterAll, afterEach, 
beforeAll, beforeEach, describe, expect, it, vi} from "vitest"; import {BlobMeta, MissingColumnMeta} from "../../../../src/chain/blocks/blockInput/types.js"; +import {validateBlockBlobSidecars} from "../../../../src/chain/validation/blobSidecar.js"; +import {validateBlockDataColumnSidecars} from "../../../../src/chain/validation/dataColumnSidecar.js"; import {IExecutionEngine} from "../../../../src/execution/index.js"; import {INetwork, prettyPrintPeerIdStr} from "../../../../src/network/index.js"; import { DownloadByRootError, DownloadByRootErrorCode, - ValidateColumnSidecarsProps, fetchAndValidateBlobs, fetchAndValidateBlock, fetchAndValidateColumns, @@ -19,9 +20,6 @@ import { fetchColumnsByRoot, fetchGetBlobsV1AndBuildSidecars, fetchGetBlobsV2AndBuildSidecars, - validateBlobs, - validateColumnSidecar, - validateColumnSidecars, } from "../../../../src/sync/utils/downloadByRoot.js"; import {kzgCommitmentToVersionedHash} from "../../../../src/util/blobs.js"; import {CustodyConfig} from "../../../../src/util/dataColumns.js"; @@ -405,13 +403,12 @@ describe("downloadByRoot.ts", () => { } await expect( - validateBlobs({ - config, - peerIdStr, - blockRoot: denebBlockWithBlobs.blockRoot, - blobSidecars: response, - blobMeta, - }) + validateBlockBlobSidecars( + denebBlockWithBlobs.block.message.slot, + denebBlockWithBlobs.blockRoot, + denebBlockWithBlobs.block.message.body.blobKzgCommitments.length, + response + ) ).resolves.toBeUndefined(); }); @@ -488,115 +485,6 @@ describe("downloadByRoot.ts", () => { }); }); - describe("validateBlobs", () => { - let denebBlockWithBlobs: ReturnType; - let blockRoot: Uint8Array; - let blobMeta: BlobMeta[]; - let blobSidecars: deneb.BlobSidecars; - - beforeAll(() => { - denebBlockWithBlobs = generateBlockWithBlobSidecars({forkName: ForkName.deneb}); - blockRoot = denebBlockWithBlobs.blockRoot; - blobSidecars = denebBlockWithBlobs.blobSidecars; - blobMeta = blobSidecars.map((b) => ({index: b.index}) as BlobMeta); - }); - - it("should successfully validate all blobSidecars", async () => { - await expect( - validateBlobs({ - config, - peerIdStr, - blockRoot, - blobMeta, - blobSidecars, - }) - ).resolves.toBeUndefined(); - }); - - it("should throw error for extra un-requested blobSidecar", async () => { - try { - await validateBlobs({ - config, - peerIdStr, - blockRoot, - blobMeta: blobMeta.slice(0, -1), - blobSidecars, - }); - } catch (err) { - expect(err).toBeInstanceOf(DownloadByRootError); - expect((err as any).type.code).toBe(DownloadByRootErrorCode.EXTRA_SIDECAR_RECEIVED); - expect((err as any).type.peer).toBe(prettyPeerIdStr); - expect((err as any).type.blockRoot).toBe(prettyBytes(blockRoot)); - expect((err as any).type.invalidIndex).toBe(blobMeta.at(-1)?.index); - expect((err as any).message).toBe("received a blobSidecar that was not requested"); - } - }); - - it("should throw error for mismatched block root in blob header", async () => { - const requestedBlockRoot = new Uint8Array(ROOT_SIZE).fill(0xac); - try { - await validateBlobs({ - config, - peerIdStr, - blockRoot: requestedBlockRoot, - blobMeta, - blobSidecars, - }); - } catch (err) { - expect(err).toBeInstanceOf(DownloadByRootError); - expect((err as any).type.code).toBe(DownloadByRootErrorCode.MISMATCH_BLOCK_ROOT); - expect((err as any).type.peer).toBe(prettyPeerIdStr); - expect((err as any).type.requestedBlockRoot).toBe(prettyBytes(requestedBlockRoot)); - expect((err as any).type.receivedBlockRoot).toBe(prettyBytes(denebBlockWithBlobs.blockRoot)); - expect((err as any).message).toEqual("blobSidecar 
header root did not match requested blockRoot for index=0"); - } - }); - - it("should throw error for invalid inclusion proof", async () => { - const invalidBlobSidecar = ssz.deneb.BlobSidecar.clone(denebBlockWithBlobs.blobSidecars[0]); - // Corrupt the inclusion proof to make it invalid - invalidBlobSidecar.kzgCommitmentInclusionProof[0] = new Uint8Array(32).fill(255); - - try { - await validateBlobs({ - config, - peerIdStr, - blockRoot, - blobMeta: [blobMeta[0]], - blobSidecars: [invalidBlobSidecar], - }); - } catch (err) { - expect(err).toBeInstanceOf(DownloadByRootError); - expect((err as any).type.code).toBe(DownloadByRootErrorCode.INVALID_INCLUSION_PROOF); - expect((err as any).type.peer).toBe(prettyPeerIdStr); - expect((err as any).type.blockRoot).toBe(prettyBytes(blockRoot)); - expect((err as any).type.sidecarIndex).toBe(invalidBlobSidecar.index); - expect((err as any).message).toEqual("invalid inclusion proof for blobSidecar at index=0"); - } - }); - - it("should throw error for invalid KZG proof", async () => { - const invalidBlobSidecar = ssz.deneb.BlobSidecar.clone(denebBlockWithBlobs.blobSidecars[0]); - // Corrupt a single proof in the batch and make sure all trip as invalid - invalidBlobSidecar.kzgProof = new Uint8Array(48).fill(255); - - try { - await validateBlobs({ - config, - peerIdStr, - blockRoot, - blobMeta, - blobSidecars: [invalidBlobSidecar, ...blobSidecars.slice(1)], - }); - } catch (err) { - expect(err).toBeInstanceOf(DownloadByRootError); - expect((err as any).type.code).toBe(DownloadByRootErrorCode.INVALID_KZG_PROOF); - expect((err as any).type.peer).toBe(prettyPeerIdStr); - expect((err as any).type.blockRoot).toBe(prettyBytes(blockRoot)); - } - }); - }); - describe("fetchAndValidateColumns", () => { const forkName = ForkName.fulu; let fuluBlockWithColumns: ReturnType; @@ -1091,7 +979,12 @@ describe("downloadByRoot.ts", () => { expect(columnSidecar.signedBlockHeader.message.stateRoot).toEqual(fuluBlockWithColumns.block.message.stateRoot); expect( - validateColumnSidecar({config, peerIdStr, blockRoot: fuluBlockWithColumns.blockRoot, columnSidecar}) + validateBlockDataColumnSidecars( + columnSidecar.signedBlockHeader.message.slot, + fuluBlockWithColumns.blockRoot, + fuluBlockWithColumns.block.message.body.blobKzgCommitments.length, + [columnSidecar] + ) ).toBeUndefined(); } }); @@ -1126,282 +1019,6 @@ describe("downloadByRoot.ts", () => { }); }); - describe("validateColumnSidecar", () => { - let fuluBlockWithColumns: ReturnType; - - beforeAll(() => { - fuluBlockWithColumns = generateBlockWithColumnSidecars({forkName: ForkName.fulu}); - }); - - it("should successfully validate column sidecar", () => { - const columnSidecar = fuluBlockWithColumns.columnSidecars[0]; - const testBlockRoot = fuluBlockWithColumns.blockRoot; - - // This should not throw - expect(() => { - validateColumnSidecar({ - config, - peerIdStr, - blockRoot: testBlockRoot, - columnSidecar, - }); - }).not.toThrow(); - }); - - it("should throw error for mismatched block root in column header", () => { - const columnSidecar = fuluBlockWithColumns.columnSidecars[0]; - const wrongBlockRoot = new Uint8Array(32).fill(1); - try { - validateColumnSidecar({ - config, - peerIdStr, - blockRoot: wrongBlockRoot, - columnSidecar, - }); - } catch (error) { - expect(error).toBeInstanceOf(DownloadByRootError); - expect((error as any).type.code).toBe(DownloadByRootErrorCode.MISMATCH_BLOCK_ROOT); - expect((error as any).type.peer).toBe(prettyPeerIdStr); - expect((error as 
any).type.requestedBlockRoot).toBe(prettyBytes(wrongBlockRoot)); - } - }); - - it("should throw error for invalid inclusion proof", () => { - const columnSidecar = ssz.fulu.DataColumnSidecar.clone(fuluBlockWithColumns.columnSidecars[0]); - // Corrupt the inclusion proof to make it invalid - columnSidecar.kzgCommitmentsInclusionProof[0] = new Uint8Array(32).fill(255); - try { - validateColumnSidecar({ - config, - peerIdStr, - blockRoot: fuluBlockWithColumns.blockRoot, - columnSidecar, - }); - } catch (err) { - expect(err).toBeInstanceOf(DownloadByRootError); - expect((err as any).type.code).toBe(DownloadByRootErrorCode.INVALID_INCLUSION_PROOF); - expect((err as any).type.peer).toBe(prettyPeerIdStr); - expect((err as any).type.blockRoot).toBe(prettyBytes(fuluBlockWithColumns.blockRoot)); - expect((err as any).type.sidecarIndex).toBe(columnSidecar.index); - } - }); - }); - - describe("validateColumnSidecars", () => { - let fuluBlockWithColumns: ReturnType; - let blockRoot: Uint8Array; - let columnMeta: MissingColumnMeta; - - beforeAll(() => { - fuluBlockWithColumns = generateBlockWithColumnSidecars({forkName: ForkName.fulu}); - blockRoot = fuluBlockWithColumns.blockRoot; - columnMeta = { - missing: fuluBlockWithColumns.columnSidecars.map((c) => c.index), - versionedHashes: [], - }; - }); - - it("should successfully validate all needed column sidecars", async () => { - await expect( - validateColumnSidecars({ - config, - peerIdStr, - blockRoot, - columnMeta, - needed: fuluBlockWithColumns.columnSidecars, - }) - ).resolves.toBeUndefined(); - }); - - it("should successfully validate needToPublish columns", async () => { - await expect( - validateColumnSidecars({ - config, - peerIdStr, - blockRoot, - columnMeta, - needToPublish: fuluBlockWithColumns.columnSidecars, - }) - ).resolves.toBeUndefined(); - }); - - it("should throw error for extra un-requested column sidecar", async () => { - const testProps = { - config, - peerIdStr, - blockRoot, - columnMeta: { - ...columnMeta, - missing: Array.from({length: 18}, (_, i) => i), - }, - needed: fuluBlockWithColumns.columnSidecars, - }; - await expect(validateColumnSidecars(testProps)).rejects.toThrow(); - - try { - await validateColumnSidecars(testProps); - } catch (err) { - expect(err).toBeInstanceOf(DownloadByRootError); - expect((err as any).type.code).toBe(DownloadByRootErrorCode.EXTRA_SIDECAR_RECEIVED); - expect((err as any).type.peer).toBe(prettyPeerIdStr); - expect((err as any).type.blockRoot).toBe(prettyBytes(blockRoot)); - expect((err as any).type.invalidIndex).toBe(18); - expect((err as any).message).toBe("Received a columnSidecar that was not requested"); - } - }); - - it("should invalidate individual needed column sidecar correctly", async () => { - // Create an invalid column with bad inclusion proof to trigger the final validation error - const invalidColumn = ssz.fulu.DataColumnSidecar.clone(fuluBlockWithColumns.columnSidecars[127]); - invalidColumn.kzgCommitmentsInclusionProof[0] = new Uint8Array(32).fill(255); - - const invalidTestProps = { - config, - peerIdStr, - blockRoot, - columnMeta, - needed: [...fuluBlockWithColumns.columnSidecars.slice(0, -1), invalidColumn], - }; - - try { - await validateColumnSidecars(invalidTestProps); - } catch (err) { - expect(err).toBeInstanceOf(DownloadByRootError); - expect((err as any).type.code).toBe(DownloadByRootErrorCode.INVALID_INCLUSION_PROOF); - expect((err as any).type.peer).toBe(prettyPeerIdStr); - expect((err as any).type.blockRoot).toBe(prettyBytes(fuluBlockWithColumns.blockRoot)); - 
expect((err as any).type.sidecarIndex).toBe(127); - expect((err as any).message).toBe( - "Error validating needed columnSidecar index=127. Validation error: DOWNLOAD_BY_ROOT_ERROR_INVALID_INCLUSION_PROOF" - ); - } - }); - - it("should invalidate individual needToPublish column sidecar correctly", async () => { - // Create an invalid column with bad inclusion proof to trigger the final validation error - const invalidColumn = ssz.fulu.DataColumnSidecar.clone(fuluBlockWithColumns.columnSidecars[127]); - invalidColumn.kzgCommitmentsInclusionProof[0] = new Uint8Array(32).fill(255); - - const invalidTestProps = { - config, - peerIdStr, - blockRoot, - columnMeta, - needToPublish: [...fuluBlockWithColumns.columnSidecars.slice(0, -1), invalidColumn], - }; - - try { - await validateColumnSidecars(invalidTestProps); - } catch (err) { - expect(err).toBeInstanceOf(DownloadByRootError); - expect((err as any).type.code).toBe(DownloadByRootErrorCode.INVALID_INCLUSION_PROOF); - expect((err as any).type.peer).toBe(prettyPeerIdStr); - expect((err as any).type.blockRoot).toBe(prettyBytes(fuluBlockWithColumns.blockRoot)); - expect((err as any).type.sidecarIndex).toBe(127); - expect((err as any).message).toBe( - "Error validating needToPublish columnSidecar index=127. Validation error: DOWNLOAD_BY_ROOT_ERROR_INVALID_INCLUSION_PROOF" - ); - } - }); - - it("should avoid duplicate validation for columns in both arrays", async () => { - // Use valid columns to simplify the test setup - const sharedColumns = fuluBlockWithColumns.columnSidecars.slice(0, 2); - const uniqueNeededColumns = fuluBlockWithColumns.columnSidecars.slice(2, 4); - const uniquePublishColumns = fuluBlockWithColumns.columnSidecars.slice(4, 6); - const validateFn = vi.fn(); - - const testProps: ValidateColumnSidecarsProps = { - config, - peerIdStr, - blockRoot, - columnMeta: { - missing: [...sharedColumns, ...uniqueNeededColumns, ...uniquePublishColumns].map((c) => c.index), - versionedHashes: columnMeta.versionedHashes, - }, - needed: [...sharedColumns, ...uniqueNeededColumns], // 4 columns total (2 shared + 2 unique) - needToPublish: [...sharedColumns, ...uniquePublishColumns], // 4 columns total (2 shared + 2 unique to publish) - validateFn, - }; - - await expect(validateColumnSidecars(testProps)).resolves.toBeUndefined(); - const validateCommonProps = { - config, - peerIdStr, - blockRoot, - }; - expect(validateFn).toHaveBeenCalledTimes(6); - expect(validateFn).toHaveBeenNthCalledWith(1, { - ...validateCommonProps, - columnSidecar: sharedColumns[0], - }); - expect(validateFn).toHaveBeenNthCalledWith(2, { - ...validateCommonProps, - columnSidecar: sharedColumns[1], - }); - expect(validateFn).toHaveBeenNthCalledWith(3, { - ...validateCommonProps, - columnSidecar: uniqueNeededColumns[0], - }); - expect(validateFn).toHaveBeenNthCalledWith(4, { - ...validateCommonProps, - columnSidecar: uniqueNeededColumns[1], - }); - expect(validateFn).toHaveBeenNthCalledWith(5, { - ...validateCommonProps, - columnSidecar: uniquePublishColumns[0], - }); - expect(validateFn).toHaveBeenNthCalledWith(6, { - ...validateCommonProps, - columnSidecar: uniquePublishColumns[1], - }); - }); - - it("should throw error for invalid KZG proofs", async () => { - let invalidColumn = ssz.fulu.DataColumnSidecar.clone(fuluBlockWithColumns.columnSidecars[0]); - // Corrupt one of the KZG proofs to make it invalid - invalidColumn.kzgProofs[0] = new Uint8Array(BYTES_PER_PROOF).fill(255); - - let testProps = { - config, - peerIdStr, - blockRoot, - columnMeta, - needed: [invalidColumn, 
...fuluBlockWithColumns.columnSidecars.slice(1)], - }; - - try { - await validateColumnSidecars(testProps); - } catch (err) { - expect(err).toBeInstanceOf(DownloadByRootError); - expect((err as any).type.code).toBe(DownloadByRootErrorCode.INVALID_KZG_PROOF); - expect((err as any).type.peer).toBe(prettyPeerIdStr); - expect((err as any).type.blockRoot).toBe(prettyBytes(blockRoot)); - } - - invalidColumn = ssz.fulu.DataColumnSidecar.clone(fuluBlockWithColumns.columnSidecars[0]); - // Corrupt one of the cells to make it invalid - invalidColumn.column[0] = new Uint8Array(BYTES_PER_CELL).fill(255); - - testProps = { - config, - peerIdStr, - blockRoot, - columnMeta, - needed: [invalidColumn, ...fuluBlockWithColumns.columnSidecars.slice(1)], - }; - - try { - await validateColumnSidecars(testProps); - } catch (err) { - expect(err).toBeInstanceOf(DownloadByRootError); - expect((err as any).type.code).toBe(DownloadByRootErrorCode.INVALID_KZG_PROOF); - expect((err as any).type.peer).toBe(prettyPeerIdStr); - expect((err as any).type.blockRoot).toBe(prettyBytes(blockRoot)); - } - }); - }); - describe("DownloadByRootError", () => { const blockRoot = randomBytes(ROOT_SIZE); diff --git a/packages/beacon-node/test/unit/util/dataColumn.test.ts b/packages/beacon-node/test/unit/util/dataColumn.test.ts index 216a124d2eb0..6f29df80b723 100644 --- a/packages/beacon-node/test/unit/util/dataColumn.test.ts +++ b/packages/beacon-node/test/unit/util/dataColumn.test.ts @@ -5,7 +5,7 @@ import {ssz} from "@lodestar/types"; import {bigIntToBytes, fromHex} from "@lodestar/utils"; import {afterEach, beforeEach, describe, expect, it} from "vitest"; -import {validateDataColumnsSidecars} from "../../../src/chain/validation/dataColumnSidecar.js"; +import {validateBlockDataColumnSidecars} from "../../../src/chain/validation/dataColumnSidecar.js"; import { CustodyConfig, getDataColumnSidecarsFromBlock, @@ -175,7 +175,7 @@ describe("data column sidecars", () => { expect(columnSidecars[0].column.length).toEqual(blobs.length); await expect( - validateDataColumnsSidecars(slot, blockRoot, kzgCommitments, columnSidecars, null) + validateBlockDataColumnSidecars(slot, blockRoot, kzgCommitments.length, columnSidecars) ).resolves.toBeUndefined(); }); @@ -211,8 +211,8 @@ describe("data column sidecars", () => { expect(columnSidecars.length).toEqual(NUMBER_OF_COLUMNS); expect(columnSidecars[0].column.length).toEqual(blobs.length); - await expect(validateDataColumnsSidecars(slot, blockRoot, [], columnSidecars, null)).rejects.toThrow( - `Invalid data column sidecar slot=${slot}` + await expect(validateBlockDataColumnSidecars(slot, blockRoot, 0, columnSidecars)).rejects.toThrow( + "Block has no blob commitments but data column sidecars were provided" ); }); }); From 8a9c8dfa3e422cbf8ecabdd7d492541b93019aac Mon Sep 17 00:00:00 2001 From: Cayman Date: Tue, 2 Sep 2025 16:15:22 -0400 Subject: [PATCH 098/173] chore: fix an e2e test --- .../test/e2e/sync/unknownBlockSync.test.ts | 33 ++++++++++++------- 1 file changed, 21 insertions(+), 12 deletions(-) diff --git a/packages/beacon-node/test/e2e/sync/unknownBlockSync.test.ts b/packages/beacon-node/test/e2e/sync/unknownBlockSync.test.ts index 4dff2c581878..5f812f9d5d07 100644 --- a/packages/beacon-node/test/e2e/sync/unknownBlockSync.test.ts +++ b/packages/beacon-node/test/e2e/sync/unknownBlockSync.test.ts @@ -2,13 +2,13 @@ import {fromHexString} from "@chainsafe/ssz"; import {routes} from "@lodestar/api"; import {EventData, EventType} from "@lodestar/api/lib/beacon/routes/events.js"; import 
{ChainConfig} from "@lodestar/config"; -import {config} from "@lodestar/config/default"; import {TimestampFormatCode} from "@lodestar/logger"; import {SLOTS_PER_EPOCH} from "@lodestar/params"; import {afterEach, describe, it, vi} from "vitest"; -import {BlockSource, getBlockInput} from "../../../src/chain/blocks/types.js"; +import {BlockInputPreData} from "../../../src/chain/blocks/blockInput/blockInput.js"; +import {BlockInputSource} from "../../../src/chain/blocks/blockInput/types.js"; +import {ChainEvent} from "../../../src/chain/emitter.js"; import {BlockError, BlockErrorCode} from "../../../src/chain/errors/index.js"; -import {NetworkEvent} from "../../../src/network/index.js"; import {INTEROP_BLOCK_HASH} from "../../../src/node/utils/interop/state.js"; import {waitForEvent} from "../../utils/events/resolver.js"; import {LogLevel, TestLoggerOpts, testLogger} from "../../utils/logger.js"; @@ -47,14 +47,14 @@ describe("sync / unknown block sync for fulu", () => { } }); - const testCases: {id: string; event: NetworkEvent}[] = [ + const testCases: {id: string; event: ChainEvent}[] = [ { id: "should do an unknown block parent sync from another BN", - event: NetworkEvent.unknownBlockParent, + event: ChainEvent.unknownParent, }, { id: "should do an unknown block sync from another BN", - event: NetworkEvent.unknownBlock, + event: ChainEvent.unknownBlockRoot, }, // TODO: new event postfulu for unknownBlockInput ]; @@ -144,27 +144,36 @@ describe("sync / unknown block sync for fulu", () => { await connected; loggerNodeA.info("Node A connected to Node B"); - const headInput = getBlockInput.preData(config, head, BlockSource.gossip); + const headInput = BlockInputPreData.createFromBlock({ + block: head, + blockRootHex: headSummary.blockRoot, + source: BlockInputSource.gossip, + seenTimestampSec: Math.floor(Date.now() / 1000), + forkName: bn.chain.config.getForkName(head.message.slot), + daOutOfRange: false, + }); switch (event) { - case NetworkEvent.unknownBlockParent: + case ChainEvent.unknownParent: await bn2.chain.processBlock(headInput).catch((e) => { - loggerNodeB.info("Error processing block", {slot: headInput.block.message.slot, code: e.type.code}); + loggerNodeB.info("Error processing block", {slot: headInput.slot, code: e.type.code}); if (e instanceof BlockError && e.type.code === BlockErrorCode.PARENT_UNKNOWN) { // Expected - bn2.network.events.emit(NetworkEvent.unknownBlockParent, { + bn2.chain.emitter.emit(ChainEvent.unknownParent, { blockInput: headInput, peer: bn2.network.peerId.toString(), + source: BlockInputSource.gossip, }); } else { throw e; } }); break; - case NetworkEvent.unknownBlock: - bn2.network.events.emit(NetworkEvent.unknownBlock, { + case ChainEvent.unknownBlockRoot: + bn2.chain.emitter.emit(ChainEvent.unknownBlockRoot, { rootHex: headSummary.blockRoot, peer: bn2.network.peerId.toString(), + source: BlockInputSource.gossip, }); break; default: From d3000a804cc57079985a553f0e86bd77725a675f Mon Sep 17 00:00:00 2001 From: Cayman Date: Tue, 2 Sep 2025 16:57:02 -0400 Subject: [PATCH 099/173] chore: fix unit test --- .../sync/range/utils/peerBalancer.test.ts | 44 ++++++++++++------- 1 file changed, 29 insertions(+), 15 deletions(-) diff --git a/packages/beacon-node/test/unit/sync/range/utils/peerBalancer.test.ts b/packages/beacon-node/test/unit/sync/range/utils/peerBalancer.test.ts index 021391d60905..8c0301e1cbe6 100644 --- a/packages/beacon-node/test/unit/sync/range/utils/peerBalancer.test.ts +++ b/packages/beacon-node/test/unit/sync/range/utils/peerBalancer.test.ts @@ -2,8 
+2,10 @@ import {createChainForkConfig} from "@lodestar/config"; import {chainConfig} from "@lodestar/config/default"; import {ZERO_HASH} from "@lodestar/params"; import {computeStartSlotAtEpoch} from "@lodestar/state-transition"; +import {ssz} from "@lodestar/types"; import {describe, expect, it} from "vitest"; -import {BlockInput} from "../../../../../src/chain/blocks/types.js"; +import {BlockInputColumns} from "../../../../../src/chain/blocks/blockInput/blockInput.js"; +import {BlockInputSource} from "../../../../../src/chain/blocks/blockInput/types.js"; import {Batch} from "../../../../../src/sync/range/batch.js"; import {ChainTarget} from "../../../../../src/sync/range/chain.js"; import {ChainPeersBalancer, PeerSyncInfo} from "../../../../../src/sync/range/utils/peerBalancer.js"; @@ -11,7 +13,6 @@ import {RangeSyncType} from "../../../../../src/sync/utils/remoteSyncType.js"; import {CustodyConfig} from "../../../../../src/util/dataColumns.js"; import {PeerIdStr} from "../../../../../src/util/peerId.js"; import {getRandPeerSyncMeta} from "../../../../utils/peer.js"; -import {generateSignedBlockAtSlot} from "../../../../utils/typeGenerator.js"; describe("sync / range / peerBalancer", () => { const custodyConfig = {sampledColumns: [0, 1, 2, 3]} as CustodyConfig; @@ -143,12 +144,12 @@ describe("sync / range / peerBalancer", () => { ? createChainForkConfig({...chainConfig, FULU_FORK_EPOCH: 0}) : createChainForkConfig(chainConfig); - const batch0 = new Batch(1, config); - const batch1 = new Batch(2, config); + const batch0 = new Batch(1, config, custodyConfig); + const batch1 = new Batch(2, config, custodyConfig); // Batch zero has a failedDownloadAttempt with peer1 batch0.startDownloading(peer1.peerId); - batch0.downloadingError(); + batch0.downloadingError(peer1.peerId); // peer2 is busy downloading batch1 batch1.startDownloading(peer2.peerId); @@ -166,13 +167,26 @@ it("should not retry the batch with a not as up-to-date peer", async () => { const config = createChainForkConfig({...chainConfig, FULU_FORK_EPOCH: 0}); - const batch0 = new Batch(1, config); + const batch0 = new Batch(1, config, custodyConfig); + const blocksRequest = batch0.requests.blocksRequest as {startSlot: number; count: number}; // Batch zero has a failedDownloadAttempt with peer1 batch0.startDownloading(peer1.peerId); - const block: BlockInput = { - block: generateSignedBlockAtSlot(batch0.request.startSlot + batch0.request.count - 1), - } as BlockInput; - batch0.downloadingSuccess({blocks: [block], pendingDataColumns: [1, 2, 3]}); + const block = ssz.fulu.SignedBeaconBlock.defaultValue(); + block.message.slot = blocksRequest.startSlot + blocksRequest.count - 1; + block.message.body.blobKzgCommitments = [ssz.fulu.KZGCommitment.defaultValue()]; + const blockInput = BlockInputColumns.createFromBlock({ + block, + blockRootHex: "0x00", + source: BlockInputSource.gossip, + seenTimestampSec: Math.floor(Date.now() / 1000), + forkName: config.getForkName(block.message.slot), + daOutOfRange: false, + custodyColumns: [0, 1, 2, 3], + sampledColumns: [0, 1, 2, 3], + }); + batch0.downloadingSuccess(peer1.peerId, [blockInput]); // peer2 and peer3 are the same but peer3 has a lower target slot than the previous download const peerInfos: PeerSyncInfo[] = [ { peerId: peer2.peerId, client: peer2.client, custodyGroups: [0, 1, 2, 3], - target: {slot: 
batch0.request.startSlot + batch0.request.count - 1, root: ZERO_HASH}, + target: {slot: blocksRequest.startSlot + blocksRequest.count - 1, root: ZERO_HASH}, earliestAvailableSlot: 0, }, { peerId: peer3.peerId, client: peer3.client, custodyGroups: [0, 1, 2, 3], - target: {slot: batch0.request.startSlot + batch0.request.count - 2, root: ZERO_HASH}, + target: {slot: blocksRequest.startSlot + blocksRequest.count - 2, root: ZERO_HASH}, earliestAvailableSlot: 0, }, ]; @@ -290,13 +304,13 @@ describe("sync / range / peerBalancer", () => { ? createChainForkConfig({...chainConfig, FULU_FORK_EPOCH: 0}) : createChainForkConfig(chainConfig); - const batch0 = new Batch(1, config); - const batch1 = new Batch(2, config); + const batch0 = new Batch(1, config, custodyConfig); + const batch1 = new Batch(2, config, custodyConfig); // peer1 and peer2 are busy downloading batch0.startDownloading(peer1.peerId); batch1.startDownloading(peer2.peerId); - const newBatch = new Batch(3, config); + const newBatch = new Batch(3, config, custodyConfig); const peerBalancer = new ChainPeersBalancer(peerInfos, [batch0, batch1], custodyConfig, RangeSyncType.Head); const idlePeer = peerBalancer.idlePeerForBatch(newBatch); expect(idlePeer?.peerId).toBe(expected); From 10977365192a790d631008f230c87f7fd5bb1f6d Mon Sep 17 00:00:00 2001 From: Cayman Date: Tue, 2 Sep 2025 17:08:01 -0400 Subject: [PATCH 100/173] chore: fix another unit test --- .../test/unit/sync/range/chain.test.ts | 68 +++++++++---------- 1 file changed, 34 insertions(+), 34 deletions(-) diff --git a/packages/beacon-node/test/unit/sync/range/chain.test.ts b/packages/beacon-node/test/unit/sync/range/chain.test.ts index 7e4b03b11acd..e0a144e0afc6 100644 --- a/packages/beacon-node/test/unit/sync/range/chain.test.ts +++ b/packages/beacon-node/test/unit/sync/range/chain.test.ts @@ -4,7 +4,8 @@ import {computeStartSlotAtEpoch} from "@lodestar/state-transition"; import {Epoch, Slot, phase0, ssz} from "@lodestar/types"; import {Logger, fromHex} from "@lodestar/utils"; import {afterEach, describe, it} from "vitest"; -import {BlockInput, BlockSource, getBlockInput} from "../../../../src/chain/blocks/types.js"; +import {BlockInputPreData} from "../../../../src/chain/blocks/blockInput/blockInput.js"; +import {BlockInputSource, IBlockInput} from "../../../../src/chain/blocks/blockInput/types.js"; import {ZERO_HASH} from "../../../../src/constants/index.js"; import {ChainTarget, SyncChain, SyncChainFns} from "../../../../src/sync/range/chain.js"; import {RangeSyncType} from "../../../../src/sync/utils/remoteSyncType.js"; @@ -83,19 +84,16 @@ describe("sync / range / chain", () => { for (const {id, startEpoch, targetEpoch, badBlocks, skippedSlots} of testCases) { it(id, async () => { const processChainSegment: SyncChainFns["processChainSegment"] = async (blocks) => { - for (const {block} of blocks) { + for (const blockInput of blocks) { + const block = blockInput.getBlock(); if (block.signature === ACCEPT_BLOCK) continue; if (block.signature === REJECT_BLOCK) throw Error("REJECT_BLOCK"); } }; - const downloadBeaconBlocksByRange: SyncChainFns["downloadBeaconBlocksByRange"] = async ( - _peer, - request, - _partialDownload - ) => { - const blocks: BlockInput[] = []; - for (let i = request.startSlot; i < request.startSlot + request.count; i += request.step) { + const downloadByRange: SyncChainFns["downloadByRange"] = async (_peer, request, _partialDownload) => { + const blocks: IBlockInput[] = []; + for (let i = request.startSlot; i < request.startSlot + request.count; i += 1) { if 
(skippedSlots?.has(i)) { continue; // Skip } @@ -104,17 +102,20 @@ const shouldReject = badBlocks?.has(i); if (shouldReject) badBlocks?.delete(i); blocks.push( - getBlockInput.preData( - config, - { + BlockInputPreData.createFromBlock({ + block: { message: generateEmptyBlock(i), signature: shouldReject ? REJECT_BLOCK : ACCEPT_BLOCK, }, - BlockSource.byRange - ) + blockRootHex: "0x00", + forkName: config.getForkName(i), + daOutOfRange: false, + source: BlockInputSource.byRange, + seenTimestampSec: Math.floor(Date.now() / 1000), + }) ); } - return {blocks, pendingDataColumns: null}; + return blocks; }; const target: ChainTarget = {slot: computeStartSlotAtEpoch(targetEpoch), root: ZERO_HASH}; @@ -128,7 +129,7 @@ describe("sync / range / chain", () => { syncType, logSyncChainFns(logger, { processChainSegment, - downloadBeaconBlocksByRange, + downloadByRange, getConnectedPeerSyncMeta, reportPeer, onEnd, @@ -150,25 +151,24 @@ describe("sync / range / chain", () => { const peers = [peer]; const processChainSegment: SyncChainFns["processChainSegment"] = async () => {}; - const downloadBeaconBlocksByRange: SyncChainFns["downloadBeaconBlocksByRange"] = async ( - _peer, - request, - _partialDownload - ) => { - const blocks: BlockInput[] = []; - for (let i = request.startSlot; i < request.startSlot + request.count; i += request.step) { + const downloadByRange: SyncChainFns["downloadByRange"] = async (_peer, request, _partialDownload) => { + const blocks: IBlockInput[] = []; + for (let i = request.startSlot; i < request.startSlot + request.count; i += 1) { blocks.push( - getBlockInput.preData( - config, - { + BlockInputPreData.createFromBlock({ + block: { message: generateEmptyBlock(i), signature: ACCEPT_BLOCK, }, - BlockSource.byRange - ) + blockRootHex: "0x00", + forkName: config.getForkName(i), + seenTimestampSec: Math.floor(Date.now() / 1000), + daOutOfRange: false, + source: BlockInputSource.byRange, + }) ); } - return {blocks, pendingDataColumns: null}; + return blocks; }; const target: ChainTarget = {slot: computeStartSlotAtEpoch(targetEpoch), root: ZERO_HASH}; @@ -182,7 +182,7 @@ describe("sync / range / chain", () => { syncType, logSyncChainFns(logger, { processChainSegment, - downloadBeaconBlocksByRange, + downloadByRange, reportPeer, getConnectedPeerSyncMeta, onEnd, @@ -213,12 +213,12 @@ describe("sync / range / chain", () => { function logSyncChainFns(logger: Logger, fns: SyncChainFns): SyncChainFns { return { processChainSegment(blocks, syncType) { - logger.debug("mock processChainSegment", {blocks: blocks.map((b) => b.block.message.slot).join(",")}); + logger.debug("mock processChainSegment", {blocks: blocks.map((b) => b.slot).join(",")}); return fns.processChainSegment(blocks, syncType); }, - downloadBeaconBlocksByRange(peer, request, _partialDownload, syncType) { - logger.debug("mock downloadBeaconBlocksByRange", request); - return fns.downloadBeaconBlocksByRange(peer, request, _partialDownload, syncType); + downloadByRange(peer, request, syncType) { + logger.debug("mock downloadByRange", request.state.status); + return fns.downloadByRange(peer, request, syncType); }, getConnectedPeerSyncMeta(peerId) { logger.debug("mock getConnectedPeerSyncMeta", peerId); From e1916623faa7eb78335068a60801c9992b598fc9 Mon Sep 17 00:00:00 2001 From: Cayman Date: Tue, 2 Sep 2025 18:08:30 -0400 Subject: [PATCH 101/173] chore: fix more tests --- .../unit/sync/range/utils/batches.test.ts | 5 +- .../test/unit/sync/unknownBlock.test.ts | 163 
++++++------------ 2 files changed, 59 insertions(+), 109 deletions(-) diff --git a/packages/beacon-node/test/unit/sync/range/utils/batches.test.ts b/packages/beacon-node/test/unit/sync/range/utils/batches.test.ts index bb286a0e8c87..b4bfe1b03b1c 100644 --- a/packages/beacon-node/test/unit/sync/range/utils/batches.test.ts +++ b/packages/beacon-node/test/unit/sync/range/utils/batches.test.ts @@ -9,6 +9,7 @@ import { toBeDownloadedStartEpoch, validateBatchesStatus, } from "../../../../../src/sync/range/utils/batches.js"; +import {CustodyConfig} from "../../../../../src/util/dataColumns.js"; import {validPeerIdStr} from "../../../../utils/peer.js"; describe("sync / range / batches", () => { @@ -220,14 +221,14 @@ describe("sync / range / batches", () => { }); function createBatch(status: BatchStatus, startEpoch = 0): Batch { - const batch = new Batch(startEpoch, config); + const batch = new Batch(startEpoch, config, new CustodyConfig({config, nodeId: Buffer.alloc(32)})); if (status === BatchStatus.AwaitingDownload) return batch; batch.startDownloading(peer); if (status === BatchStatus.Downloading) return batch; - batch.downloadingSuccess({blocks: [], pendingDataColumns: null}); + batch.downloadingSuccess(peer, []); if (status === BatchStatus.AwaitingProcessing) return batch; batch.startProcessing(); diff --git a/packages/beacon-node/test/unit/sync/unknownBlock.test.ts b/packages/beacon-node/test/unit/sync/unknownBlock.test.ts index 670eee1c6be1..a97db3e5c626 100644 --- a/packages/beacon-node/test/unit/sync/unknownBlock.test.ts +++ b/packages/beacon-node/test/unit/sync/unknownBlock.test.ts @@ -3,24 +3,17 @@ import {toHexString} from "@chainsafe/ssz"; import {createChainForkConfig} from "@lodestar/config"; import {config as minimalConfig} from "@lodestar/config/default"; import {IForkChoice, ProtoBlock} from "@lodestar/fork-choice"; -import {ForkName, ZERO_HASH_HEX} from "@lodestar/params"; +import {ForkName} from "@lodestar/params"; import {ssz} from "@lodestar/types"; import {notNullish, sleep} from "@lodestar/utils"; import {afterEach, beforeEach, describe, expect, it, vi} from "vitest"; -import { - BlockInput, - BlockInputDataColumns, - BlockInputType, - BlockSource, - CachedDataColumns, - NullBlockInput, - getBlockInput, -} from "../../../src/chain/blocks/types.js"; +import {BlockInputColumns, BlockInputPreData} from "../../../src/chain/blocks/blockInput/blockInput.js"; +import {BlockInputSource} from "../../../src/chain/blocks/blockInput/types.js"; import {BlockError, BlockErrorCode} from "../../../src/chain/errors/blockError.js"; -import {IBeaconChain} from "../../../src/chain/index.js"; +import {ChainEvent, IBeaconChain} from "../../../src/chain/index.js"; import {SeenBlockProposers} from "../../../src/chain/seenCache/seenBlockProposers.js"; import {ZERO_HASH} from "../../../src/constants/constants.js"; -import {INetwork, NetworkEvent, NetworkEventBus, PeerAction} from "../../../src/network/index.js"; +import {INetwork, NetworkEventBus, PeerAction} from "../../../src/network/index.js"; import {PeerSyncMeta} from "../../../src/network/peers/peersData.js"; import {defaultSyncOptions} from "../../../src/sync/options.js"; import {BlockInputSync, UnknownBlockPeerBalancer} from "../../../src/sync/unknownBlock.js"; @@ -28,6 +21,7 @@ import {CustodyConfig} from "../../../src/util/dataColumns.js"; import {PeerIdStr} from "../../../src/util/peerId.js"; import {ClockStopped} from "../../mocks/clock.js"; import {MockedBeaconChain, getMockedBeaconChain} from "../../mocks/mockedBeaconChain.js"; 
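+// test helper that builds a signed block together with its matching DataColumnSidecars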
+import {generateBlockWithColumnSidecars} from "../../utils/blocksAndData.js"; import {testLogger} from "../../utils/logger.js"; import {getRandPeerIdStr, getRandPeerSyncMeta} from "../../utils/peer.js"; @@ -48,7 +42,7 @@ describe.skip( const testCases: { id: string; - event: NetworkEvent.unknownBlockParent | NetworkEvent.unknownBlock; + event: ChainEvent.unknownParent | ChainEvent.unknownBlockRoot; finalizedSlot: number; reportPeer?: boolean; seenBlock?: boolean; @@ -57,23 +51,23 @@ describe.skip( }[] = [ { id: "fetch and process multiple unknown blocks", - event: NetworkEvent.unknownBlock, + event: ChainEvent.unknownBlockRoot, finalizedSlot: 0, }, { id: "fetch and process multiple unknown block parents", - event: NetworkEvent.unknownBlockParent, + event: ChainEvent.unknownParent, finalizedSlot: 0, }, { id: "downloaded parent is before finalized slot", - event: NetworkEvent.unknownBlockParent, + event: ChainEvent.unknownParent, finalizedSlot: 2, reportPeer: true, }, { id: "unbundling attack", - event: NetworkEvent.unknownBlock, + event: ChainEvent.unknownBlockRoot, finalizedSlot: 0, seenBlock: true, }, @@ -86,12 +80,12 @@ describe.skip( // }, { id: "peer returns prefinalized block", - event: NetworkEvent.unknownBlock, + event: ChainEvent.unknownBlockRoot, finalizedSlot: 1, }, { id: "downloaded blocks only", - event: NetworkEvent.unknownBlockParent, + event: ChainEvent.unknownParent, finalizedSlot: 0, maxPendingBlocks: 1, }, @@ -182,7 +176,8 @@ describe.skip( const chain: Partial = { clock: new ClockStopped(0), forkChoice: forkChoice as IForkChoice, - processBlock: async ({block}, opts) => { + processBlock: async (blockInput, opts) => { + const block = blockInput.getBlock(); if (!forkChoice.hasBlock(block.message.parentRoot)) throw Error("Unknown parent"); const blockSlot = block.message.slot; if (blockSlot <= finalizedSlot && !opts?.ignoreIfFinalized) { @@ -205,23 +200,32 @@ describe.skip( maxPendingBlocks, }); syncService.subscribeToNetwork(); - if (event === NetworkEvent.unknownBlockParent) { - network.events?.emit(NetworkEvent.unknownBlockParent, { - blockInput: getBlockInput.preData(config, blockC, BlockSource.gossip), + if (event === ChainEvent.unknownParent) { + chain.emitter?.emit(ChainEvent.unknownParent, { + blockInput: BlockInputPreData.createFromBlock({ + block: blockC, + blockRootHex: blockRootHexC, + forkName: config.getForkName(blockC.message.slot), + daOutOfRange: false, + seenTimestampSec: Math.floor(Date.now() / 1000), + source: BlockInputSource.gossip, + }), peer, + source: BlockInputSource.gossip, }); } else { - network.events?.emit(NetworkEvent.unknownBlock, {rootHex: blockRootHexC, peer}); + chain.emitter?.emit(ChainEvent.unknownBlockRoot, { + rootHex: blockRootHexC, + peer, + source: BlockInputSource.gossip, + }); } if (wrongBlockRoot) { - const [_, requestedRoots] = await sendBeaconBlocksByRootPromise; + await sendBeaconBlocksByRootPromise; await sleep(200); // should not send the invalid root block to chain expect(processBlockSpy).toHaveBeenCalledOnce(); - for (const requestedRoot of requestedRoots) { - expect(syncService["pendingBlocks"].get(toHexString(requestedRoot))?.downloadAttempts).toEqual(1); - } } else if (reportPeer) { const err = await reportPeerPromise; expect(err[0]).toBe(peer); @@ -299,12 +303,12 @@ describe("UnknownBlockSync", () => { } if (expected) { - expect(events.listenerCount(NetworkEvent.unknownBlock)).toBe(1); - expect(events.listenerCount(NetworkEvent.unknownBlockParent)).toBe(1); + 
expect(events.listenerCount(ChainEvent.unknownBlockRoot)).toBe(1); + expect(events.listenerCount(ChainEvent.unknownParent)).toBe(1); expect(service.isSubscribedToNetwork()).toBe(true); } else { - expect(events.listenerCount(NetworkEvent.unknownBlock)).toBe(0); - expect(events.listenerCount(NetworkEvent.unknownBlockParent)).toBe(0); + expect(events.listenerCount(ChainEvent.unknownBlockRoot)).toBe(0); + expect(events.listenerCount(ChainEvent.unknownParent)).toBe(0); expect(service.isSubscribedToNetwork()).toBe(false); } }); @@ -373,23 +377,26 @@ describe("UnknownBlockPeerBalancer", async () => { } const signedBlock = ssz.fulu.SignedBeaconBlock.defaultValue(); - const cachedData: CachedDataColumns = { - cacheId: 2025, - fork: ForkName.fulu, - availabilityPromise: Promise.resolve({} as unknown as BlockInputDataColumns), - resolveAvailability: () => {}, - dataColumnsCache: new Map([ - [0, {dataColumn: ssz.fulu.DataColumnSidecar.defaultValue(), dataColumnBytes: null}], - [1, {dataColumn: ssz.fulu.DataColumnSidecar.defaultValue(), dataColumnBytes: null}], - ]), - calledRecover: false, - }; - const blockInput: BlockInput = { - block: signedBlock, - source: BlockSource.gossip, - type: BlockInputType.dataPromise, - cachedData, - }; + signedBlock.message.body.blobKzgCommitments = [ssz.fulu.KZGCommitment.defaultValue()]; + const {block, rootHex, columnSidecars} = generateBlockWithColumnSidecars({forkName: ForkName.fulu}); + const blockInput = BlockInputColumns.createFromBlock({ + block: block, + blockRootHex: rootHex, + forkName: ForkName.fulu, + daOutOfRange: false, + source: BlockInputSource.gossip, + seenTimestampSec: Math.floor(Date.now() / 1000), + custodyColumns: custodyConfig.custodyColumns, + sampledColumns: custodyConfig.sampledColumns, + }); + for (const sidecar of columnSidecars.slice(1)) { + blockInput.addColumn({ + columnSidecar: sidecar, + blockRootHex: rootHex, + seenTimestampSec: Math.floor(Date.now() / 1000), + source: BlockInputSource.gossip, + }); + } it(`bestPeerForBlockInput - test case ${testCaseIndex}`, () => { for (const [i, activeRequest] of activeRequests.entries()) { @@ -419,62 +426,4 @@ describe("UnknownBlockPeerBalancer", async () => { } }); } // end for testCases - - it("bestPeerForBlockInput - NullBlockInput", () => { - // there is an edge case where the NullBlockInput has full custody groups but no block, make sure it can return any peers - // in case NullBlockInput has some pending columns, it falls on the above test cases - const signedBlock = ssz.fulu.SignedBeaconBlock.defaultValue(); - const cachedData: CachedDataColumns = { - cacheId: 2025, - fork: ForkName.fulu, - availabilityPromise: Promise.resolve({} as unknown as BlockInputDataColumns), - resolveAvailability: () => {}, - dataColumnsCache: new Map([ - [0, {dataColumn: ssz.fulu.DataColumnSidecar.defaultValue(), dataColumnBytes: null}], - [1, {dataColumn: ssz.fulu.DataColumnSidecar.defaultValue(), dataColumnBytes: null}], - [2, {dataColumn: ssz.fulu.DataColumnSidecar.defaultValue(), dataColumnBytes: null}], - [3, {dataColumn: ssz.fulu.DataColumnSidecar.defaultValue(), dataColumnBytes: null}], - ]), - calledRecover: false, - }; - const blockInput: BlockInput = { - block: signedBlock, - source: BlockSource.gossip, - type: BlockInputType.dataPromise, - cachedData, - }; - - const nullBlockInput: NullBlockInput = { - block: null, - blockRootHex: ZERO_HASH_HEX, - blockInputPromise: Promise.resolve(blockInput), - cachedData, - }; - - const excludedPeers = new Set(); - for (let i = 0; i < peers.length; i++) { - const 
peer = peerBalancer.bestPeerForBlockInput(nullBlockInput, excludedPeers); - expect(peer).not.toBeNull(); - if (peer == null) { - // should not happen, this is just to make the compiler happy - throw new Error("Unexpected null peer"); - } - excludedPeers.add(peer.peerId); - } - - // last round, no more peer should be returned because all are requested - const peer = peerBalancer.bestPeerForBlockInput(nullBlockInput, excludedPeers); - expect(peer).toBeNull(); - }); - - it("onRequest and onRequestCompleted", () => { - peerBalancer.onRequest(peers[0].peerId); - expect(peerBalancer.activeRequests.get(peers[0].peerId)).toBe(1); - peerBalancer.onRequest(peers[0].peerId); - expect(peerBalancer.activeRequests.get(peers[0].peerId)).toBe(2); - peerBalancer.onRequestCompleted(peers[0].peerId); - expect(peerBalancer.activeRequests.get(peers[0].peerId)).toBe(1); - peerBalancer.onRequestCompleted(peers[0].peerId); - expect(peerBalancer.activeRequests.get(peers[0].peerId)).toBe(0); - }); }); From 1a4303507e3fd0927b3778e729e70f37c885c4e7 Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Wed, 3 Sep 2025 05:09:44 +0700 Subject: [PATCH 102/173] test: partial update of batch.test.ts --- .../test/unit/sync/range/batch.test.ts | 256 +++++++++++------- 1 file changed, 160 insertions(+), 96 deletions(-) diff --git a/packages/beacon-node/test/unit/sync/range/batch.test.ts b/packages/beacon-node/test/unit/sync/range/batch.test.ts index 68d28c5c69eb..ca25204b9111 100644 --- a/packages/beacon-node/test/unit/sync/range/batch.test.ts +++ b/packages/beacon-node/test/unit/sync/range/batch.test.ts @@ -1,124 +1,188 @@ -import {config} from "@lodestar/config/default"; +import {generateKeyPair} from "@libp2p/crypto/keys"; import {SLOTS_PER_EPOCH} from "@lodestar/params"; import {ssz} from "@lodestar/types"; -import {describe, expect, it} from "vitest"; +import {beforeAll, beforeEach, describe, expect, it} from "vitest"; import {BlockSource, getBlockInput} from "../../../../src/chain/blocks/types.js"; +import {computeNodeIdFromPrivateKey} from "../../../../src/network/subnets/index.js"; import {EPOCHS_PER_BATCH} from "../../../../src/sync/constants.js"; import {Batch, BatchError, BatchErrorCode, BatchStatus} from "../../../../src/sync/range/batch.js"; +import {CustodyConfig} from "../../../../src/util/dataColumns.js"; +import {config} from "../../../utils/blocksAndData.js"; import {expectThrowsLodestarError} from "../../../utils/errors.js"; import {validPeerIdStr} from "../../../utils/peer.js"; -describe("sync / range / batch", () => { + +describe("sync / range / batch", async () => { // Common mock data - const startEpoch = 0; + const privateKey = await generateKeyPair("secp256k1"); + const nodeId = computeNodeIdFromPrivateKey(privateKey); + const custodyConfig = new CustodyConfig({config, nodeId}); const peer = validPeerIdStr; - const blocksDownloaded = [ - getBlockInput.preData(config, ssz.phase0.SignedBeaconBlock.defaultValue(), BlockSource.byRange), - ]; - - it("Should return correct blockByRangeRequest", () => { - const batch = new Batch(startEpoch, config); - expect(batch.request).toEqual({ - startSlot: 0, - count: SLOTS_PER_EPOCH * EPOCHS_PER_BATCH, - step: 1, - }); - }); - it("Complete state flow", () => { - const batch = new Batch(startEpoch, config); + describe("getRequests", () => { + describe("PreDeneb", () => { + let batch: Batch; + const startEpoch = config.CAPELLA_FORK_EPOCH + 1; - // Instantion: AwaitingDownload - expect(batch.state.status).toBe(BatchStatus.AwaitingDownload); + it("should make default pre-deneb 
requests if no existing blocks are passed", () => {
+        batch = new Batch(startEpoch, config, custodyConfig);
+        expect(batch.requests.blocksRequest).toEqual({startSlot: batch.startSlot, count: batch.count, step: 1});
+        expect(batch.requests.blobsRequest).toBeUndefined();
+        expect(batch.requests.columnsRequest).toBeUndefined();
+      });
 
-  it("Complete state flow", () => {
-    const batch = new Batch(startEpoch, config);
+      it("should have correct start slot to not re-download blocks", () => {});
+    });
 
-    // Instantion: AwaitingDownload
-    expect(batch.state.status).toBe(BatchStatus.AwaitingDownload);
+    describe("ForkDABlobs", () => {
+      let batch: Batch;
+      const startEpoch = config.DENEB_FORK_EPOCH + 1;
 
-    // startDownloading: AwaitingDownload -> Downloading
-    batch.startDownloading(peer);
-    expect(batch.state.status).toBe(BatchStatus.Downloading);
+      beforeEach(() => {
+        batch = new Batch(startEpoch, config, custodyConfig);
+      });
 
-    // downloadingError: Downloading -> AwaitingDownload
-    batch.downloadingError();
-    expect(batch.state.status).toBe(BatchStatus.AwaitingDownload);
-    expect(batch.getFailedPeers()[0]).toBe(peer);
+      it("should make default ForkDABlobs requests if no existing blocks are passed", () => {
+        expect(batch.requests.blocksRequest).toEqual({startSlot: batch.startSlot, count: batch.count, step: 1});
+        expect(batch.requests.blobsRequest).toEqual({startSlot: batch.startSlot, count: batch.count});
+        expect(batch.requests.columnsRequest).toBeUndefined();
+      });
+    });
 
-    // As of https://github.com/ChainSafe/lodestar/pull/8150, we abort the batch after a single processing error
-    // commented out the rest of the flow for now
+    describe("ForkDAColumns", () => {
+      let batch: Batch;
+      const startEpoch = config.FULU_FORK_EPOCH + 1;
+
+      beforeEach(() => {
+        batch = new Batch(startEpoch, config, custodyConfig);
+      });
+
+      it("should make default ForkDAColumns requests if no existing blocks are passed", () => {
+        expect(batch.requests.blocksRequest).toEqual({startSlot: batch.startSlot, count: batch.count, step: 1});
+        expect(batch.requests.blobsRequest).toBeUndefined();
+        expect(batch.requests.columnsRequest).toEqual({
+          startSlot: batch.startSlot,
+          count: batch.count,
+          columns: custodyConfig.sampledColumns,
+        });
+      });
+    });
 
-    // retry download: AwaitingDownload -> Downloading
-    // downloadingSuccess: Downloading -> AwaitingProcessing
-    batch.startDownloading(peer);
-    batch.downloadingSuccess({blocks: blocksDownloaded, pendingDataColumns: null});
-    expect(batch.state.status).toBe(BatchStatus.AwaitingProcessing);
+    it("should not request data pre-deneb", () => {});
 
-    // startProcessing: AwaitingProcessing -> Processing
-    // const blocksToProcess = batch.startProcessing();
-    // expect(batch.state.status).toBe(BatchStatus.Processing);
-    // expect(blocksToProcess).toBe(blocksDownloaded);
+    // it("should not request data when before availability window", () => {});
 
-    // processingError: Processing -> AwaitingDownload
+    // it("should request data within availability window", () => {});
 
-    // batch.processingError(new Error());
-    // expect(batch.state.status).toBe(BatchStatus.AwaitingDownload);
+    // it("should only request blobs or columns, not both", () => {});
 
-    // retry download + processing: AwaitingDownload -> Downloading -> AwaitingProcessing -> Processing
-    // processingSuccess: Processing -> AwaitingValidation
-    // batch.startDownloading(peer);
-    // batch.downloadingSuccess({blocks: blocksDownloaded, pendingDataColumns: null});
-    // batch.startProcessing();
-    // batch.processingSuccess();
-    // expect(batch.state.status).toBe(BatchStatus.AwaitingValidation);
+    // it("should request blobs between post-deneb and pre-fulu ", () => {});
 
-    // validationError: AwaitingValidation -> AwaitingDownload
+    // it("should request columns post-fulu", () => {});
 
-    // batch.validationError(new Error());
-    // 
expect(batch.state.status).toBe(BatchStatus.AwaitingDownload); + // it("should request blobs between post-deneb and pre-fulu ", () => {}); - // retry download + processing + validation: AwaitingDownload -> Downloading -> AwaitingProcessing -> Processing -> AwaitingValidation - // batch.startDownloading(peer); - // batch.downloadingSuccess({blocks: blocksDownloaded, pendingDataColumns: null}); - batch.startProcessing(); - batch.processingSuccess(); - expect(batch.state.status).toBe(BatchStatus.AwaitingValidation); - // On validationSuccess() the batch will just be dropped and garbage collected - }); + // it("should request columns post-fulu", () => {}); - it("Should throw on inconsistent state - downloadingSuccess", () => { - const batch = new Batch(startEpoch, config); - - expectThrowsLodestarError( - () => batch.downloadingSuccess({blocks: blocksDownloaded, pendingDataColumns: []}), - new BatchError({ - code: BatchErrorCode.WRONG_STATUS, - startEpoch, - status: BatchStatus.AwaitingDownload, - expectedStatus: BatchStatus.Downloading, - }) - ); - }); + // it("should have same start slot for blocks and data requests", () => {}); - it("Should throw on inconsistent state - startProcessing", () => { - const batch = new Batch(startEpoch, config); - - expectThrowsLodestarError( - () => batch.startProcessing(), - new BatchError({ - code: BatchErrorCode.WRONG_STATUS, - startEpoch, - status: BatchStatus.AwaitingDownload, - expectedStatus: BatchStatus.AwaitingProcessing, - }) - ); + // it("should have same count for blocks and data requests", () => {}); }); - it("Should throw on inconsistent state - processingSuccess", () => { - const batch = new Batch(startEpoch, config); - - expectThrowsLodestarError( - () => batch.processingSuccess(), - new BatchError({ - code: BatchErrorCode.WRONG_STATUS, - startEpoch, - status: BatchStatus.AwaitingDownload, - expectedStatus: BatchStatus.Processing, - }) - ); + describe("downloadingSuccess", () => { + it("should handle blocks that are not in slot-wise order", () => {}); }); + + // it("Complete state flow", () => { + // const batch = new Batch(startEpoch, config); + + // // Instantion: AwaitingDownload + // expect(batch.state.status).toBe(BatchStatus.AwaitingDownload); + + // // startDownloading: AwaitingDownload -> Downloading + // batch.startDownloading(peer); + // expect(batch.state.status).toBe(BatchStatus.Downloading); + + // // downloadingError: Downloading -> AwaitingDownload + // batch.downloadingError(); + // expect(batch.state.status).toBe(BatchStatus.AwaitingDownload); + // expect(batch.getFailedPeers()[0]).toBe(peer); + + // // As of https://github.com/ChainSafe/lodestar/pull/8150, we abort the batch after a single processing error + // // commented out the rest of the flow for now + + // // retry download: AwaitingDownload -> Downloading + // // downloadingSuccess: Downloading -> AwaitingProcessing + // batch.startDownloading(peer); + // batch.downloadingSuccess({blocks: blocksDownloaded, pendingDataColumns: null}); + // expect(batch.state.status).toBe(BatchStatus.AwaitingProcessing); + + // // startProcessing: AwaitingProcessing -> Processing + // // const blocksToProcess = batch.startProcessing(); + // // expect(batch.state.status).toBe(BatchStatus.Processing); + // // expect(blocksToProcess).toBe(blocksDownloaded); + + // // processingError: Processing -> AwaitingDownload + + // // batch.processingError(new Error()); + // // expect(batch.state.status).toBe(BatchStatus.AwaitingDownload); + + // // retry download + processing: AwaitingDownload -> 
Downloading -> AwaitingProcessing -> Processing + // // processingSuccess: Processing -> AwaitingValidation + // // batch.startDownloading(peer); + // // batch.downloadingSuccess({blocks: blocksDownloaded, pendingDataColumns: null}); + // // batch.startProcessing(); + // // batch.processingSuccess(); + // // expect(batch.state.status).toBe(BatchStatus.AwaitingValidation); + + // // validationError: AwaitingValidation -> AwaitingDownload + + // // batch.validationError(new Error()); + // // expect(batch.state.status).toBe(BatchStatus.AwaitingDownload); + + // // retry download + processing + validation: AwaitingDownload -> Downloading -> AwaitingProcessing -> Processing -> AwaitingValidation + // // batch.startDownloading(peer); + // // batch.downloadingSuccess({blocks: blocksDownloaded, pendingDataColumns: null}); + // batch.startProcessing(); + // batch.processingSuccess(); + // expect(batch.state.status).toBe(BatchStatus.AwaitingValidation); + // // On validationSuccess() the batch will just be dropped and garbage collected + // }); + + // it("Should throw on inconsistent state - downloadingSuccess", () => { + // const batch = new Batch(startEpoch, config); + + // expectThrowsLodestarError( + // () => batch.downloadingSuccess({blocks: blocksDownloaded, pendingDataColumns: []}), + // new BatchError({ + // code: BatchErrorCode.WRONG_STATUS, + // startEpoch, + // status: BatchStatus.AwaitingDownload, + // expectedStatus: BatchStatus.Downloading, + // }) + // ); + // }); + + // it("Should throw on inconsistent state - startProcessing", () => { + // const batch = new Batch(startEpoch, config); + + // expectThrowsLodestarError( + // () => batch.startProcessing(), + // new BatchError({ + // code: BatchErrorCode.WRONG_STATUS, + // startEpoch, + // status: BatchStatus.AwaitingDownload, + // expectedStatus: BatchStatus.AwaitingProcessing, + // }) + // ); + // }); + + // it("Should throw on inconsistent state - processingSuccess", () => { + // const batch = new Batch(startEpoch, config); + + // expectThrowsLodestarError( + // () => batch.processingSuccess(), + // new BatchError({ + // code: BatchErrorCode.WRONG_STATUS, + // startEpoch, + // status: BatchStatus.AwaitingDownload, + // expectedStatus: BatchStatus.Processing, + // }) + // ); + // }); }); From fbc7fcb78f93af71e4fca4051dc11b46834ae7b4 Mon Sep 17 00:00:00 2001 From: Cayman Date: Tue, 2 Sep 2025 18:27:44 -0400 Subject: [PATCH 103/173] chore: fix more test --- .../test/unit/sync/range/batch.test.ts | 246 ++++++++++-------- 1 file changed, 139 insertions(+), 107 deletions(-) diff --git a/packages/beacon-node/test/unit/sync/range/batch.test.ts b/packages/beacon-node/test/unit/sync/range/batch.test.ts index ca25204b9111..ddfa756404b2 100644 --- a/packages/beacon-node/test/unit/sync/range/batch.test.ts +++ b/packages/beacon-node/test/unit/sync/range/batch.test.ts @@ -1,10 +1,10 @@ import {generateKeyPair} from "@libp2p/crypto/keys"; -import {SLOTS_PER_EPOCH} from "@lodestar/params"; +import {ForkName} from "@lodestar/params"; import {ssz} from "@lodestar/types"; -import {beforeAll, beforeEach, describe, expect, it} from "vitest"; -import {BlockSource, getBlockInput} from "../../../../src/chain/blocks/types.js"; +import {beforeEach, describe, expect, it} from "vitest"; +import {BlockInputPreData} from "../../../../src/chain/blocks/blockInput/blockInput.js"; +import {BlockInputSource} from "../../../../src/chain/blocks/blockInput/types.js"; import {computeNodeIdFromPrivateKey} from "../../../../src/network/subnets/index.js"; -import 
{EPOCHS_PER_BATCH} from "../../../../src/sync/constants.js"; import {Batch, BatchError, BatchErrorCode, BatchStatus} from "../../../../src/sync/range/batch.js"; import {CustodyConfig} from "../../../../src/util/dataColumns.js"; import {config} from "../../../utils/blocksAndData.js"; @@ -67,9 +67,15 @@ describe("sync / range / batch", async () => { }); }); - it("should not request data pre-deneb", () => {}); + it("should not request data pre-deneb", () => { + const startEpoch = config.CAPELLA_FORK_EPOCH - 1; + const batch = new Batch(startEpoch, config, custodyConfig); + expect(batch.requests.blocksRequest).toEqual({startSlot: batch.startSlot, count: batch.count, step: 1}); + expect(batch.requests.blobsRequest).toBeUndefined(); + expect(batch.requests.columnsRequest).toBeUndefined(); + }); - // it("should not request data when before availability window", () => {}); + it("should not request data when before availability window", () => {}); // it("should request data within availability window", () => {}); @@ -77,112 +83,138 @@ describe("sync / range / batch", async () => { // it("should request blobs between post-deneb and pre-fulu ", () => {}); - // it("should request columns post-fulu", () => {}); - - // it("should have same start slot for blocks and data requests", () => {}); + it("should request columns post-fulu", () => { + const startEpoch = config.FULU_FORK_EPOCH + 1; + const batch = new Batch(startEpoch, config, custodyConfig); + expect(batch.requests.blocksRequest).toEqual({startSlot: batch.startSlot, count: batch.count, step: 1}); + expect(batch.requests.blobsRequest).toBeUndefined(); + expect(batch.requests.columnsRequest).toEqual({ + startSlot: batch.startSlot, + count: batch.count, + columns: custodyConfig.sampledColumns, + }); + }); - // it("should have same count for blocks and data requests", () => {}); + it("should have same start slot and count for blocks and data requests", () => { + const startEpoch = config.FULU_FORK_EPOCH + 1; + const batch = new Batch(startEpoch, config, custodyConfig); + expect(batch.requests.blocksRequest?.startSlot).toEqual(batch.requests.columnsRequest?.startSlot); + expect(batch.requests.blocksRequest?.count).toEqual(batch.requests.columnsRequest?.count); + }); }); describe("downloadingSuccess", () => { it("should handle blocks that are not in slot-wise order", () => {}); }); - // it("Complete state flow", () => { - // const batch = new Batch(startEpoch, config); - - // // Instantion: AwaitingDownload - // expect(batch.state.status).toBe(BatchStatus.AwaitingDownload); - - // // startDownloading: AwaitingDownload -> Downloading - // batch.startDownloading(peer); - // expect(batch.state.status).toBe(BatchStatus.Downloading); - - // // downloadingError: Downloading -> AwaitingDownload - // batch.downloadingError(); - // expect(batch.state.status).toBe(BatchStatus.AwaitingDownload); - // expect(batch.getFailedPeers()[0]).toBe(peer); - - // // As of https://github.com/ChainSafe/lodestar/pull/8150, we abort the batch after a single processing error - // // commented out the rest of the flow for now - - // // retry download: AwaitingDownload -> Downloading - // // downloadingSuccess: Downloading -> AwaitingProcessing - // batch.startDownloading(peer); - // batch.downloadingSuccess({blocks: blocksDownloaded, pendingDataColumns: null}); - // expect(batch.state.status).toBe(BatchStatus.AwaitingProcessing); - - // // startProcessing: AwaitingProcessing -> Processing - // // const blocksToProcess = batch.startProcessing(); - // // 
expect(batch.state.status).toBe(BatchStatus.Processing); - // // expect(blocksToProcess).toBe(blocksDownloaded); - - // // processingError: Processing -> AwaitingDownload - - // // batch.processingError(new Error()); - // // expect(batch.state.status).toBe(BatchStatus.AwaitingDownload); - - // // retry download + processing: AwaitingDownload -> Downloading -> AwaitingProcessing -> Processing - // // processingSuccess: Processing -> AwaitingValidation - // // batch.startDownloading(peer); - // // batch.downloadingSuccess({blocks: blocksDownloaded, pendingDataColumns: null}); - // // batch.startProcessing(); - // // batch.processingSuccess(); - // // expect(batch.state.status).toBe(BatchStatus.AwaitingValidation); - - // // validationError: AwaitingValidation -> AwaitingDownload - - // // batch.validationError(new Error()); - // // expect(batch.state.status).toBe(BatchStatus.AwaitingDownload); - - // // retry download + processing + validation: AwaitingDownload -> Downloading -> AwaitingProcessing -> Processing -> AwaitingValidation - // // batch.startDownloading(peer); - // // batch.downloadingSuccess({blocks: blocksDownloaded, pendingDataColumns: null}); - // batch.startProcessing(); - // batch.processingSuccess(); - // expect(batch.state.status).toBe(BatchStatus.AwaitingValidation); - // // On validationSuccess() the batch will just be dropped and garbage collected - // }); - - // it("Should throw on inconsistent state - downloadingSuccess", () => { - // const batch = new Batch(startEpoch, config); - - // expectThrowsLodestarError( - // () => batch.downloadingSuccess({blocks: blocksDownloaded, pendingDataColumns: []}), - // new BatchError({ - // code: BatchErrorCode.WRONG_STATUS, - // startEpoch, - // status: BatchStatus.AwaitingDownload, - // expectedStatus: BatchStatus.Downloading, - // }) - // ); - // }); - - // it("Should throw on inconsistent state - startProcessing", () => { - // const batch = new Batch(startEpoch, config); - - // expectThrowsLodestarError( - // () => batch.startProcessing(), - // new BatchError({ - // code: BatchErrorCode.WRONG_STATUS, - // startEpoch, - // status: BatchStatus.AwaitingDownload, - // expectedStatus: BatchStatus.AwaitingProcessing, - // }) - // ); - // }); - - // it("Should throw on inconsistent state - processingSuccess", () => { - // const batch = new Batch(startEpoch, config); - - // expectThrowsLodestarError( - // () => batch.processingSuccess(), - // new BatchError({ - // code: BatchErrorCode.WRONG_STATUS, - // startEpoch, - // status: BatchStatus.AwaitingDownload, - // expectedStatus: BatchStatus.Processing, - // }) - // ); - // }); + it("Complete state flow", () => { + const startEpoch = 0; + const batch = new Batch(startEpoch, config, custodyConfig); + + // Instantion: AwaitingDownload + expect(batch.state.status).toBe(BatchStatus.AwaitingDownload); + + // startDownloading: AwaitingDownload -> Downloading + batch.startDownloading(peer); + expect(batch.state.status).toBe(BatchStatus.Downloading); + + // downloadingError: Downloading -> AwaitingDownload + batch.downloadingError(peer); + expect(batch.state.status).toBe(BatchStatus.AwaitingDownload); + expect(batch.getFailedPeers()[0]).toBe(peer); + + // As of https://github.com/ChainSafe/lodestar/pull/8150, we abort the batch after a single processing error + // commented out the rest of the flow for now + + // retry download: AwaitingDownload -> Downloading + // downloadingSuccess: Downloading -> AwaitingProcessing + batch.startDownloading(peer); + batch.downloadingSuccess(peer, [ + 
BlockInputPreData.createFromBlock({ + block: ssz.capella.SignedBeaconBlock.defaultValue(), + blockRootHex: "0x1234", + source: BlockInputSource.byRoot, + seenTimestampSec: Date.now() / 1000, + forkName: ForkName.capella, + daOutOfRange: false, + }), + ]); + expect(batch.state.status).toBe(BatchStatus.AwaitingProcessing); + + // startProcessing: AwaitingProcessing -> Processing + // const blocksToProcess = batch.startProcessing(); + // expect(batch.state.status).toBe(BatchStatus.Processing); + // expect(blocksToProcess).toBe(blocksDownloaded); + + // processingError: Processing -> AwaitingDownload + + // batch.processingError(new Error()); + // expect(batch.state.status).toBe(BatchStatus.AwaitingDownload); + + // retry download + processing: AwaitingDownload -> Downloading -> AwaitingProcessing -> Processing + // processingSuccess: Processing -> AwaitingValidation + // batch.startDownloading(peer); + // batch.downloadingSuccess({blocks: blocksDownloaded, pendingDataColumns: null}); + // batch.startProcessing(); + // batch.processingSuccess(); + // expect(batch.state.status).toBe(BatchStatus.AwaitingValidation); + + // validationError: AwaitingValidation -> AwaitingDownload + + // batch.validationError(new Error()); + // expect(batch.state.status).toBe(BatchStatus.AwaitingDownload); + + // retry download + processing + validation: AwaitingDownload -> Downloading -> AwaitingProcessing -> Processing -> AwaitingValidation + // batch.startDownloading(peer); + // batch.downloadingSuccess({blocks: blocksDownloaded, pendingDataColumns: null}); + batch.startProcessing(); + batch.processingSuccess(); + expect(batch.state.status).toBe(BatchStatus.AwaitingValidation); + // On validationSuccess() the batch will just be dropped and garbage collected + }); + + it("Should throw on inconsistent state - downloadingSuccess", () => { + const startEpoch = 0; + const batch = new Batch(startEpoch, config, custodyConfig); + + expectThrowsLodestarError( + () => batch.downloadingSuccess(peer, []), + new BatchError({ + code: BatchErrorCode.WRONG_STATUS, + startEpoch, + status: BatchStatus.AwaitingDownload, + expectedStatus: BatchStatus.Downloading, + }) + ); + }); + + it("Should throw on inconsistent state - startProcessing", () => { + const startEpoch = 0; + const batch = new Batch(startEpoch, config, custodyConfig); + + expectThrowsLodestarError( + () => batch.startProcessing(), + new BatchError({ + code: BatchErrorCode.WRONG_STATUS, + startEpoch, + status: BatchStatus.AwaitingDownload, + expectedStatus: BatchStatus.AwaitingProcessing, + }) + ); + }); + + it("Should throw on inconsistent state - processingSuccess", () => { + const startEpoch = 0; + const batch = new Batch(startEpoch, config, custodyConfig); + + expectThrowsLodestarError( + () => batch.processingSuccess(), + new BatchError({ + code: BatchErrorCode.WRONG_STATUS, + startEpoch, + status: BatchStatus.AwaitingDownload, + expectedStatus: BatchStatus.Processing, + }) + ); + }); }); From b9e4a62f7881ed171c4f5a2011d6a473a7ef5c62 Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Wed, 3 Sep 2025 05:49:58 +0700 Subject: [PATCH 104/173] feat: refactor downloadByRange.ts --- .../src/sync/utils/downloadByRange.ts | 397 ++++++++++-------- 1 file changed, 233 insertions(+), 164 deletions(-) diff --git a/packages/beacon-node/src/sync/utils/downloadByRange.ts b/packages/beacon-node/src/sync/utils/downloadByRange.ts index d6e9d3741321..f2731e458cd1 100644 --- a/packages/beacon-node/src/sync/utils/downloadByRange.ts +++ 
b/packages/beacon-node/src/sync/utils/downloadByRange.ts @@ -14,16 +14,13 @@ import {validateBlockBlobSidecars} from "../../chain/validation/blobSidecar.js"; import {validateBlockDataColumnSidecars} from "../../chain/validation/dataColumnSidecar.js"; import {INetwork, PeerAction} from "../../network/index.js"; import {PeerIdStr} from "../../util/peerId.js"; +import {DownloadByRootErrorCode} from "./downloadByRoot.js"; import {RangeSyncType} from "./remoteSyncType.js"; /** * * blocks - * - check all slots are within range of startSlot (inclusive) through startSlot + count (exclusive) - * - don't have more than count number of blocks - * - slots are in ascending order - * - must allow for skip slots - * - check is a chain of blocks where via parentRoot matches hashTreeRoot of block before + * * blobs * - check that expected sidecar count matches the returned count @@ -69,10 +66,6 @@ export type DownloadByRangeResponses = { columnSidecars?: fulu.DataColumnSidecars; }; -export type ValidatedDownloadByRangeResponses = DownloadByRangeResponses & { - blockRoots?: Uint8Array[]; -}; - export type DownloadAndCacheByRangeProps = DownloadByRangeRequests & { config: ChainForkConfig; cache: SeenBlockInput; @@ -96,12 +89,32 @@ export type CacheByRangeResponsesProps = { cache: SeenBlockInput; syncType: RangeSyncType; peerIdStr: PeerIdStr; - responses: ValidatedDownloadByRangeResponses; + responses: ValidatedResponses; batchBlocks: IBlockInput[]; }; +export type ValidatedBlock = { + blockRoot: Uint8Array; + block: SignedBeaconBlock; +}; + +export type ValidatedBlobSidecars = { + blockRoot: Uint8Array; + blobSidecars: deneb.BlobSidecars; +}; + +export type ValidatedColumnSidecars = { + blockRoot: Uint8Array; + columnSidecars: fulu.DataColumnSidecars; +}; + +export type ValidatedResponses = { + validatedBlocks?: ValidatedBlock[]; + validatedBlobSidecars?: ValidatedBlobSidecars[]; + validatedColumnSidecars?: ValidatedColumnSidecars[]; +}; + export function cacheByRangeResponses({ - config, network, cache, syncType, @@ -113,13 +126,12 @@ export function cacheByRangeResponses({ const seenTimestampSec = Date.now() / 1000; const updatedBatchBlocks = new Map(batchBlocks.map((block) => [block.slot, block])); - const blocks = responses.blocks ?? []; - const blockRoots = responses.blockRoots ?? []; + const blocks = responses.validatedBlocks ?? []; for (let i = 0; i < blocks.length; i++) { - const block = blocks[i]; - const existing = updatedBatchBlocks.get(block.message.slot); - const blockRoot = blockRoots[i] ?? config.getForkTypes(block.message.slot).BeaconBlock.hashTreeRoot(block.message); + const {block, blockRoot} = blocks[i]; const blockRootHex = toRootHex(blockRoot); + + const existing = updatedBatchBlocks.get(block.message.slot); if (existing) { try { // will throw if root hex does not match (meaning we are following the wrong chain) @@ -152,12 +164,9 @@ export function cacheByRangeResponses({ } } - for (const blobSidecar of responses.blobSidecars ?? []) { - const blockRoot = config - .getForkTypes(blobSidecar.signedBlockHeader.message.slot) - .BeaconBlockHeader.hashTreeRoot(blobSidecar.signedBlockHeader.message); + for (const {blockRoot, blobSidecars} of responses.validatedBlobSidecars ?? 
[]) { + const existing = updatedBatchBlocks.get(blobSidecars[0].signedBlockHeader.message.slot); const blockRootHex = toRootHex(blockRoot); - const existing = updatedBatchBlocks.get(blobSidecar.signedBlockHeader.message.slot); if (existing) { if (!isBlockInputBlobs(existing)) { throw new DownloadByRangeError({ @@ -169,17 +178,19 @@ export function cacheByRangeResponses({ }); } try { - // will throw if root hex does not match (meaning we are following the wrong chain) - existing.addBlob( - { - blobSidecar, - blockRootHex, - seenTimestampSec, - peerIdStr, - source, - }, - {throwOnDuplicateAdd: false} - ); + for (const blobSidecar of blobSidecars) { + // will throw if root hex does not match (meaning we are following the wrong chain) + existing.addBlob( + { + blobSidecar, + blockRootHex, + seenTimestampSec, + peerIdStr, + source, + }, + {throwOnDuplicateAdd: false} + ); + } } catch (err) { network.logger.debug("Following wrong chain for ByRange request", {}, err as Error); if (syncType === RangeSyncType.Finalized) { @@ -188,23 +199,24 @@ export function cacheByRangeResponses({ break; } } else { - const blockInput = cache.getByBlob({ - blockRootHex, - blobSidecar, - source, - peerIdStr, - seenTimestampSec, - }); + let blockInput!: IBlockInput; + for (const blobSidecar of blobSidecars) { + blockInput = cache.getByBlob({ + blockRootHex, + blobSidecar, + source, + peerIdStr, + seenTimestampSec, + }); + } updatedBatchBlocks.set(blockInput.slot, blockInput); } } - for (const columnSidecar of responses.columnSidecars ?? []) { - const blockRoot = config - .getForkTypes(columnSidecar.signedBlockHeader.message.slot) - .BeaconBlockHeader.hashTreeRoot(columnSidecar.signedBlockHeader.message); + for (const {blockRoot, columnSidecars} of responses.validatedColumnSidecars ?? []) { + const existing = updatedBatchBlocks.get(columnSidecars[0].signedBlockHeader.message.slot); const blockRootHex = toRootHex(blockRoot); - const existing = updatedBatchBlocks.get(columnSidecar.signedBlockHeader.message.slot); + if (existing) { if (!isBlockInputColumns(existing)) { throw new DownloadByRangeError({ @@ -216,17 +228,19 @@ export function cacheByRangeResponses({ }); } try { - // will throw if root hex does not match (meaning we are following the wrong chain) - existing.addColumn( - { - columnSidecar, - blockRootHex, - seenTimestampSec, - peerIdStr, - source, - }, - {throwOnDuplicateAdd: false} - ); + for (const columnSidecar of columnSidecars) { + // will throw if root hex does not match (meaning we are following the wrong chain) + existing.addColumn( + { + columnSidecar, + blockRootHex, + seenTimestampSec, + peerIdStr, + source, + }, + {throwOnDuplicateAdd: false} + ); + } } catch (err) { network.logger.debug("Following wrong chain for ByRange request", {}, err as Error); if (syncType === RangeSyncType.Finalized) { @@ -235,13 +249,16 @@ export function cacheByRangeResponses({ break; } } else { - const blockInput = cache.getByColumn({ - blockRootHex, - columnSidecar, - source, - peerIdStr, - seenTimestampSec, - }); + let blockInput!: IBlockInput; + for (const columnSidecar of columnSidecars) { + blockInput = cache.getByColumn({ + blockRootHex, + columnSidecar, + peerIdStr, + source, + seenTimestampSec, + }); + } updatedBatchBlocks.set(blockInput.slot, blockInput); } } @@ -258,11 +275,7 @@ export async function downloadByRange({ blocksRequest, blobsRequest, columnsRequest, -}: Omit): Promise { - const startSlot = (blocksRequest?.startSlot ?? blobsRequest?.startSlot ?? 
columnsRequest?.startSlot) as number; - const count = (blocksRequest?.count ?? blobsRequest?.count ?? columnsRequest?.count) as number; - const slotRangeString = `${startSlot} - ${startSlot + count}`; - +}: Omit): Promise { let response: DownloadByRangeResponses; try { response = await requestByRange({ @@ -277,22 +290,21 @@ export async function downloadByRange({ throw new DownloadByRangeError({ code: DownloadByRangeErrorCode.REQ_RESP_ERROR, peerId: peerIdStr, - slotRange: slotRangeString, + slotRange: buildSlotRangeString({blocksRequest, blobsRequest, columnsRequest}), }); } - const blockRoots = await validateResponses({ + const validated = await validateResponses({ config, peerIdStr, - slotRangeString, + batchBlocks, blocksRequest, blobsRequest, columnsRequest, - batchBlocks, ...response, }); - return {...response, blockRoots}; + return validated; } /** @@ -352,61 +364,61 @@ export async function requestByRange({ */ export async function validateResponses({ config, - slotRangeString, + batchBlocks, blocksRequest, blobsRequest, columnsRequest, blocks, blobSidecars, columnSidecars, - batchBlocks, }: DownloadByRangeRequests & DownloadByRangeResponses & { config: ChainForkConfig; peerIdStr: string; - slotRangeString: string; batchBlocks?: IBlockInput[]; - }): Promise { + }): Promise { // Blocks are always required for blob/column validation // If a blocksRequest is provided, blocks have just been downloaded // If no blocksRequest is provided, batchBlocks must have been provided from cache - if (blocksRequest && !blocks) { + if ((blobsRequest || columnsRequest) && !(blocks || batchBlocks)) { throw new DownloadByRangeError( { - code: DownloadByRangeErrorCode.MISSING_BLOCKS_RESPONSE, - slotRange: slotRangeString, + code: DownloadByRangeErrorCode.MISSING_BLOCKS, + slotRange: buildSlotRangeString({blobsRequest, columnsRequest}), }, - "No blocks request to validate requests against" + "No blocks to validate data requests against" ); } - if (!blocksRequest && !batchBlocks) { - throw new DownloadByRangeError( - { - code: DownloadByRangeErrorCode.MISSING_BLOCKS_RESPONSE, - slotRange: slotRangeString, - }, - "No blocks request to validate requests against" - ); + + const validatedResponses: ValidatedResponses = {}; + + if (blocksRequest) { + validatedResponses.validatedBlocks = validateBlockByRangeResponse(config, blocksRequest, blocks ?? []); } - // Set blocks for validation below - // blocks = blocks ?? batchBlocks?.map((blockInput) => blockInput.getBlock()) ?? []; + const dataRequest = blobsRequest ?? columnsRequest; + if (!dataRequest) { + return validatedResponses; + } - const blockRoots = blocksRequest ? validateBlockByRangeResponse(config, blocksRequest, blocks ?? []) : []; + const dataRequestBlocks = getBlocksForDataValidation( + dataRequest, + batchBlocks, + blocksRequest ? validatedResponses.validatedBlocks : undefined + ); if (blobsRequest) { if (!blobSidecars) { throw new DownloadByRangeError( { code: DownloadByRangeErrorCode.MISSING_BLOBS_RESPONSE, - slotRange: slotRangeString, + slotRange: buildSlotRangeString({blobsRequest, columnsRequest}), }, "No blobSidecars to validate against blobsRequest" ); } - const requested = getDataRequestBlocks(blobsRequest, batchBlocks, blocks ? 
{blocks, blockRoots} : undefined); - await validateBlobsByRangeResponse(requested.blocks, requested.blockRoots, blobSidecars); + validatedResponses.validatedBlobSidecars = await validateBlobsByRangeResponse(dataRequestBlocks, blobSidecars); } if (columnsRequest) { @@ -414,28 +426,50 @@ export async function validateResponses({ throw new DownloadByRangeError( { code: DownloadByRangeErrorCode.MISSING_COLUMNS_RESPONSE, - slotRange: slotRangeString, + slotRange: buildSlotRangeString({blobsRequest, columnsRequest}), }, "No columnSidecars to check columnRequest against" ); } - const requested = getDataRequestBlocks(columnsRequest, batchBlocks, blocks ? {blocks, blockRoots} : undefined); - await validateColumnsByRangeResponse(columnsRequest, requested.blocks, requested.blockRoots, columnSidecars); + validatedResponses.validatedColumnSidecars = await validateColumnsByRangeResponse( + columnsRequest, + dataRequestBlocks, + columnSidecars + ); } - return blockRoots; + + return validatedResponses; } /** * Should not be called directly. Only exported for unit testing purposes + * + * - check all slots are within range of startSlot (inclusive) through startSlot + count (exclusive) + * - don't have more than count number of blocks + * - slots are in ascending order + * - must allow for skip slots + * - check is a chain of blocks where via parentRoot matches hashTreeRoot of block before */ export function validateBlockByRangeResponse( config: ChainForkConfig, blocksRequest: phase0.BeaconBlocksByRangeRequest, blocks: SignedBeaconBlock[] -): Uint8Array[] { +): ValidatedBlock[] { const {startSlot, count} = blocksRequest; + // TODO(fulu): This was added by @twoeths in #8150 but it breaks for epochs with 0 blocks during chain + // liveness issues. See comment https://github.com/ChainSafe/lodestar/issues/8147#issuecomment-3246434697 + // if (!blocks.length) { + // throw new DownloadByRangeError( + // { + // code: DownloadByRangeErrorCode.MISSING_BLOCKS_RESPONSE, + // expectedCount: blocksRequest.count, + // }, + // "Zero blocks in response" + // ); + // } + if (blocks.length > count) { throw new DownloadByRangeError( { @@ -447,19 +481,23 @@ export function validateBlockByRangeResponse( ); } - const lastValidSlot = startSlot + count; + const lastValidSlot = startSlot + count - 1; for (let i = 0; i < blocks.length; i++) { const slot = blocks[i].message.slot; - if (slot > lastValidSlot) { + if (slot < startSlot || slot > lastValidSlot) { throw new DownloadByRangeError( { code: DownloadByRangeErrorCode.OUT_OF_RANGE_BLOCKS, + slot, }, - "Blocks with slots outside of requested range in BeaconBlocksByRange response" + "Blocks in response outside of requested slot range" ); } - if (i < blocks.length - 1 && slot >= blocks[i + 1].message.slot) { + + // do not check for out of order on first block, and for subsequent blocks make sure that + // the current block in a later slot than the one prior + if (i !== 0 && slot <= blocks[i - 1].message.slot) { throw new DownloadByRangeError( { code: DownloadByRangeErrorCode.OUT_OF_ORDER_BLOCKS, @@ -469,38 +507,45 @@ export function validateBlockByRangeResponse( } } - const blockRoots = blocks.map((block) => - config.getForkTypes(block.message.slot).BeaconBlock.hashTreeRoot(block.message) - ); - for (let i = 0; i < blocks.length - 1; i++) { - // compare the block root against the next block's parent root - const blockRoot = blockRoots[i]; - const parentRoot = blocks[i + 1].message.parentRoot; - if (Buffer.compare(blockRoot, parentRoot) !== 0) { - throw new DownloadByRangeError( - { 
- code: DownloadByRangeErrorCode.PARENT_ROOT_MISMATCH, - parentSlot: blocks[i].message.slot, - expected: toRootHex(blockRoot), - actual: toRootHex(parentRoot), - }, - `Block parent root does not match the previous block's root in BeaconBlocksByRange response` - ); + // assumes all blocks are from the same fork. Batch only generated epoch-wise requests starting at slot + // 0 of the epoch + const type = config.getForkTypes(blocks[0].message.slot).BeaconBlock; + const response: {block: SignedBeaconBlock; blockRoot: Uint8Array}[] = []; + + for (let i = 0; i < blocks.length; i++) { + const block = blocks[i]; + const blockRoot = type.hashTreeRoot(block.message); + response.push({block, blockRoot}); + + if (i < blocks.length - 1) { + // compare the block root against the next block's parent root + const parentRoot = blocks[i + 1].message.parentRoot; + if (Buffer.compare(blockRoot, parentRoot) !== 0) { + throw new DownloadByRangeError( + { + code: DownloadByRangeErrorCode.PARENT_ROOT_MISMATCH, + slot: blocks[i].message.slot, + expected: prettyBytes(blockRoot), + actual: prettyBytes(parentRoot), + }, + `Block parent root does not match the previous block's root in BeaconBlocksByRange response` + ); + } } } - return blockRoots; + + return response; } /** * Should not be called directly. Only exported for unit testing purposes */ export async function validateBlobsByRangeResponse( - requestBlocks: SignedBeaconBlock[], - requestBlockRoots: Uint8Array[], + dataRequestBlocks: ValidatedBlock[], blobSidecars: deneb.BlobSidecars -): Promise { - const expectedBlobCount = requestBlocks.reduce( - (acc, block) => (block as SignedBeaconBlock).message.body.blobKzgCommitments.length + acc, +): Promise { + const expectedBlobCount = dataRequestBlocks.reduce( + (acc, {block}) => (block as SignedBeaconBlock).message.body.blobKzgCommitments.length + acc, 0 ); if (blobSidecars.length > expectedBlobCount) { @@ -524,10 +569,9 @@ export async function validateBlobsByRangeResponse( ); } - const validateSidecarsPromises: Promise[] = []; - for (let blockIndex = 0, blobSidecarIndex = 0; blockIndex < requestBlocks.length; blockIndex++) { - const block = requestBlocks[blockIndex]; - const blockRoot = requestBlockRoots[blockIndex]; + const validateSidecarsPromises: Promise[] = []; + for (let blockIndex = 0, blobSidecarIndex = 0; blockIndex < dataRequestBlocks.length; blockIndex++) { + const {block, blockRoot} = dataRequestBlocks[blockIndex]; const blockKzgCommitments = (block as SignedBeaconBlock).message.body.blobKzgCommitments; if (blockKzgCommitments.length === 0) { continue; @@ -537,12 +581,14 @@ export async function validateBlobsByRangeResponse( blobSidecarIndex += blockKzgCommitments.length; validateSidecarsPromises.push( - validateBlockBlobSidecars(block.message.slot, blockRoot, blockKzgCommitments.length, blockBlobSidecars) + validateBlockBlobSidecars(block.message.slot, blockRoot, blockKzgCommitments.length, blockBlobSidecars).then( + () => ({blockRoot, blobSidecars}) + ) ); } // Await all sidecar validations in parallel - await Promise.all(validateSidecarsPromises); + return await Promise.all(validateSidecarsPromises); } /** @@ -550,11 +596,10 @@ export async function validateBlobsByRangeResponse( */ export async function validateColumnsByRangeResponse( request: fulu.DataColumnSidecarsByRangeRequest, - requestBlocks: SignedBeaconBlock[], - requestBlockRoots: Uint8Array[], + dataRequestBlocks: ValidatedBlock[], columnSidecars: fulu.DataColumnSidecars -): Promise { - const expectedColumnCount = 
requestBlocks.reduce((acc, block) => { +): Promise { + const expectedColumnCount = dataRequestBlocks.reduce((acc, {block}) => { return (block as SignedBeaconBlock).message.body.blobKzgCommitments.length > 0 ? request.columns.length + acc : acc; @@ -580,10 +625,9 @@ export async function validateColumnsByRangeResponse( ); } - const validateSidecarsPromises: Promise[] = []; - for (let blockIndex = 0, columnSidecarIndex = 0; blockIndex < requestBlocks.length; blockIndex++) { - const block = requestBlocks[blockIndex]; - const blockRoot = requestBlockRoots[blockIndex]; + const validateSidecarsPromises: Promise[] = []; + for (let blockIndex = 0, columnSidecarIndex = 0; blockIndex < dataRequestBlocks.length; blockIndex++) { + const {block, blockRoot} = dataRequestBlocks[blockIndex]; const blockKzgCommitments = (block as SignedBeaconBlock).message.body.blobKzgCommitments; const expectedColumns = blockKzgCommitments.length ? request.columns.length : 0; @@ -618,54 +662,69 @@ export async function validateColumnsByRangeResponse( } validateSidecarsPromises.push( - validateBlockDataColumnSidecars(block.message.slot, blockRoot, blockKzgCommitments.length, blockColumnSidecars) + validateBlockDataColumnSidecars( + block.message.slot, + blockRoot, + blockKzgCommitments.length, + blockColumnSidecars + ).then(() => ({blockRoot, columnSidecars})) ); } // Await all sidecar validations in parallel - await Promise.all(validateSidecarsPromises); + return await Promise.all(validateSidecarsPromises); } /** - * Given a data request, return only the blocks and roots that correspond to the data request (sorted) + * Given a data request, return only the blocks and roots that correspond to the data request (sorted). Assumes that + * cached have slots that are all before the current batch of downloaded blocks */ -export function getDataRequestBlocks( +export function getBlocksForDataValidation( dataRequest: {startSlot: Slot; count: number}, cached: IBlockInput[] | undefined, - current: {blocks: SignedBeaconBlock[]; blockRoots: Uint8Array[]} | undefined -): {blocks: SignedBeaconBlock[]; blockRoots: Uint8Array[]} { + current: ValidatedBlock[] | undefined +): ValidatedBlock[] { const startSlot = dataRequest.startSlot; const endSlot = startSlot + dataRequest.count; // Organize cached blocks and current blocks, only including those in the requested slot range - const dataRequestBlocks: SignedBeaconBlock[] = []; - const dataRequestBlockRoots: Uint8Array[] = []; + const dataRequestBlocks: ValidatedBlock[] = []; let lastSlot = startSlot - 1; + if (cached) { for (let i = 0; i < cached.length; i++) { const blockInput = cached[i]; if (blockInput.slot >= startSlot && blockInput.slot < endSlot && blockInput.slot > lastSlot) { - dataRequestBlocks.push(blockInput.getBlock()); - dataRequestBlockRoots.push(fromHex(blockInput.blockRootHex)); + dataRequestBlocks.push({block: blockInput.getBlock(), blockRoot: fromHex(blockInput.blockRootHex)}); lastSlot = blockInput.slot; } } } + if (current) { - const {blocks, blockRoots} = current; - for (let i = 0; i < blocks.length; i++) { - const block = blocks[i]; + for (let i = 0; i < current.length; i++) { + const block = current[i].block; if (block.message.slot >= startSlot && block.message.slot < endSlot && block.message.slot > lastSlot) { - dataRequestBlocks.push(block); - dataRequestBlockRoots.push(blockRoots[i]); + dataRequestBlocks.push(current[i]); lastSlot = block.message.slot; } } } - return {blocks: dataRequestBlocks, blockRoots: dataRequestBlockRoots}; + + return dataRequestBlocks; +} + 
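+// Illustrative call shape for the helper above (hypothetical locals; the real call site is
+// validateResponses). Cached batch blocks are merged with freshly validated blocks so the
+// data response can be checked against every block in the requested range:
+//
+//   const dataRequestBlocks = getBlocksForDataValidation(
+//     blobsRequest,          // {startSlot, count} of the data request
+//     cachedBatchBlocks,     // IBlockInput[] already held by the Batch (all earlier slots)
+//     justValidatedBlocks    // ValidatedBlock[] from validateBlockByRangeResponse()
+//   );
+//   await validateBlobsByRangeResponse(dataRequestBlocks, blobSidecars);
+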
+function buildSlotRangeString({blocksRequest, blobsRequest, columnsRequest}: DownloadByRangeRequests): string {
+  const startSlot = blocksRequest?.startSlot ?? blobsRequest?.startSlot ?? columnsRequest?.startSlot;
+  const count = blocksRequest?.count ?? blobsRequest?.count ?? columnsRequest?.count;
+  if (startSlot && count) {
+    return `${startSlot} - ${startSlot + count}`;
+  }
+  return "[error calculating slotRange]";
 }
 
 export enum DownloadByRangeErrorCode {
+  MISSING_BLOCKS = "DOWNLOAD_BY_RANGE_ERROR_MISSING_BLOCKS",
   MISSING_BLOCKS_REQUEST = "DOWNLOAD_BY_RANGE_ERROR_MISSING_BLOCKS_REQUEST",
   MISSING_BLOCKS_RESPONSE = "DOWNLOAD_BY_RANGE_ERROR_MISSING_BLOCKS_RESPONSE",
   MISSING_BLOBS_REQUEST = "DOWNLOAD_BY_RANGE_ERROR_MISSING_BLOBS_REQUEST",
@@ -699,18 +758,27 @@ export enum DownloadByRangeErrorCode {
 }
 
 export type DownloadByRangeErrorType =
+  | {
+      code: DownloadByRootErrorCode.MISSING_BLOCK_RESPONSE;
+      expectedCount: number;
+    }
   | {
       code:
-        | DownloadByRangeErrorCode.MISSING_BLOCKS_REQUEST
-        | DownloadByRangeErrorCode.MISSING_BLOCKS_RESPONSE
-        | DownloadByRangeErrorCode.MISSING_BLOBS_REQUEST
+        | DownloadByRangeErrorCode.MISSING_BLOCKS
         | DownloadByRangeErrorCode.MISSING_BLOBS_RESPONSE
-        | DownloadByRangeErrorCode.MISSING_COLUMNS_REQUEST
-        | DownloadByRangeErrorCode.MISSING_COLUMNS_RESPONSE
-        | DownloadByRangeErrorCode.INVALID_DATA_REQUEST
-        | DownloadByRangeErrorCode.MISSING_DATA_REQUEST;
+        | DownloadByRangeErrorCode.MISSING_COLUMNS_RESPONSE;
+      // | DownloadByRangeErrorCode.MISSING_BLOCKS_REQUEST
+      // | DownloadByRangeErrorCode.MISSING_BLOBS_REQUEST
+      // | DownloadByRangeErrorCode.MISSING_COLUMNS_REQUEST
+      // | DownloadByRangeErrorCode.INVALID_DATA_REQUEST
+      // | DownloadByRangeErrorCode.MISSING_DATA_REQUEST;
       slotRange: string;
     }
+  | {
+      code: DownloadByRootErrorCode.MISSING_BLOCK_RESPONSE;
+      peer: string;
+      expectedCount: number;
+    }
   | {
      code: DownloadByRangeErrorCode.START_SLOT_MISMATCH;
      blockStartSlot: number;
@@ -723,6 +791,7 @@ export type DownloadByRangeErrorType =
     }
   | {
       code: DownloadByRangeErrorCode.OUT_OF_RANGE_BLOCKS;
+      slot: number;
     }
   | {
       code: DownloadByRangeErrorCode.OUT_OF_ORDER_BLOCKS;
@@ -739,7 +808,7 @@ export type DownloadByRangeErrorType =
     }
   | {
       code: DownloadByRangeErrorCode.PARENT_ROOT_MISMATCH;
-      parentSlot: number;
+      slot: number;
       expected: string;
       actual: string;
     }

From 604f0cad21f41b3e6ecb7dda92aad1eea62ae4fc Mon Sep 17 00:00:00 2001
From: matthewkeil
Date: Wed, 3 Sep 2025 17:57:37 +0700
Subject: [PATCH 105/173] feat: add AI analysis and unit test cases for
 downloadByRange

---
 .../src/sync/utils/downloadByRange.ts         |  66 +++
 .../test/unit/sync/range/batch.test.ts        | 106 +++++
 .../unit/sync/utils/downloadByRange.test.ts   | 378 ++++++++----------
 3 files changed, 345 insertions(+), 205 deletions(-)

diff --git a/packages/beacon-node/src/sync/utils/downloadByRange.ts b/packages/beacon-node/src/sync/utils/downloadByRange.ts
index f2731e458cd1..a22497fa166a 100644
--- a/packages/beacon-node/src/sync/utils/downloadByRange.ts
+++ b/packages/beacon-node/src/sync/utils/downloadByRange.ts
@@ -17,6 +17,72 @@ import {PeerIdStr} from "../../util/peerId.js";
 import {DownloadByRootErrorCode} from "./downloadByRoot.js";
 import {RangeSyncType} from "./remoteSyncType.js";
 
+/**
+ * Findings from an AI-assisted review of this module, kept here for follow-up work.
+ *
+ * Architecture Analysis & Optimization Suggestions:
+ *
+ * 1. 
**Reduce Code Duplication** + * - cacheByRangeResponses has nearly identical logic repeated 3 times (blocks, blobs, columns) + * - Extract common caching pattern into a generic helper function + * - Use strategy pattern or polymorphism to handle type-specific operations + * + * 2. **Improve Error Handling Consistency** + * - Wrong chain errors only break loops but don't propagate properly + * - Consider returning error status alongside partial results for better upstream handling + * - Standardize peer reporting logic across all error cases + * + * 3. **Optimize Data Structure Usage** + * - updatedBatchBlocks Map could be pre-sized based on expected slot range + * - Consider using a more efficient data structure for slot-based lookups + * - Avoid multiple iterations over same data in validation functions + * + * 4. **Simplify Validation Flow** + * - validateResponses has complex conditional logic that could be streamlined + * - Consider builder pattern for constructing ValidatedResponses + * - Separate concerns: structural validation vs cryptographic validation + * + * 5. **Performance Improvements** + * - Parallel validation in validateBlobsByRangeResponse/validateColumnsByRangeResponse is good + * - Consider batching validation operations to reduce Promise overhead + * - Pre-allocate arrays where sizes are known (e.g., expectedBlobCount) + * + * 6. **Type Safety Enhancements** + * - DAType checking happens after operations in cacheByRangeResponses + * - Move type checks earlier to fail fast + * - Use discriminated unions for better type narrowing + * + * 7. **Memory Efficiency** + * - Avoid creating intermediate arrays in validation (e.g., blockBlobSidecars slice) + * - Use iterators where possible instead of array slicing + * - Consider streaming validation for large responses + * + * 8. **API Design Improvements** + * - Too many similar type definitions (ValidatedBlock, ValidatedBlobSidecars, etc.) + * - Consider generic ValidatedData type + * - Reduce number of exported types by using namespaces or modules + * + * 9. **Logging and Observability** + * - Add structured logging with correlation IDs for request tracking + * - Include metrics for validation performance + * - Log partial success scenarios more clearly + * + * 10. **Simplify getBlocksForDataValidation** + * - Complex slot filtering logic could be extracted + * - Consider using Set for duplicate detection instead of lastSlot tracking + * - Validate assumption that cached blocks come before current blocks + * + * 11. **Request Coordination** + * - requestByRange uses mutable variables with Promise.all - consider Promise.allSettled + * - Add timeout handling for network requests + * - Consider request prioritization based on sync type + * + * 12. 
**Validation Optimization** + * - validateBlockByRangeResponse computes blockRoot for all blocks even on failure + * - Consider lazy evaluation or early exit strategies + * - Cache fork type lookup instead of calling config.getForkTypes repeatedly + */ + /** * * blocks diff --git a/packages/beacon-node/test/unit/sync/range/batch.test.ts b/packages/beacon-node/test/unit/sync/range/batch.test.ts index ddfa756404b2..3b989f5108e6 100644 --- a/packages/beacon-node/test/unit/sync/range/batch.test.ts +++ b/packages/beacon-node/test/unit/sync/range/batch.test.ts @@ -11,6 +11,112 @@ import {config} from "../../../utils/blocksAndData.js"; import {expectThrowsLodestarError} from "../../../utils/errors.js"; import {validPeerIdStr} from "../../../utils/peer.js"; +/** + * Should not be called directly. Only exported for unit testing purposes + */ +// export function validateRequests({ +// config, +// daOutOfRange, +// blocksRequest, +// blobsRequest, +// columnsRequest, +// }: DownloadByRangeRequests & Pick): string { +// const startSlot = (blocksRequest?.startSlot ?? blobsRequest?.startSlot ?? columnsRequest?.startSlot) as number; +// const count = (blocksRequest?.count ?? blobsRequest?.count ?? columnsRequest?.count) as number; +// const slotRange = `${startSlot} - ${startSlot + count}`; +// const dataRequest = blobsRequest ?? columnsRequest; + +// if (!blocksRequest) { +// throw new DownloadByRangeError({ +// code: DownloadByRangeErrorCode.MISSING_BLOCKS_REQUEST, +// slotRange, +// }); +// } + +// if (daOutOfRange) { +// if (dataRequest) { +// throw new DownloadByRangeError( +// { +// code: DownloadByRangeErrorCode.INVALID_DATA_REQUEST, +// slotRange, +// }, +// "Cannot request data if it is outside of the availability range" +// ); +// } + +// return slotRange; +// } + +// if (!dataRequest) { +// throw new DownloadByRangeError( +// { +// code: DownloadByRangeErrorCode.MISSING_DATA_REQUEST, +// slotRange, +// }, +// "Must request data if it is available" +// ); +// } + +// if (blobsRequest && columnsRequest) { +// throw new DownloadByRangeError( +// { +// code: DownloadByRangeErrorCode.INVALID_DATA_REQUEST, +// slotRange, +// }, +// "Cannot request both blob and column data in the same slot range" +// ); +// } + +// const forkName = config.getForkName(startSlot); +// if (!isForkPostDeneb(forkName)) { +// throw new DownloadByRangeError( +// { +// code: DownloadByRangeErrorCode.INVALID_DATA_REQUEST, +// slotRange, +// }, +// "Cannot request data pre-deneb" +// ); +// } + +// if (isForkPostDeneb(forkName) && !isForkPostFulu(forkName) && !blobsRequest) { +// throw new DownloadByRangeError( +// { +// code: DownloadByRangeErrorCode.MISSING_BLOBS_REQUEST, +// slotRange, +// }, +// "Must request blobs for blob-only forks" +// ); +// } + +// if (isForkPostFulu(forkName) && !columnsRequest) { +// throw new DownloadByRangeError( +// { +// code: DownloadByRangeErrorCode.MISSING_COLUMNS_REQUEST, +// slotRange, +// }, +// "Must request columns for forks with columns" +// ); +// } + +// if (blocksRequest.startSlot !== dataRequest.startSlot) { +// throw new DownloadByRangeError({ +// code: DownloadByRangeErrorCode.START_SLOT_MISMATCH, +// blockStartSlot: blocksRequest.startSlot, +// dataStartSlot: dataRequest.startSlot, +// }); +// } + +// if (blocksRequest.count !== dataRequest.count) { +// throw new DownloadByRangeError({ +// code: DownloadByRangeErrorCode.COUNT_MISMATCH, +// blockCount: blocksRequest.count, +// dataCount: dataRequest.count, +// }); +// } + +// return slotRange; +// } + describe("sync / range / 
batch", async () => { // Common mock data const privateKey = await generateKeyPair("secp256k1"); diff --git a/packages/beacon-node/test/unit/sync/utils/downloadByRange.test.ts b/packages/beacon-node/test/unit/sync/utils/downloadByRange.test.ts index d01ad5874b33..0771f44f81db 100644 --- a/packages/beacon-node/test/unit/sync/utils/downloadByRange.test.ts +++ b/packages/beacon-node/test/unit/sync/utils/downloadByRange.test.ts @@ -1,5 +1,5 @@ import {ForkName} from "@lodestar/params"; -import {SignedBeaconBlock, WithBytes, deneb, ssz} from "@lodestar/types"; +import {SignedBeaconBlock, WithBytes, deneb, fulu, ssz} from "@lodestar/types"; import {Mock, beforeAll, beforeEach, describe, expect, it, vi} from "vitest"; import {INetwork} from "../../../../src/network/index.js"; import { @@ -12,6 +12,26 @@ import { } from "../../../../src/sync/utils/downloadByRange.js"; import {config, generateChainOfBlockMaybeSidecars, slots} from "../../../utils/blocksAndData.js"; +/** + * Logic errors and gaps identified during test case creation: + * + * INSERT_LOGIC_ERROR_BULLET_POINTS_HERE + * + * - validateBlockByRangeResponse: Commented out zero blocks check breaks during chain liveness issues (line 445-453) + * - validateBlobsByRangeResponse: Missing validation that blob sidecars are in consecutive (slot, index) order as per spec + * - validateColumnsByRangeResponse: Missing validation that column sidecars are in consecutive (slot, index) order + * - cacheByRangeResponses: Error handling for wrong chain only breaks loop but doesn't throw/propagate error properly + * - getBlocksForDataValidation: No validation that cached blocks are actually before current blocks as assumed in comments + * - validateResponses: Missing validation that blocks and data requests have matching/compatible slot ranges + * - downloadByRange: Original error details are lost when catching and re-throwing REQ_RESP_ERROR + * - validateBlobsByRangeResponse: Doesn't validate blob indices are sequential (0, 1, 2...) 
within each block + * - validateColumnsByRangeResponse: Logic assumes all requested columns present but doesn't validate properly + * - cacheByRangeResponses: Type checking for DAType mismatch happens after attempting operations + * - validateBlockByRangeResponse: Parent root validation only checks consecutive blocks, missing skip slot handling + * - requestByRange: No timeout handling for concurrent network requests + * - validateResponses: batchBlocks parameter can be undefined but not properly handled in all cases + */ + describe("downloadByRange", () => { const peerIdStr = "0x1234567890abcdef"; // let cache: SeenBlockInputCache; @@ -24,227 +44,175 @@ describe("downloadByRange", () => { let networkResponse!: { blocks: WithBytes[]; blobSidecars: deneb.BlobSidecars; + columnSidecars: fulu.DataColumnSidecars; }; let expected!: DownloadByRangeResponses; + beforeAll(() => { - // expectedBlobCount = count * minBlobs; - requests = { - blocksRequest: {startSlot, count, step: 1}, - blobsRequest: {count, startSlot}, - }; - const blockAndBlobs = generateChainOfBlockMaybeSidecars(ForkName.deneb, count); - const blobSidecars = blockAndBlobs.flatMap(({blobSidecars}) => blobSidecars); - networkResponse = { - blocks: blockAndBlobs.map(({block}) => ({bytes: new Uint8Array(), data: block})), - blobSidecars, - }; - expected = { - blocks: blockAndBlobs.map(({block}) => block), - blobSidecars, - }; + // Test setup code here }); - beforeEach(() => { - // const abortController = new AbortController(); - // const signal = abortController.signal; - // cache = new SeenBlockInputCache({ - // config, - // custodyConfig, - // clock: new Clock({config, signal, genesisTime: Math.floor(Date.now() / 1000)}), - // chainEvents: new ChainEventEmitter(), - // signal, - // metrics: null, - // logger, - // }); - network = { - sendBeaconBlocksByRange: vi.fn(), - sendBlobSidecarsByRange: vi.fn(), - // sendDataColumnSidecarsByRange: vi.fn(), - } as unknown as INetwork; + describe("cacheByRangeResponses", () => { + it("should cache blocks only when no data sidecars present"); + it("should cache blocks with blob sidecars"); + it("should cache blocks with column sidecars"); + it("should add blocks to existing batch blocks"); + it("should add blob sidecars to existing batch blocks"); + it("should add column sidecars to existing batch blocks"); + it("should create new block input when block doesn't exist in batch"); + it("should create new block input from blob sidecars when block doesn't exist"); + it("should create new block input from column sidecars when block doesn't exist"); + it("should throw error when block input type mismatches for blobs"); + it("should throw error when block input type mismatches for columns"); + it("should handle wrong chain error for blocks in finalized sync"); + it("should handle wrong chain error for blobs in finalized sync"); + it("should handle wrong chain error for columns in finalized sync"); + it("should not report peer for wrong chain in non-finalized sync"); + it("should maintain slot ordering in returned block inputs"); + it("should handle empty responses gracefully"); + it("should handle duplicate blocks with throwOnDuplicateAdd false"); + it("should handle duplicate blobs with throwOnDuplicateAdd false"); + it("should handle duplicate columns with throwOnDuplicateAdd false"); }); - // describe("downloadAndCacheByRange", () => {}); - // describe("downloadByRange", () => {}); - describe("requestByRange", () => { - it("should make block requests", async () => { - 
(network.sendBeaconBlocksByRange as Mock).mockResolvedValueOnce(networkResponse.blocks); - const response = await requestByRange({ - network, - peerIdStr, - blocksRequest: requests.blocksRequest, - }); - expect(network.sendBeaconBlocksByRange).toHaveBeenCalledWith(peerIdStr, requests.blocksRequest); - expect(response.blocks).toEqual(expected.blocks); - }); - - it("should make blob requests", async () => { - (network.sendBlobSidecarsByRange as Mock).mockResolvedValueOnce(networkResponse.blobSidecars); - const response = await requestByRange({ - network, - peerIdStr, - blobsRequest: requests.blobsRequest, - }); - expect(network.sendBlobSidecarsByRange).toHaveBeenCalledWith(peerIdStr, requests.blobsRequest); - expect(response.blobSidecars).toEqual(expected.blobSidecars); - }); - - // it("should make column requests", async () => { - // const response = await requestByRange({ - // network, - // peerIdStr, - // columnsRequest: requests.columnsRequest, - // }); - // expect(network.sendColumnSidecarsByRange).toHaveBeenCalledWith(peerIdStr, requests.columnsRequest); - // expect(response.columnSidecars).toBe(expected.columnSidecars); - // }); - - it("should make concurrent block/blob/column requests from the same peer", async () => { - (network.sendBeaconBlocksByRange as Mock).mockResolvedValueOnce(networkResponse.blocks); - (network.sendBlobSidecarsByRange as Mock).mockResolvedValueOnce(networkResponse.blobSidecars); - const response = await requestByRange({ - network, - peerIdStr, - blocksRequest: requests.blocksRequest, - blobsRequest: requests.blobsRequest, - // columnsRequest: requests.columnsRequest, - }); - expect(network.sendBeaconBlocksByRange).toHaveBeenCalledWith(peerIdStr, requests.blocksRequest); - expect(network.sendBlobSidecarsByRange).toHaveBeenCalledWith(peerIdStr, requests.blobsRequest); - // expect(network.sendColumnSidecarsByRange).toHaveBeenCalledWith(peerIdStr, requests.columnsRequest); - expect(response.blocks).toEqual(expected.blocks); - expect(response.blobSidecars).toEqual(expected.blobSidecars); - // expect(response.columnSidecars).toBe(expected.columnSidecars); - }); - - it("should throw if one of the calls fails", async () => { - (network.sendBeaconBlocksByRange as Mock).mockResolvedValueOnce(networkResponse.blocks); - const rejectionError = new Error("TEST_ERROR_MESSAGE"); - (network.sendBlobSidecarsByRange as Mock).mockRejectedValueOnce(rejectionError); - try { - await requestByRange({ - network, - peerIdStr, - blocksRequest: requests.blocksRequest, - blobsRequest: requests.blobsRequest, - // columnsRequest: requests.columnsRequest, - }); - expect.fail("Did not fail as expected"); - } catch (e) { - expect(e).toBe(rejectionError); - } finally { - expect(network.sendBeaconBlocksByRange).toHaveBeenCalledWith(peerIdStr, requests.blocksRequest); - expect(network.sendBlobSidecarsByRange).toHaveBeenCalledWith(peerIdStr, requests.blobsRequest); - } - }); + describe("downloadByRange", () => { + it("should download and validate blocks only"); + it("should download and validate blocks with blobs"); + it("should download and validate blocks with columns"); + it("should download blocks, blobs and columns concurrently"); + it("should use cached batch blocks for data validation when no blocks request"); + it("should throw REQ_RESP_ERROR when network request fails"); + it("should handle empty responses from network"); + it("should validate responses before returning"); + it("should pass through validation errors"); + it("should log verbose error before throwing"); }); - 
describe("validateBlockByRangeRequest", () => { - const block1 = ssz.capella.SignedBeaconBlock.defaultValue(); - block1.message.slot = slots.capella; - const block2 = ssz.capella.SignedBeaconBlock.defaultValue(); - block2.message.slot = slots.capella + 1; - block2.message.parentRoot = config.getForkTypes(block1.message.slot).BeaconBlock.hashTreeRoot(block1.message); - const block3 = ssz.capella.SignedBeaconBlock.defaultValue(); - block3.message.slot = slots.capella + 2; - block3.message.parentRoot = config.getForkTypes(block2.message.slot).BeaconBlock.hashTreeRoot(block2.message); - const block4 = ssz.capella.SignedBeaconBlock.defaultValue(); - block4.message.slot = slots.capella + 3; - block4.message.parentRoot = config.getForkTypes(block3.message.slot).BeaconBlock.hashTreeRoot(block3.message); - const block5 = ssz.capella.SignedBeaconBlock.defaultValue(); - block5.message.slot = slots.capella + 4; - block5.message.parentRoot = config.getForkTypes(block4.message.slot).BeaconBlock.hashTreeRoot(block4.message); - - it("should correctly match request with response", () => { - const blockRoots = validateBlockByRangeResponse( - config, - { - startSlot: slots.capella, - count: 5, - step: 1, - }, - [block1, block2, block3, block4, block5] - ); - expect(blockRoots).toBeInstanceOf(Array); - expect(blockRoots.length).toEqual(5); - }); - it("should throw if there are duplicates within the given range", () => { - expect(() => - validateBlockByRangeResponse( - config, - { - startSlot: slots.capella, - count: 4, - step: 1, - }, - [block1, block2, block3, block4, block4] - ) - ).toThrow(DownloadByRangeError); - }); - - it("should throw if more blocks than were requested", () => { - expect(() => - validateBlockByRangeResponse( - config, - { - startSlot: slots.capella, - count: 4, - step: 1, - }, - [block1, block2, block3, block4, block5] - ) - ).toThrow(DownloadByRangeError); - }); + describe("requestByRange", () => { + it("should make block requests"); + it("should make blob requests"); + it("should make column requests"); + it("should make concurrent block/blob/column requests from the same peer"); + it("should handle undefined responses properly"); + it("should throw if one of the concurrent requests fails"); + it("should not make requests for undefined request parameters"); + it("should return empty object when no requests provided"); + it("should handle network timeout errors"); + it("should preserve response order for concurrent requests"); + }); - it("should throw if blocks are returned out of order", () => { - expect(() => - validateBlockByRangeResponse( - config, - { - startSlot: slots.capella, - count: 5, - step: 1, - }, - [block1, block3, block2, block4, block5] - ) - ).toThrow(DownloadByRangeError); - }); + describe("validateResponses", () => { + it("should validate blocks when blocksRequest provided"); + it("should validate blobs when blobsRequest provided with blocks"); + it("should validate columns when columnsRequest provided with blocks"); + it("should use batchBlocks for data validation when no blocksRequest"); + it("should throw MISSING_BLOCKS when data request but no blocks available"); + it("should throw MISSING_BLOBS_RESPONSE when blobsRequest but no blobSidecars"); + it("should throw MISSING_COLUMNS_RESPONSE when columnsRequest but no columnSidecars"); + it("should return empty responses when no requests provided"); + it("should validate blocks before validating data sidecars"); + it("should use validated blocks for data validation when both downloaded"); + it("should handle mixed 
cached and downloaded blocks for validation"); + it("should validate slot ranges match between blocks and data requests"); }); - describe("compareBlobsByRangeRequestAndResponse", () => { - const expectedBlocks = expected.blocks as SignedBeaconBlock[]; - const expectedBlobSidecars = expected.blobSidecars as deneb.BlobSidecars; - it("should not throw when all blobs are present in response", () => { - expect(() => validateBlobsByRangeResponse(expectedBlocks, expectedBlobSidecars)).not.toThrow(); - }); + describe("validateBlockByRangeResponse", () => { + it("should accept valid chain of blocks"); + it("should accept empty response during chain liveness issues"); + it("should throw EXTRA_BLOCKS when more blocks than requested count"); + it("should throw OUT_OF_RANGE_BLOCKS when block slot before startSlot"); + it("should throw OUT_OF_RANGE_BLOCKS when block slot after lastValidSlot"); + it("should throw OUT_OF_ORDER_BLOCKS when blocks not in ascending slot order"); + it("should allow skip slots in block chain"); + it("should validate parent root matches previous block root"); + it("should throw PARENT_ROOT_MISMATCH when chain broken"); + it("should handle single block response"); + it("should handle maximum count blocks"); + it("should compute block roots correctly for each fork"); + it("should validate blocks at fork boundaries"); + it("should handle blocks with same slot (reorgs)"); + }); - it("should throw when blobs are missing from response", () => { - expect(() => validateBlobsByRangeResponse(expectedBlocks, expectedBlobSidecars.slice(0, -4))).toThrow( - DownloadByRangeError - ); - }); + describe("validateBlobsByRangeResponse", () => { + it("should accept valid blob sidecars matching blocks"); + it("should throw EXTRA_BLOBS when more blobs than expected"); + it("should throw MISSING_BLOBS when fewer blobs than expected"); + it("should validate blob count matches block kzg commitments"); + it("should skip blocks with zero kzg commitments"); + it("should validate blobs in consecutive (slot, index) order"); + it("should validate blob indices are sequential within block"); + it("should validate all blobs for a block are included"); + it("should call validateBlockBlobSidecars for each block with blobs"); + it("should handle blocks with different blob counts"); + it("should validate blobs across multiple blocks"); + it("should return validated blob sidecars grouped by block"); + it("should handle maximum blob count per block"); + it("should validate blob sidecars in parallel"); + it("should propagate validation errors from validateBlockBlobSidecars"); + }); - it("should throw when extra blobs are in response", () => { - expect(() => - validateBlobsByRangeResponse(expectedBlocks.slice(0, 1), expectedBlobSidecars.concat(expectedBlobSidecars)) - ).toThrow(DownloadByRangeError); - }); + describe("validateColumnsByRangeResponse", () => { + it("should accept valid column sidecars matching blocks"); + it("should throw EXTRA_COLUMNS when more columns than expected"); + it("should throw MISSING_COLUMNS when fewer columns than expected"); + it("should validate column count matches requested columns times blocks with commitments"); + it("should skip blocks with zero kzg commitments"); + it("should validate columns in consecutive (slot, index) order"); + it("should validate all requested column indices present for each block"); + it("should validate column indices match requested columns array"); + it("should validate columns are in order within each block"); + it("should throw MISSING_COLUMNS when columns 
not in correct order"); + it("should call validateBlockDataColumnSidecars for each block with columns"); + it("should handle blocks with different commitment counts"); + it("should validate columns across multiple blocks"); + it("should return validated column sidecars grouped by block"); + it("should handle partial column requests (subset of indices)"); + it("should validate column sidecars in parallel"); + it("should propagate validation errors from validateBlockDataColumnSidecars"); + }); - it("should throw when blobs are not in order", () => { - const blobSidecars = expectedBlobSidecars.slice().reverse(); - expect(() => validateBlobsByRangeResponse(expectedBlocks, blobSidecars)).toThrow(DownloadByRangeError); - }); + describe("getBlocksForDataValidation", () => { + it("should return blocks within requested slot range"); + it("should filter out blocks before startSlot"); + it("should filter out blocks at or after endSlot"); + it("should combine cached and current blocks in order"); + it("should maintain ascending slot order"); + it("should skip duplicate slots keeping first occurrence"); + it("should handle undefined cached blocks"); + it("should handle undefined current blocks"); + it("should handle both cached and current undefined"); + it("should return empty array when no blocks in range"); + it("should convert cached IBlockInput to ValidatedBlock format"); + it("should preserve block roots from cached blocks"); + it("should handle overlapping slot ranges between cached and current"); + it("should validate cached blocks are before current blocks"); + it("should handle gaps in slot sequence"); }); - describe("validateResponse", () => { - it("should throw if there are no blocks to validate", () => {}); - it("should throw for responses missing block from requested slots", () => {}); - it("should throw for extra block from slots that were not requested", () => {}); - it("should throw for duplicate blocks from requested slots", () => {}); - it("should throw if there are no blobs but there was a blobsRequest", () => {}); - it("should throw for missing blobs in slots that were requested", () => {}); + describe("Error handling", () => { + it("should build correct slot range string for blocks request"); + it("should build correct slot range string for blobs request"); + it("should build correct slot range string for columns request"); + it("should handle missing request parameters in slot range string"); + it("should create DownloadByRangeError with correct error codes"); + it("should preserve error context in DownloadByRangeError"); + it("should handle network errors appropriately"); + it("should handle validation errors appropriately"); + it("should handle cache errors appropriately"); + }); - it("should throw if there are no columns but there was a columnsRequest", () => {}); + describe("Integration scenarios", () => { + it("should handle full download and cache flow for blocks only"); + it("should handle full download and cache flow for blocks with blobs"); + it("should handle full download and cache flow for blocks with columns"); + it("should handle partial responses within valid range"); + it("should handle peer disconnection during download"); + it("should handle fork transition during range download"); + it("should handle reorg detection via parent root mismatch"); + it("should handle maximum request size limits"); + it("should handle minimum request size (count=1)"); + it("should handle skip slots in epoch boundaries"); + it("should handle genesis slot edge cases"); + it("should 
handle far future slot requests"); }); - // describe("compareColumnsByRangeRequestAndResponse", () => {}); - // describe("compareByRangeRequestsToResponse", () => {}); }); From de11b44dfee752b4315c70d49e338ed167c24e7b Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Wed, 3 Sep 2025 21:37:32 +0700 Subject: [PATCH 106/173] fix: sort ascending --- packages/beacon-node/src/sync/range/batch.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/beacon-node/src/sync/range/batch.ts b/packages/beacon-node/src/sync/range/batch.ts index 03ced70290c0..440f795a86a9 100644 --- a/packages/beacon-node/src/sync/range/batch.ts +++ b/packages/beacon-node/src/sync/range/batch.ts @@ -248,7 +248,7 @@ export class Batch { } // ensure that blocks are always sorted before getting stored on the batch.state or being used to getRequests - blocks.sort((a, b) => b.slot - a.slot); + blocks.sort((a, b) => a.slot - b.slot); this.goodPeers.push(peer); From 4d2ad9b5955e4955080895fad22f2df1faa96eca Mon Sep 17 00:00:00 2001 From: Cayman Date: Wed, 3 Sep 2025 11:06:54 -0400 Subject: [PATCH 107/173] chore: downscore peers appropriately --- packages/beacon-node/src/sync/range/chain.ts | 38 ++- packages/beacon-node/src/sync/range/range.ts | 11 +- .../src/sync/utils/downloadByRange.ts | 257 +++++------------- 3 files changed, 111 insertions(+), 195 deletions(-) diff --git a/packages/beacon-node/src/sync/range/chain.ts b/packages/beacon-node/src/sync/range/chain.ts index 13a88229c8a5..22d65e1c4596 100644 --- a/packages/beacon-node/src/sync/range/chain.ts +++ b/packages/beacon-node/src/sync/range/chain.ts @@ -1,8 +1,11 @@ import {ChainForkConfig} from "@lodestar/config"; import {Epoch, Root, Slot} from "@lodestar/types"; -import {ErrorAborted, Logger, toRootHex} from "@lodestar/utils"; +import {ErrorAborted, LodestarError, Logger, toRootHex} from "@lodestar/utils"; import {isBlockInputBlobs, isBlockInputColumns} from "../../chain/blocks/blockInput/blockInput.js"; +import {BlockInputErrorCode} from "../../chain/blocks/blockInput/errors.js"; import {IBlockInput} from "../../chain/blocks/blockInput/types.js"; +import {BlobSidecarErrorCode} from "../../chain/errors/blobSidecarError.js"; +import {DataColumnSidecarErrorCode} from "../../chain/errors/dataColumnSidecarError.js"; import {Metrics} from "../../metrics/metrics.js"; import {PeerAction, prettyPrintPeerIdStr} from "../../network/index.js"; import {PeerSyncMeta} from "../../network/peers/peersData.js"; @@ -11,6 +14,7 @@ import {ItTrigger} from "../../util/itTrigger.js"; import {PeerIdStr} from "../../util/peerId.js"; import {wrapError} from "../../util/wrapError.js"; import {BATCH_BUFFER_SIZE, EPOCHS_PER_BATCH, MAX_LOOK_AHEAD_EPOCHS} from "../constants.js"; +import {DownloadByRangeErrorCode} from "../utils/downloadByRange.js"; import {RangeSyncType} from "../utils/remoteSyncType.js"; import {Batch, BatchError, BatchErrorCode, BatchMetadata, BatchStatus} from "./batch.js"; import { @@ -447,6 +451,38 @@ export class SyncChain { const res = await wrapError(this.downloadByRange(peer, batch, this.syncType)); if (res.err) { + // There's several known error cases where we want to take action on the peer + const errCode = (res.err as LodestarError<{code: string}>).type?.code; + if (this.syncType === RangeSyncType.Finalized) { + // For finalized sync, we are stricter with peers as there is no ambiguity about which chain we're syncing. + // The below cases indicate the peer may be on a different chain, so are not penalized during head sync. 
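// A sketch of the policy described above as a single predicate, using only the
// error-code enums imported in this patch. This is a hypothetical helper for
// illustration; the change itself inlines the same checks as the two switch
// statements that follow.
//
//   function shouldDownscorePeer(errCode: string | undefined, finalizedSync: boolean): boolean {
//     if (errCode === undefined) return false;
//     // Signals the peer may simply be on a different chain; only actionable
//     // during finalized sync, where the target chain is unambiguous
//     const wrongChainCodes: string[] = [
//       BlockInputErrorCode.MISMATCHED_ROOT_HEX,
//       DownloadByRangeErrorCode.MISSING_BLOBS,
//       DownloadByRangeErrorCode.EXTRA_BLOBS,
//       DownloadByRangeErrorCode.MISSING_COLUMNS,
//       DownloadByRangeErrorCode.EXTRA_COLUMNS,
//       BlobSidecarErrorCode.INCORRECT_SIDECAR_COUNT,
//       BlobSidecarErrorCode.INCORRECT_BLOCK,
//       DataColumnSidecarErrorCode.INCORRECT_SIDECAR_COUNT,
//       DataColumnSidecarErrorCode.INCORRECT_BLOCK,
//     ];
//     // Provably-invalid responses; penalized regardless of sync type
//     const alwaysPenalizedCodes: string[] = [
//       DownloadByRangeErrorCode.EXTRA_BLOCKS,
//       DownloadByRangeErrorCode.OUT_OF_ORDER_BLOCKS,
//       DownloadByRangeErrorCode.OUT_OF_RANGE_BLOCKS,
//       DownloadByRangeErrorCode.PARENT_ROOT_MISMATCH,
//       BlobSidecarErrorCode.INCORRECT_INDEX,
//       BlobSidecarErrorCode.INCLUSION_PROOF_INVALID,
//       BlobSidecarErrorCode.INVALID_KZG_PROOF_BATCH,
//       DataColumnSidecarErrorCode.INCORRECT_KZG_COMMITMENTS_COUNT,
//       DataColumnSidecarErrorCode.INCORRECT_KZG_PROOF_COUNT,
//       DataColumnSidecarErrorCode.INVALID_KZG_PROOF_BATCH,
//       DataColumnSidecarErrorCode.INCLUSION_PROOF_INVALID,
//     ];
//     return alwaysPenalizedCodes.includes(errCode) || (finalizedSync && wrongChainCodes.includes(errCode));
//   }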
+ switch (errCode) { + case BlockInputErrorCode.MISMATCHED_ROOT_HEX: + case DownloadByRangeErrorCode.MISSING_BLOBS: + case DownloadByRangeErrorCode.EXTRA_BLOBS: + case DownloadByRangeErrorCode.MISSING_COLUMNS: + case DownloadByRangeErrorCode.EXTRA_COLUMNS: + case BlobSidecarErrorCode.INCORRECT_SIDECAR_COUNT: + case BlobSidecarErrorCode.INCORRECT_BLOCK: + case DataColumnSidecarErrorCode.INCORRECT_SIDECAR_COUNT: + case DataColumnSidecarErrorCode.INCORRECT_BLOCK: + this.reportPeer(peer.peerId, PeerAction.LowToleranceError, res.err.message); + } + } + switch (errCode) { + case DownloadByRangeErrorCode.EXTRA_BLOCKS: + case DownloadByRangeErrorCode.OUT_OF_ORDER_BLOCKS: + case DownloadByRangeErrorCode.OUT_OF_RANGE_BLOCKS: + case DownloadByRangeErrorCode.PARENT_ROOT_MISMATCH: + case BlobSidecarErrorCode.INCORRECT_INDEX: + case BlobSidecarErrorCode.INCLUSION_PROOF_INVALID: + case BlobSidecarErrorCode.INVALID_KZG_PROOF_BATCH: + case DataColumnSidecarErrorCode.INCORRECT_KZG_COMMITMENTS_COUNT: + case DataColumnSidecarErrorCode.INCORRECT_KZG_PROOF_COUNT: + case DataColumnSidecarErrorCode.INVALID_KZG_PROOF_BATCH: + case DataColumnSidecarErrorCode.INCLUSION_PROOF_INVALID: + this.reportPeer(peer.peerId, PeerAction.LowToleranceError, res.err.message); + } this.logger.verbose( "Batch download error", {id: this.logId, ...batch.getMetadata(), peer: prettyPrintPeerIdStr(peer.peerId)}, diff --git a/packages/beacon-node/src/sync/range/range.ts b/packages/beacon-node/src/sync/range/range.ts index 9ff2459ba1a6..e1ce84247b7a 100644 --- a/packages/beacon-node/src/sync/range/range.ts +++ b/packages/beacon-node/src/sync/range/range.ts @@ -200,24 +200,21 @@ export class RangeSync extends (EventEmitter as {new (): RangeSyncEmitter}) { } }; - private downloadByRange: SyncChainFns["downloadByRange"] = async (peer, batch, syncType) => { + private downloadByRange: SyncChainFns["downloadByRange"] = async (peer, batch) => { + const batchBlocks = batch.getBlocks(); const responses = await downloadByRange({ config: this.config, network: this.network, logger: this.logger, peerIdStr: peer.peerId, - daOutOfRange: isDaOutOfRange(this.config, batch.forkName, batch.startSlot, this.chain.clock.currentEpoch), - batchBlocks: batch.getBlocks(), + batchBlocks, ...batch.requests, }); const cached = cacheByRangeResponses({ - config: this.config, - network: this.network, cache: this.chain.seenBlockInputCache, - syncType, peerIdStr: peer.peerId, responses, - batchBlocks: batch.getBlocks(), + batchBlocks, }); return cached; }; diff --git a/packages/beacon-node/src/sync/utils/downloadByRange.ts b/packages/beacon-node/src/sync/utils/downloadByRange.ts index a22497fa166a..1f6213d675cf 100644 --- a/packages/beacon-node/src/sync/utils/downloadByRange.ts +++ b/packages/beacon-node/src/sync/utils/downloadByRange.ts @@ -12,10 +12,9 @@ import { import {SeenBlockInput} from "../../chain/seenCache/seenGossipBlockInput.js"; import {validateBlockBlobSidecars} from "../../chain/validation/blobSidecar.js"; import {validateBlockDataColumnSidecars} from "../../chain/validation/dataColumnSidecar.js"; -import {INetwork, PeerAction} from "../../network/index.js"; +import {INetwork} from "../../network/index.js"; import {PeerIdStr} from "../../util/peerId.js"; import {DownloadByRootErrorCode} from "./downloadByRoot.js"; -import {RangeSyncType} from "./remoteSyncType.js"; /** * WRITE_OPTIMIZATIONS_AND_HEURISTIC_SUGGESTIONS_HERE @@ -83,43 +82,6 @@ import {RangeSyncType} from "./remoteSyncType.js"; * - Cache fork type lookup instead of calling config.getForkTypes 
repeatedly */ -/** - * - * blocks - - * - * blobs - * - check that expected sidecar count matches the returned count - * - slots are in ascending order - * - allows for skip slots in validation - * - indices are in ascending order - * - check that the number of blobCount for a slot matches block.message.body.blobKzgCommitments.length - * - check that blobSidecar.kzgCommitment matches block.message.body.blobKzgCommitments[blobSidecar.index] - * - hashTreeRoot(block.message) equals the hashTreeRoot(blobSidecar.signedBlockHeader.message) - * - verify_blob_sidecar, verify_kzg_inclusion_proof, verify_kzg_proof (spec verification) - * - * - * Clients MUST respond with at least the blob sidecars of the first blob-carrying block that exists - * in the range, if they have it, and no more than MAX_REQUEST_BLOB_SIDECARS sidecars. - * - * Clients MUST include all blob sidecars of each block from which they include blob sidecars. - * - * The following blob sidecars, where they exist, MUST be sent in consecutive (slot, index) order. - * - * - * - * - * - * - * - * columns - * - check that expected sidecar count matches the returned count (discount slots with 0 blobKzgCommitment.length) - * - slots are in ascending order - * - indices are in ascending order - * - check that blobCount = 0 in a slot (come back to this) - * - verify_blob_sidecar, verify_kzg_inclusion_proof, verify_kzg_proof - */ - export type DownloadByRangeRequests = { blocksRequest?: phase0.BeaconBlocksByRangeRequest; blobsRequest?: deneb.BlobSidecarsByRangeRequest; @@ -138,7 +100,6 @@ export type DownloadAndCacheByRangeProps = DownloadByRangeRequests & { network: INetwork; logger: Logger; peerIdStr: string; - daOutOfRange: boolean; batchBlocks?: IBlockInput[]; }; @@ -150,11 +111,8 @@ export type DownloadAndCacheByRangeResults = { }; export type CacheByRangeResponsesProps = { - config: ChainForkConfig; - network: INetwork; cache: SeenBlockInput; - syncType: RangeSyncType; - peerIdStr: PeerIdStr; + peerIdStr: string; responses: ValidatedResponses; batchBlocks: IBlockInput[]; }; @@ -180,10 +138,11 @@ export type ValidatedResponses = { validatedColumnSidecars?: ValidatedColumnSidecars[]; }; +/** + * Given existing cached batch block inputs and newly validated responses, update the cache with the new data + */ export function cacheByRangeResponses({ - network, cache, - syncType, peerIdStr, responses, batchBlocks, @@ -199,25 +158,18 @@ export function cacheByRangeResponses({ const existing = updatedBatchBlocks.get(block.message.slot); if (existing) { - try { - // will throw if root hex does not match (meaning we are following the wrong chain) - existing.addBlock( - { - block, - blockRootHex, - source, - peerIdStr, - seenTimestampSec, - }, - {throwOnDuplicateAdd: false} - ); - } catch (err) { - network.logger.debug("Following wrong chain for ByRange request", {}, err as Error); - if (syncType === RangeSyncType.Finalized) { - network.reportPeer(peerIdStr, PeerAction.LowToleranceError, "Missing or mismatching dataColumnSidecars"); - } - break; - } + // In practice this code block shouldn't be reached because we shouldn't be refetching a block we already have, see Batch#getRequests. 
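// Sketch of the assumed cache semantics for the calls below: with
// {throwOnDuplicateAdd: false} an add behaves like an idempotent upsert keyed
// by block root. Re-adding data already held for this root is a no-op rather
// than an error, while data carrying a mismatched root for the same slot still
// throws, which is what surfaces a wrong-chain response to the caller.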
+      // Will throw if root hex does not match (meaning we are following the wrong chain)
+      existing.addBlock(
+        {
+          block,
+          blockRootHex,
+          source,
+          peerIdStr,
+          seenTimestampSec,
+        },
+        {throwOnDuplicateAdd: false}
+      );
     } else {
       const blockInput = cache.getByBlock({
         block,
@@ -233,49 +185,32 @@
   for (const {blockRoot, blobSidecars} of responses.validatedBlobSidecars ?? []) {
     const existing = updatedBatchBlocks.get(blobSidecars[0].signedBlockHeader.message.slot);
     const blockRootHex = toRootHex(blockRoot);
-    if (existing) {
-      if (!isBlockInputBlobs(existing)) {
-        throw new DownloadByRangeError({
-          code: DownloadByRangeErrorCode.MISMATCH_BLOCK_INPUT_TYPE,
-          cachedType: existing.type,
-          expectedType: DAType.Blobs,
-          slot: existing.slot,
-          blockRoot: prettyBytes(existing.blockRootHex),
-        });
-      }
-      try {
-        for (const blobSidecar of blobSidecars) {
-          // will throw if root hex does not match (meaning we are following the wrong chain)
-          existing.addBlob(
-            {
-              blobSidecar,
-              blockRootHex,
-              seenTimestampSec,
-              peerIdStr,
-              source,
-            },
-            {throwOnDuplicateAdd: false}
-          );
-        }
-      } catch (err) {
-        network.logger.debug("Following wrong chain for ByRange request", {}, err as Error);
-        if (syncType === RangeSyncType.Finalized) {
-          network.reportPeer(peerIdStr, PeerAction.LowToleranceError, "Missing or mismatching dataColumnSidecars");
-        }
-        break;
-      }
-    } else {
-      let blockInput!: IBlockInput;
-      for (const blobSidecar of blobSidecars) {
-        blockInput = cache.getByBlob({
-          blockRootHex,
+
+    if (!existing) {
+      throw new Error("Coding error: blockInput must exist when adding blobs");
+    }
+
+    if (!isBlockInputBlobs(existing)) {
+      throw new DownloadByRangeError({
+        code: DownloadByRangeErrorCode.MISMATCH_BLOCK_INPUT_TYPE,
+        slot: existing.slot,
+        blockRoot: prettyBytes(existing.blockRootHex),
+        expected: DAType.Blobs,
+        actual: existing.type,
+      });
+    }
+    for (const blobSidecar of blobSidecars) {
+      // will throw if root hex does not match (meaning we are following the wrong chain)
+      existing.addBlob(
+        {
           blobSidecar,
-          source,
-          peerIdStr,
+          blockRootHex,
           seenTimestampSec,
-        });
-      }
-      updatedBatchBlocks.set(blockInput.slot, blockInput);
+          peerIdStr,
+          source,
+        },
+        {throwOnDuplicateAdd: false}
+      );
     }
   }
@@ -283,49 +218,31 @@
     const existing = updatedBatchBlocks.get(columnSidecars[0].signedBlockHeader.message.slot);
     const blockRootHex = toRootHex(blockRoot);
 
-    if (existing) {
-      if (!isBlockInputColumns(existing)) {
-        throw new DownloadByRangeError({
-          code: DownloadByRangeErrorCode.MISMATCH_BLOCK_INPUT_TYPE,
-          cachedType: existing.type,
-          expectedType: DAType.Columns,
-          slot: existing.slot,
-          blockRoot: prettyBytes(existing.blockRootHex),
-        });
-      }
-      try {
-        for (const columnSidecar of columnSidecars) {
-          // will throw if root hex does not match (meaning we are following the wrong chain)
-          existing.addColumn(
-            {
-              columnSidecar,
-              blockRootHex,
-              seenTimestampSec,
-              peerIdStr,
-              source,
-            },
-            {throwOnDuplicateAdd: false}
-          );
-        }
-      } catch (err) {
-        network.logger.debug("Following wrong chain for ByRange request", {}, err as Error);
-        if (syncType === RangeSyncType.Finalized) {
-          network.reportPeer(peerIdStr, PeerAction.LowToleranceError, "Missing or mismatching dataColumnSidecars");
-        }
-        break;
-      }
-    } else {
-      let blockInput!: IBlockInput;
-      for (const columnSidecar of columnSidecars) {
-        blockInput = cache.getByColumn({
-          blockRootHex,
+    if (!existing) {
+      throw new Error("Coding error: blockInput must exist when adding columns"); 
+ } + + if (!isBlockInputColumns(existing)) { + throw new DownloadByRangeError({ + code: DownloadByRangeErrorCode.MISMATCH_BLOCK_INPUT_TYPE, + slot: existing.slot, + blockRoot: prettyBytes(existing.blockRootHex), + expected: DAType.Columns, + actual: existing.type, + }); + } + for (const columnSidecar of columnSidecars) { + // will throw if root hex does not match (meaning we are following the wrong chain) + existing.addColumn( + { columnSidecar, + blockRootHex, + seenTimestampSec, peerIdStr, source, - seenTimestampSec, - }); - } - updatedBatchBlocks.set(blockInput.slot, blockInput); + }, + {throwOnDuplicateAdd: false} + ); } } @@ -654,7 +571,7 @@ export async function validateBlobsByRangeResponse( } // Await all sidecar validations in parallel - return await Promise.all(validateSidecarsPromises); + return Promise.all(validateSidecarsPromises); } /** @@ -738,7 +655,7 @@ export async function validateColumnsByRangeResponse( } // Await all sidecar validations in parallel - return await Promise.all(validateSidecarsPromises); + return Promise.all(validateSidecarsPromises); } /** @@ -791,16 +708,8 @@ function buildSlotRangeString({blocksRequest, blobsRequest, columnsRequest}: Dow export enum DownloadByRangeErrorCode { MISSING_BLOCKS = "DOWNLOAD_BY_RANGE_ERROR_MISSING_BLOCKS", - MISSING_BLOCKS_REQUEST = "DOWNLOAD_BY_RANGE_ERROR_MISSING_BLOCKS_REQUEST", - MISSING_BLOCKS_RESPONSE = "DOWNLOAD_BY_RANGE_ERROR_MISSING_BLOCKS_RESPONSE", - MISSING_BLOBS_REQUEST = "DOWNLOAD_BY_RANGE_ERROR_MISSING_BLOBS_REQUEST", - MISSING_COLUMNS_REQUEST = "DOWNLOAD_BY_RANGE_ERROR_MISSING_COLUMNS_REQUEST", MISSING_BLOBS_RESPONSE = "DOWNLOAD_BY_RANGE_ERROR_MISSING_BLOBS_RESPONSE", MISSING_COLUMNS_RESPONSE = "DOWNLOAD_BY_RANGE_ERROR_MISSING_COLUMNS_RESPONSE", - INVALID_DATA_REQUEST = "DOWNLOAD_BY_RANGE_ERROR_INVALID_DATA_REQUEST", - MISSING_DATA_REQUEST = "DOWNLOAD_BY_RANGE_ERROR_MISSING_DATA_REQUEST", - START_SLOT_MISMATCH = "DOWNLOAD_BY_RANGE_ERROR_START_SLOT_MISMATCH", - COUNT_MISMATCH = "DOWNLOAD_BY_RANGE_ERROR_COUNT_MISMATCH", /** Error at the reqresp layer */ REQ_RESP_ERROR = "DOWNLOAD_BY_RANGE_ERROR_REQ_RESP_ERROR", @@ -818,9 +727,8 @@ export enum DownloadByRangeErrorCode { MISSING_COLUMNS = "DOWNLOAD_BY_RANGE_ERROR_MISSING_COLUMNS", EXTRA_COLUMNS = "DOWNLOAD_BY_RANGE_ERROR_EXTRA_COLUMNS", - PEER_CUSTODY_FAILURE = "DOWNLOAD_BY_RANGE_ERROR_PEER_CUSTODY_FAILURE", - CACHING_ERROR = "DOWNLOAD_BY_RANGE_CACHING_ERROR", - MISMATCH_BLOCK_INPUT_TYPE = "DOWNLOAD_BY_RANGE_MISMATCH_BLOCK_INPUT_TYPE", + /** Cached block input type mismatches new data */ + MISMATCH_BLOCK_INPUT_TYPE = "DOWNLOAD_BY_RANGE_ERROR_MISMATCH_BLOCK_INPUT_TYPE", } export type DownloadByRangeErrorType = @@ -833,11 +741,6 @@ export type DownloadByRangeErrorType = | DownloadByRangeErrorCode.MISSING_BLOCKS | DownloadByRangeErrorCode.MISSING_BLOBS_RESPONSE | DownloadByRangeErrorCode.MISSING_COLUMNS_RESPONSE; - // | DownloadByRangeErrorCode.MISSING_BLOCKS_REQUEST - // | DownloadByRangeErrorCode.MISSING_BLOBS_REQUEST - // | DownloadByRangeErrorCode.MISSING_COLUMNS_REQUEST - // | DownloadByRangeErrorCode.INVALID_DATA_REQUEST - // | DownloadByRangeErrorCode.MISSING_DATA_REQUEST; slotRange: string; } | { @@ -845,16 +748,6 @@ export type DownloadByRangeErrorType = peer: string; expectedCount: number; } - | { - code: DownloadByRangeErrorCode.START_SLOT_MISMATCH; - blockStartSlot: number; - dataStartSlot: number; - } - | { - code: DownloadByRangeErrorCode.COUNT_MISMATCH; - blockCount: number; - dataCount: number; - } | { code: 
DownloadByRangeErrorCode.OUT_OF_RANGE_BLOCKS; slot: number; @@ -867,11 +760,6 @@ export type DownloadByRangeErrorType = peerId: string; slotRange: string; } - | { - code: DownloadByRangeErrorCode.CACHING_ERROR; - peerId: string; - message: string; - } | { code: DownloadByRangeErrorCode.PARENT_ROOT_MISMATCH; slot: number; @@ -903,17 +791,12 @@ export type DownloadByRangeErrorType = expected: number; actual: number; } - | { - code: DownloadByRangeErrorCode.PEER_CUSTODY_FAILURE; - peerId: string; - missingColumns: string; - } | { code: DownloadByRangeErrorCode.MISMATCH_BLOCK_INPUT_TYPE; - expectedType: DAType; - cachedType: DAType; - slot: Slot; + slot: number; blockRoot: string; + expected: DAType; + actual: DAType; }; export class DownloadByRangeError extends LodestarError {} From 89b9c7cad5364c26d2e4c64e776f0654c8ffa1ad Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Thu, 4 Sep 2025 01:06:20 +0700 Subject: [PATCH 108/173] fix: debug test/utils/blocksAndData.ts --- .../beacon-node/test/utils/blocksAndData.ts | 21 +++++++++++-------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/packages/beacon-node/test/utils/blocksAndData.ts b/packages/beacon-node/test/utils/blocksAndData.ts index 723775131c42..ed60ca952e5a 100644 --- a/packages/beacon-node/test/utils/blocksAndData.ts +++ b/packages/beacon-node/test/utils/blocksAndData.ts @@ -218,11 +218,10 @@ export function generateChainOfBlocks({ count, }: {forkName: F; count: number}): BlockTestSet[] { let parentRoot = Uint8Array.from(randomBytes(ROOT_SIZE)); - let slot = slots[forkName]; + const startSlot = slots[forkName]; const blocks: BlockTestSet[] = []; - for (; slot < slot + count; slot++) { - const block = generateBeaconBlock({forkName, parentRoot, slot}); - const {blockRoot, rootHex} = generateRoots(forkName, block); + for (let slot = startSlot; slot < startSlot + count; slot++) { + const {block, blockRoot, rootHex} = generateBlock({forkName, parentRoot, slot}); parentRoot = blockRoot; blocks.push({ block, @@ -338,11 +337,15 @@ export type ChainOfBlockMaybeSidecars = F extends For ? BlockWithSidecars[] : BlockTestSet[]; -export function generateChainOfBlockMaybeSidecars( - forkName: F, - count: number, - oomProtection = false -): ChainOfBlockMaybeSidecars { +export function generateChainOfBlockMaybeSidecars({ + forkName, + count, + oomProtection = false, +}: { + forkName: F; + count: number; + oomProtection?: boolean; +}): ChainOfBlockMaybeSidecars { if (isForkPostDeneb(forkName)) { return generateChainOfBlocksWithBlobs({forkName, count, oomProtection}) as ChainOfBlockMaybeSidecars; } From 5ce5bb774ccfffdca5732b6a5945131730f047d1 Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Thu, 4 Sep 2025 01:50:24 +0700 Subject: [PATCH 109/173] test: unit test getBlocksForDataValidation --- .../src/sync/utils/downloadByRange.ts | 10 + .../unit/sync/utils/downloadByRange.test.ts | 489 ++++++++++++------ 2 files changed, 336 insertions(+), 163 deletions(-) diff --git a/packages/beacon-node/src/sync/utils/downloadByRange.ts b/packages/beacon-node/src/sync/utils/downloadByRange.ts index 1f6213d675cf..641039262603 100644 --- a/packages/beacon-node/src/sync/utils/downloadByRange.ts +++ b/packages/beacon-node/src/sync/utils/downloadByRange.ts @@ -390,6 +390,16 @@ export async function validateResponses({ blocksRequest ? 
validatedResponses.validatedBlocks : undefined ); + if (!dataRequestBlocks.length) { + throw new DownloadByRangeError( + { + code: DownloadByRangeErrorCode.MISSING_BLOCKS, + slotRange: buildSlotRangeString({blobsRequest, columnsRequest}), + }, + "No blocks in data request slot range to validate data response against" + ); + } + if (blobsRequest) { if (!blobSidecars) { throw new DownloadByRangeError( diff --git a/packages/beacon-node/test/unit/sync/utils/downloadByRange.test.ts b/packages/beacon-node/test/unit/sync/utils/downloadByRange.test.ts index 0771f44f81db..8101da725c6a 100644 --- a/packages/beacon-node/test/unit/sync/utils/downloadByRange.test.ts +++ b/packages/beacon-node/test/unit/sync/utils/downloadByRange.test.ts @@ -1,16 +1,27 @@ import {ForkName} from "@lodestar/params"; import {SignedBeaconBlock, WithBytes, deneb, fulu, ssz} from "@lodestar/types"; +import {fromHex} from "@lodestar/utils"; import {Mock, beforeAll, beforeEach, describe, expect, it, vi} from "vitest"; +import {BlockInputPreData} from "../../../../src/chain/blocks/blockInput/blockInput.js"; +import {BlockInputSource, IBlockInput} from "../../../../src/chain/blocks/blockInput/types.js"; import {INetwork} from "../../../../src/network/index.js"; import { DownloadByRangeError, DownloadByRangeRequests, DownloadByRangeResponses, + ValidatedBlock, + getBlocksForDataValidation, requestByRange, validateBlobsByRangeResponse, validateBlockByRangeResponse, } from "../../../../src/sync/utils/downloadByRange.js"; -import {config, generateChainOfBlockMaybeSidecars, slots} from "../../../utils/blocksAndData.js"; +import { + config, + generateChainOfBlockMaybeSidecars, + generateChainOfBlocks, + generateChainOfBlocksWithBlobs, + slots, +} from "../../../utils/blocksAndData.js"; /** * Logic errors and gaps identified during test case creation: @@ -32,187 +43,339 @@ import {config, generateChainOfBlockMaybeSidecars, slots} from "../../../utils/b * - validateResponses: batchBlocks parameter can be undefined but not properly handled in all cases */ -describe("downloadByRange", () => { - const peerIdStr = "0x1234567890abcdef"; - // let cache: SeenBlockInputCache; - let network: INetwork; - // const logger = getMockedLogger(); - - const startSlot = slots.deneb; - const count = 32; - let requests!: DownloadByRangeRequests; - let networkResponse!: { - blocks: WithBytes[]; - blobSidecars: deneb.BlobSidecars; - columnSidecars: fulu.DataColumnSidecars; - }; - let expected!: DownloadByRangeResponses; - - beforeAll(() => { - // Test setup code here +// describe("downloadByRange", () => { +// const peerIdStr = "0x1234567890abcdef"; +// // let cache: SeenBlockInputCache; +// let network: INetwork; +// // const logger = getMockedLogger(); + +// const startSlot = slots.deneb; +// const count = 32; +// let requests!: DownloadByRangeRequests; +// let networkResponse!: { +// blocks: WithBytes[]; +// blobSidecars: deneb.BlobSidecars; +// columnSidecars: fulu.DataColumnSidecars; +// }; +// let expected!: DownloadByRangeResponses; + +// beforeAll(() => { +// // Test setup code here +// }); + +// describe("cacheByRangeResponses", () => { +// it("should cache blocks only when no data sidecars present"); +// it("should cache blocks with blob sidecars"); +// it("should cache blocks with column sidecars"); +// it("should add blocks to existing batch blocks"); +// it("should add blob sidecars to existing batch blocks"); +// it("should add column sidecars to existing batch blocks"); +// it("should create new block input when block doesn't exist in batch"); 
+// it("should create new block input from blob sidecars when block doesn't exist"); +// it("should create new block input from column sidecars when block doesn't exist"); +// it("should throw error when block input type mismatches for blobs"); +// it("should throw error when block input type mismatches for columns"); +// it("should handle wrong chain error for blocks in finalized sync"); +// it("should handle wrong chain error for blobs in finalized sync"); +// it("should handle wrong chain error for columns in finalized sync"); +// it("should not report peer for wrong chain in non-finalized sync"); +// it("should maintain slot ordering in returned block inputs"); +// it("should handle empty responses gracefully"); +// it("should handle duplicate blocks with throwOnDuplicateAdd false"); +// it("should handle duplicate blobs with throwOnDuplicateAdd false"); +// it("should handle duplicate columns with throwOnDuplicateAdd false"); +// }); + +// describe("downloadByRange", () => { +// it("should download and validate blocks only"); +// it("should download and validate blocks with blobs"); +// it("should download and validate blocks with columns"); +// it("should download blocks, blobs and columns concurrently"); +// it("should use cached batch blocks for data validation when no blocks request"); +// it("should throw REQ_RESP_ERROR when network request fails"); +// it("should handle empty responses from network"); +// it("should validate responses before returning"); +// it("should pass through validation errors"); +// it("should log verbose error before throwing"); +// }); + +// describe("requestByRange", () => { +// it("should make block requests"); +// it("should make blob requests"); +// it("should make column requests"); +// it("should make concurrent block/blob/column requests from the same peer"); +// it("should handle undefined responses properly"); +// it("should throw if one of the concurrent requests fails"); +// it("should not make requests for undefined request parameters"); +// it("should return empty object when no requests provided"); +// it("should handle network timeout errors"); +// it("should preserve response order for concurrent requests"); +// }); + +// describe("validateResponses", () => { +// it("should validate blocks when blocksRequest provided"); +// it("should validate blobs when blobsRequest provided with blocks"); +// it("should validate columns when columnsRequest provided with blocks"); +// it("should use batchBlocks for data validation when no blocksRequest"); +// it("should throw MISSING_BLOCKS when data request but no blocks available"); +// it("should throw MISSING_BLOBS_RESPONSE when blobsRequest but no blobSidecars"); +// it("should throw MISSING_COLUMNS_RESPONSE when columnsRequest but no columnSidecars"); +// it("should return empty responses when no requests provided"); +// it("should validate blocks before validating data sidecars"); +// it("should use validated blocks for data validation when both downloaded"); +// it("should handle mixed cached and downloaded blocks for validation"); +// it("should validate slot ranges match between blocks and data requests"); +// }); + +// describe("validateBlockByRangeResponse", () => { +// it("should accept valid chain of blocks"); +// it("should accept empty response during chain liveness issues"); +// it("should throw EXTRA_BLOCKS when more blocks than requested count"); +// it("should throw OUT_OF_RANGE_BLOCKS when block slot before startSlot"); +// it("should throw OUT_OF_RANGE_BLOCKS when block slot after 
lastValidSlot"); +// it("should throw OUT_OF_ORDER_BLOCKS when blocks not in ascending slot order"); +// it("should allow skip slots in block chain"); +// it("should validate parent root matches previous block root"); +// it("should throw PARENT_ROOT_MISMATCH when chain broken"); +// it("should handle single block response"); +// it("should handle maximum count blocks"); +// it("should compute block roots correctly for each fork"); +// it("should validate blocks at fork boundaries"); +// it("should handle blocks with same slot (reorgs)"); +// }); + +// describe("validateBlobsByRangeResponse", () => { +// it("should accept valid blob sidecars matching blocks"); +// it("should throw EXTRA_BLOBS when more blobs than expected"); +// it("should throw MISSING_BLOBS when fewer blobs than expected"); +// it("should validate blob count matches block kzg commitments"); +// it("should skip blocks with zero kzg commitments"); +// it("should validate blobs in consecutive (slot, index) order"); +// it("should validate blob indices are sequential within block"); +// it("should validate all blobs for a block are included"); +// it("should call validateBlockBlobSidecars for each block with blobs"); +// it("should handle blocks with different blob counts"); +// it("should validate blobs across multiple blocks"); +// it("should return validated blob sidecars grouped by block"); +// it("should handle maximum blob count per block"); +// it("should validate blob sidecars in parallel"); +// it("should propagate validation errors from validateBlockBlobSidecars"); +// }); + +// describe("validateColumnsByRangeResponse", () => { +// it("should accept valid column sidecars matching blocks"); +// it("should throw EXTRA_COLUMNS when more columns than expected"); +// it("should throw MISSING_COLUMNS when fewer columns than expected"); +// it("should validate column count matches requested columns times blocks with commitments"); +// it("should skip blocks with zero kzg commitments"); +// it("should validate columns in consecutive (slot, index) order"); +// it("should validate all requested column indices present for each block"); +// it("should validate column indices match requested columns array"); +// it("should validate columns are in order within each block"); +// it("should throw MISSING_COLUMNS when columns not in correct order"); +// it("should call validateBlockDataColumnSidecars for each block with columns"); +// it("should handle blocks with different commitment counts"); +// it("should validate columns across multiple blocks"); +// it("should return validated column sidecars grouped by block"); +// it("should handle partial column requests (subset of indices)"); +// it("should validate column sidecars in parallel"); +// it("should propagate validation errors from validateBlockDataColumnSidecars"); +// }); + +describe("getBlocksForDataValidation", () => { + const forkName = ForkName.capella; + let chainOfBlocks: ReturnType; + let blockInputs: IBlockInput[]; + let validatedBlocks: ValidatedBlock[]; + + beforeEach(() => { + chainOfBlocks = generateChainOfBlockMaybeSidecars({forkName, count: 32, oomProtection: true}); + blockInputs = chainOfBlocks.map(({block, rootHex}) => + BlockInputPreData.createFromBlock({ + block, + forkName, + blockRootHex: rootHex, + daOutOfRange: true, + seenTimestampSec: Date.now(), + source: BlockInputSource.gossip, + }) + ); + validatedBlocks = chainOfBlocks.map(({block, blockRoot}) => ({block, blockRoot})); }); - describe("cacheByRangeResponses", () => { - it("should cache blocks 
only when no data sidecars present"); - it("should cache blocks with blob sidecars"); - it("should cache blocks with column sidecars"); - it("should add blocks to existing batch blocks"); - it("should add blob sidecars to existing batch blocks"); - it("should add column sidecars to existing batch blocks"); - it("should create new block input when block doesn't exist in batch"); - it("should create new block input from blob sidecars when block doesn't exist"); - it("should create new block input from column sidecars when block doesn't exist"); - it("should throw error when block input type mismatches for blobs"); - it("should throw error when block input type mismatches for columns"); - it("should handle wrong chain error for blocks in finalized sync"); - it("should handle wrong chain error for blobs in finalized sync"); - it("should handle wrong chain error for columns in finalized sync"); - it("should not report peer for wrong chain in non-finalized sync"); - it("should maintain slot ordering in returned block inputs"); - it("should handle empty responses gracefully"); - it("should handle duplicate blocks with throwOnDuplicateAdd false"); - it("should handle duplicate blobs with throwOnDuplicateAdd false"); - it("should handle duplicate columns with throwOnDuplicateAdd false"); + it("should return requested slot range from cached", () => { + // Request slots 10-20 from cached blocks (slots 0-31) + const dataRequest = {startSlot: 10, count: 10}; + const lastSlot = dataRequest.startSlot + dataRequest.count - 1; + + const result = getBlocksForDataValidation(dataRequest, blockInputs.slice(10, 20), undefined); + + expect(result).toHaveLength(10); + expect(result[0].block.message.slot).toBe(dataRequest.startSlot); + expect(result[dataRequest.count - 1].block.message.slot).toBe(lastSlot); }); - describe("downloadByRange", () => { - it("should download and validate blocks only"); - it("should download and validate blocks with blobs"); - it("should download and validate blocks with columns"); - it("should download blocks, blobs and columns concurrently"); - it("should use cached batch blocks for data validation when no blocks request"); - it("should throw REQ_RESP_ERROR when network request fails"); - it("should handle empty responses from network"); - it("should validate responses before returning"); - it("should pass through validation errors"); - it("should log verbose error before throwing"); + it("should filter out blocks before and after range from cached", () => { + // Request slots 10-20 but provide cached blocks from slots 5-25 + const dataRequest = {startSlot: 10, count: 10}; + const lastSlot = dataRequest.startSlot + dataRequest.count - 1; + const cached = blockInputs; + + const result = getBlocksForDataValidation(dataRequest, cached, undefined); + + expect(result).toHaveLength(10); + expect(result[0].block.message.slot).toBe(dataRequest.startSlot); + expect(result[dataRequest.count - 1].block.message.slot).toBe(lastSlot); + // Verify no blocks outside range + for (const block of result) { + expect(block.block.message.slot).toBeGreaterThanOrEqual(10); + expect(block.block.message.slot).toBeLessThan(20); + } }); - describe("requestByRange", () => { - it("should make block requests"); - it("should make blob requests"); - it("should make column requests"); - it("should make concurrent block/blob/column requests from the same peer"); - it("should handle undefined responses properly"); - it("should throw if one of the concurrent requests fails"); - it("should not make requests for undefined 
request parameters"); - it("should return empty object when no requests provided"); - it("should handle network timeout errors"); - it("should preserve response order for concurrent requests"); + it("should return requested slot range from current", () => { + // Request slots 10-20 from current blocks (slots 0-31) + const dataRequest = {startSlot: 10, count: 10}; + const lastSlot = dataRequest.startSlot + dataRequest.count - 1; + const current = validatedBlocks.slice(10, 20); + + const result = getBlocksForDataValidation(dataRequest, undefined, current); + + expect(result).toHaveLength(10); + expect(result[0].block.message.slot).toBe(dataRequest.startSlot); + expect(result[dataRequest.count - 1].block.message.slot).toBe(lastSlot); }); - describe("validateResponses", () => { - it("should validate blocks when blocksRequest provided"); - it("should validate blobs when blobsRequest provided with blocks"); - it("should validate columns when columnsRequest provided with blocks"); - it("should use batchBlocks for data validation when no blocksRequest"); - it("should throw MISSING_BLOCKS when data request but no blocks available"); - it("should throw MISSING_BLOBS_RESPONSE when blobsRequest but no blobSidecars"); - it("should throw MISSING_COLUMNS_RESPONSE when columnsRequest but no columnSidecars"); - it("should return empty responses when no requests provided"); - it("should validate blocks before validating data sidecars"); - it("should use validated blocks for data validation when both downloaded"); - it("should handle mixed cached and downloaded blocks for validation"); - it("should validate slot ranges match between blocks and data requests"); + it("should filter out blocks before and after range from current", () => { + // Request slots 10-20 but provide current blocks from slots 5-25 + const dataRequest = {startSlot: 10, count: 10}; + const lastSlot = dataRequest.startSlot + dataRequest.count - 1; + const current = validatedBlocks; + + const result = getBlocksForDataValidation(dataRequest, undefined, current); + + expect(result).toHaveLength(10); + expect(result[0].block.message.slot).toBe(dataRequest.startSlot); + expect(result[dataRequest.count - 1].block.message.slot).toBe(lastSlot); + // Verify no blocks outside range + for (const block of result) { + expect(block.block.message.slot).toBeGreaterThanOrEqual(10); + expect(block.block.message.slot).toBeLessThan(20); + } }); - describe("validateBlockByRangeResponse", () => { - it("should accept valid chain of blocks"); - it("should accept empty response during chain liveness issues"); - it("should throw EXTRA_BLOCKS when more blocks than requested count"); - it("should throw OUT_OF_RANGE_BLOCKS when block slot before startSlot"); - it("should throw OUT_OF_RANGE_BLOCKS when block slot after lastValidSlot"); - it("should throw OUT_OF_ORDER_BLOCKS when blocks not in ascending slot order"); - it("should allow skip slots in block chain"); - it("should validate parent root matches previous block root"); - it("should throw PARENT_ROOT_MISMATCH when chain broken"); - it("should handle single block response"); - it("should handle maximum count blocks"); - it("should compute block roots correctly for each fork"); - it("should validate blocks at fork boundaries"); - it("should handle blocks with same slot (reorgs)"); + it("should return requested slot range from combination of cached and current", () => { + const dataRequest = {startSlot: 5, count: 25}; + const lastSlot = dataRequest.startSlot + dataRequest.count - 1; + const cached = 
blockInputs.slice(0, 15); + const current = validatedBlocks.slice(15); + + const result = getBlocksForDataValidation(dataRequest, cached, current); + + expect(result).toHaveLength(25); + expect(result[0].block.message.slot).toBe(dataRequest.startSlot); + + expect(result[dataRequest.count - 1].block.message.slot).toBe(lastSlot); }); - describe("validateBlobsByRangeResponse", () => { - it("should accept valid blob sidecars matching blocks"); - it("should throw EXTRA_BLOBS when more blobs than expected"); - it("should throw MISSING_BLOBS when fewer blobs than expected"); - it("should validate blob count matches block kzg commitments"); - it("should skip blocks with zero kzg commitments"); - it("should validate blobs in consecutive (slot, index) order"); - it("should validate blob indices are sequential within block"); - it("should validate all blobs for a block are included"); - it("should call validateBlockBlobSidecars for each block with blobs"); - it("should handle blocks with different blob counts"); - it("should validate blobs across multiple blocks"); - it("should return validated blob sidecars grouped by block"); - it("should handle maximum blob count per block"); - it("should validate blob sidecars in parallel"); - it("should propagate validation errors from validateBlockBlobSidecars"); + it("should always return ValidatedBlocks for mixed block source", () => { + const dataRequest = {startSlot: 5, count: 25}; + const cached = blockInputs.slice(0, 15); + const current = validatedBlocks.slice(15); + + const result = getBlocksForDataValidation(dataRequest, cached, current); + + // All results should be ValidatedBlock type with block and blockRoot + for (const validatedBlock of result) { + expect(validatedBlock).toHaveProperty("block"); + expect(validatedBlock).toHaveProperty("blockRoot"); + expect(validatedBlock.blockRoot).toBeInstanceOf(Uint8Array); + } }); - describe("validateColumnsByRangeResponse", () => { - it("should accept valid column sidecars matching blocks"); - it("should throw EXTRA_COLUMNS when more columns than expected"); - it("should throw MISSING_COLUMNS when fewer columns than expected"); - it("should validate column count matches requested columns times blocks with commitments"); - it("should skip blocks with zero kzg commitments"); - it("should validate columns in consecutive (slot, index) order"); - it("should validate all requested column indices present for each block"); - it("should validate column indices match requested columns array"); - it("should validate columns are in order within each block"); - it("should throw MISSING_COLUMNS when columns not in correct order"); - it("should call validateBlockDataColumnSidecars for each block with columns"); - it("should handle blocks with different commitment counts"); - it("should validate columns across multiple blocks"); - it("should return validated column sidecars grouped by block"); - it("should handle partial column requests (subset of indices)"); - it("should validate column sidecars in parallel"); - it("should propagate validation errors from validateBlockDataColumnSidecars"); + it("should maintain ascending slot order", () => { + const dataRequest = {startSlot: 5, count: 25}; + const cached = blockInputs.slice(0, 15); + const current = validatedBlocks.slice(15); + + const result = getBlocksForDataValidation(dataRequest, cached, current); + + expect(result.sort((a, b) => a.block.message.slot - b.block.message.slot)).toEqual(result); }); - describe("getBlocksForDataValidation", () => { - it("should return blocks 
within requested slot range"); - it("should filter out blocks before startSlot"); - it("should filter out blocks at or after endSlot"); - it("should combine cached and current blocks in order"); - it("should maintain ascending slot order"); - it("should skip duplicate slots keeping first occurrence"); - it("should handle undefined cached blocks"); - it("should handle undefined current blocks"); - it("should handle both cached and current undefined"); - it("should return empty array when no blocks in range"); - it("should convert cached IBlockInput to ValidatedBlock format"); - it("should preserve block roots from cached blocks"); - it("should handle overlapping slot ranges between cached and current"); - it("should validate cached blocks are before current blocks"); - it("should handle gaps in slot sequence"); + it("should handle overlapping slot ranges between cached and current", () => { + // Both cached and current have blocks for slots 12-15 + const dataRequest = {startSlot: 10, count: 10}; + const lastSlot = dataRequest.startSlot + dataRequest.count - 1; + const cached = blockInputs.slice(0, 16); // slots 0-15 + const current = validatedBlocks.slice(12, 25); // slots 12-24 + + const result = getBlocksForDataValidation(dataRequest, cached, current); + + // Should not have duplicates, cached takes precedence + expect(result).toHaveLength(10); + expect(result[0].block.message.slot).toBe(dataRequest.startSlot); + expect(result[dataRequest.count - 1].block.message.slot).toBe(lastSlot); + // Verify no duplicate slots + const slots = result.map((b) => b.block.message.slot); + const uniqueSlots = new Set(slots); + expect(uniqueSlots.size).toBe(slots.length); }); - describe("Error handling", () => { - it("should build correct slot range string for blocks request"); - it("should build correct slot range string for blobs request"); - it("should build correct slot range string for columns request"); - it("should handle missing request parameters in slot range string"); - it("should create DownloadByRangeError with correct error codes"); - it("should preserve error context in DownloadByRangeError"); - it("should handle network errors appropriately"); - it("should handle validation errors appropriately"); - it("should handle cache errors appropriately"); + it("should return empty array when no blocks in range", () => { + const dataRequest = {startSlot: 100, count: 10}; + const cached = blockInputs.slice(0, 10); // slots 0-9 + const current = validatedBlocks.slice(10, 20); // slots 10-19 + + const result = getBlocksForDataValidation(dataRequest, cached, current); + + expect(result).toHaveLength(0); }); - describe("Integration scenarios", () => { - it("should handle full download and cache flow for blocks only"); - it("should handle full download and cache flow for blocks with blobs"); - it("should handle full download and cache flow for blocks with columns"); - it("should handle partial responses within valid range"); - it("should handle peer disconnection during download"); - it("should handle fork transition during range download"); - it("should handle reorg detection via parent root mismatch"); - it("should handle maximum request size limits"); - it("should handle minimum request size (count=1)"); - it("should handle skip slots in epoch boundaries"); - it("should handle genesis slot edge cases"); - it("should handle far future slot requests"); + it("should tolerate skip slots in cached and current", () => { + const dataRequest = {startSlot: 0, count: 20}; + // Create sparse arrays with skip slots 
+ const cached = [blockInputs[1], blockInputs[3], blockInputs[5], blockInputs[7]]; + const current = [validatedBlocks[10], validatedBlocks[12], validatedBlocks[15], validatedBlocks[18]]; + + const result = getBlocksForDataValidation(dataRequest, cached, current); + + expect(result).toHaveLength(cached.length + current.length); + const slots = result.map(({block}) => block.message.slot); + const expectedSlots = cached.map((b) => b.slot).concat(...current.map((b) => b.block.message.slot)); + expect(slots).toEqual(expectedSlots); + + // Verify ascending order is maintained despite skip slots + for (let i = 1; i < slots.length; i++) { + expect(slots[i]).toBeGreaterThan(slots[i - 1]); + } }); }); + +// describe("Error handling", () => { +// it("should build correct slot range string for blocks request"); +// it("should build correct slot range string for blobs request"); +// it("should build correct slot range string for columns request"); +// it("should handle missing request parameters in slot range string"); +// it("should create DownloadByRangeError with correct error codes"); +// it("should preserve error context in DownloadByRangeError"); +// it("should handle network errors appropriately"); +// it("should handle validation errors appropriately"); +// it("should handle cache errors appropriately"); +// }); + +// describe("Integration scenarios", () => { +// it("should handle full download and cache flow for blocks only"); +// it("should handle full download and cache flow for blocks with blobs"); +// it("should handle full download and cache flow for blocks with columns"); +// it("should handle partial responses within valid range"); +// it("should handle peer disconnection during download"); +// it("should handle fork transition during range download"); +// it("should handle reorg detection via parent root mismatch"); +// it("should handle maximum request size limits"); +// it("should handle minimum request size (count=1)"); +// it("should handle skip slots in epoch boundaries"); +// it("should handle genesis slot edge cases"); +// it("should handle far future slot requests"); +// }); +// }); From ff687e37f5730a9ddb7a87a22a2e5f9f08bfaabe Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Thu, 4 Sep 2025 02:00:47 +0700 Subject: [PATCH 110/173] chore: fix check-types --- packages/beacon-node/src/sync/range/range.ts | 1 - .../unit/sync/utils/downloadByRange.test.ts | 24 +++---------------- 2 files changed, 3 insertions(+), 22 deletions(-) diff --git a/packages/beacon-node/src/sync/range/range.ts b/packages/beacon-node/src/sync/range/range.ts index e1ce84247b7a..d8e5ccdba456 100644 --- a/packages/beacon-node/src/sync/range/range.ts +++ b/packages/beacon-node/src/sync/range/range.ts @@ -4,7 +4,6 @@ import {computeStartSlotAtEpoch} from "@lodestar/state-transition"; import {Epoch, Status, fulu} from "@lodestar/types"; import {Logger, toRootHex} from "@lodestar/utils"; import {StrictEventEmitter} from "strict-event-emitter-types"; -import {isDaOutOfRange} from "../../chain/blocks/blockInput/utils.js"; import {AttestationImportOpt, ImportBlockOpts} from "../../chain/blocks/index.js"; import {IBeaconChain} from "../../chain/index.js"; import {Metrics} from "../../metrics/index.js"; diff --git a/packages/beacon-node/test/unit/sync/utils/downloadByRange.test.ts b/packages/beacon-node/test/unit/sync/utils/downloadByRange.test.ts index 8101da725c6a..58393c8b8b37 100644 --- a/packages/beacon-node/test/unit/sync/utils/downloadByRange.test.ts +++ 
b/packages/beacon-node/test/unit/sync/utils/downloadByRange.test.ts @@ -1,27 +1,9 @@ import {ForkName} from "@lodestar/params"; -import {SignedBeaconBlock, WithBytes, deneb, fulu, ssz} from "@lodestar/types"; -import {fromHex} from "@lodestar/utils"; -import {Mock, beforeAll, beforeEach, describe, expect, it, vi} from "vitest"; +import {beforeEach, describe, expect, it} from "vitest"; import {BlockInputPreData} from "../../../../src/chain/blocks/blockInput/blockInput.js"; import {BlockInputSource, IBlockInput} from "../../../../src/chain/blocks/blockInput/types.js"; -import {INetwork} from "../../../../src/network/index.js"; -import { - DownloadByRangeError, - DownloadByRangeRequests, - DownloadByRangeResponses, - ValidatedBlock, - getBlocksForDataValidation, - requestByRange, - validateBlobsByRangeResponse, - validateBlockByRangeResponse, -} from "../../../../src/sync/utils/downloadByRange.js"; -import { - config, - generateChainOfBlockMaybeSidecars, - generateChainOfBlocks, - generateChainOfBlocksWithBlobs, - slots, -} from "../../../utils/blocksAndData.js"; +import {ValidatedBlock, getBlocksForDataValidation} from "../../../../src/sync/utils/downloadByRange.js"; +import {generateChainOfBlockMaybeSidecars} from "../../../utils/blocksAndData.js"; /** * Logic errors and gaps identified during test case creation: From 1fe1fce63c80fd039ddc7678fb251c631119b66b Mon Sep 17 00:00:00 2001 From: Cayman Date: Wed, 3 Sep 2025 15:49:05 -0400 Subject: [PATCH 111/173] chore: fix more tests --- .../chain/seenCache/seenBlockInput.test.ts | 215 +++++++---------- .../test/unit/sync/unknownBlock.test.ts | 6 +- .../unit/sync/utils/downloadByRoot.test.ts | 225 ++---------------- 3 files changed, 112 insertions(+), 334 deletions(-) diff --git a/packages/beacon-node/test/unit/chain/seenCache/seenBlockInput.test.ts b/packages/beacon-node/test/unit/chain/seenCache/seenBlockInput.test.ts index 8d0464be8946..68118332e2a4 100644 --- a/packages/beacon-node/test/unit/chain/seenCache/seenBlockInput.test.ts +++ b/packages/beacon-node/test/unit/chain/seenCache/seenBlockInput.test.ts @@ -1,8 +1,7 @@ import {generateKeyPair} from "@libp2p/crypto/keys"; -import {createChainForkConfig, defaultChainConfig} from "@lodestar/config"; -import {ForkName, ForkPostCapella, ForkPostDeneb, ForkPostFulu} from "@lodestar/params"; -import {computeStartSlotAtEpoch, signedBlockToSignedHeader} from "@lodestar/state-transition"; -import {SignedBeaconBlock, deneb, ssz} from "@lodestar/types"; +import {ForkName, ForkPostFulu} from "@lodestar/params"; +import {signedBlockToSignedHeader} from "@lodestar/state-transition"; +import {SignedBeaconBlock} from "@lodestar/types"; import {toRootHex} from "@lodestar/utils"; import {beforeEach, describe, expect, it} from "vitest"; import { @@ -17,6 +16,12 @@ import {SeenBlockInput} from "../../../../src/chain/seenCache/seenGossipBlockInp import {computeNodeIdFromPrivateKey} from "../../../../src/network/subnets/index.js"; import {Clock} from "../../../../src/util/clock.js"; import {CustodyConfig} from "../../../../src/util/dataColumns.js"; +import { + config, + generateBlock, + generateBlockWithBlobSidecars, + generateChainOfBlocks, +} from "../../../utils/blocksAndData.js"; import {testLogger} from "../../../utils/logger.js"; describe("SeenBlockInputCache", async () => { @@ -24,95 +29,11 @@ describe("SeenBlockInputCache", async () => { let abortController: AbortController; let chainEvents: ChainEventEmitter; - const CAPELLA_FORK_EPOCH = 0; - const DENEB_FORK_EPOCH = 1; - const ELECTRA_FORK_EPOCH = 2; - 
const FULU_FORK_EPOCH = 3; - const GLOAS_FORK_EPOCH = 4; - const config = createChainForkConfig({ - ...defaultChainConfig, - CAPELLA_FORK_EPOCH, - DENEB_FORK_EPOCH, - ELECTRA_FORK_EPOCH, - FULU_FORK_EPOCH, - GLOAS_FORK_EPOCH, - }); const privateKey = await generateKeyPair("secp256k1"); const nodeId = computeNodeIdFromPrivateKey(privateKey); const custodyConfig = new CustodyConfig({config, nodeId}); - - const slots: Record = { - capella: computeStartSlotAtEpoch(CAPELLA_FORK_EPOCH), - deneb: computeStartSlotAtEpoch(DENEB_FORK_EPOCH), - electra: computeStartSlotAtEpoch(ELECTRA_FORK_EPOCH), - fulu: computeStartSlotAtEpoch(FULU_FORK_EPOCH), - gloas: computeStartSlotAtEpoch(GLOAS_FORK_EPOCH), - }; - - type BlockTestSet = { - block: SignedBeaconBlock; - blockRoot: Uint8Array; - rootHex: string; - }; - - function buildBlockTestSet(forkName: F): BlockTestSet { - const block = ssz[forkName].SignedBeaconBlock.defaultValue(); - block.message.slot = slots[forkName]; - const blockRoot = ssz[forkName].BeaconBlock.hashTreeRoot(block.message as any); - const rootHex = toRootHex(blockRoot); - return { - block, - blockRoot, - rootHex, - }; - } - - type ParentAndChildBlockTestSet = { - parentBlock: SignedBeaconBlock; - parentBlockRoot: Uint8Array; - parentRootHex: string; - childBlock: SignedBeaconBlock; - childBlockRoot: Uint8Array; - childRootHex: string; - }; - function buildParentAndChildBlockTestSet( - forkName: F - ): ParentAndChildBlockTestSet { - const {block: parentBlock, blockRoot: parentBlockRoot, rootHex: parentRootHex} = buildBlockTestSet(forkName); - const {block: childBlock, blockRoot: childBlockRoot, rootHex: childRootHex} = buildBlockTestSet(forkName); - childBlock.message.slot = parentBlock.message.slot + 1; - childBlock.message.parentRoot = parentBlockRoot; - return { - parentBlock, - parentBlockRoot, - parentRootHex, - childBlock, - childBlockRoot, - childRootHex, - }; - } - - type BlockAndBlobTestSet = BlockTestSet & { - blobSidecar: deneb.BlobSidecar; - }; - function buildBlockAndBlobTestSet(forkName: ForkPostDeneb): BlockAndBlobTestSet { - const {block, blockRoot, rootHex} = buildBlockTestSet(forkName); - const commitment = Buffer.alloc(48, 0x77); - block.message.body.blobKzgCommitments = [commitment]; - const signedBlockHeader = signedBlockToSignedHeader(config, block); - const blobSidecar = ssz[forkName].BlobSidecar.defaultValue(); - blobSidecar.signedBlockHeader = signedBlockHeader; - blobSidecar.kzgCommitment = commitment; - - return { - block, - blockRoot, - rootHex, - blobSidecar, - }; - } - const logger = testLogger(); + beforeEach(() => { chainEvents = new ChainEventEmitter(); abortController = new AbortController(); @@ -128,9 +49,10 @@ describe("SeenBlockInputCache", async () => { metrics: null, }); }); + describe("has()", () => { it("should return true if in cache", () => { - const {block, rootHex} = buildBlockTestSet(ForkName.capella); + const {block, rootHex} = generateBlock({forkName: ForkName.capella}); cache.getByBlock({ block, blockRootHex: rootHex, @@ -139,8 +61,9 @@ describe("SeenBlockInputCache", async () => { }); expect(cache.has(rootHex)).toBeTruthy(); }); + it("should return false if not in cache", () => { - const {block, blockRoot, rootHex} = buildBlockTestSet(ForkName.capella); + const {block, blockRoot, rootHex} = generateBlock({forkName: ForkName.capella}); cache.getByBlock({ block, blockRootHex: rootHex, @@ -154,9 +77,10 @@ describe("SeenBlockInputCache", async () => { expect(cache.has(toRootHex(blockRoot))).toBeFalsy(); }); }); + describe("get()", () => { 
it("should return BlockInput if in cache", () => { - const {block, rootHex} = buildBlockTestSet(ForkName.capella); + const {block, rootHex} = generateBlock({forkName: ForkName.capella}); const blockInput = cache.getByBlock({ block, blockRootHex: rootHex, @@ -165,8 +89,9 @@ describe("SeenBlockInputCache", async () => { }); expect(cache.get(rootHex)).toBe(blockInput); }); + it("should return undefined if not in cache", () => { - const {block, blockRoot, rootHex} = buildBlockTestSet(ForkName.capella); + const {block, blockRoot, rootHex} = generateBlock({forkName: ForkName.capella}); const blockInput = cache.getByBlock({ block, blockRootHex: rootHex, @@ -180,9 +105,10 @@ describe("SeenBlockInputCache", async () => { expect(cache.get(toRootHex(blockRoot))).toBeUndefined(); }); }); + describe("remove()", () => { it("should remove a BlockInput", () => { - const {block, rootHex} = buildBlockTestSet(ForkName.capella); + const {block, rootHex} = generateBlock({forkName: ForkName.capella}); const blockInput = cache.getByBlock({ block, blockRootHex: rootHex, @@ -193,8 +119,9 @@ describe("SeenBlockInputCache", async () => { cache.remove(rootHex); expect(cache.get(rootHex)).toBeUndefined(); }); + it("should not throw an error if BlockInput not in cache", () => { - const {block, blockRoot, rootHex} = buildBlockTestSet(ForkName.capella); + const {block, blockRoot, rootHex} = generateBlock({forkName: ForkName.capella}); const blockInput = cache.getByBlock({ block, blockRootHex: rootHex, @@ -209,9 +136,10 @@ describe("SeenBlockInputCache", async () => { expect(cache.has(rootHex)).toBeTruthy(); }); }); + describe("prune()", () => { it("should remove a BlockInput", () => { - const {block, rootHex} = buildBlockTestSet(ForkName.capella); + const {block, rootHex} = generateBlock({forkName: ForkName.capella}); const blockInput = cache.getByBlock({ block, blockRootHex: rootHex, @@ -224,7 +152,11 @@ describe("SeenBlockInputCache", async () => { }); it("should remove all ancestors of a BlockInput", () => { - const {parentBlock, parentRootHex, childBlock, childRootHex} = buildParentAndChildBlockTestSet(ForkName.capella); + const blocks = generateChainOfBlocks({forkName: ForkName.capella, count: 2}); + const parentBlock = blocks[0].block; + const parentRootHex = blocks[0].rootHex; + const childBlock = blocks[1].block; + const childRootHex = blocks[1].rootHex; const parentBlockInput = cache.getByBlock({ block: parentBlock, @@ -247,6 +179,7 @@ describe("SeenBlockInputCache", async () => { expect(cache.get(parentRootHex)).toBeUndefined(); }); }); + describe("onFinalized()", () => { let childRootHex: string; let childBlockInput: IBlockInput; @@ -255,12 +188,11 @@ describe("SeenBlockInputCache", async () => { const root = Buffer.alloc(32, 0xff); const rootHex = toRootHex(root); beforeEach(() => { - const { - parentBlock, - parentRootHex: parentRoot, - childBlock, - childRootHex: childRoot, - } = buildParentAndChildBlockTestSet(ForkName.capella); + const blocks = generateChainOfBlocks({forkName: ForkName.capella, count: 2}); + const parentBlock = blocks[0].block; + const parentRoot = blocks[0].rootHex; + const childBlock = blocks[1].block; + const childRoot = blocks[1].rootHex; parentRootHex = parentRoot; childRootHex = childRoot; @@ -280,18 +212,20 @@ describe("SeenBlockInputCache", async () => { }); expect(cache.get(childRootHex)).toBe(childBlockInput); }); + it("should remove all BlockInputs in slots before the checkpoint", () => { chainEvents.emit(ChainEvent.forkChoiceFinalized, { - epoch: DENEB_FORK_EPOCH, + epoch: 
config.DENEB_FORK_EPOCH, root, rootHex, }); expect(cache.get(childRootHex)).toBeUndefined(); expect(cache.get(parentRootHex)).toBeUndefined(); }); + it("should not remove BlockInputs in slots after the checkpoint", () => { chainEvents.emit(ChainEvent.forkChoiceFinalized, { - epoch: CAPELLA_FORK_EPOCH, + epoch: config.CAPELLA_FORK_EPOCH, root, rootHex, }); @@ -299,9 +233,10 @@ describe("SeenBlockInputCache", async () => { expect(cache.get(parentRootHex)).toBe(parentBlockInput); }); }); + describe("getByBlock()", () => { it("should return a new BlockInput for a new block root", () => { - const {block, rootHex} = buildBlockTestSet(ForkName.capella); + const {block, rootHex} = generateBlock({forkName: ForkName.capella}); expect(cache.get(rootHex)).toBeUndefined(); const blockInput = cache.getByBlock({ block, @@ -311,9 +246,10 @@ describe("SeenBlockInputCache", async () => { }); expect(cache.get(rootHex)).toBe(blockInput); }); + describe("should return the correct type of BlockInput for a given block root", () => { it("should return a BlockInputPreDeneb", () => { - const {block, rootHex} = buildBlockTestSet(ForkName.capella); + const {block, rootHex} = generateBlock({forkName: ForkName.capella}); const blockInput = cache.getByBlock({ block, blockRootHex: rootHex, @@ -322,8 +258,9 @@ describe("SeenBlockInputCache", async () => { }); expect(isBlockInputPreDeneb(blockInput)).toBeTruthy(); }); + it("should return a BlockInputBlobs", () => { - const {block, rootHex} = buildBlockTestSet(ForkName.deneb); + const {block, rootHex} = generateBlock({forkName: ForkName.deneb}); const blockInput = cache.getByBlock({ block, blockRootHex: rootHex, @@ -332,8 +269,9 @@ describe("SeenBlockInputCache", async () => { }); expect(isBlockInputBlobs(blockInput)).toBeTruthy(); }); + it("should return a BlockInputColumns", () => { - const {block, rootHex} = buildBlockTestSet(ForkName.fulu); + const {block, rootHex} = generateBlock({forkName: ForkName.fulu}); const blockInput = cache.getByBlock({ block, blockRootHex: rootHex, @@ -343,8 +281,9 @@ describe("SeenBlockInputCache", async () => { expect(isBlockInputColumns(blockInput)).toBeTruthy(); }); }); + it("should return the same BlockInput for an existing block root", () => { - const {block, rootHex} = buildBlockTestSet(ForkName.capella); + const {block, rootHex} = generateBlock({forkName: ForkName.capella}); const blockInput1 = cache.getByBlock({ block, blockRootHex: rootHex, @@ -360,8 +299,9 @@ describe("SeenBlockInputCache", async () => { }); expect(blockInput1).toBe(blockInput2); }); + it("should not throw for a BlockInput with an existing block", () => { - const {block, rootHex} = buildBlockTestSet(ForkName.capella); + const {block, rootHex} = generateBlock({forkName: ForkName.capella}); const blockInput = cache.getByBlock({ block, blockRootHex: rootHex, @@ -385,11 +325,12 @@ describe("SeenBlockInputCache", async () => { }) ).not.toThrow(); }); + it("should return the correct BlockInput for a BlockInput created by blob", () => { - const {block, blobSidecar, rootHex} = buildBlockAndBlobTestSet(ForkName.deneb); + const {block, blobSidecars, rootHex} = generateBlockWithBlobSidecars({forkName: ForkName.deneb, count: 1}); const blockInput1 = cache.getByBlob({ - blobSidecar, + blobSidecar: blobSidecars[0], blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), @@ -403,6 +344,7 @@ describe("SeenBlockInputCache", async () => { expect(blockInput1).toBe(blockInput2); }); + it("should return the correct BlockInput for a BlockInput created by 
column", () => { // const {block, columnSidecar} = buildBlockAndBlobTestSet(ForkName.fulu); // const blockInput1 = cache.getByColumn({ @@ -418,38 +360,41 @@ describe("SeenBlockInputCache", async () => { // expect(blockInput1).toBe(blockInput2); }); }); + describe("getByBlob()", () => { it("should return a new BlockInput for a new block root", () => { - const {rootHex, blobSidecar} = buildBlockAndBlobTestSet(ForkName.electra); + const {rootHex, blobSidecars} = generateBlockWithBlobSidecars({forkName: ForkName.deneb, count: 1}); expect(cache.get(rootHex)).toBeUndefined(); const blockInput = cache.getByBlob({ - blobSidecar, + blobSidecar: blobSidecars[0], blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); expect(cache.get(rootHex)).toBe(blockInput); }); + it("should return the same BlockInput for an existing block root", () => { - const {rootHex, blobSidecar} = buildBlockAndBlobTestSet(ForkName.electra); + const {rootHex, blobSidecars} = generateBlockWithBlobSidecars({forkName: ForkName.deneb, count: 1}); const blockInput1 = cache.getByBlob({ - blobSidecar, + blobSidecar: blobSidecars[0], blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); expect(cache.get(rootHex)).toBe(blockInput1); const blockInput2 = cache.getByBlob({ - blobSidecar, + blobSidecar: blobSidecars[0], blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); expect(blockInput1).toBe(blockInput2); }); + it("should throw if attempting to add a blob to wrong type of BlockInput", () => { - const {block, rootHex} = buildBlockTestSet(ForkName.capella); + const {block, rootHex} = generateBlock({forkName: ForkName.capella}); const blockInput = cache.getByBlock({ block, blockRootHex: rootHex, @@ -458,19 +403,20 @@ describe("SeenBlockInputCache", async () => { }); expect(isBlockInputPreDeneb(blockInput)).toBeTruthy(); - const {blobSidecar, rootHex: rootHex2} = buildBlockAndBlobTestSet(ForkName.electra); - blobSidecar.signedBlockHeader = signedBlockToSignedHeader(config, block); + const {blobSidecars} = generateBlockWithBlobSidecars({forkName: ForkName.deneb, count: 1}); + blobSidecars[0].signedBlockHeader = signedBlockToSignedHeader(config, block); expect(() => cache.getByBlob({ - blobSidecar, - blockRootHex: rootHex2, + blobSidecar: blobSidecars[0], + blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }) ).toThrow(); }); + it("should add blob to an existing BlockInput", () => { - const {block, blobSidecar, rootHex} = buildBlockAndBlobTestSet(ForkName.electra); + const {block, blobSidecars, rootHex} = generateBlockWithBlobSidecars({forkName: ForkName.deneb, count: 1}); const blockInput1 = cache.getByBlock({ block, @@ -479,21 +425,22 @@ describe("SeenBlockInputCache", async () => { seenTimestampSec: Date.now(), }); const blockInput2 = cache.getByBlob({ - blobSidecar, + blobSidecar: blobSidecars[0], blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); expect(blockInput1).toBe(blockInput2); - expect(blockInput2.getBlobs()[0]).toBe(blobSidecar); + expect(blockInput2.getBlobs()[0]).toBe(blobSidecars[0]); }); + it("should not throw for a BlockInput with an existing blob", () => { - const {rootHex, blobSidecar} = buildBlockAndBlobTestSet(ForkName.electra); + const {rootHex, blobSidecars} = generateBlockWithBlobSidecars({forkName: ForkName.deneb, count: 1}); expect(cache.get(rootHex)).toBeUndefined(); const blockInput = cache.getByBlob({ - blobSidecar, + 
blobSidecar: blobSidecars[0], blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), @@ -501,7 +448,7 @@ describe("SeenBlockInputCache", async () => { expect(cache.get(rootHex)).toBe(blockInput); expect(() => blockInput.addBlob({ - blobSidecar, + blobSidecar: blobSidecars[0], source: BlockInputSource.gossip, seenTimestampSec: Date.now(), blockRootHex: rootHex, @@ -509,20 +456,21 @@ describe("SeenBlockInputCache", async () => { ).toThrow(); expect(() => cache.getByBlob({ - blobSidecar, + blobSidecar: blobSidecars[0], blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }) ).not.toThrow(); }); + it("should throw for an existing blob with opts.throwGossipErrorIfAlreadyKnown", () => { - const {rootHex, blobSidecar} = buildBlockAndBlobTestSet(ForkName.electra); + const {rootHex, blobSidecars} = generateBlockWithBlobSidecars({forkName: ForkName.deneb, count: 1}); expect(cache.get(rootHex)).toBeUndefined(); const blockInput = cache.getByBlob( { - blobSidecar, + blobSidecar: blobSidecars[0], blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), @@ -533,7 +481,7 @@ describe("SeenBlockInputCache", async () => { expect(() => cache.getByBlob( { - blobSidecar, + blobSidecar: blobSidecars[0], blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), @@ -543,6 +491,7 @@ describe("SeenBlockInputCache", async () => { ).toThrow(); }); }); + // describe("getByColumn()", () => { // it("should return a new BlockInput for a new block root", () => { // const {rootHex, blobSidecar} = buildBlockAndBlobTestSet(ForkName.electra); diff --git a/packages/beacon-node/test/unit/sync/unknownBlock.test.ts b/packages/beacon-node/test/unit/sync/unknownBlock.test.ts index a97db3e5c626..4a902834076e 100644 --- a/packages/beacon-node/test/unit/sync/unknownBlock.test.ts +++ b/packages/beacon-node/test/unit/sync/unknownBlock.test.ts @@ -292,7 +292,7 @@ describe("UnknownBlockSync", () => { for (const {actions, expected} of testCases) { const testName = actions.map((action) => (action ? 
"subscribe" : "unsubscribe")).join(" - "); it(testName, () => { - const events = network.events as EventEmitter; + const events = chain.emitter as EventEmitter; service = new BlockInputSync(minimalConfig, network, chain, logger, null, defaultSyncOptions); for (const action of actions) { if (action) { @@ -389,7 +389,9 @@ describe("UnknownBlockPeerBalancer", async () => { custodyColumns: custodyConfig.custodyColumns, sampledColumns: custodyConfig.sampledColumns, }); - for (const sidecar of columnSidecars.slice(1)) { + + // test cases rely on first 2 columns being known, the rest unknown + for (const sidecar of columnSidecars.slice(0, 2)) { blockInput.addColumn({ columnSidecar: sidecar, blockRootHex: rootHex, diff --git a/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts b/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts index c6e454bc39b1..6dd5d2c34013 100644 --- a/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts +++ b/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts @@ -6,13 +6,13 @@ import {BlobAndProof} from "@lodestar/types/lib/deneb/types.js"; import {prettyBytes} from "@lodestar/utils"; import {afterAll, afterEach, beforeAll, beforeEach, describe, expect, it, vi} from "vitest"; import {BlobMeta, MissingColumnMeta} from "../../../../src/chain/blocks/blockInput/types.js"; +import {BlobSidecarValidationError} from "../../../../src/chain/errors/blobSidecarError.js"; import {validateBlockBlobSidecars} from "../../../../src/chain/validation/blobSidecar.js"; import {validateBlockDataColumnSidecars} from "../../../../src/chain/validation/dataColumnSidecar.js"; import {IExecutionEngine} from "../../../../src/execution/index.js"; -import {INetwork, prettyPrintPeerIdStr} from "../../../../src/network/index.js"; +import {INetwork} from "../../../../src/network/index.js"; import { DownloadByRootError, - DownloadByRootErrorCode, fetchAndValidateBlobs, fetchAndValidateBlock, fetchAndValidateColumns, @@ -34,7 +34,6 @@ import { describe("downloadByRoot.ts", () => { const peerIdStr = "1234567890abcdef1234567890abcdef"; - const prettyPeerIdStr = prettyPrintPeerIdStr(peerIdStr); let network: INetwork; let executionEngine: IExecutionEngine; @@ -67,19 +66,14 @@ describe("downloadByRoot.ts", () => { sendBeaconBlocksByRoot: vi.fn(() => []), } as unknown as INetwork; - try { - await fetchAndValidateBlock({ + await expect( + fetchAndValidateBlock({ config, network, peerIdStr, blockRoot: capellaBlock.blockRoot, - }); - } catch (err) { - expect(err).toBeInstanceOf(DownloadByRootError); - expect((err as any).type.code).toEqual(DownloadByRootErrorCode.MISSING_BLOCK_RESPONSE); - expect((err as any).type.peer).toEqual(prettyPeerIdStr); - expect((err as any).type.blockRoot).toEqual(prettyBytes(capellaBlock.blockRoot)); - } + }) + ).rejects.toThrow(DownloadByRootError); }); it("should throw error when block root doesn't match requested root", async () => { @@ -88,21 +82,15 @@ describe("downloadByRoot.ts", () => { } as unknown as INetwork; const invalidRoot = randomBytes(ROOT_SIZE); - try { - await fetchAndValidateBlock({ + + await expect( + fetchAndValidateBlock({ config, network, peerIdStr, blockRoot: invalidRoot, - }); - } catch (err) { - expect(err).toBeInstanceOf(DownloadByRootError); - expect((err as any).message).toEqual("block does not match requested root"); - expect((err as any).type.code).toEqual(DownloadByRootErrorCode.MISMATCH_BLOCK_ROOT); - expect((err as any).type.peer).toEqual(prettyPeerIdStr); - expect((err as 
any).type.requestedBlockRoot).toEqual(prettyBytes(invalidRoot)); - expect((err as any).type.receivedBlockRoot).toEqual(prettyBytes(capellaBlock.blockRoot)); - } + }) + ).rejects.toThrow(DownloadByRootError); }); }); @@ -279,8 +267,8 @@ describe("downloadByRoot.ts", () => { const requestedBlockRoot = randomBytes(ROOT_SIZE); - try { - await fetchAndValidateBlobs({ + await expect( + fetchAndValidateBlobs({ config, network, executionEngine, @@ -289,16 +277,8 @@ describe("downloadByRoot.ts", () => { blockRoot: requestedBlockRoot, block: denebBlockWithBlobs.block, blobMeta, - }); - expect.fail("should have errored"); - } catch (err) { - expect(err).toBeInstanceOf(DownloadByRootError); - expect((err as any).type.code).toBe(DownloadByRootErrorCode.MISMATCH_BLOCK_ROOT); - expect((err as any).type.peer).toBe(prettyPeerIdStr); - expect((err as any).type.requestedBlockRoot).toBe(prettyBytes(requestedBlockRoot)); - expect((err as any).type.receivedBlockRoot).toBe(prettyBytes(denebBlockWithBlobs.blockRoot)); - expect((err as any).message).toEqual("blobSidecar header root did not match requested blockRoot for index=0"); - } + }) + ).rejects.toThrow(BlobSidecarValidationError); }); }); @@ -707,8 +687,11 @@ describe("downloadByRoot.ts", () => { expect(getBlobsMock).toHaveBeenCalledExactlyOnceWith(forkName, versionedHashes); expect(loggerMock.error).toHaveBeenCalledExactlyOnceWith( - `error building columnSidecars for blockRoot=${prettyBytes(fuluBlockWithColumns.blockRoot)} via getBlobsV2`, - {}, + "error building columnSidecars via getBlobsV2", + { + blockRoot: prettyBytes(fuluBlockWithColumns.blockRoot), + slot: fuluBlockWithColumns.block.message.slot, + }, rejectedError ); expect(sendDataColumnSidecarsByRootMock).toHaveBeenCalledExactlyOnceWith(peerIdStr, [ @@ -747,8 +730,8 @@ describe("downloadByRoot.ts", () => { getBlobs: getBlobsMock, } as unknown as IExecutionEngine; - try { - await fetchAndValidateColumns({ + await expect( + fetchAndValidateColumns({ config, network, executionEngine, @@ -760,14 +743,8 @@ describe("downloadByRoot.ts", () => { missing: [0, 1, 2, 3, 4, 5], versionedHashes, }, - }); - expect.fail("should have thrown error"); - } catch (err) { - expect(err).toBeInstanceOf(DownloadByRootError); - expect((err as any).type.code).toBe(DownloadByRootErrorCode.INVALID_INCLUSION_PROOF); - expect((err as any).type.peer).toBe(prettyPrintPeerIdStr(peerIdStr)); - expect((err as any).type.blockRoot).toBe(prettyBytes(fuluBlockWithColumns.blockRoot)); - } + }) + ).rejects.toThrow(DownloadByRootError); }); it("should handle error when publishing reconstructed columns", async () => { @@ -941,43 +918,7 @@ describe("downloadByRoot.ts", () => { expect(result.length).toEqual(NUMBER_OF_COLUMNS); // Verify the structure of the returned column sidecars - for (const [index, columnSidecar] of Object.entries(result)) { - expect(columnSidecar).toHaveProperty("column"); - expect(columnSidecar.column).toBeInstanceOf(Array); - columnSidecar.column.map((cell) => expect(cell).toBeInstanceOf(Uint8Array)); - expect(columnSidecar.column.length).toEqual(fuluBlockWithColumns.block.message.body.blobKzgCommitments.length); - - expect(columnSidecar).toHaveProperty("index"); - expect(columnSidecar.index).toBeTypeOf("number"); - expect(columnSidecar.index).toEqual(parseInt(index)); - - expect(columnSidecar).toHaveProperty("kzgCommitments"); - expect(columnSidecar.kzgCommitments).toBeInstanceOf(Array); - columnSidecar.kzgCommitments.map((c) => expect(c).toBeInstanceOf(Uint8Array)); - 
expect(columnSidecar.kzgCommitments.toString()).toEqual( - fuluBlockWithColumns.block.message.body.blobKzgCommitments.toString() - ); - - expect(columnSidecar).toHaveProperty("kzgProofs"); - expect(columnSidecar.kzgProofs).toBeInstanceOf(Array); - columnSidecar.kzgProofs.map((proof) => expect(proof).toBeInstanceOf(Uint8Array)); - expect(columnSidecar.kzgProofs.length).toEqual(columnSidecar.column.length); - - expect(columnSidecar).toHaveProperty("kzgCommitmentsInclusionProof"); - expect(columnSidecar.kzgCommitmentsInclusionProof).toBeInstanceOf(Array); - columnSidecar.kzgCommitmentsInclusionProof.map((proof) => expect(proof).toBeInstanceOf(Uint8Array)); - - // // Verify the signed block header matches the block - expect(columnSidecar).toHaveProperty("signedBlockHeader"); - expect(columnSidecar.signedBlockHeader.message.slot).toBe(fuluBlockWithColumns.block.message.slot); - expect(columnSidecar.signedBlockHeader.message.proposerIndex).toBe( - fuluBlockWithColumns.block.message.proposerIndex - ); - expect(columnSidecar.signedBlockHeader.message.parentRoot).toEqual( - fuluBlockWithColumns.block.message.parentRoot - ); - expect(columnSidecar.signedBlockHeader.message.stateRoot).toEqual(fuluBlockWithColumns.block.message.stateRoot); - + for (const [_, columnSidecar] of Object.entries(result)) { expect( validateBlockDataColumnSidecars( columnSidecar.signedBlockHeader.message.slot, @@ -985,7 +926,7 @@ describe("downloadByRoot.ts", () => { fuluBlockWithColumns.block.message.body.blobKzgCommitments.length, [columnSidecar] ) - ).toBeUndefined(); + ).resolves.toBeUndefined(); } }); }); @@ -1018,118 +959,4 @@ describe("downloadByRoot.ts", () => { expect(network.sendDataColumnSidecarsByRoot).toHaveBeenCalledWith(peerIdStr, [{blockRoot, columns: missing}]); }); }); - - describe("DownloadByRootError", () => { - const blockRoot = randomBytes(ROOT_SIZE); - - it("should create error with MISMATCH_BLOCK_ROOT code", () => { - const err = new DownloadByRootError({ - code: DownloadByRootErrorCode.MISMATCH_BLOCK_ROOT, - peer: peerIdStr, - requestedBlockRoot: prettyBytes(blockRoot), - receivedBlockRoot: prettyBytes(new Uint8Array(32).fill(1)), - }); - - expect(err as any).toBeInstanceOf(DownloadByRootError); - expect((err as any).type.code).toBe(DownloadByRootErrorCode.MISMATCH_BLOCK_ROOT); - expect((err as any).type.peer).toBe(peerIdStr); - expect((err as any).type.requestedBlockRoot).toBe(prettyBytes(blockRoot)); - expect((err as any).type.receivedBlockRoot).toBe(prettyBytes(new Uint8Array(32).fill(1))); - }); - - it("should create error with EXTRA_SIDECAR_RECEIVED code", () => { - const err = new DownloadByRootError({ - code: DownloadByRootErrorCode.EXTRA_SIDECAR_RECEIVED, - peer: peerIdStr, - blockRoot: prettyBytes(blockRoot), - invalidIndex: 5, - }); - - expect(err as any).toBeInstanceOf(DownloadByRootError); - expect((err as any).type.code).toBe(DownloadByRootErrorCode.EXTRA_SIDECAR_RECEIVED); - expect((err as any).type.peer).toBe(peerIdStr); - expect((err as any).type.blockRoot).toBe(prettyBytes(blockRoot)); - expect((err as any).type.invalidIndex).toBe(5); - }); - - it("should create error with INVALID_INCLUSION_PROOF code", () => { - const err = new DownloadByRootError({ - code: DownloadByRootErrorCode.INVALID_INCLUSION_PROOF, - peer: peerIdStr, - blockRoot: prettyBytes(blockRoot), - sidecarIndex: 2, - }); - - expect(err as any).toBeInstanceOf(DownloadByRootError); - expect((err as any).type.code).toBe(DownloadByRootErrorCode.INVALID_INCLUSION_PROOF); - expect((err as any).type.peer).toBe(peerIdStr); - 
expect((err as any).type.blockRoot).toBe(prettyBytes(blockRoot)); - expect((err as any).type.sidecarIndex).toBe(2); - }); - - it("should create error with INVALID_KZG_PROOF code", () => { - const err = new DownloadByRootError({ - code: DownloadByRootErrorCode.INVALID_KZG_PROOF, - peer: peerIdStr, - blockRoot: prettyBytes(blockRoot), - }); - - expect(err as any).toBeInstanceOf(DownloadByRootError); - expect((err as any).type.code).toBe(DownloadByRootErrorCode.INVALID_KZG_PROOF); - expect((err as any).type.peer).toBe(peerIdStr); - expect((err as any).type.blockRoot).toBe(prettyBytes(blockRoot)); - }); - - it("should create error with MISSING_BLOCK_RESPONSE code", () => { - const err = new DownloadByRootError({ - code: DownloadByRootErrorCode.MISSING_BLOCK_RESPONSE, - peer: peerIdStr, - blockRoot: prettyBytes(blockRoot), - }); - - expect(err as any).toBeInstanceOf(DownloadByRootError); - expect((err as any).type.code).toBe(DownloadByRootErrorCode.MISSING_BLOCK_RESPONSE); - expect((err as any).type.peer).toBe(peerIdStr); - expect((err as any).type.blockRoot).toBe(prettyBytes(blockRoot)); - }); - - it("should create error with MISSING_BLOB_RESPONSE code", () => { - const err = new DownloadByRootError({ - code: DownloadByRootErrorCode.MISSING_BLOB_RESPONSE, - peer: peerIdStr, - blockRoot: prettyBytes(blockRoot), - }); - - expect(err as any).toBeInstanceOf(DownloadByRootError); - expect((err as any).type.code).toBe(DownloadByRootErrorCode.MISSING_BLOB_RESPONSE); - expect((err as any).type.peer).toBe(peerIdStr); - expect((err as any).type.blockRoot).toBe(prettyBytes(blockRoot)); - }); - - it("should create error with MISSING_COLUMN_RESPONSE code", () => { - const err = new DownloadByRootError({ - code: DownloadByRootErrorCode.MISSING_COLUMN_RESPONSE, - peer: peerIdStr, - blockRoot: prettyBytes(blockRoot), - }); - - expect(err as any).toBeInstanceOf(DownloadByRootError); - expect((err as any).type.code).toBe(DownloadByRootErrorCode.MISSING_COLUMN_RESPONSE); - expect((err as any).type.peer).toBe(peerIdStr); - expect((err as any).type.blockRoot).toBe(prettyBytes(blockRoot)); - }); - - it("should include correct error details in error object", () => { - const errorData = { - code: DownloadByRootErrorCode.MISMATCH_BLOCK_ROOT, - peer: peerIdStr, - requestedBlockRoot: prettyBytes(blockRoot), - receivedBlockRoot: prettyBytes(new Uint8Array(32).fill(1)), - }; - const err = new DownloadByRootError(errorData as any); - - expect(err.type).toEqual(errorData); - expect(Object.keys(err.type)).toEqual(Object.keys(errorData)); - }); - }); }); From e2cf218689bf5d4d303f42bed1ae3c58de782d74 Mon Sep 17 00:00:00 2001 From: Cayman Date: Wed, 3 Sep 2025 16:04:40 -0400 Subject: [PATCH 112/173] chore: lint --- packages/beacon-node/test/utils/blocksAndData.ts | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/packages/beacon-node/test/utils/blocksAndData.ts b/packages/beacon-node/test/utils/blocksAndData.ts index ed60ca952e5a..c1e2463b60bb 100644 --- a/packages/beacon-node/test/utils/blocksAndData.ts +++ b/packages/beacon-node/test/utils/blocksAndData.ts @@ -216,7 +216,10 @@ export function generateBlock({ export function generateChainOfBlocks({ forkName, count, -}: {forkName: F; count: number}): BlockTestSet[] { +}: { + forkName: F; + count: number; +}): BlockTestSet[] { let parentRoot = Uint8Array.from(randomBytes(ROOT_SIZE)); const startSlot = slots[forkName]; const blocks: BlockTestSet[] = []; From 31407b0c32e36503de3b02a51d8b5dc0e420a74a Mon Sep 17 00:00:00 2001 From: Cayman Date: Wed, 3 Sep 2025 
16:42:37 -0400 Subject: [PATCH 113/173] chore: more cleanup --- .../src/sync/utils/downloadByRange.ts | 135 ++++++------------ 1 file changed, 41 insertions(+), 94 deletions(-) diff --git a/packages/beacon-node/src/sync/utils/downloadByRange.ts b/packages/beacon-node/src/sync/utils/downloadByRange.ts index 641039262603..63756eb48302 100644 --- a/packages/beacon-node/src/sync/utils/downloadByRange.ts +++ b/packages/beacon-node/src/sync/utils/downloadByRange.ts @@ -16,72 +16,6 @@ import {INetwork} from "../../network/index.js"; import {PeerIdStr} from "../../util/peerId.js"; import {DownloadByRootErrorCode} from "./downloadByRoot.js"; -/** - * WRITE_OPTIMIZATIONS_AND_HEURISTIC_SUGGESTIONS_HERE - * - * Architecture Analysis & Optimization Suggestions: - * - * 1. **Reduce Code Duplication** - * - cacheByRangeResponses has nearly identical logic repeated 3 times (blocks, blobs, columns) - * - Extract common caching pattern into a generic helper function - * - Use strategy pattern or polymorphism to handle type-specific operations - * - * 2. **Improve Error Handling Consistency** - * - Wrong chain errors only break loops but don't propagate properly - * - Consider returning error status alongside partial results for better upstream handling - * - Standardize peer reporting logic across all error cases - * - * 3. **Optimize Data Structure Usage** - * - updatedBatchBlocks Map could be pre-sized based on expected slot range - * - Consider using a more efficient data structure for slot-based lookups - * - Avoid multiple iterations over same data in validation functions - * - * 4. **Simplify Validation Flow** - * - validateResponses has complex conditional logic that could be streamlined - * - Consider builder pattern for constructing ValidatedResponses - * - Separate concerns: structural validation vs cryptographic validation - * - * 5. **Performance Improvements** - * - Parallel validation in validateBlobsByRangeResponse/validateColumnsByRangeResponse is good - * - Consider batching validation operations to reduce Promise overhead - * - Pre-allocate arrays where sizes are known (e.g., expectedBlobCount) - * - * 6. **Type Safety Enhancements** - * - DAType checking happens after operations in cacheByRangeResponses - * - Move type checks earlier to fail fast - * - Use discriminated unions for better type narrowing - * - * 7. **Memory Efficiency** - * - Avoid creating intermediate arrays in validation (e.g., blockBlobSidecars slice) - * - Use iterators where possible instead of array slicing - * - Consider streaming validation for large responses - * - * 8. **API Design Improvements** - * - Too many similar type definitions (ValidatedBlock, ValidatedBlobSidecars, etc.) - * - Consider generic ValidatedData type - * - Reduce number of exported types by using namespaces or modules - * - * 9. **Logging and Observability** - * - Add structured logging with correlation IDs for request tracking - * - Include metrics for validation performance - * - Log partial success scenarios more clearly - * - * 10. **Simplify getBlocksForDataValidation** - * - Complex slot filtering logic could be extracted - * - Consider using Set for duplicate detection instead of lastSlot tracking - * - Validate assumption that cached blocks come before current blocks - * - * 11. **Request Coordination** - * - requestByRange uses mutable variables with Promise.all - consider Promise.allSettled - * - Add timeout handling for network requests - * - Consider request prioritization based on sync type - * - * 12. 
**Validation Optimization** - * - validateBlockByRangeResponse computes blockRoot for all blocks even on failure - * - Consider lazy evaluation or early exit strategies - * - Cache fork type lookup instead of calling config.getForkTypes repeatedly - */ - export type DownloadByRangeRequests = { blocksRequest?: phase0.BeaconBlocksByRangeRequest; blobsRequest?: deneb.BlobSidecarsByRangeRequest; @@ -103,13 +37,6 @@ export type DownloadAndCacheByRangeProps = DownloadByRangeRequests & { batchBlocks?: IBlockInput[]; }; -export type DownloadAndCacheByRangeResults = { - blockInputs: IBlockInput[]; - numberOfBlocks: number; - numberOfBlobs: number; - numberOfColumns: number; -}; - export type CacheByRangeResponsesProps = { cache: SeenBlockInput; peerIdStr: string; @@ -252,7 +179,6 @@ export function cacheByRangeResponses({ export async function downloadByRange({ config, network, - logger, peerIdStr, batchBlocks, blocksRequest, @@ -269,17 +195,15 @@ export async function downloadByRange({ columnsRequest, }); } catch (err) { - logger.verbose("RangeSync *ByRange error", {}, err as Error); throw new DownloadByRangeError({ code: DownloadByRangeErrorCode.REQ_RESP_ERROR, - peerId: peerIdStr, - slotRange: buildSlotRangeString({blocksRequest, blobsRequest, columnsRequest}), + reason: (err as Error).message, + ...requestsLogMeta({blocksRequest, blobsRequest, columnsRequest}), }); } const validated = await validateResponses({ config, - peerIdStr, batchBlocks, blocksRequest, blobsRequest, @@ -357,7 +281,6 @@ export async function validateResponses({ }: DownloadByRangeRequests & DownloadByRangeResponses & { config: ChainForkConfig; - peerIdStr: string; batchBlocks?: IBlockInput[]; }): Promise { // Blocks are always required for blob/column validation @@ -367,7 +290,7 @@ export async function validateResponses({ throw new DownloadByRangeError( { code: DownloadByRangeErrorCode.MISSING_BLOCKS, - slotRange: buildSlotRangeString({blobsRequest, columnsRequest}), + ...requestsLogMeta({blobsRequest, columnsRequest}), }, "No blocks to validate data requests against" ); @@ -394,7 +317,7 @@ export async function validateResponses({ throw new DownloadByRangeError( { code: DownloadByRangeErrorCode.MISSING_BLOCKS, - slotRange: buildSlotRangeString({blobsRequest, columnsRequest}), + ...requestsLogMeta({blobsRequest, columnsRequest}), }, "No blocks in data request slot range to validate data response against" ); @@ -405,7 +328,7 @@ export async function validateResponses({ throw new DownloadByRangeError( { code: DownloadByRangeErrorCode.MISSING_BLOBS_RESPONSE, - slotRange: buildSlotRangeString({blobsRequest, columnsRequest}), + ...requestsLogMeta({blobsRequest, columnsRequest}), }, "No blobSidecars to validate against blobsRequest" ); @@ -419,7 +342,7 @@ export async function validateResponses({ throw new DownloadByRangeError( { code: DownloadByRangeErrorCode.MISSING_COLUMNS_RESPONSE, - slotRange: buildSlotRangeString({blobsRequest, columnsRequest}), + ...requestsLogMeta({blobsRequest, columnsRequest}), }, "No columnSidecars to check columnRequest against" ); @@ -707,14 +630,29 @@ export function getBlocksForDataValidation( return dataRequestBlocks; } -function buildSlotRangeString({blocksRequest, blobsRequest, columnsRequest}: DownloadByRangeRequests): string { - const startSlot = blocksRequest?.startSlot ?? blobsRequest?.startSlot ?? columnsRequest?.startSlot; - const count = blocksRequest?.count ?? blobsRequest?.count ?? 
columnsRequest?.count; - if (startSlot && count) { - return `${startSlot} - ${startSlot + count}`; +function requestsLogMeta({blocksRequest, blobsRequest, columnsRequest}: DownloadByRangeRequests) { + const logMeta: { + blockStartSlot?: number; + blockCount?: number; + blobStartSlot?: number; + blobCount?: number; + columnStartSlot?: number; + columnCount?: number; + } = {}; + if (blocksRequest) { + logMeta.blockStartSlot = blocksRequest.startSlot; + logMeta.blockCount = blocksRequest.count; + } + if (blobsRequest) { + logMeta.blobStartSlot = blobsRequest.startSlot; + logMeta.blobCount = blobsRequest.count; + } + if (columnsRequest) { + logMeta.columnStartSlot = columnsRequest.startSlot; + logMeta.columnCount = columnsRequest.count; + } + return logMeta; } - return "[error calculating slotRange]"; -} export enum DownloadByRangeErrorCode { MISSING_BLOCKS = "DOWNLOAD_BY_RANGE_ERROR_MISSING_BLOCKS", @@ -751,11 +689,15 @@ export type DownloadByRangeErrorType = | DownloadByRangeErrorCode.MISSING_BLOCKS | DownloadByRangeErrorCode.MISSING_BLOBS_RESPONSE | DownloadByRangeErrorCode.MISSING_COLUMNS_RESPONSE; - slotRange: string; + blockStartSlot?: number; + blockCount?: number; + blobStartSlot?: number; + blobCount?: number; + columnStartSlot?: number; + columnCount?: number; } | { code: DownloadByRootErrorCode.MISSING_BLOCK_RESPONSE; - peer: string; expectedCount: number; } | { @@ -767,8 +709,13 @@ export type DownloadByRangeErrorType = } | { code: DownloadByRangeErrorCode.REQ_RESP_ERROR; - peerId: string; - slotRange: string; + blockStartSlot?: number; + blockCount?: number; + blobStartSlot?: number; + blobCount?: number; + columnStartSlot?: number; + columnCount?: number; + reason: string; } | { code: DownloadByRangeErrorCode.PARENT_ROOT_MISMATCH; From f30d9a2f196dba09f82dee36654b8d96b32a49e5 Mon Sep 17 00:00:00 2001 From: Cayman Date: Wed, 3 Sep 2025 18:06:26 -0400 Subject: [PATCH 114/173] chore: fix lint --- .../src/sync/utils/downloadByRange.ts | 42 +++++++++---------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/packages/beacon-node/src/sync/utils/downloadByRange.ts b/packages/beacon-node/src/sync/utils/downloadByRange.ts index 63756eb48302..bfda5860ae90 100644 --- a/packages/beacon-node/src/sync/utils/downloadByRange.ts +++ b/packages/beacon-node/src/sync/utils/downloadByRange.ts @@ -631,28 +631,28 @@ export function getBlocksForDataValidation( } function requestsLogMeta({blocksRequest, blobsRequest, columnsRequest}: DownloadByRangeRequests) { - const logMeta: { - blockStartSlot?: number; - blockCount?: number; - blobStartSlot?: number; - blobCount?: number; - columnStartSlot?: number; - columnCount?: number; - } = {}; - if (blocksRequest) { - logMeta.blockStartSlot = blocksRequest.startSlot; - logMeta.blockCount = blocksRequest.count; - } - if (blobsRequest) { - logMeta.blobStartSlot = blobsRequest.startSlot; - logMeta.blobCount = blobsRequest.count; - } - if (columnsRequest) { - logMeta.columnStartSlot = columnsRequest.startSlot; - logMeta.columnCount = columnsRequest.count; - } - return logMeta; + const logMeta: { + blockStartSlot?: number; + blockCount?: number; + blobStartSlot?: number; + blobCount?: number; + columnStartSlot?: number; + columnCount?: number; + } = {}; + if (blocksRequest) { + logMeta.blockStartSlot = blocksRequest.startSlot; + logMeta.blockCount = blocksRequest.count; + } + if (blobsRequest) { + logMeta.blobStartSlot = blobsRequest.startSlot; + logMeta.blobCount = blobsRequest.count; } + if (columnsRequest) { + logMeta.columnStartSlot = 
columnsRequest.startSlot; + logMeta.columnCount = columnsRequest.count; + } + return logMeta; +} export enum DownloadByRangeErrorCode { MISSING_BLOCKS = "DOWNLOAD_BY_RANGE_ERROR_MISSING_BLOCKS", From 3943b36ea6dd676fb36e9bb9fd353713f2a677e7 Mon Sep 17 00:00:00 2001 From: Cayman Date: Wed, 3 Sep 2025 18:31:43 -0400 Subject: [PATCH 115/173] chore: add getBlobsV2 call to gossip block handler --- .../src/network/processor/gossipHandlers.ts | 3 + packages/beacon-node/src/util/execution.ts | 88 +++++++++++++++++++ 2 files changed, 91 insertions(+) create mode 100644 packages/beacon-node/src/util/execution.ts diff --git a/packages/beacon-node/src/network/processor/gossipHandlers.ts b/packages/beacon-node/src/network/processor/gossipHandlers.ts index 00e6ae5b8111..d85bd908d86b 100644 --- a/packages/beacon-node/src/network/processor/gossipHandlers.ts +++ b/packages/beacon-node/src/network/processor/gossipHandlers.ts @@ -66,6 +66,7 @@ import {sszDeserialize} from "../gossip/topic.js"; import {INetwork} from "../interface.js"; import {PeerAction} from "../peers/index.js"; import {AggregatorTracker} from "./aggregatorTracker.js"; +import {getDataColumnSidecarsFromExecution} from "../../util/execution.js"; /** * Gossip handler options as part of network options @@ -353,6 +354,8 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand peer: peerIdStr, source: BlockInputSource.gossip, }); + // immediately attempt fetch of data columns from execution engine + getDataColumnSidecarsFromExecution(config, chain.executionEngine, chain.emitter, blockInput, metrics); } else { metrics?.blockInputFetchStats.totalDataAvailableBlockInputs.inc(); metrics?.blockInputFetchStats.totalDataAvailableBlockInputBlobs.inc( diff --git a/packages/beacon-node/src/util/execution.ts b/packages/beacon-node/src/util/execution.ts new file mode 100644 index 000000000000..b8c1d478a763 --- /dev/null +++ b/packages/beacon-node/src/util/execution.ts @@ -0,0 +1,88 @@ +import {ChainForkConfig} from "@lodestar/config"; +import { + getCellsAndProofs, + getDataColumnSidecarsFromBlock, + getDataColumnSidecarsFromColumnSidecar, +} from "./dataColumns.js"; +import {IExecutionEngine} from "../execution/index.js"; +import {ChainEvent, ChainEventEmitter} from "../chain/emitter.js"; +import {BlockInputSource, IBlockInput} from "../chain/blocks/blockInput/types.js"; +import {Metrics} from "../metrics/index.js"; +import {fulu} from "@lodestar/types"; +import {isBlockInputColumns} from "../chain/blocks/blockInput/blockInput.js"; +import {ForkPostFulu} from "@lodestar/params"; + +export async function getDataColumnSidecarsFromExecution( + config: ChainForkConfig, + executionEngine: IExecutionEngine, + emitter: ChainEventEmitter, + blockInput: IBlockInput, + metrics: Metrics | null +): Promise { + // If its not a column block input, exit + if (!isBlockInputColumns(blockInput)) { + return false; + } + + // If already have all columns, exit + if (blockInput.hasAllData()) { + return true; + } + + const versionedHashes = blockInput.getVersionedHashes(); + + // If there are no blobs in this block, exit + if (versionedHashes.length === 0) { + return true; + } + + // Get blobs from execution engine + metrics?.peerDas.getBlobsV2Requests.inc(); + const timer = metrics?.peerDas.getBlobsV2RequestDuration.startTimer(); + const blobs = await executionEngine.getBlobs(blockInput.forkName as ForkPostFulu, versionedHashes); + timer?.(); + + // Execution engine was unable to find one or more blobs + if (blobs === null) { + return false; + } + 
metrics?.peerDas.getBlobsV2Responses.inc(); + + // Return if we received all data columns while waiting for getBlobs + if (blockInput.hasAllData()) { + return true; + } + + let dataColumnSidecars: fulu.DataColumnSidecars; + const cellsAndProofs = await getCellsAndProofs(blobs); + if (blockInput.hasBlock()) { + dataColumnSidecars = getDataColumnSidecarsFromBlock( + config, + blockInput.getBlock() as fulu.SignedBeaconBlock, + cellsAndProofs + ); + } else { + const firstSidecar = blockInput.getAllColumns()[0]; + dataColumnSidecars = getDataColumnSidecarsFromColumnSidecar(firstSidecar, cellsAndProofs); + } + + // Publish columns if and only if subscribed to them + const previouslyMissingColumns = blockInput.getMissingSampledColumnMeta().missing; + const sampledColumns = previouslyMissingColumns.map((columnIndex) => dataColumnSidecars[columnIndex]); + + // for columns that we already seen, it will be ignored through `ignoreDuplicatePublishError` gossip option + emitter.emit(ChainEvent.publishDataColumns, sampledColumns); + + // add all sampled columns to the block input, even if we didn't sample them + const seenTimestampSec = Date.now() / 1000; + for (const columnSidecar of sampledColumns) { + blockInput.addColumn( + {columnSidecar, blockRootHex: blockInput.blockRootHex, source: BlockInputSource.engine, seenTimestampSec}, + {throwOnDuplicateAdd: false} // columns may have been added while waiting + ); + } + + metrics?.dataColumns.bySource.inc({source: BlockInputSource.engine}, previouslyMissingColumns.length); + + return true; +} From be99499f79ecf84456ab83df5249969472bd8ff7 Mon Sep 17 00:00:00 2001 From: Cayman Date: Wed, 3 Sep 2025 18:36:21 -0400 Subject: [PATCH 116/173] chore: cleanup getDataColumnSidecarsFromExecution --- packages/beacon-node/src/util/execution.ts | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/packages/beacon-node/src/util/execution.ts b/packages/beacon-node/src/util/execution.ts index b8c1d478a763..a2735b062cb3 100644 --- a/packages/beacon-node/src/util/execution.ts +++ b/packages/beacon-node/src/util/execution.ts @@ -18,22 +18,22 @@ export async function getDataColumnSidecarsFromExecution( emitter: ChainEventEmitter, blockInput: IBlockInput, metrics: Metrics | null -): Promise { +): Promise { // If its not a column block input, exit if (!isBlockInputColumns(blockInput)) { - return false; + return; } // If already have all columns, exit if (blockInput.hasAllData()) { - return true; + return; } const versionedHashes = blockInput.getVersionedHashes(); // If there are no blobs in this block, exit if (versionedHashes.length === 0) { - return true; + return; } // Get blobs from execution engine @@ -44,13 +44,13 @@ export async function getDataColumnSidecarsFromExecution( // Execution engine was unable to find one or more blobs if (blobs === null) { - return false; + return; } metrics?.peerDas.getBlobsV2Responses.inc(); // Return if we received all data columns while waiting for getBlobs if (blockInput.hasAllData()) { - return true; + return; } let dataColumnSidecars: fulu.DataColumnSidecars; @@ -83,6 +83,4 @@ export async function getDataColumnSidecarsFromExecution( } metrics?.dataColumns.bySource.inc({source: BlockInputSource.engine}, previouslyMissingColumns.length); - - return true; } From 45baecd8141980e30ccc443d3a41260b9ff3ccc1 Mon Sep 17 00:00:00 2001 From: Cayman Date: Wed, 3 Sep 2025 18:54:57 -0400 Subject: [PATCH 117/173] chore: apply #8282-ish solution --- .../beacon-node/src/eth1/provider/utils.ts | 11 +- 
.../beacon-node/src/execution/engine/http.ts | 31 ++++- .../src/execution/engine/interface.ts | 4 +- .../beacon-node/src/execution/engine/types.ts | 55 +++++++- packages/beacon-node/src/util/execution.ts | 130 +++++++++++------- 5 files changed, 166 insertions(+), 65 deletions(-) diff --git a/packages/beacon-node/src/eth1/provider/utils.ts b/packages/beacon-node/src/eth1/provider/utils.ts index 39cb9d4b1849..9b3c88c2e8a6 100644 --- a/packages/beacon-node/src/eth1/provider/utils.ts +++ b/packages/beacon-node/src/eth1/provider/utils.ts @@ -1,5 +1,5 @@ import {RootHex} from "@lodestar/types"; -import {bigIntToBytes, bytesToBigInt, fromHex, toHex} from "@lodestar/utils"; +import {bigIntToBytes, bytesToBigInt, fromHex, fromHexInto, toHex} from "@lodestar/utils"; import {ErrorParseJson} from "./jsonRpcHttpClient.js"; /** QUANTITY as defined in ethereum execution layer JSON RPC https://eth.wiki/json-rpc/API */ @@ -118,6 +118,15 @@ export function dataToBytes(hex: DATA, fixedLength: number | null): Uint8Array { } } +/** + * Convert DATA into a preallocated buffer + * fromHexInto will throw if buffer's length is not the same as the decoded hex length + */ +export function dataIntoBytes(hex: DATA, buffer: Uint8Array): Uint8Array { + fromHexInto(hex, buffer); + return buffer; +} + /** * DATA as defined in ethereum execution layer JSON RPC https://eth.wiki/json-rpc/API */ diff --git a/packages/beacon-node/src/execution/engine/http.ts b/packages/beacon-node/src/execution/engine/http.ts index ec6cd07c0ac2..33a1b66c39ab 100644 --- a/packages/beacon-node/src/execution/engine/http.ts +++ b/packages/beacon-node/src/execution/engine/http.ts @@ -29,12 +29,14 @@ import { } from "./interface.js"; import {PayloadIdCache} from "./payloadIdCache.js"; import { + BLOB_AND_PROOF_V2_RPC_BYTES, EngineApiRpcParamTypes, EngineApiRpcReturnTypes, ExecutionPayloadBody, assertReqSizeLimit, deserializeBlobAndProofs, deserializeBlobAndProofsV2, + deserializeBlobAndProofsV2IntoBytes, deserializeExecutionPayloadBody, parseExecutionPayload, serializeBeaconBlockRoot, @@ -489,8 +491,8 @@ export class ExecutionEngineHttp implements IExecutionEngine { return response.map(deserializeExecutionPayloadBody); } - async getBlobs(fork: ForkPostFulu, versionedHashes: VersionedHashes): Promise; - async getBlobs(fork: ForkPreFulu, versionedHashes: VersionedHashes): Promise<(BlobAndProof | null)[]>; + async getBlobs(fork: ForkPostFulu, versionedHashes: VersionedHashes, buffers?: Uint8Array[]): Promise; + async getBlobs(fork: ForkPreFulu, versionedHashes: VersionedHashes, buffers?: Uint8Array[]): Promise<(BlobAndProof | null)[]>; async getBlobs( fork: ForkName, versionedHashes: VersionedHashes @@ -526,7 +528,19 @@ export class ExecutionEngineHttp implements IExecutionEngine { return response.map(deserializeBlobAndProofs); } - private async getBlobsV2(versionedHashesHex: string[]) { + private async getBlobsV2(versionedHashesHex: string[], buffers?: Uint8Array[]) { + if (buffers) { + if (buffers.length !== versionedHashesHex.length) { + throw Error(`Invalid buffers length=${buffers.length} versionedHashes=${versionedHashesHex.length}`); + } + + for (const [i, buffer] of buffers.entries()) { + if (buffer.length !== BLOB_AND_PROOF_V2_RPC_BYTES) { + throw Error(`Invalid buffer[${i}] length=${buffer.length} expected=${BLOB_AND_PROOF_V2_RPC_BYTES}`); + } + } + } + const response = await this.rpc.fetchWithRetries< EngineApiRpcReturnTypes["engine_getBlobsV2"], EngineApiRpcParamTypes["engine_getBlobsV2"] @@ -547,7 +561,16 @@ export class 
ExecutionEngineHttp implements IExecutionEngine { throw Error(error); } - return !response ? null : response.map(deserializeBlobAndProofsV2); + if (response == null) { + return null; + } + + if (buffers) { + // getBlobsV2() is designed to called once per slot so we expect to have buffers + return response.map((data, i) => deserializeBlobAndProofsV2IntoBytes(data, buffers[i])); + } + + return response.map(deserializeBlobAndProofsV2); } private async getClientVersion(clientVersion: ClientVersion): Promise { diff --git a/packages/beacon-node/src/execution/engine/interface.ts b/packages/beacon-node/src/execution/engine/interface.ts index 5f8527c094f2..76f262610b06 100644 --- a/packages/beacon-node/src/execution/engine/interface.ts +++ b/packages/beacon-node/src/execution/engine/interface.ts @@ -187,6 +187,6 @@ export interface IExecutionEngine { getPayloadBodiesByRange(fork: ForkName, start: number, count: number): Promise<(ExecutionPayloadBody | null)[]>; - getBlobs(fork: ForkPostFulu, versionedHashes: VersionedHashes): Promise; - getBlobs(fork: ForkPreFulu, versionedHashes: VersionedHashes): Promise<(BlobAndProof | null)[]>; + getBlobs(fork: ForkPostFulu, versionedHashes: VersionedHashes, buffers?: Uint8Array[]): Promise; + getBlobs(fork: ForkPreFulu, versionedHashes: VersionedHashes, buffers?: Uint8Array[]): Promise<(BlobAndProof | null)[]>; } diff --git a/packages/beacon-node/src/execution/engine/types.ts b/packages/beacon-node/src/execution/engine/types.ts index 561a0d25e52b..a8396410964b 100644 --- a/packages/beacon-node/src/execution/engine/types.ts +++ b/packages/beacon-node/src/execution/engine/types.ts @@ -1,6 +1,7 @@ import { BYTES_PER_FIELD_ELEMENT, BYTES_PER_LOGS_BLOOM, + CELLS_PER_EXT_BLOB, CONSOLIDATION_REQUEST_TYPE, DEPOSIT_REQUEST_TYPE, FIELD_ELEMENTS_PER_BLOB, @@ -27,6 +28,7 @@ import { DATA, QUANTITY, bytesToData, + dataIntoBytes, dataToBytes, numToQuantity, quantityToBigint, @@ -211,6 +213,11 @@ export type BlobAndProofV2Rpc = { proofs: DATA[]; }; +const BLOB_BYTES = BYTES_PER_FIELD_ELEMENT * FIELD_ELEMENTS_PER_BLOB; +const PROOF_BYTES = 48; + +export const BLOB_AND_PROOF_V2_RPC_BYTES = BLOB_BYTES + PROOF_BYTES * CELLS_PER_EXT_BLOB; + export type VersionedHashesRpc = DATA[]; export type PayloadAttributesRpc = { @@ -403,8 +410,8 @@ export function parseBlobsBundle(data: BlobsBundleRpc): BlobsBundle { return { // As of Nov 17th 2022 according to Dan's tests Geth returns null if no blobs in block commitments: (data.commitments ?? []).map((kzg) => dataToBytes(kzg, 48)), - blobs: (data.blobs ?? []).map((blob) => dataToBytes(blob, BYTES_PER_FIELD_ELEMENT * FIELD_ELEMENTS_PER_BLOB)), - proofs: (data.proofs ?? []).map((kzg) => dataToBytes(kzg, 48)), + blobs: (data.blobs ?? []).map((blob) => dataToBytes(blob, BLOB_BYTES)), + proofs: (data.proofs ?? []).map((kzg) => dataToBytes(kzg, PROOF_BYTES)), }; } @@ -579,16 +586,51 @@ export function serializeExecutionPayloadBody(data: ExecutionPayloadBody | null) export function deserializeBlobAndProofs(data: BlobAndProofRpc | null): BlobAndProof | null { return data ? 
{
-        blob: dataToBytes(data.blob, BYTES_PER_FIELD_ELEMENT * FIELD_ELEMENTS_PER_BLOB),
-        proof: dataToBytes(data.proof, 48),
+        blob: dataToBytes(data.blob, BLOB_BYTES),
+        proof: dataToBytes(data.proof, PROOF_BYTES),
       }
     : null;
 }
 
 export function deserializeBlobAndProofsV2(data: BlobAndProofV2Rpc): BlobAndProofV2 {
   return {
-    blob: dataToBytes(data.blob, BYTES_PER_FIELD_ELEMENT * FIELD_ELEMENTS_PER_BLOB),
-    proofs: data.proofs.map((proof) => dataToBytes(proof, 48)),
+    blob: dataToBytes(data.blob, BLOB_BYTES),
+    proofs: data.proofs.map((proof) => dataToBytes(proof, PROOF_BYTES)),
+  };
+}
+
+/**
+ * The same as deserializeBlobAndProofsV2 but using preallocated buffers since BlobAndProofV2Rpc is fixed size
+ */
+export function deserializeBlobAndProofsV2IntoBytes(data: BlobAndProofV2Rpc, buffer: Uint8Array): BlobAndProofV2 {
+  if (buffer.length !== BLOB_AND_PROOF_V2_RPC_BYTES) {
+    throw Error(
+      `Invalid buffer length ${buffer.length}, expected ${BLOB_AND_PROOF_V2_RPC_BYTES} to hold BlobAndProofV2Rpc`
+    );
+  }
+
+  // https://github.com/ethereum/execution-apis/blob/main/src/engine/osaka.md#blobandproofv2
+  // proofs MUST contain exactly CELLS_PER_EXT_BLOB cell proofs.
+  if (data.proofs.length !== CELLS_PER_EXT_BLOB) {
+    throw Error(`Invalid proofs length ${data.proofs.length}, expected ${CELLS_PER_EXT_BLOB}`);
+  }
+
+  const blob = dataIntoBytes(data.blob, buffer.subarray(0, BLOB_BYTES));
+  const proofs: Uint8Array[] = [];
+  for (let i = 0; i < CELLS_PER_EXT_BLOB; i++) {
+    const proof = dataIntoBytes(
+      data.proofs[i],
+      buffer.subarray(BLOB_BYTES + i * PROOF_BYTES, BLOB_BYTES + (i + 1) * PROOF_BYTES)
+    );
+    if (proof.length !== PROOF_BYTES) {
+      throw Error(`Invalid proof length ${proof.length}, expected ${PROOF_BYTES}`);
+    }
+    proofs.push(proof);
+  }
+
+  return {
+    blob,
+    proofs,
   };
 }
 
@@ -598,3 +640,4 @@ export function assertReqSizeLimit(blockHashesReqCount: number, count: number):
   }
   return;
 }
+
diff --git a/packages/beacon-node/src/util/execution.ts b/packages/beacon-node/src/util/execution.ts
index a2735b062cb3..8920f5a69399 100644
--- a/packages/beacon-node/src/util/execution.ts
+++ b/packages/beacon-node/src/util/execution.ts
@@ -11,7 +11,17 @@ import {Metrics} from "../metrics/index.js";
 import {fulu} from "@lodestar/types";
 import {isBlockInputColumns} from "../chain/blocks/blockInput/blockInput.js";
 import {ForkPostFulu} from "@lodestar/params";
+import { BLOB_AND_PROOF_V2_RPC_BYTES } from "../execution/engine/types.js";
 
+let running = false;
+// Preallocate buffers for getBlobsV2 RPC calls
+// See https://github.com/ChainSafe/lodestar/pull/8282 for context
+const blobAndProofBuffers: Uint8Array[] = [];
+
+/**
+ * Post-fulu, call getBlobsV2 on the execution engine once per slot, whenever we see either a beacon_block or data_column_sidecar gossip message
+ * Only a single call can be in-flight at a time; subsequent calls are ignored
+ */
 export async function getDataColumnSidecarsFromExecution(
   config: ChainForkConfig,
   executionEngine: IExecutionEngine,
   emitter: ChainEventEmitter,
   blockInput: IBlockInput,
   metrics: Metrics | null
 ): Promise<void> {
-  // If it's not a column block input, exit
-  if (!isBlockInputColumns(blockInput)) {
-    return;
-  }
+  try {
+    if (running) {
+      return;
+    }
+    running = true;
 
-  // If we already have all columns, exit
-  if (blockInput.hasAllData()) {
-    return;
-  }
+    // If it's not a column block input, exit
+    if (!isBlockInputColumns(blockInput)) {
+      return;
+    }
 
-  const versionedHashes = blockInput.getVersionedHashes();
+    // If we already have all columns, exit
+    if (blockInput.hasAllData()) {
+      return;
+    }
 
-  // If there are no blobs in this block, exit
-  if (versionedHashes.length === 0) {
-    return;
-  }
+    const versionedHashes = blockInput.getVersionedHashes();
 
-  // Get blobs from execution engine
-  metrics?.peerDas.getBlobsV2Requests.inc();
-  const timer = metrics?.peerDas.getBlobsV2RequestDuration.startTimer();
-  const blobs = await executionEngine.getBlobs(blockInput.forkName as ForkPostFulu, versionedHashes);
-  timer?.();
+    // If there are no blobs in this block, exit
+    if (versionedHashes.length === 0) {
+      return;
+    }
 
-  // Execution engine was unable to find one or more blobs
-  if (blobs === null) {
-    return;
+    // Get blobs from execution engine
+    metrics?.peerDas.getBlobsV2Requests.inc();
+    const timer = metrics?.peerDas.getBlobsV2RequestDuration.startTimer();
+    if (blobAndProofBuffers) {
+      for (let i = 0; i < versionedHashes.length; i++) {
+        if (blobAndProofBuffers[i] === undefined) {
+          blobAndProofBuffers[i] = new Uint8Array(BLOB_AND_PROOF_V2_RPC_BYTES);
+        }
+      }
   }
-  metrics?.peerDas.getBlobsV2Responses.inc();
+    const blobs = await executionEngine.getBlobs(blockInput.forkName as ForkPostFulu, versionedHashes, blobAndProofBuffers);
+    timer?.();
 
-  // Return if we received all data columns while waiting for getBlobs
-  if (blockInput.hasAllData()) {
-    return;
-  }
+    // Execution engine was unable to find one or more blobs
+    if (blobs === null) {
+      return;
+    }
+    metrics?.peerDas.getBlobsV2Responses.inc();
 
-  let dataColumnSidecars: fulu.DataColumnSidecars;
-  const cellsAndProofs = await getCellsAndProofs(blobs);
-  if (blockInput.hasBlock()) {
-    dataColumnSidecars = getDataColumnSidecarsFromBlock(
-      config,
-      blockInput.getBlock() as fulu.SignedBeaconBlock,
-      cellsAndProofs
-    );
-  } else {
-    const firstSidecar = blockInput.getAllColumns()[0];
-    dataColumnSidecars = getDataColumnSidecarsFromColumnSidecar(firstSidecar, cellsAndProofs);
-  }
+    // Return if we received all data columns while waiting for getBlobs
+    if (blockInput.hasAllData()) {
+      return;
+    }
 
-  // Publish columns if and only if subscribed to them
-  const previouslyMissingColumns = blockInput.getMissingSampledColumnMeta().missing;
-  const sampledColumns = previouslyMissingColumns.map((columnIndex) => dataColumnSidecars[columnIndex]);
+    let dataColumnSidecars: fulu.DataColumnSidecars;
+    const cellsAndProofs = await getCellsAndProofs(blobs);
+    if (blockInput.hasBlock()) {
+      dataColumnSidecars = getDataColumnSidecarsFromBlock(
+        config,
+        blockInput.getBlock() as fulu.SignedBeaconBlock,
+        cellsAndProofs
+      );
+    } else {
+      const firstSidecar = blockInput.getAllColumns()[0];
+      dataColumnSidecars = getDataColumnSidecarsFromColumnSidecar(firstSidecar, cellsAndProofs);
+    }
 
-  // columns that we have already seen will be ignored through the `ignoreDuplicatePublishError` gossip option
-  emitter.emit(ChainEvent.publishDataColumns, sampledColumns);
+    // Publish columns if and only if subscribed to them
+    const previouslyMissingColumns = blockInput.getMissingSampledColumnMeta().missing;
+    const sampledColumns = previouslyMissingColumns.map((columnIndex) => dataColumnSidecars[columnIndex]);
 
-  // add all sampled columns to the block input, even if we didn't sample them
-  const seenTimestampSec = Date.now() / 1000;
-  for (const columnSidecar of sampledColumns) {
-    blockInput.addColumn(
-      {columnSidecar, blockRootHex: blockInput.blockRootHex, source: BlockInputSource.engine, seenTimestampSec},
-      {throwOnDuplicateAdd: false} // columns may have been added while waiting
-    );
-  }
+    // columns that we have already seen will be ignored through the `ignoreDuplicatePublishError` gossip option
+    emitter.emit(ChainEvent.publishDataColumns, sampledColumns);
 
-  metrics?.dataColumns.bySource.inc({source: BlockInputSource.engine}, previouslyMissingColumns.length);
+    // add all sampled columns to the block input, even if we didn't sample them
+    const seenTimestampSec = Date.now() / 1000;
+    for (const columnSidecar of sampledColumns) {
+      blockInput.addColumn(
+        {columnSidecar, blockRootHex: blockInput.blockRootHex, source: BlockInputSource.engine, seenTimestampSec},
+        {throwOnDuplicateAdd: false} // columns may have been added while waiting
+      );
+    }
+
+    metrics?.dataColumns.bySource.inc({source: BlockInputSource.engine}, previouslyMissingColumns.length);
+  } finally {
+    running = false;
+  }
 }
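For reference, each preallocated entry above is one flat buffer holding a blob followed by its `CELLS_PER_EXT_BLOB` cell proofs. A sketch of the layout arithmetic (constant names match the patch; the concrete numbers assume the mainnet preset):

```ts
// One BlobAndProofV2 in a flat preallocated buffer (mainnet preset assumed):
//   bytes [0, BLOB_BYTES)                             -> blob
//   bytes [BLOB_BYTES + i*PROOF_BYTES, + PROOF_BYTES) -> proof of cell i
const BYTES_PER_FIELD_ELEMENT = 32;
const FIELD_ELEMENTS_PER_BLOB = 4096;
const CELLS_PER_EXT_BLOB = 128;

const BLOB_BYTES = BYTES_PER_FIELD_ELEMENT * FIELD_ELEMENTS_PER_BLOB; // 131072
const PROOF_BYTES = 48;
const BLOB_AND_PROOF_V2_RPC_BYTES = BLOB_BYTES + PROOF_BYTES * CELLS_PER_EXT_BLOB; // 131072 + 6144 = 137216

// subarray views are zero-copy, which is the point of the preallocation
function proofSlice(buffer: Uint8Array, i: number): Uint8Array {
  return buffer.subarray(BLOB_BYTES + i * PROOF_BYTES, BLOB_BYTES + (i + 1) * PROOF_BYTES);
}
```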
From 09f004428b0ac20d2d4f89f9523ad489f3fbbc0a Mon Sep 17 00:00:00 2001
From: Cayman
Date: Wed, 3 Sep 2025 18:55:24 -0400
Subject: [PATCH 118/173] chore: fetch data columns from execution upon data
 column sidecar validation

---
 packages/beacon-node/src/network/processor/gossipHandlers.ts | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/packages/beacon-node/src/network/processor/gossipHandlers.ts b/packages/beacon-node/src/network/processor/gossipHandlers.ts
index d85bd908d86b..24ec21677988 100644
--- a/packages/beacon-node/src/network/processor/gossipHandlers.ts
+++ b/packages/beacon-node/src/network/processor/gossipHandlers.ts
@@ -525,6 +525,8 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand
             source: BlockInputSource.gossip,
           });
         });
+        // immediately attempt fetch of data columns from execution engine
+        getDataColumnSidecarsFromExecution(config, chain.executionEngine, chain.emitter, blockInput, metrics);
       }
     },
 
From cc933e5eed04937fbab2f51b90789e391e37c514 Mon Sep 17 00:00:00 2001
From: twoeths <10568965+twoeths@users.noreply.github.com>
Date: Fri, 5 Sep 2025 09:54:26 +0700
Subject: [PATCH 119/173] fix: throw on duplicate add (#8326)

**Motivation**

- incorrect use of flag

**Description**

Closes #8325

Co-authored-by: Tuyen Nguyen
---
 .../beacon-node/src/chain/blocks/blockInput/blockInput.ts | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts b/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts
index 73dcaab3e27c..d6fae7d2ef6f 100644
--- a/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts
+++ b/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts
@@ -429,7 +429,7 @@ export class BlockInputBlobs extends AbstractBlockInput

Date: Fri, 5 Sep 2025 11:18:46 +0700
Subject: [PATCH 120/173] fix: correct validateBlockDataColumnSidecars result
 mapping (#8328)

**Motivation**

- fix error:

```
Sep-05 02:41:30.219[sync] verbose: Batch download error id=Finalized-0, startEpoch=9778, status=Downloading, peer=16...BQ8fLQ, code=BLOCK_INPUT_ERROR_MISMATCHED_ROOT_HEX, blockInputRoot=0xd82f91624089fd14bf59552d3c921706487efa390c6126c983755b4ba1b6c642, mismatchedRoot=0x10a38f5d4ac083fdbfff507a2e1dfc5777685cca22b2af983676408429449313, source=req_resp_by_range, peerId=16Uiu2HAmFsDPzqKFfMuiDTBiRBNALSao7RF2p56TaD4FneBQ8fLQ
Error: Column BeaconBlockHeader blockRootHex does not match BlockInput.blockRootHex
    at BlockInputColumns.addColumn (file:///usr/src/lodestar/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts:734:13)
    at cacheByRangeResponses (file:///usr/src/lodestar/packages/beacon-node/src/sync/utils/downloadByRange.ts:163:16)
    at SyncChain.RangeSync.downloadByRange (file:///usr/src/lodestar/packages/beacon-node/src/sync/range/range.ts:212:20)
    at wrapError (file:///usr/src/lodestar/packages/beacon-node/src/util/wrapError.ts:18:32)
    at SyncChain.sendBatch (file:///usr/src/lodestar/packages/beacon-node/src/sync/range/chain.ts:451:19)
```

**Description**

- correct the mapping of the result (the wrong variable was used)

closes #8327

Co-authored-by: Tuyen Nguyen
---
 packages/beacon-node/src/sync/utils/downloadByRange.ts | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/packages/beacon-node/src/sync/utils/downloadByRange.ts b/packages/beacon-node/src/sync/utils/downloadByRange.ts
index bfda5860ae90..20bea11bad6c 100644
--- a/packages/beacon-node/src/sync/utils/downloadByRange.ts
+++ b/packages/beacon-node/src/sync/utils/downloadByRange.ts
@@ -583,7 +583,7 @@ export async function validateColumnsByRangeResponse(
         blockRoot,
         blockKzgCommitments.length,
         blockColumnSidecars
-      ).then(() => ({blockRoot, columnSidecars}))
+      ).then(() => ({blockRoot, columnSidecars: blockColumnSidecars}))
     );
   }
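The one-character root cause is easy to reproduce in isolation: inside a per-block loop, the `.then` callback must close over the per-block slice, not the aggregate that is also in scope. A self-contained sketch of the pitfall (stand-in types; variable names follow the diff):

```ts
// Illustration of the captured-variable bug fixed above. Both the aggregate
// `columnSidecars` and the per-block `blockColumnSidecars` are in scope inside
// the loop; the shorthand property silently grabbed the wrong one.
async function mapPerBlock(
  blocks: string[],
  columnSidecars: number[][] // aggregate response, one entry per block
): Promise<{blockRoot: string; columnSidecars: number[]}[]> {
  const promises: Promise<{blockRoot: string; columnSidecars: number[]}>[] = [];
  for (let i = 0; i < blocks.length; i++) {
    const blockRoot = blocks[i];
    const blockColumnSidecars = columnSidecars[i]; // per-block slice
    promises.push(
      // correct: explicitly map the per-block slice
      Promise.resolve().then(() => ({blockRoot, columnSidecars: blockColumnSidecars}))
      // buggy: `.then(() => ({blockRoot, columnSidecars}))` attaches the whole
      // aggregate to every block, which is what produced MISMATCHED_ROOT_HEX
    );
  }
  return Promise.all(promises);
}
```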
From 20292ec5ed3ec4f2b67a12485d8a442cf3a5644e Mon Sep 17 00:00:00 2001
From: twoeths <10568965+twoeths@users.noreply.github.com>
Date: Fri, 5 Sep 2025 17:30:37 +0700
Subject: [PATCH 121/173] fix: correct leveldb metrics (#8335)

**Motivation**

- misuse of `dbRead*` vs `dbWrite*`, which leads to incorrect metrics

**Description**

- this PR should ideally target the `unstable` branch; leaving it here so that we can investigate #8334 more easily
- we will merge this to unstable later anyway

part of #8334

Co-authored-by: Tuyen Nguyen
---
 packages/db/src/controller/level.ts | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/packages/db/src/controller/level.ts b/packages/db/src/controller/level.ts
index 14c6c2190d03..a28658f90907 100644
--- a/packages/db/src/controller/level.ts
+++ b/packages/db/src/controller/level.ts
@@ -189,9 +189,9 @@ export class LevelDbController implements DatabaseController<Uint8Array, Uint8Array> {
   private async metricsAll<T>(promise: Promise<T[]>, bucket: string): Promise<T[]> {
-    this.metrics?.dbWriteReq.inc({bucket}, 1);
+    this.metrics?.dbReadReq.inc({bucket}, 1);
     const items = await promise;
-    this.metrics?.dbWriteItems.inc({bucket}, items.length);
+    this.metrics?.dbReadItems.inc({bucket}, items.length);
     return items;
   }
 
@@ -201,7 +201,7 @@ export class LevelDbController implements DatabaseController<Uint8Array, Uint8Array> {
     getValue: (item: T) => K,
     bucket: string
   ): AsyncIterable<K> {
-    this.metrics?.dbWriteReq.inc({bucket}, 1);
+    this.metrics?.dbReadReq.inc({bucket}, 1);
 
    let itemsRead = 0;
 
@@ -212,7 +212,7 @@ export class LevelDbController implements DatabaseController<Uint8Array, Uint8Array> {
-      this.metrics?.dbWriteItems.inc({bucket}, itemsRead);
+      this.metrics?.dbReadItems.inc({bucket}, itemsRead);
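The invariant behind the rename is simple: read paths touch only the `dbRead*` counters, write paths only `dbWrite*`. A reduced sketch of the corrected read wrapper (the `Counter` shape is an assumption standing in for the prom-client metrics used by Lodestar):

```ts
// Reduced sketch: a read helper must only increment dbRead* counters.
type Counter = {inc: (labels: {bucket: string}, value: number) => void};
type DbReadMetrics = {dbReadReq: Counter; dbReadItems: Counter};

async function metricsAll<T>(promise: Promise<T[]>, bucket: string, metrics: DbReadMetrics | null): Promise<T[]> {
  metrics?.dbReadReq.inc({bucket}, 1); // one request...
  const items = await promise;
  metrics?.dbReadItems.inc({bucket}, items.length); // ...may return many items
  return items;
}
```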
Date: Fri, 5 Sep 2025 17:52:43 +0700
Subject: [PATCH 122/173] fix: handle full data BlockInput in downloadByRoot
 (#8339)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

**Motivation**

- there are a lot of errors like:

```
Sep 05 10:36:05 devnet-ax41-0 beacon_run.sh[581633]: Sep-05 10:36:05.767[sync] debug: Error downloading in BlockInputSync.fetchBlockInput attempt=13, rootHex=0xbda1d14f56df33ad5f87ad60f639b67750320b324d817d03b3fa72516aa0d7a3, peer=16Uiu2HAmVeQdJYU1cmZHdHD5jyse2kKwLqDFJwZpVv3cuPwUr9yu, peerClient=Nimbus, code=DOWNLOAD_BY_ROOT_ERROR_MISSING_COLUMN_RESPONSE, blockRoot=0xbda1…d7a3, peer=16Uiu2HAmVeQdJYU1cmZHdHD5jyse2kKwLqDFJwZpVv3cuPwUr9yu
Sep 05 10:36:05 devnet-ax41-0 beacon_run.sh[581633]: Error: DOWNLOAD_BY_ROOT_ERROR_MISSING_COLUMN_RESPONSE
Sep 05 10:36:05 devnet-ax41-0 beacon_run.sh[581633]:     at downloadByRoot (file:///usr/src/lodestar/packages/beacon-node/src/sync/utils/downloadByRoot.ts:123:13)
Sep 05 10:36:05 devnet-ax41-0 beacon_run.sh[581633]:     at BlockInputSync.fetchBlockInput (file:///usr/src/lodestar/packages/beacon-node/src/sync/unknownBlock.ts:512:21)
Sep 05 10:36:05 devnet-ax41-0 beacon_run.sh[581633]:     at wrapError (file:///usr/src/lodestar/packages/beacon-node/src/util/wrapError.ts:18:32)
Sep 05 10:36:05 devnet-ax41-0 beacon_run.sh[581633]:     at BlockInputSync.downloadBlock (file:///usr/src/lodestar/packages/beacon-node/src/sync/unknownBlock.ts:319:17)
```

- this happens because when the block input already has full data, we don't download blobs/data_columns

**Description**

- skip errors in that case

Closes #8338

Co-authored-by: Tuyen Nguyen
---
 packages/beacon-node/src/sync/utils/downloadByRoot.ts | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/packages/beacon-node/src/sync/utils/downloadByRoot.ts b/packages/beacon-node/src/sync/utils/downloadByRoot.ts
index a2e47051265e..920c9ec43902 100644
--- a/packages/beacon-node/src/sync/utils/downloadByRoot.ts
+++ b/packages/beacon-node/src/sync/utils/downloadByRoot.ts
@@ -99,7 +99,10 @@ export async function downloadByRoot({
     });
   }
 
-  if (isBlockInputBlobs(blockInput)) {
+  const hasAllData = blockInput.hasBlockAndAllData();
+
+  if (isBlockInputBlobs(blockInput) && !hasAllData) {
+    // blobSidecars could be undefined if gossip resulted in full block+blobs so we don't download any
     if (!blobSidecars) {
       throw new DownloadByRootError({
         code: DownloadByRootErrorCode.MISSING_BLOB_RESPONSE,
@@ -118,7 +121,8 @@ export async function downloadByRoot({
     }
   }
 
-  if (isBlockInputColumns(blockInput)) {
+  if (isBlockInputColumns(blockInput) && !hasAllData) {
+    // columnSidecars could be undefined if gossip resulted in full block+columns so we don't download any
     if (!columnSidecars) {
       throw new DownloadByRootError({
         code: DownloadByRootErrorCode.MISSING_COLUMN_RESPONSE,
@@ -139,7 +143,7 @@ export async function downloadByRoot({
   let status: PendingBlockInputStatus;
   let timeSyncedSec: number | undefined;
 
-  if (blockInput.hasBlockAndAllData()) {
+  if (hasAllData) {
     status = PendingBlockInputStatus.downloaded;
     timeSyncedSec = Date.now() / 1000;
   } else {
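The caller contract that this commit establishes: gossip may complete the block and its data while a by-root fetch is still in flight, so `undefined` sidecar responses are legitimate and only count as an error when data is still missing. A reduced model of the guard (stand-in types; the real helpers are `hasBlockAndAllData`, `isBlockInputBlobs` and `isBlockInputColumns` from the diff above):

```ts
// Reduced model of the PATCH 122 guard, not the real API.
type BlockInputLike = {kind: "blobs" | "columns" | "preData"; hasBlockAndAllData(): boolean};

function assertSidecarResponses(
  blockInput: BlockInputLike,
  blobSidecars: unknown[] | undefined,
  columnSidecars: unknown[] | undefined
): void {
  // gossip may have completed block + data while the by-root fetch was in flight,
  // in which case nothing was requested and undefined responses are expected
  if (blockInput.hasBlockAndAllData()) {
    return;
  }
  if (blockInput.kind === "blobs" && !blobSidecars) {
    throw Error("MISSING_BLOB_RESPONSE");
  }
  if (blockInput.kind === "columns" && !columnSidecars) {
    throw Error("MISSING_COLUMN_RESPONSE");
  }
}
```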
From e338b69ec97968059b29788bbaaafac3cf8ed003 Mon Sep 17 00:00:00 2001
From: twoeths <10568965+twoeths@users.noreply.github.com>
Date: Fri, 5 Sep 2025 18:24:32 +0700
Subject: [PATCH 123/173] fix: request columns by peer custodied columns
 (#8330)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

**Motivation**

- before we send `beacon_data_columns_by_root` or `beacon_data_columns_by_range` to a peer, we need to filter by the peer's custodied columns
- otherwise, we will get errors like:

```
Sep-05 03:05:17.827[sync] debug: Error downloading in BlockInputSync.fetchBlockInput attempt=3, rootHex=0xe81f474d8bd7f94e891f664165f7ea3af9841ff3b7a17a20f9bc54647f5b0480, peer=16Uiu2HAm3k5Npu6EaYWxiEvzsdLseEkjVyoVhvbxWEuyqdBgBBbq, peerClient=Grandine, code=DOWNLOAD_BY_ROOT_ERROR_EXTRA_SIDECAR_RECEIVED, peer=16...BgBBbq, blockRoot=0xe81f…0480, invalidIndex=126
Error: Received a columnSidecar that was not requested
    at fetchAndValidateColumns (file:///usr/src/lodestar/packages/beacon-node/src/sync/utils/downloadByRoot.ts:457:13)
    at fetchByRoot (file:///usr/src/lodestar/packages/beacon-node/src/sync/utils/downloadByRoot.ts:197:26)
    at downloadByRoot (file:///usr/src/lodestar/packages/beacon-node/src/sync/utils/downloadByRoot.ts:71:49)
    at BlockInputSync.fetchBlockInput (file:///usr/src/lodestar/packages/beacon-node/src/sync/unknownBlock.ts:512:21)
    at wrapError (file:///usr/src/lodestar/packages/beacon-node/src/util/wrapError.ts:18:32)
    at BlockInputSync.downloadBlock (file:///usr/src/lodestar/packages/beacon-node/src/sync/unknownBlock.ts:319:17)
```

**Description**

- fix for the `downloadByRange` and `downloadByRoot` flows

Closes #8329

---------

Co-authored-by: Tuyen Nguyen
---
 packages/beacon-node/src/sync/range/batch.ts  | 30 ++++++++
 packages/beacon-node/src/sync/range/range.ts  |  2 +-
 packages/beacon-node/src/sync/unknownBlock.ts |  8 +-
 .../src/sync/utils/downloadByRoot.ts          | 74 ++++++++++++-------
 .../unit/sync/utils/downloadByRoot.test.ts    | 23 ++++--
 5 files changed, 98 insertions(+), 39 deletions(-)

diff --git a/packages/beacon-node/src/sync/range/batch.ts b/packages/beacon-node/src/sync/range/batch.ts
index 440f795a86a9..e43b29b3f9e7 100644
--- a/packages/beacon-node/src/sync/range/batch.ts
+++ b/packages/beacon-node/src/sync/range/batch.ts
@@ -10,6 +10,7 @@ import {PeerIdStr} from "../../util/peerId.js";
 import {MAX_BATCH_DOWNLOAD_ATTEMPTS, MAX_BATCH_PROCESSING_ATTEMPTS} from "../constants.js";
 import {DownloadByRangeRequests} from "../utils/downloadByRange.js";
 import {getBatchSlotRange, hashBlocks} from "./utils/index.js";
+import {PeerSyncMeta} from "../../network/peers/peersData.js";
 
 /**
  * Current state of a batch
@@ -207,6 +208,35 @@ export class Batch {
     return requests;
   }
 
+  /**
+   * Post-fulu we should only request columns that the peer has advertised
+   */
+  getRequestsForPeer(peer: PeerSyncMeta): DownloadByRangeRequests {
+    if (!isForkPostFulu(this.forkName)) {
+      return this.requests;
+    }
+
+    // post-fulu we need to ensure that we only request columns that the peer has advertised
+    const {columnsRequest} = this.requests;
+    if (columnsRequest == null) {
+      return this.requests;
+    }
+
+    const peerColumns = new Set(peer.custodyGroups ?? []);
+    const requestedColumns = columnsRequest.columns.filter((c) => peerColumns.has(c));
+    if (requestedColumns.length === columnsRequest.columns.length) {
+      return this.requests;
+    }
+
+    return {
+      ...this.requests,
+      columnsRequest: {
+        ...columnsRequest,
+        columns: requestedColumns,
+      },
+    };
+  }
+
   /**
    * Gives a list of peers from which this batch has had a failed download or processing attempt.
    */
diff --git a/packages/beacon-node/src/sync/range/range.ts b/packages/beacon-node/src/sync/range/range.ts
index d8e5ccdba456..370c6643111a 100644
--- a/packages/beacon-node/src/sync/range/range.ts
+++ b/packages/beacon-node/src/sync/range/range.ts
@@ -207,7 +207,7 @@ export class RangeSync extends (EventEmitter as {new (): RangeSyncEmitter}) {
       logger: this.logger,
       peerIdStr: peer.peerId,
       batchBlocks,
-      ...batch.requests,
+      ...batch.getRequestsForPeer(peer),
     });
     const cached = cacheByRangeResponses({
       cache: this.chain.seenBlockInputCache,
diff --git a/packages/beacon-node/src/sync/unknownBlock.ts b/packages/beacon-node/src/sync/unknownBlock.ts
index dbe95db09556..35a30907799c 100644
--- a/packages/beacon-node/src/sync/unknownBlock.ts
+++ b/packages/beacon-node/src/sync/unknownBlock.ts
@@ -494,8 +494,8 @@ export class BlockInputSync {
       ?
new Set(cacheItem.blockInput.getMissingSampledColumnMeta().missing) : defaultPendingColumns; // pendingDataColumns is null pre-fulu - const peer = this.peerBalancer.bestPeerForPendingColumns(pendingColumns, excludedPeers); - if (peer === null) { + const peerMeta = this.peerBalancer.bestPeerForPendingColumns(pendingColumns, excludedPeers); + if (peerMeta === null) { // no more peer with needed columns to try, throw error let message = `Error fetching UnknownBlockRoot after ${i}: cannot find peer`; if (pendingColumns) { @@ -503,7 +503,7 @@ export class BlockInputSync { } throw Error(message); } - const {peerId, client: peerClient} = peer; + const {peerId, client: peerClient} = peerMeta; excludedPeers.add(peerId); cacheItem.peerIdStrings.add(peerId); @@ -514,7 +514,7 @@ export class BlockInputSync { network: this.network, seenCache: this.chain.seenGossipBlockInput, executionEngine: this.chain.executionEngine, - peerIdStr: peerId, + peerMeta, cacheItem, }); } catch (e) { diff --git a/packages/beacon-node/src/sync/utils/downloadByRoot.ts b/packages/beacon-node/src/sync/utils/downloadByRoot.ts index 920c9ec43902..e616137101e7 100644 --- a/packages/beacon-node/src/sync/utils/downloadByRoot.ts +++ b/packages/beacon-node/src/sync/utils/downloadByRoot.ts @@ -14,7 +14,6 @@ import {prettyPrintPeerIdStr} from "../../network/util.js"; import {computePreFuluKzgCommitmentsInclusionProof, kzgCommitmentToVersionedHash} from "../../util/blobs.js"; import {byteArrayEquals} from "../../util/bytes.js"; import {getCellsAndProofs, getDataColumnSidecarsFromBlock} from "../../util/dataColumns.js"; -import {PeerIdStr} from "../../util/peerId.js"; import { BlockInputSyncCacheItem, PendingBlockInput, @@ -22,25 +21,31 @@ import { getBlockInputSyncCacheItemRootHex, isPendingBlockInput, } from "../types.js"; +import {PeerSyncMeta} from "../../network/peers/peersData.js"; +import {PeerIdStr} from "../../util/peerId.js"; export type FetchByRootCoreProps = { config: ChainForkConfig; network: INetwork; - peerIdStr: PeerIdStr; + peerMeta: PeerSyncMeta; }; export type FetchByRootProps = FetchByRootCoreProps & { cacheItem: BlockInputSyncCacheItem; executionEngine: IExecutionEngine; blockRoot: Uint8Array; }; -export type FetchByRootAndValidateBlockProps = FetchByRootCoreProps & {blockRoot: Uint8Array}; +export type FetchByRootAndValidateBlockProps = Omit & { + peerIdStr: PeerIdStr; + blockRoot: Uint8Array; +}; export type FetchByRootAndValidateBlobsProps = FetchByRootAndValidateBlockProps & { executionEngine: IExecutionEngine; forkName: ForkPreFulu; block: SignedBeaconBlock; blobMeta: BlobMeta[]; }; -export type FetchByRootAndValidateColumnsProps = FetchByRootAndValidateBlockProps & { +export type FetchByRootAndValidateColumnsProps = FetchByRootCoreProps & { + blockRoot: Uint8Array; executionEngine: IExecutionEngine; forkName: ForkPostFulu; block: SignedBeaconBlock; @@ -62,11 +67,12 @@ export async function downloadByRoot({ seenCache, network, executionEngine, - peerIdStr, + peerMeta, cacheItem, }: DownloadByRootProps): Promise { const rootHex = getBlockInputSyncCacheItemRootHex(cacheItem); const blockRoot = fromHex(rootHex); + const {peerId: peerIdStr} = peerMeta; const {block, blobSidecars, columnSidecars} = await fetchByRoot({ config, @@ -74,7 +80,7 @@ export async function downloadByRoot({ executionEngine, cacheItem, blockRoot, - peerIdStr, + peerMeta, }); let blockInput: IBlockInput; @@ -131,13 +137,17 @@ export async function downloadByRoot({ }); } for (const columnSidecar of columnSidecars) { - blockInput.addColumn({ - 
columnSidecar, - blockRootHex: rootHex, - seenTimestampSec: Date.now(), - source: BlockInputSource.byRoot, - peerIdStr, - }); + blockInput.addColumn( + { + columnSidecar, + blockRootHex: rootHex, + seenTimestampSec: Date.now(), + source: BlockInputSource.byRoot, + peerIdStr, + }, + // the same DataColumnSidecar may be added by gossip while waiting for fetchByRoot + {throwOnDuplicateAdd: false} + ); } } @@ -163,13 +173,14 @@ export async function fetchByRoot({ config, network, executionEngine, - peerIdStr, + peerMeta, blockRoot, cacheItem, }: FetchByRootProps): Promise { let block: SignedBeaconBlock; let blobSidecars: deneb.BlobSidecars | undefined; let columnSidecars: fulu.DataColumnSidecars | undefined; + const {peerId: peerIdStr} = peerMeta; if (isPendingBlockInput(cacheItem)) { if (cacheItem.blockInput.hasBlock()) { @@ -202,7 +213,7 @@ export async function fetchByRoot({ config, network, executionEngine, - peerIdStr, + peerMeta, forkName: forkName as ForkPostFulu, block: block as SignedBeaconBlock, blockRoot, @@ -223,7 +234,7 @@ export async function fetchByRoot({ config, network, executionEngine, - peerIdStr, + peerMeta, forkName, blockRoot, block: block as SignedBeaconBlock, @@ -403,13 +414,18 @@ export async function fetchAndValidateColumns({ network, executionEngine, forkName, - peerIdStr, + peerMeta, block, blockRoot, columnMeta, }: FetchByRootAndValidateColumnsProps): Promise { + const {peerId: peerIdStr} = peerMeta; const slot = block.message.slot; const blobCount = block.message.body.blobKzgCommitments.length; + if (blobCount === 0) { + return []; + } + const blobsV2ColumnSidecars = await fetchGetBlobsV2AndBuildSidecars({ config, executionEngine, @@ -452,12 +468,14 @@ export async function fetchAndValidateColumns({ return needed; } + const peerColumns = new Set(peerMeta.custodyGroups ?? []); + const requestedColumns = columnMeta.missing.filter((c) => peerColumns.has(c)); const columnSidecars = await network.sendDataColumnSidecarsByRoot(peerIdStr, [ - {blockRoot, columns: columnMeta.missing}, + {blockRoot, columns: requestedColumns}, ]); - for (let i = 0; i < columnMeta.missing.length; i++) { + for (let i = 0; i < requestedColumns.length; i++) { const columnSidecar = columnSidecars[i]; - if (columnSidecar.index !== columnMeta.missing[i]) { + if (columnSidecar.index !== requestedColumns[i]) { throw new DownloadByRootError( { code: DownloadByRootErrorCode.EXTRA_SIDECAR_RECEIVED, @@ -493,29 +511,33 @@ export async function fetchGetBlobsV2AndBuildSidecars({ return getDataColumnSidecarsFromBlock(config, block, cellsAndProofs); } +// TODO(fulu) not in use, remove? export async function fetchColumnsByRoot({ network, - peerIdStr, + peerMeta, blockRoot, columnMeta, }: Pick< FetchByRootAndValidateColumnsProps, - "network" | "peerIdStr" | "blockRoot" | "columnMeta" + "network" | "peerMeta" | "blockRoot" | "columnMeta" >): Promise { - return await network.sendDataColumnSidecarsByRoot(peerIdStr, [{blockRoot, columns: columnMeta.missing}]); + return await network.sendDataColumnSidecarsByRoot(peerMeta.peerId, [{blockRoot, columns: columnMeta.missing}]); } +// TODO(fulu) not in use, remove? export type ValidateColumnSidecarsProps = Pick< FetchByRootAndValidateColumnsProps, - "config" | "peerIdStr" | "blockRoot" | "columnMeta" + "config" | "peerMeta" | "blockRoot" | "columnMeta" > & { slot: number; blobCount: number; needed?: fulu.DataColumnSidecars; needToPublish?: fulu.DataColumnSidecars; }; + +// TODO(fulu) not in use, remove? 
export async function validateColumnSidecars({ - peerIdStr, + peerMeta, slot, blockRoot, blobCount, @@ -529,7 +551,7 @@ export async function validateColumnSidecars({ throw new DownloadByRootError( { code: DownloadByRootErrorCode.EXTRA_SIDECAR_RECEIVED, - peer: prettyPrintPeerIdStr(peerIdStr), + peer: prettyPrintPeerIdStr(peerMeta.peerId), blockRoot: prettyBytes(blockRoot), invalidIndex: columnSidecar.index, }, diff --git a/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts b/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts index 6dd5d2c34013..e42c94e85b16 100644 --- a/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts +++ b/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts @@ -31,9 +31,16 @@ import { generateBlockWithBlobSidecars, generateBlockWithColumnSidecars, } from "../../../utils/blocksAndData.js"; +import {PeerSyncMeta} from "../../../../src/network/peers/peersData.js"; describe("downloadByRoot.ts", () => { const peerIdStr = "1234567890abcdef1234567890abcdef"; + const peerMeta: PeerSyncMeta = { + peerId: peerIdStr, + client: "N/A", + custodyGroups: Array.from({length: NUMBER_OF_COLUMNS}, (_, i) => i), + earliestAvailableSlot: 0, + }; let network: INetwork; let executionEngine: IExecutionEngine; @@ -519,7 +526,7 @@ describe("downloadByRoot.ts", () => { network, executionEngine, forkName, - peerIdStr, + peerMeta, blockRoot: fuluBlockWithColumns.blockRoot, block: fuluBlockWithColumns.block, columnMeta, @@ -563,7 +570,7 @@ describe("downloadByRoot.ts", () => { network, executionEngine, forkName, - peerIdStr, + peerMeta, blockRoot: fuluBlockWithColumns.blockRoot, block: fuluBlockWithColumns.block, columnMeta: testColumnMeta, @@ -603,7 +610,7 @@ describe("downloadByRoot.ts", () => { network, executionEngine, forkName, - peerIdStr, + peerMeta, blockRoot: fuluBlockWithColumns.blockRoot, block: fuluBlockWithColumns.block, columnMeta: testColumnMeta, @@ -639,7 +646,7 @@ describe("downloadByRoot.ts", () => { network, executionEngine, forkName, - peerIdStr, + peerMeta, blockRoot: fuluBlockWithColumns.blockRoot, block: fuluBlockWithColumns.block, columnMeta, @@ -679,7 +686,7 @@ describe("downloadByRoot.ts", () => { network, executionEngine, forkName, - peerIdStr, + peerMeta, blockRoot: fuluBlockWithColumns.blockRoot, block: fuluBlockWithColumns.block, columnMeta, @@ -736,7 +743,7 @@ describe("downloadByRoot.ts", () => { network, executionEngine, forkName, - peerIdStr, + peerMeta, blockRoot: fuluBlockWithColumns.blockRoot, block: fuluBlockWithColumns.block, columnMeta: { @@ -773,7 +780,7 @@ describe("downloadByRoot.ts", () => { network, executionEngine, forkName, - peerIdStr, + peerMeta, blockRoot: fuluBlockWithColumns.blockRoot, block: fuluBlockWithColumns.block, columnMeta: { @@ -947,7 +954,7 @@ describe("downloadByRoot.ts", () => { const missing = fuluBlockWithColumns.columnSidecars.map((c) => c.index); const response = await fetchColumnsByRoot({ network, - peerIdStr, + peerMeta, blockRoot, columnMeta: { missing, From 4caea7062ff48cdc1b957356d9c5d9d2bb3a87ab Mon Sep 17 00:00:00 2001 From: Cayman Date: Fri, 5 Sep 2025 08:24:27 -0400 Subject: [PATCH 124/173] fix: remove getBlobs use during by root syncing (#8341) **Description** - don't use getBlobs during by-root syncing - (Only use getBlobs as a response to validated gossip objects) --- packages/beacon-node/src/sync/unknownBlock.ts | 1 - .../src/sync/utils/downloadByRoot.ts | 150 +----------------- 2 files changed, 2 insertions(+), 149 deletions(-) diff --git 
a/packages/beacon-node/src/sync/unknownBlock.ts b/packages/beacon-node/src/sync/unknownBlock.ts index 35a30907799c..0fbb5d333ddd 100644 --- a/packages/beacon-node/src/sync/unknownBlock.ts +++ b/packages/beacon-node/src/sync/unknownBlock.ts @@ -513,7 +513,6 @@ export class BlockInputSync { config: this.config, network: this.network, seenCache: this.chain.seenGossipBlockInput, - executionEngine: this.chain.executionEngine, peerMeta, cacheItem, }); diff --git a/packages/beacon-node/src/sync/utils/downloadByRoot.ts b/packages/beacon-node/src/sync/utils/downloadByRoot.ts index e616137101e7..bc2e72b49213 100644 --- a/packages/beacon-node/src/sync/utils/downloadByRoot.ts +++ b/packages/beacon-node/src/sync/utils/downloadByRoot.ts @@ -1,6 +1,5 @@ import {ChainForkConfig} from "@lodestar/config"; import {ForkPostDeneb, ForkPostFulu, ForkPreFulu, isForkPostDeneb, isForkPostFulu} from "@lodestar/params"; -import {signedBlockToSignedHeader} from "@lodestar/state-transition"; import {SignedBeaconBlock, deneb, fulu} from "@lodestar/types"; import {LodestarError, fromHex, prettyBytes, toRootHex} from "@lodestar/utils"; import {isBlockInputBlobs, isBlockInputColumns} from "../../chain/blocks/blockInput/blockInput.js"; @@ -8,12 +7,10 @@ import {BlobMeta, BlockInputSource, IBlockInput, MissingColumnMeta} from "../../ import {SeenBlockInput} from "../../chain/seenCache/seenGossipBlockInput.js"; import {validateBlockBlobSidecars} from "../../chain/validation/blobSidecar.js"; import {validateBlockDataColumnSidecars} from "../../chain/validation/dataColumnSidecar.js"; -import {IExecutionEngine} from "../../execution/index.js"; import {INetwork} from "../../network/interface.js"; import {prettyPrintPeerIdStr} from "../../network/util.js"; -import {computePreFuluKzgCommitmentsInclusionProof, kzgCommitmentToVersionedHash} from "../../util/blobs.js"; +import {kzgCommitmentToVersionedHash} from "../../util/blobs.js"; import {byteArrayEquals} from "../../util/bytes.js"; -import {getCellsAndProofs, getDataColumnSidecarsFromBlock} from "../../util/dataColumns.js"; import { BlockInputSyncCacheItem, PendingBlockInput, @@ -31,7 +28,6 @@ export type FetchByRootCoreProps = { }; export type FetchByRootProps = FetchByRootCoreProps & { cacheItem: BlockInputSyncCacheItem; - executionEngine: IExecutionEngine; blockRoot: Uint8Array; }; export type FetchByRootAndValidateBlockProps = Omit & { @@ -39,14 +35,12 @@ export type FetchByRootAndValidateBlockProps = Omit; blobMeta: BlobMeta[]; }; export type FetchByRootAndValidateColumnsProps = FetchByRootCoreProps & { blockRoot: Uint8Array; - executionEngine: IExecutionEngine; forkName: ForkPostFulu; block: SignedBeaconBlock; columnMeta: MissingColumnMeta; @@ -60,13 +54,11 @@ export type FetchByRootResponses = { export type DownloadByRootProps = FetchByRootCoreProps & { cacheItem: BlockInputSyncCacheItem; seenCache: SeenBlockInput; - executionEngine: IExecutionEngine; }; export async function downloadByRoot({ config, seenCache, network, - executionEngine, peerMeta, cacheItem, }: DownloadByRootProps): Promise { @@ -77,7 +69,6 @@ export async function downloadByRoot({ const {block, blobSidecars, columnSidecars} = await fetchByRoot({ config, network, - executionEngine, cacheItem, blockRoot, peerMeta, @@ -172,7 +163,6 @@ export async function downloadByRoot({ export async function fetchByRoot({ config, network, - executionEngine, peerMeta, blockRoot, cacheItem, @@ -200,7 +190,6 @@ export async function fetchByRoot({ blobSidecars = await fetchAndValidateBlobs({ config, network, - 
executionEngine, peerIdStr, forkName: forkName as ForkPreFulu, block: block as SignedBeaconBlock, @@ -212,7 +201,6 @@ export async function fetchByRoot({ columnSidecars = await fetchAndValidateColumns({ config, network, - executionEngine, peerMeta, forkName: forkName as ForkPostFulu, block: block as SignedBeaconBlock, @@ -233,7 +221,6 @@ export async function fetchByRoot({ columnSidecars = await fetchAndValidateColumns({ config, network, - executionEngine, peerMeta, forkName, blockRoot, @@ -251,7 +238,6 @@ export async function fetchByRoot({ blobSidecars = await fetchAndValidateBlobs({ config, network, - executionEngine, peerIdStr, forkName: forkName as ForkPreFulu, blockRoot, @@ -305,93 +291,23 @@ export async function fetchAndValidateBlock({ export async function fetchAndValidateBlobs({ config, network, - executionEngine, forkName, peerIdStr, blockRoot, block, blobMeta, }: FetchByRootAndValidateBlobsProps): Promise { - let blobSidecars: deneb.BlobSidecars = []; - try { - blobSidecars = await fetchGetBlobsV1AndBuildSidecars({ - config, - executionEngine, - forkName, - block, - blobMeta, - }); - } catch (err) { - network.logger.error( - `error fetching/building blobSidecars for blockRoot=${prettyBytes(blockRoot)} via getBlobsV1`, - {}, - err as Error - ); - } - - // not all needed blobs were fetched via getBlobs, need to use ReqResp - if (blobSidecars.length !== blobMeta.length) { - const networkResponse = await fetchBlobsByRoot({ + const blobSidecars: deneb.BlobSidecars = await fetchBlobsByRoot({ network, peerIdStr, blobMeta, - indicesInPossession: blobSidecars.map((b) => b.index), }); - blobSidecars.push(...networkResponse); - } - - // responses can be sparse for both types of requests to sort to make sure its in sequential order - blobSidecars.sort((a, b) => a.index - b.index); await validateBlockBlobSidecars(block.message.slot, blockRoot, blobMeta.length, blobSidecars); return blobSidecars; } -export async function fetchGetBlobsV1AndBuildSidecars({ - config, - executionEngine, - forkName, - block, - blobMeta, -}: Pick< - FetchByRootAndValidateBlobsProps, - "config" | "executionEngine" | "forkName" | "block" | "blobMeta" ->): Promise { - const blobSidecars: deneb.BlobSidecars = []; - - const enginedResponse = await executionEngine.getBlobs( - forkName, - blobMeta.map(({versionedHash}) => versionedHash) - ); - - if (!enginedResponse.length) { - return blobSidecars; - } - - // response.length should always match blobMeta.length and they should be in the same order - for (let i = 0; i < blobMeta.length; i++) { - const blobAndProof = enginedResponse[i]; - if (blobAndProof) { - const {blob, proof} = blobAndProof; - const index = blobMeta[i].index; - const kzgCommitment = block.message.body.blobKzgCommitments[index]; - const sidecar: deneb.BlobSidecar = { - index, - blob, - kzgProof: proof, - kzgCommitment, - // TODO(fulu): refactor this to only calculate the root inside these following two functions once - kzgCommitmentInclusionProof: computePreFuluKzgCommitmentsInclusionProof(forkName, block.message.body, index), - signedBlockHeader: signedBlockToSignedHeader(config, block), - }; - blobSidecars.push(sidecar); - } - } - - return blobSidecars; -} - export async function fetchBlobsByRoot({ network, peerIdStr, @@ -412,7 +328,6 @@ export async function fetchBlobsByRoot({ export async function fetchAndValidateColumns({ config, network, - executionEngine, forkName, peerMeta, block, @@ -426,48 +341,6 @@ export async function fetchAndValidateColumns({ return []; } - const blobsV2ColumnSidecars = 
await fetchGetBlobsV2AndBuildSidecars({ - config, - executionEngine, - forkName, - block, - columnMeta, - }).catch((err) => { - network.logger.error( - "error building columnSidecars via getBlobsV2", - {slot, blockRoot: prettyBytes(blockRoot)}, - err as Error - ); - return null; - }); - if (blobsV2ColumnSidecars?.length) { - // limit reconstructed to only the ones we need - const needed = blobsV2ColumnSidecars.filter((c) => columnMeta.missing.includes(c.index)); - // spec states that reconstructed sidecars need to be published to the network, but only requires - // publishing the ones that we custody and have not already been published. - const alreadyPublished = network.custodyConfig.custodyColumns.filter( - (index) => !columnMeta.missing.includes(index) - ); - const needToPublish = blobsV2ColumnSidecars.filter( - (c) => network.custodyConfig.custodyColumns.includes(c.index) && !alreadyPublished.includes(c.index) - ); - // need to validate both the ones we sample AND ones we will publish - await validateBlockDataColumnSidecars(slot, blockRoot, blobCount, [...needed, ...needToPublish]); - needToPublish.map((column) => - network.publishDataColumnSidecar(column).catch((err) => - network.logger.error( - "Error publishing column after getBlobsV2 reconstruct", - { - index: column.index, - blockRoot: prettyBytes(blockRoot), - }, - err - ) - ) - ); - return needed; - } - const peerColumns = new Set(peerMeta.custodyGroups ?? []); const requestedColumns = columnMeta.missing.filter((c) => peerColumns.has(c)); const columnSidecars = await network.sendDataColumnSidecarsByRoot(peerIdStr, [ @@ -492,25 +365,6 @@ export async function fetchAndValidateColumns({ return columnSidecars; } -export async function fetchGetBlobsV2AndBuildSidecars({ - config, - executionEngine, - forkName, - block, - columnMeta, -}: Pick< - FetchByRootAndValidateColumnsProps, - "config" | "executionEngine" | "forkName" | "block" | "columnMeta" ->): Promise { - const response = await executionEngine.getBlobs(forkName, columnMeta.versionedHashes); - if (!response) { - return []; - } - - const cellsAndProofs = await getCellsAndProofs(response); - return getDataColumnSidecarsFromBlock(config, block, cellsAndProofs); -} - // TODO(fulu) not in use, remove? 
 export async function fetchColumnsByRoot({
   network,

From 03dc4992271d00d46ee3feeeb79153962813e1cf Mon Sep 17 00:00:00 2001
From: Cayman
Date: Fri, 5 Sep 2025 08:38:59 -0400
Subject: [PATCH 125/173] fix: allow for sparse blobs in blob validation
 (#8342)

**Description**

- the blob validation function required _all_ blobs to be present
- that doesn't hold true during by-root sync, where some blobs may have been received via gossip
- check that all block blobs exist at the higher level when all blobs must be present (by-range sync)
- clean up various muddled error cases

---
 .../src/chain/errors/blobSidecarError.ts      |  3 --
 .../src/chain/validation/blobSidecar.ts       | 31 ++++++-------------
 packages/beacon-node/src/sync/range/chain.ts  |  1 -
 .../src/sync/utils/downloadByRange.ts         | 19 +++++++++++-
 4 files changed, 28 insertions(+), 26 deletions(-)

diff --git a/packages/beacon-node/src/chain/errors/blobSidecarError.ts b/packages/beacon-node/src/chain/errors/blobSidecarError.ts
index b1e5e7d644db..8bbc8063eb84 100644
--- a/packages/beacon-node/src/chain/errors/blobSidecarError.ts
+++ b/packages/beacon-node/src/chain/errors/blobSidecarError.ts
@@ -22,8 +22,6 @@ export enum BlobSidecarErrorCode {
   INCORRECT_SIDECAR_COUNT = "BLOBS_SIDECAR_ERROR_INCORRECT_SIDECAR_COUNT",
   /** Sidecar doesn't match block */
   INCORRECT_BLOCK = "BLOBS_SIDECAR_ERROR_INCORRECT_BLOCK",
-  /** Sidecar index is not as expected */
-  INCORRECT_INDEX = "BLOBS_SIDECAR_ERROR_INCORRECT_INDEX",
 
   /** Sidecars proofs not valid */
   INVALID_KZG_PROOF_BATCH = "BLOBS_SIDECAR_ERROR_INVALID_KZG_PROOF_BATCH",
@@ -48,7 +46,6 @@ export type BlobSidecarErrorType =
   | {code: BlobSidecarErrorCode.INVALID_KZG_PROOF; blobIdx: number}
   | {code: BlobSidecarErrorCode.INCORRECT_SIDECAR_COUNT; slot: number; expected: number; actual: number}
   | {code: BlobSidecarErrorCode.INCORRECT_BLOCK; slot: number; blobIdx: number; expected: string; actual: string}
-  | {code: BlobSidecarErrorCode.INCORRECT_INDEX; slot: number; expected: number; actual: number}
   | {code: BlobSidecarErrorCode.INVALID_KZG_PROOF_BATCH; slot: number; reason: string}
   | {code: BlobSidecarErrorCode.FUTURE_SLOT; blockSlot: Slot; currentSlot: Slot}
   | {code: BlobSidecarErrorCode.WOULD_REVERT_FINALIZED_SLOT; blockSlot: Slot; finalizedSlot: Slot}
diff --git a/packages/beacon-node/src/chain/validation/blobSidecar.ts b/packages/beacon-node/src/chain/validation/blobSidecar.ts
index 89f31200ad4a..8d4df35f7be7 100644
--- a/packages/beacon-node/src/chain/validation/blobSidecar.ts
+++ b/packages/beacon-node/src/chain/validation/blobSidecar.ts
@@ -173,7 +173,7 @@ export async function validateGossipBlobSidecar(
 }
 
 /**
- * Validate all blob sidecars in a block
+ * Validate some blob sidecars in a block
  *
  * Requires the block to be known to the node
 */
@@ -183,7 +183,11 @@ export async function validateBlockBlobSidecars(
   blockSlot: Slot,
   blockRoot: Root,
   blockBlobCount: number,
   blobSidecars: deneb.BlobSidecars
 ): Promise<void> {
-  if (blockBlobCount !== blobSidecars.length) {
+  if (blobSidecars.length === 0) {
+    return;
+  }
+
+  if (blockBlobCount === 0) {
     throw new BlobSidecarValidationError({
       code: BlobSidecarErrorCode.INCORRECT_SIDECAR_COUNT,
       slot: blockSlot,
       expected: blockBlobCount,
       actual: blobSidecars.length,
     });
   }
 
-  if (blobSidecars.length === 0) {
-    return;
-  }
-
   // Hash the first sidecar block header and compare the rest via (cheaper) equality
   const firstSidecarBlockHeader = blobSidecars[0].signedBlockHeader.message;
   const firstBlockRoot = ssz.phase0.BeaconBlockHeader.hashTreeRoot(firstSidecarBlockHeader);
@@ -215,25 +215,14 @@ export async function validateBlockBlobSidecars(
   const commitments = [];
   const blobs = [];
   const proofs = [];
-  for (let i = 0; i < blobSidecars.length; i++) {
-    const blobSidecar = blobSidecars[i];
-    if (blobSidecar.index !== i) {
-      throw new BlobSidecarValidationError(
-        {
-          code: BlobSidecarErrorCode.INCORRECT_INDEX,
-          slot: blockSlot,
-          expected: i,
-          actual: blobSidecar.index,
-        },
-        "BlobSidecar index out of order"
-      );
-    }
+  for (const blobSidecar of blobSidecars) {
+    const blobIdx = blobSidecar.index;
     if (!ssz.phase0.BeaconBlockHeader.equals(blobSidecar.signedBlockHeader.message, firstSidecarBlockHeader)) {
       throw new BlobSidecarValidationError(
         {
           code: BlobSidecarErrorCode.INCORRECT_BLOCK,
           slot: blockSlot,
-          blobIdx: i,
+          blobIdx,
           expected: toRootHex(blockRoot),
           actual: "unknown - compared via equality",
         },
         {
           code: BlobSidecarErrorCode.INCLUSION_PROOF_INVALID,
           slot: blockSlot,
-          blobIdx: i,
+          blobIdx,
         },
         "BlobSidecar inclusion proof invalid"
       );
diff --git a/packages/beacon-node/src/sync/range/chain.ts b/packages/beacon-node/src/sync/range/chain.ts
index 22d65e1c4596..82697b8e5175 100644
--- a/packages/beacon-node/src/sync/range/chain.ts
+++ b/packages/beacon-node/src/sync/range/chain.ts
@@ -474,7 +474,6 @@ export class SyncChain {
       case DownloadByRangeErrorCode.OUT_OF_ORDER_BLOCKS:
       case DownloadByRangeErrorCode.OUT_OF_RANGE_BLOCKS:
       case DownloadByRangeErrorCode.PARENT_ROOT_MISMATCH:
-      case BlobSidecarErrorCode.INCORRECT_INDEX:
       case BlobSidecarErrorCode.INCLUSION_PROOF_INVALID:
       case BlobSidecarErrorCode.INVALID_KZG_PROOF_BATCH:
       case DataColumnSidecarErrorCode.INCORRECT_KZG_COMMITMENTS_COUNT:
diff --git a/packages/beacon-node/src/sync/utils/downloadByRange.ts b/packages/beacon-node/src/sync/utils/downloadByRange.ts
index 20bea11bad6c..d1539757b91f 100644
--- a/packages/beacon-node/src/sync/utils/downloadByRange.ts
+++ b/packages/beacon-node/src/sync/utils/downloadByRange.ts
@@ -496,9 +496,21 @@ export async function validateBlobsByRangeResponse(
     const blockBlobSidecars = blobSidecars.slice(blobSidecarIndex, blobSidecarIndex + blockKzgCommitments.length);
     blobSidecarIndex += blockKzgCommitments.length;
 
+    for (let i = 0; i < blockBlobSidecars.length; i++) {
+      if (blockBlobSidecars[i].index !== i) {
+        throw new DownloadByRangeError(
+          {
+            code: DownloadByRangeErrorCode.OUT_OF_ORDER_BLOBS,
+            slot: block.message.slot,
+          },
+          "Blob sidecars not in order or do not match expected indexes in BlobSidecarsByRange response"
+        );
+      }
+    }
+
     validateSidecarsPromises.push(
       validateBlockBlobSidecars(block.message.slot, blockRoot, blockKzgCommitments.length, blockBlobSidecars).then(
-        () => ({blockRoot, blobSidecars})
+        () => ({blockRoot, blobSidecars: blockBlobSidecars})
       )
     );
   }
@@ -670,6 +682,7 @@ export enum DownloadByRangeErrorCode {
   OUT_OF_ORDER_BLOCKS = "DOWNLOAD_BY_RANGE_OUT_OF_ORDER_BLOCKS",
 
   MISSING_BLOBS = "DOWNLOAD_BY_RANGE_ERROR_MISSING_BLOBS",
+  OUT_OF_ORDER_BLOBS = "DOWNLOAD_BY_RANGE_ERROR_OUT_OF_ORDER_BLOBS",
   EXTRA_BLOBS = "DOWNLOAD_BY_RANGE_ERROR_EXTRA_BLOBS",
 
   MISSING_COLUMNS = "DOWNLOAD_BY_RANGE_ERROR_MISSING_COLUMNS",
@@ -733,6 +746,10 @@ export type DownloadByRangeErrorType =
       expected: number;
       actual: number;
     }
+  | {
+      code: DownloadByRangeErrorCode.OUT_OF_ORDER_BLOBS;
+      slot: number;
+    }
   | {
       code: DownloadByRangeErrorCode.EXTRA_BLOBS;
       expected: number;
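After this change the "dense and ordered" requirement lives at the by-range call site, while `validateBlockBlobSidecars` accepts any subset. A compact sketch of the resulting contract, reduced to indexes (illustrative helpers, not the real API):

```ts
// By-range: a response must carry every blob of the block, in index order.
function assertDenseAndOrdered(blockBlobCount: number, blobSidecars: {index: number}[]): void {
  if (blobSidecars.length !== blockBlobCount) throw Error("MISSING_BLOBS");
  blobSidecars.forEach((sidecar, i) => {
    if (sidecar.index !== i) throw Error("OUT_OF_ORDER_BLOBS");
  });
}

// By-root: only the blobs still missing are fetched, so a sparse subset is fine;
// each sidecar must still belong to the block (header equality, inclusion proof
// and the batched KZG proof in the real validation).
function isPlausibleSparseSubset(blockBlobCount: number, blobSidecars: {index: number}[]): boolean {
  return blobSidecars.every((sidecar) => sidecar.index < blockBlobCount);
}
```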
From 92e477d70784d2caca4e68ff82a340f23ed3255a Mon Sep 17 00:00:00 2001
From: twoeths <10568965+twoeths@users.noreply.github.com>
Date: Mon, 8 Sep 2025 17:36:04 +0700
Subject: [PATCH 126/173] fix: new error code NOT_ENOUGH_SIDECARS_RECEIVED
 (#8351)

**Motivation**

- when not enough sidecars were received, we threw `DOWNLOAD_BY_ROOT_ERROR_EXTRA_SIDECAR_RECEIVED`

**Description**

- define and use the new `NOT_ENOUGH_SIDECARS_RECEIVED` error instead

Closes #8350

---------

Co-authored-by: Tuyen Nguyen
---
 .../src/sync/utils/downloadByRoot.ts | 25 ++++++++++++++++++-
 1 file changed, 24 insertions(+), 1 deletion(-)

diff --git a/packages/beacon-node/src/sync/utils/downloadByRoot.ts b/packages/beacon-node/src/sync/utils/downloadByRoot.ts
index bc2e72b49213..68ba9df29ab2 100644
--- a/packages/beacon-node/src/sync/utils/downloadByRoot.ts
+++ b/packages/beacon-node/src/sync/utils/downloadByRoot.ts
@@ -1,7 +1,7 @@
 import {ChainForkConfig} from "@lodestar/config";
 import {ForkPostDeneb, ForkPostFulu, ForkPreFulu, isForkPostDeneb, isForkPostFulu} from "@lodestar/params";
 import {SignedBeaconBlock, deneb, fulu} from "@lodestar/types";
-import {LodestarError, fromHex, prettyBytes, toRootHex} from "@lodestar/utils";
+import {LodestarError, fromHex, prettyBytes, prettyPrintIndices, toRootHex} from "@lodestar/utils";
 import {isBlockInputBlobs, isBlockInputColumns} from "../../chain/blocks/blockInput/blockInput.js";
 import {BlobMeta, BlockInputSource, IBlockInput, MissingColumnMeta} from "../../chain/blocks/blockInput/types.js";
 import {SeenBlockInput} from "../../chain/seenCache/seenGossipBlockInput.js";
@@ -346,6 +346,22 @@ export async function fetchAndValidateColumns({
   const columnSidecars = await network.sendDataColumnSidecarsByRoot(peerIdStr, [
     {blockRoot, columns: requestedColumns},
   ]);
+
+  // sanity check that the peer returned the correct number of columnSidecars
+  if (columnSidecars.length < requestedColumns.length) {
+    const returnedColumns = new Set(columnSidecars.map((c) => c.index));
+    throw new DownloadByRootError(
+      {
+        code: DownloadByRootErrorCode.NOT_ENOUGH_SIDECARS_RECEIVED,
+        peer: prettyPrintPeerIdStr(peerIdStr),
+        blockRoot: prettyBytes(blockRoot),
+        missingIndices: prettyPrintIndices(requestedColumns.filter(c => !returnedColumns.has(c))),
+      },
+      "Did not receive all of the requested columnSidecars"
+    );
+  }
+
+  // check each returned columnSidecar
   for (let i = 0; i < requestedColumns.length; i++) {
     const columnSidecar = columnSidecars[i];
     if (columnSidecar.index !== requestedColumns[i]) {
@@ -419,6 +435,7 @@ export enum DownloadByRootErrorCode {
   MISMATCH_BLOCK_ROOT = "DOWNLOAD_BY_ROOT_ERROR_MISMATCH_BLOCK_ROOT",
   EXTRA_SIDECAR_RECEIVED = "DOWNLOAD_BY_ROOT_ERROR_EXTRA_SIDECAR_RECEIVED",
+  NOT_ENOUGH_SIDECARS_RECEIVED = "DOWNLOAD_BY_ROOT_ERROR_NOT_ENOUGH_SIDECARS_RECEIVED",
   INVALID_INCLUSION_PROOF = "DOWNLOAD_BY_ROOT_ERROR_INVALID_INCLUSION_PROOF",
   INVALID_KZG_PROOF = "DOWNLOAD_BY_ROOT_ERROR_INVALID_KZG_PROOF",
   MISSING_BLOCK_RESPONSE = "DOWNLOAD_BY_ROOT_ERROR_MISSING_BLOCK_RESPONSE",
@@ -439,6 +456,12 @@ export type DownloadByRootErrorType =
       blockRoot: string;
       invalidIndex: number;
     }
+  | {
+      code: DownloadByRootErrorCode.NOT_ENOUGH_SIDECARS_RECEIVED;
+      peer: string;
+      blockRoot: string;
+      missingIndices: string;
+    }
   | {
       code: DownloadByRootErrorCode.INVALID_INCLUSION_PROOF;
      peer: string;

From aabe8be5a77c3ed3827f93386e9f289c54822f90 Mon Sep 17 00:00:00 2001
From: twoeths <10568965+twoeths@users.noreply.github.com>
Date: Mon, 8 Sep 2025 17:37:29 +0700
Subject: [PATCH 127/173] fix: resolve data promise for BlockInputPreData
 (#8299)

**Motivation**

- not able to process pre-Deneb blocks because the node waits for data to be complete, but that will never happen

**Description**

- resolve the data promise for BlockInputPreData in the constructor

---------

Co-authored-by: Tuyen Nguyen
---
 packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts b/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts
index d6fae7d2ef6f..5daecf4aa684 100644
--- a/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts
+++ b/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts
@@ -201,6 +201,8 @@ export class BlockInputPreData extends AbstractBlockInput {
   private constructor(init: BlockInputInit, state: BlockInputPreDataState) {
     super(init);
     this.state = state;
+    this.dataPromise.resolve(null);
+    this.blockPromise.resolve(state.block);
   }
 
   static createFromBlock(props: AddBlock & CreateBlockInputMeta): BlockInputPreData {
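The pattern behind this fix is a deferred promise that consumers await: if nothing ever calls resolve, block processing hangs forever. Pre-Deneb there are no sidecars, so "data complete" is already true at construction time. A minimal sketch of the pattern (standalone class; `Promise.withResolvers` requires Node >= 22 / ES2024, an assumption for this sketch):

```ts
// Minimal model of the PATCH 127 fix: resolve the deferred promises in the
// constructor, because no sidecar will ever arrive to resolve them later.
class PreDataBlockInputSketch<B> {
  readonly blockPromise: Promise<B>;
  readonly dataPromise: Promise<null>;

  constructor(block: B) {
    const blockDeferred = Promise.withResolvers<B>();
    const dataDeferred = Promise.withResolvers<null>();
    this.blockPromise = blockDeferred.promise;
    this.dataPromise = dataDeferred.promise;
    // without these two calls, any `await input.dataPromise` would never settle
    blockDeferred.resolve(block);
    dataDeferred.resolve(null);
  }
}
```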
it will never happen **Description** - resolve data promise for BlockInputPreData in the constructor --------- Co-authored-by: Tuyen Nguyen --- packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts | 2 ++ 1 file changed, 2 insertions(+) diff --git a/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts b/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts index d6fae7d2ef6f..5daecf4aa684 100644 --- a/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts +++ b/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts @@ -201,6 +201,8 @@ export class BlockInputPreData extends AbstractBlockInput { private constructor(init: BlockInputInit, state: BlockInputPreDataState) { super(init); this.state = state; + this.dataPromise.resolve(null); + this.blockPromise.resolve(state.block); } static createFromBlock(props: AddBlock & CreateBlockInputMeta): BlockInputPreData { From 10dbedeca0a38a58c292edc708858b336dff768e Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Tue, 9 Sep 2025 00:57:58 +0700 Subject: [PATCH 128/173] docs: add TODO --- packages/beacon-node/src/metrics/metrics/beacon.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/packages/beacon-node/src/metrics/metrics/beacon.ts b/packages/beacon-node/src/metrics/metrics/beacon.ts index 2dd9bf6bc4b4..00aaa7f75680 100644 --- a/packages/beacon-node/src/metrics/metrics/beacon.ts +++ b/packages/beacon-node/src/metrics/metrics/beacon.ts @@ -287,6 +287,7 @@ export function createBeaconMetrics(register: RegistryMetricCreator) { }), }, + // TODO(fulu): check if these and metrics in lodestar.ts for dataColumns should/can be combined or organized together peerDas: { dataColumnSidecarProcessingRequests: register.counter({ name: "beacon_data_column_sidecar_processing_requests_total", From 423c77ab9a70b0f57665688c5b1cd4f57de69313 Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Tue, 9 Sep 2025 01:04:55 +0700 Subject: [PATCH 129/173] feat: only cache gossip block after validation --- .../src/network/processor/gossipHandlers.ts | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/packages/beacon-node/src/network/processor/gossipHandlers.ts b/packages/beacon-node/src/network/processor/gossipHandlers.ts index 24ec21677988..19d3158693d4 100644 --- a/packages/beacon-node/src/network/processor/gossipHandlers.ts +++ b/packages/beacon-node/src/network/processor/gossipHandlers.ts @@ -130,18 +130,8 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand // always set block to seen cache for all forks so that we don't need to download it // TODO: validate block before adding to cache // tracked in https://github.com/ChainSafe/lodestar/issues/7957 - const blockInput = chain.seenGossipBlockInput.getByBlock({ - block: signedBlock, - blockRootHex, - source: BlockInputSource.gossip, - seenTimestampSec, - peerIdStr, - }); - - const blockInputMeta = blockInput.getLogMeta(); const logCtx = { - ...blockInputMeta, currentSlot: chain.clock.currentSlot, peerId: peerIdStr, delaySec, @@ -150,8 +140,17 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand logger.debug("Received gossip block", {...logCtx}); + let blockInput: IBlockInput | undefined; try { await validateGossipBlock(config, chain, signedBlock, fork); + blockInput = chain.seenGossipBlockInput.getByBlock({ + block: signedBlock, + blockRootHex, + source: BlockInputSource.gossip, + seenTimestampSec, + peerIdStr, + }); + const blockInputMeta = blockInput.getLogMeta(); const recvToValidation 
= Date.now() / 1000 - seenTimestampSec; const validationTime = recvToValidation - recvToValLatency; @@ -159,7 +158,7 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand metrics?.gossipBlock.gossipValidation.recvToValidation.observe(recvToValidation); metrics?.gossipBlock.gossipValidation.validationTime.observe(validationTime); - logger.debug("Validated gossip block", {...logCtx, recvToValidation, validationTime}); + logger.debug("Validated gossip block", {...blockInputMeta, ...logCtx, recvToValidation, validationTime}); chain.emitter.emit(routes.events.EventType.blockGossip, {slot, block: blockRootHex}); @@ -169,8 +168,9 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand // TODO(fulu): check that this is the only error that should trigger resolution of the block and all others // cause the block to get thrown away // Don't trigger this yet if full block and blobs haven't arrived yet - if (e.type.code === BlockErrorCode.PARENT_UNKNOWN && blockInput !== null) { + if (e.type.code === BlockErrorCode.PARENT_UNKNOWN && blockInput) { logger.debug("Gossip block has error", {slot, root: blockShortHex, code: e.type.code}); + // TODO(fulu): should this be unknownParent event? chain.emitter.emit(ChainEvent.incompleteBlockInput, { blockInput, peer: peerIdStr, From 588c237e5325e421c2773e9aac624bf1bef23ce5 Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Tue, 9 Sep 2025 02:07:19 +0700 Subject: [PATCH 130/173] docs: update comments and add TODOs --- .../src/network/processor/gossipHandlers.ts | 10 ++++++++++ packages/beacon-node/src/sync/constants.ts | 3 ++- packages/beacon-node/src/sync/range/batch.ts | 1 - 3 files changed, 12 insertions(+), 2 deletions(-) diff --git a/packages/beacon-node/src/network/processor/gossipHandlers.ts b/packages/beacon-node/src/network/processor/gossipHandlers.ts index 19d3158693d4..84fcc35f56f7 100644 --- a/packages/beacon-node/src/network/processor/gossipHandlers.ts +++ b/packages/beacon-node/src/network/processor/gossipHandlers.ts @@ -248,6 +248,11 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand if (e.type.code === BlobSidecarErrorCode.PARENT_UNKNOWN) { logger.debug("Gossip blob has error", {slot, root: blockShortHex, code: e.type.code}); // no need to trigger `unknownBlockParent` event here, as we already did it in `validateBeaconBlock()` + // + // TODO(fulu): is this note above correct? Could have random blob that we see that could trigger + // unknownBlockSync. And duplicate addition of a block will be deduplicated by the + // BlockInputSync event handler. Check this!! + // events.emit(NetworkEvent.unknownBlockParent, {blockInput, peer: peerIdStr}); } if (e.action === GossipAction.REJECT) { @@ -327,6 +332,11 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand `gossip_reject_slot_${slot}_index_${dataColumnSidecar.index}` ); // no need to trigger `unknownBlockParent` event here, as we already did it in `validateBeaconBlock()` + // + // TODO(fulu): is this note above correct? Could have random column that we see that could trigger + // unknownBlockSync. And duplicate addition of a block will be deduplicated by the + // BlockInputSync event handler. Check this!! 
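+      //             For reference, a minimal dedupe sketch (cache shape assumed, not part of this
+      //             change): BlockInputSync could key pending inputs by root so repeated emits for
+      //             the same block become no-ops, e.g.
+      //               if (pendingBlockInputs.has(blockInput.blockRootHex)) return;
+      //               pendingBlockInputs.set(blockInput.blockRootHex, blockInput);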
+ // events.emit(NetworkEvent.unknownBlockParent, {blockInput, peer: peerIdStr}); } throw e; diff --git a/packages/beacon-node/src/sync/constants.ts b/packages/beacon-node/src/sync/constants.ts index 0df632917acd..7bef3598b181 100644 --- a/packages/beacon-node/src/sync/constants.ts +++ b/packages/beacon-node/src/sync/constants.ts @@ -12,7 +12,8 @@ export const MAX_BATCH_DOWNLOAD_ATTEMPTS = 20; /** * Consider batch faulty after downloading and processing this number of times - * for example a peer may send us a non-canonical chain segment or not returning all blocks + * as in https://github.com/ChainSafe/lodestar/issues/8147 we cannot advance the sync chain if there is an unknown parent + * from a prior batch. For example a peer may send us a non-canonical chain segment or not return all blocks * in that case we should throw error and `RangeSync` should remove that error chain and add a new one. **/ export const MAX_BATCH_PROCESSING_ATTEMPTS = 0; diff --git a/packages/beacon-node/src/sync/range/batch.ts b/packages/beacon-node/src/sync/range/batch.ts index e43b29b3f9e7..60bdb284b9bd 100644 --- a/packages/beacon-node/src/sync/range/batch.ts +++ b/packages/beacon-node/src/sync/range/batch.ts @@ -270,7 +270,6 @@ export class Batch { /** * Downloading -> AwaitingProcessing - * pendingDataColumns is null when a complete download is done, otherwise it contains the columns that are still pending */ downloadingSuccess(peer: PeerIdStr, blocks: IBlockInput[]): DownloadSuccessState { if (this.state.status !== BatchStatus.Downloading) { From 119261867d7a6813e0260d01d001c29edf2ad369 Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Tue, 9 Sep 2025 02:07:42 +0700 Subject: [PATCH 131/173] refactor: don't create separate variable, use object prop --- packages/beacon-node/src/sync/range/utils/peerBalancer.ts | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/packages/beacon-node/src/sync/range/utils/peerBalancer.ts b/packages/beacon-node/src/sync/range/utils/peerBalancer.ts index 398174feb315..b1f321d94005 100644 --- a/packages/beacon-node/src/sync/range/utils/peerBalancer.ts +++ b/packages/beacon-node/src/sync/range/utils/peerBalancer.ts @@ -118,7 +118,7 @@ export class ChainPeersBalancer { } for (const peer of this.peers) { - const {earliestAvailableSlot, custodyGroups, target, peerId} = peer; + const {earliestAvailableSlot, target, peerId} = peer; const activeRequest = this.activeRequestsByPeer.get(peerId) ?? 0; if (noActiveRequest && activeRequest > 0) { @@ -163,8 +163,7 @@ continue; } - const peerColumns = custodyGroups; - const columns = peerColumns.reduce((acc, elem) => { + const columns = peer.custodyGroups.reduce((acc, elem) => { if (requestColumns.includes(elem)) { acc.push(elem); } From e257931764a1c70ae257d4a173877b5fa016bf23 Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Tue, 9 Sep 2025 16:27:54 +0700 Subject: [PATCH 132/173] docs: add TODO --- packages/beacon-node/src/network/peers/peerManager.ts | 2 ++ 1 file changed, 2 insertions(+) diff --git a/packages/beacon-node/src/network/peers/peerManager.ts b/packages/beacon-node/src/network/peers/peerManager.ts index bf0351035f30..c4a6349b95cb 100644 --- a/packages/beacon-node/src/network/peers/peerManager.ts +++ b/packages/beacon-node/src/network/peers/peerManager.ts @@ -357,6 +357,8 @@ export class PeerManager { (metadata as Partial).custodyGroupCount ?? 
// TODO: spec says that Clients MAY reject peers with a value less than CUSTODY_REQUIREMENT this.config.CUSTODY_REQUIREMENT, + // TODO(fulu): this should be columns not groups. need to change everywhere. we consume columns and should + // cache that instead so if groups->columns ever changes from 1-1 we only need to update that here custodyGroups, samplingGroups, }; From 633d137de336e559388dc3c78fde56522ce2bb92 Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Tue, 9 Sep 2025 16:28:11 +0700 Subject: [PATCH 133/173] refactor: imports --- packages/beacon-node/src/sync/unknownBlock.ts | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/packages/beacon-node/src/sync/unknownBlock.ts b/packages/beacon-node/src/sync/unknownBlock.ts index 0fbb5d333ddd..48c6b05c8962 100644 --- a/packages/beacon-node/src/sync/unknownBlock.ts +++ b/packages/beacon-node/src/sync/unknownBlock.ts @@ -1,8 +1,7 @@ import {ChainForkConfig} from "@lodestar/config"; import {ForkSeq, INTERVALS_PER_SLOT} from "@lodestar/params"; import {RootHex} from "@lodestar/types"; -import {Logger, prettyBytes, prettyPrintIndices, pruneSetToMax} from "@lodestar/utils"; -import {sleep} from "@lodestar/utils"; +import {sleep, Logger, prettyBytes, prettyPrintIndices, pruneSetToMax} from "@lodestar/utils"; import {isBlockInputBlobs, isBlockInputColumns} from "../chain/blocks/blockInput/blockInput.js"; import {BlockInputSource, IBlockInput} from "../chain/blocks/blockInput/types.js"; import {BlockError, BlockErrorCode} from "../chain/errors/index.js"; From 5c3d888b56d2c831f63fd481aaf2eab95f91b3bc Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Tue, 9 Sep 2025 19:24:02 +0700 Subject: [PATCH 134/173] fix: remove duplicate BlockInputCache --- packages/beacon-node/src/chain/chain.ts | 12 +----------- packages/beacon-node/src/chain/interface.ts | 1 - 2 files changed, 1 insertion(+), 12 deletions(-) diff --git a/packages/beacon-node/src/chain/chain.ts b/packages/beacon-node/src/chain/chain.ts index 5c86aff46d55..17ddecbb06f9 100644 --- a/packages/beacon-node/src/chain/chain.ts +++ b/packages/beacon-node/src/chain/chain.ts @@ -281,16 +281,6 @@ export class BeaconChain implements IBeaconChain { initialCustodyGroupCount, }); - this.seenGossipBlockInput = new SeenBlockInput({ - config: this.config, - custodyConfig: this.custodyConfig, - chainEvents: emitter, - clock, - logger, - metrics, - signal, - }); - this.beaconProposerCache = new BeaconProposerCache(opts); this.checkpointBalancesCache = new CheckpointBalancesCache(); this.seenBlockInputCache = new SeenBlockInput({ @@ -448,7 +438,7 @@ export class BeaconChain implements IBeaconChain { } seenBlock(blockRoot: RootHex): boolean { - return this.seenGossipBlockInput.has(blockRoot) || this.forkChoice.hasBlockHex(blockRoot); + return this.seenBlockInputCache.has(blockRoot) || this.forkChoice.hasBlockHex(blockRoot); } regenCanAcceptWork(): boolean { diff --git a/packages/beacon-node/src/chain/interface.ts b/packages/beacon-node/src/chain/interface.ts index d9a2e8a7fa7e..9ee5b7660a99 100644 --- a/packages/beacon-node/src/chain/interface.ts +++ b/packages/beacon-node/src/chain/interface.ts @@ -126,7 +126,6 @@ export interface IBeaconChain { readonly seenContributionAndProof: SeenContributionAndProof; readonly seenAttestationDatas: SeenAttestationDatas; readonly seenBlockInputCache: SeenBlockInput; - readonly seenGossipBlockInput: SeenBlockInput; // Seen cache for liveness checks readonly seenBlockAttesters: SeenBlockAttesters; From 234c26fa242bef9e6bc238c00c37e18e3e8ef01e Mon Sep 17 00:00:00 
2001 From: twoeths <10568965+twoeths@users.noreply.github.com> Date: Tue, 9 Sep 2025 23:42:05 +0700 Subject: [PATCH 135/173] feat: track DownloadByRoot errors on new metrics (#8352) **Motivation** - there are a lot of DownloadByRoot errors appearing in the logs that we want to know more about **Description** - track errors by code for all clients - given a specific code, count errors by client This will give us an overview of which clients have specific issues in order to work with them later. We may also want to mitigate some of them later (will need to monitor metrics) --------- Co-authored-by: Tuyen Nguyen Co-authored-by: Cayman --- .../beacon-node/src/metrics/metrics/lodestar.ts | 11 +++++++++++ packages/beacon-node/src/sync/unknownBlock.ts | 15 ++++++++++++++- 2 files changed, 25 insertions(+), 1 deletion(-) diff --git a/packages/beacon-node/src/metrics/metrics/lodestar.ts b/packages/beacon-node/src/metrics/metrics/lodestar.ts index 5385c2dfe226..99a988ea5d00 100644 --- a/packages/beacon-node/src/metrics/metrics/lodestar.ts +++ b/packages/beacon-node/src/metrics/metrics/lodestar.ts @@ -552,6 +552,17 @@ export function createLodestarMetrics( help: "Total number of blocks whose data availability was resolved", labelNames: ["source"], }), + downloadByRoot: { + success: register.gauge({ + name: "lodestar_sync_unknown_block_download_by_root_success_total", + help: "Total number of successful downloadByRoot calls", + }), + error: register.gauge<{code: string; client: string}>({ + name: "lodestar_sync_unknown_block_download_by_root_error_total", + help: "Total number of errored downloadByRoot calls", + labelNames: ["code", "client"], + }), + }, peerBalancer: { peersMetaCount: register.gauge({ name: "lodestar_sync_unknown_block_peer_balancer_peers_meta_count", diff --git a/packages/beacon-node/src/sync/unknownBlock.ts b/packages/beacon-node/src/sync/unknownBlock.ts index 48c6b05c8962..8d0230c21e0f 100644 --- a/packages/beacon-node/src/sync/unknownBlock.ts +++ b/packages/beacon-node/src/sync/unknownBlock.ts @@ -25,8 +25,9 @@ import { getBlockInputSyncCacheItemSlot, isPendingBlockInput, } from "./types.js"; -import {downloadByRoot} from "./utils/downloadByRoot.js"; +import {DownloadByRootError, DownloadByRootErrorCode, downloadByRoot} from "./utils/downloadByRoot.js"; import {getAllDescendantBlocks, getDescendantBlocks, getUnknownAndAncestorBlocks} from "./utils/pendingBlocksTree.js"; +import {RequestError} from "@lodestar/reqresp"; const MAX_ATTEMPTS_PER_BLOCK = 5; const MAX_KNOWN_BAD_BLOCKS = 500; @@ -515,12 +516,24 @@ export class BlockInputSync { peerMeta, cacheItem, }); + this.metrics?.blockInputSync.downloadByRoot.success.inc(); } catch (e) { this.logger.debug( "Error downloading in BlockInputSync.fetchBlockInput", {attempt: i, rootHex, peer: peerId, peerClient}, e as Error ); + const downloadByRootMetrics = this.metrics?.blockInputSync.downloadByRoot; + if (e instanceof DownloadByRootError) { + const errorCode = e.type.code; + downloadByRootMetrics?.error.inc({code: errorCode, client: peerClient}); + } else if (e instanceof RequestError) { + // should look into req_resp metrics in this case + downloadByRootMetrics?.error.inc({code: "req_resp", client: peerClient}); + } else { + // investigate if this happens + downloadByRootMetrics?.error.inc({code: "unknown", client: peerClient}); + } } finally { this.peerBalancer.onRequestCompleted(peerId); } From 1737333370020cc428cccdf979fee6f0c71a1413 Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Wed, 10 Sep 2025 00:28:03 +0700 Subject: [PATCH
136/173] fix: build error --- packages/beacon-node/src/chain/chain.ts | 1 - packages/beacon-node/src/sync/unknownBlock.ts | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/packages/beacon-node/src/chain/chain.ts b/packages/beacon-node/src/chain/chain.ts index 17ddecbb06f9..3f9f1051c898 100644 --- a/packages/beacon-node/src/chain/chain.ts +++ b/packages/beacon-node/src/chain/chain.ts @@ -150,7 +150,6 @@ export class BeaconChain implements IBeaconChain { readonly seenSyncCommitteeMessages = new SeenSyncCommitteeMessages(); readonly seenContributionAndProof: SeenContributionAndProof; readonly seenAttestationDatas: SeenAttestationDatas; - readonly seenGossipBlockInput: SeenBlockInput; readonly seenBlockInputCache: SeenBlockInput; // Seen cache for liveness checks readonly seenBlockAttesters = new SeenBlockAttesters(); diff --git a/packages/beacon-node/src/sync/unknownBlock.ts b/packages/beacon-node/src/sync/unknownBlock.ts index 8d0230c21e0f..1da4167f1772 100644 --- a/packages/beacon-node/src/sync/unknownBlock.ts +++ b/packages/beacon-node/src/sync/unknownBlock.ts @@ -512,7 +512,7 @@ export class BlockInputSync { cacheItem = await downloadByRoot({ config: this.config, network: this.network, - seenCache: this.chain.seenGossipBlockInput, + seenCache: this.chain.seenBlockInputCache, peerMeta, cacheItem, }); From c884573184977d74b02b50a72cd7d8e07eb3f625 Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Wed, 10 Sep 2025 00:39:42 +0700 Subject: [PATCH 137/173] fix: build error --- .../src/network/processor/gossipHandlers.ts | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/packages/beacon-node/src/network/processor/gossipHandlers.ts b/packages/beacon-node/src/network/processor/gossipHandlers.ts index 84fcc35f56f7..52662b490289 100644 --- a/packages/beacon-node/src/network/processor/gossipHandlers.ts +++ b/packages/beacon-node/src/network/processor/gossipHandlers.ts @@ -143,7 +143,7 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand let blockInput: IBlockInput | undefined; try { await validateGossipBlock(config, chain, signedBlock, fork); - blockInput = chain.seenGossipBlockInput.getByBlock({ + blockInput = chain.seenBlockInputCache.getByBlock({ block: signedBlock, blockRootHex, source: BlockInputSource.gossip, @@ -185,7 +185,7 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand } } - chain.seenGossipBlockInput.prune(blockRootHex); + chain.seenBlockInputCache.prune(blockRootHex); throw e; } } @@ -207,7 +207,7 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand try { await validateGossipBlobSidecar(fork, chain, blobSidecar, subnet); - const blockInput = chain.seenGossipBlockInput.getByBlob({ + const blockInput = chain.seenBlockInputCache.getByBlob({ blockRootHex, blobSidecar, source: BlockInputSource.gossip, @@ -287,7 +287,7 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand try { await validateGossipDataColumnSidecar(chain, dataColumnSidecar, gossipSubnet, metrics); - const blockInput = chain.seenGossipBlockInput.getByColumn({ + const blockInput = chain.seenBlockInputCache.getByColumn({ blockRootHex, columnSidecar: dataColumnSidecar, source: BlockInputSource.gossip, @@ -398,7 +398,7 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand // Returns the delay between the start of `block.slot` and `current time` const delaySec = chain.clock.secFromSlot(slot); 
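     // e.g. with SECONDS_PER_SLOT = 12 (mainnet preset, assumed here) a block that finishes
     // processing 3s into its slot observes delaySec ~= 3.0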
metrics?.gossipBlock.elapsedTimeTillProcessed.observe(delaySec); - chain.seenGossipBlockInput.prune(blockInput.blockRootHex); + chain.seenBlockInputCache.prune(blockInput.blockRootHex); }) .catch((e) => { // Adjust verbosity based on error type @@ -433,7 +433,7 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand metrics?.gossipBlock.processBlockErrors.inc({error: e instanceof BlockError ? e.type.code : "NOT_BLOCK_ERROR"}); logger[logLevel]("Error receiving block", {slot, peer: peerIdStr}, e as Error); // TODO(fulu): Revisit when we prune block inputs - chain.seenGossipBlockInput.prune(blockInput.blockRootHex); + chain.seenBlockInputCache.prune(blockInput.blockRootHex); }); } From 855bdbfc01205ab12b705e140af1e8958c21f0c7 Mon Sep 17 00:00:00 2001 From: Cayman Date: Tue, 9 Sep 2025 16:18:25 -0400 Subject: [PATCH 138/173] fix: prevent faulty by range EXTRA_COLUMNS error --- .../src/sync/utils/downloadByRange.ts | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/packages/beacon-node/src/sync/utils/downloadByRange.ts b/packages/beacon-node/src/sync/utils/downloadByRange.ts index d1539757b91f..0fbf8f5a8d59 100644 --- a/packages/beacon-node/src/sync/utils/downloadByRange.ts +++ b/packages/beacon-node/src/sync/utils/downloadByRange.ts @@ -527,16 +527,26 @@ export async function validateColumnsByRangeResponse( dataRequestBlocks: ValidatedBlock[], columnSidecars: fulu.DataColumnSidecars ): Promise { + // Expected column count considering currently-validated batch blocks const expectedColumnCount = dataRequestBlocks.reduce((acc, {block}) => { return (block as SignedBeaconBlock).message.body.blobKzgCommitments.length > 0 ? request.columns.length + acc : acc; }, 0); - if (columnSidecars.length > expectedColumnCount) { + const nextSlot = dataRequestBlocks.length + ? 
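+      // Worked example for the bound computed below (values illustrative): startSlot=0, count=10,
+      // last validated block at slot 7 -> nextSlot=8, possiblyMissingBlocks = 0 + 10 - 8 = 2, so
+      // maxColumnCount = expectedColumnCount + 2 * request.columns.length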
(dataRequestBlocks.at(-1) as ValidatedBlock).block.message.slot + 1 + : request.startSlot; + const possiblyMissingBlocks = request.startSlot + request.count - nextSlot; + + // Allow for extra columns if some blocks are missing from the end of a batch + // Eg: If we requested 10 blocks but only 8 were returned, allow for up to 2 * columns.length extra columns + const maxColumnCount = expectedColumnCount + possiblyMissingBlocks * request.columns.length; + + if (columnSidecars.length > maxColumnCount) { throw new DownloadByRangeError( { code: DownloadByRangeErrorCode.EXTRA_COLUMNS, - expected: expectedColumnCount, + max: maxColumnCount, actual: columnSidecars.length, }, "Extra data columns received in DataColumnSidecarsByRange response" ); @@ -762,7 +772,7 @@ export type DownloadByRangeErrorType = } | { code: DownloadByRangeErrorCode.EXTRA_COLUMNS; - expected: number; + max: number; actual: number; } | { From 143d1847f4387ee478375ba936ca6ecc2a747490 Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Wed, 10 Sep 2025 03:56:26 +0700 Subject: [PATCH 139/173] feat: reimplement PR#7940 for column reconstruction --- .../src/chain/blocks/blockInput/blockInput.ts | 4 + .../src/chain/blocks/blockInput/types.ts | 1 + .../src/metrics/metrics/lodestar.ts | 14 ++- .../src/network/processor/gossipHandlers.ts | 32 ++++++- packages/beacon-node/src/util/blobs.ts | 4 +- packages/beacon-node/src/util/dataColumns.ts | 93 ++++++++++++++++++- .../beacon-node/test/unit/util/kzg.test.ts | 4 +- 7 files changed, 144 insertions(+), 8 deletions(-) diff --git a/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts b/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts index 5daecf4aa684..6f4a46d7554c 100644 --- a/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts +++ b/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts @@ -607,6 +607,10 @@ export class BlockInputColumns extends AbstractBlockInput & CreateBlockInputMeta & {sampledColumns: ColumnIndex[]; custodyColumns: ColumnIndex[]} diff --git a/packages/beacon-node/src/chain/blocks/blockInput/types.ts b/packages/beacon-node/src/chain/blocks/blockInput/types.ts index 8157630a619b..252457113321 100644 --- a/packages/beacon-node/src/chain/blocks/blockInput/types.ts +++ b/packages/beacon-node/src/chain/blocks/blockInput/types.ts @@ -20,6 +20,7 @@ export enum BlockInputSource { engine = "engine", byRange = "req_resp_by_range", byRoot = "req_resp_by_root", + recovery = "recovery", } export type PromiseParts = { diff --git a/packages/beacon-node/src/metrics/metrics/lodestar.ts b/packages/beacon-node/src/metrics/metrics/lodestar.ts index 99a988ea5d00..70424b8b21dd 100644 --- a/packages/beacon-node/src/metrics/metrics/lodestar.ts +++ b/packages/beacon-node/src/metrics/metrics/lodestar.ts @@ -20,7 +20,7 @@ import {BackfillSyncMethod} from "../../sync/backfill/backfill.js"; import {PendingBlockType} from "../../sync/types.js"; import {PeerSyncType, RangeSyncType} from "../../sync/utils/remoteSyncType.js"; import {AllocSource} from "../../util/bufferPool.js"; -import {RecoverResult} from "../../util/dataColumns.js"; +import {DataColumnReconstructionCode} from "../../util/dataColumns.js"; import {LodestarMetadata} from "../options.js"; import {RegistryMetricCreator} from "../utils/registryMetricCreator.js"; @@ -763,11 +763,21 @@ export function createLodestarMetrics( help: "Time elapsed between block slot time and the time data column sidecar reconstructed", buckets: [2, 4, 6, 8, 10, 12], }), + recoverTime: register.histogram({ + name: 
"lodestar_recover_data_column_sidecar_recover_time_seconds", + help: "Time elapsed to recover data column sidecar", + // this data comes from 20 blobs in `fusaka-devnet-1`, need to reevaluate in the future + buckets: [0.4, 0.6, 0.8, 1.0, 1.2], + }), custodyBeforeReconstruction: register.gauge({ name: "lodestar_data_columns_in_custody_before_reconstruction", help: "Number of data columns in custody before reconstruction", }), - reconstructionResult: register.gauge<{result: RecoverResult}>({ + numberOfColumnsRecovered: register.gauge({ + name: "lodestar_recover_data_column_sidecar_recovered_columns_total", + help: "Total number of columns that were recovered", + }), + reconstructionResult: register.gauge<{result: DataColumnReconstructionCode}>({ name: "lodestar_data_column_sidecars_reconstruction_result", help: "Data column sidecars reconstruction result", labelNames: ["result"], diff --git a/packages/beacon-node/src/network/processor/gossipHandlers.ts b/packages/beacon-node/src/network/processor/gossipHandlers.ts index 52662b490289..764b9e4ddaf1 100644 --- a/packages/beacon-node/src/network/processor/gossipHandlers.ts +++ b/packages/beacon-node/src/network/processor/gossipHandlers.ts @@ -1,6 +1,13 @@ import {routes} from "@lodestar/api"; import {BeaconConfig, ChainForkConfig} from "@lodestar/config"; -import {ForkName, ForkPostElectra, ForkPreElectra, ForkSeq, isForkPostElectra} from "@lodestar/params"; +import { + ForkName, + ForkPostElectra, + ForkPreElectra, + ForkSeq, + isForkPostElectra, + NUMBER_OF_COLUMNS, +} from "@lodestar/params"; import {computeTimeAtSlot} from "@lodestar/state-transition"; import { Root, @@ -67,6 +74,8 @@ import {INetwork} from "../interface.js"; import {PeerAction} from "../peers/index.js"; import {AggregatorTracker} from "./aggregatorTracker.js"; import {getDataColumnSidecarsFromExecution} from "../../util/execution.js"; +import {DataColumnReconstructionError, recoverDataColumnSidecars} from "../../util/dataColumns.js"; +import {callInNextEventLoop} from "../../util/eventLoop.js"; /** * Gossip handler options as part of network options @@ -295,6 +304,27 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand peerIdStr, }); + // only triggers reconstruction on the 64th column to deduplicate the expensive request + if (blockInput.columnCount === NUMBER_OF_COLUMNS / 2) { + // do not await to block gossip handler + callInNextEventLoop(() => { + recoverDataColumnSidecars(blockInput, chain.clock, metrics).catch((err) => { + if (err instanceof DataColumnReconstructionError) { + metrics?.recoverDataColumnSidecars.reconstructionResult.inc({ + result: err.type.code, + }); + } + logger.debug( + "Error recovering column sidecars", + { + blockRoot: blockRootHex, + }, + err + ); + }); + }); + } + const recvToValidation = Date.now() / 1000 - seenTimestampSec; const validationTime = recvToValidation - recvToValLatency; diff --git a/packages/beacon-node/src/util/blobs.ts b/packages/beacon-node/src/util/blobs.ts index d34a699c9cb5..d21e251eb624 100644 --- a/packages/beacon-node/src/util/blobs.ts +++ b/packages/beacon-node/src/util/blobs.ts @@ -70,7 +70,7 @@ export function getBlobSidecars( * If the node obtains 50%+ of all the columns, it SHOULD reconstruct the full data matrix via the recover_matrix helper * See https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.4/specs/fulu/das-core.md#recover_matrix */ -export async function recoverDataColumnSidecars( +export async function dataColumnMatrixRecovery( partialSidecars: Map ): Promise { 
const columnCount = partialSidecars.size; @@ -160,7 +160,7 @@ export async function reconstructBlobs(sidecars: fulu.DataColumnSidecars): Promi fullSidecars = sidecars; } else { const sidecarsByIndex = new Map(sidecars.map((sc) => [sc.index, sc])); - const recoveredSidecars = await recoverDataColumnSidecars(sidecarsByIndex); + const recoveredSidecars = await dataColumnMatrixRecovery(sidecarsByIndex); if (recoveredSidecars === null) { // Should not happen because we check the column count above throw Error("Failed to reconstruct the full data matrix"); diff --git a/packages/beacon-node/src/util/dataColumns.ts b/packages/beacon-node/src/util/dataColumns.ts index 6649faa43466..4e2da079801f 100644 --- a/packages/beacon-node/src/util/dataColumns.ts +++ b/packages/beacon-node/src/util/dataColumns.ts @@ -14,9 +14,14 @@ import { fulu, } from "@lodestar/types"; import {ssz} from "@lodestar/types"; -import {bytesToBigInt} from "@lodestar/utils"; +import {bytesToBigInt, LodestarError} from "@lodestar/utils"; import {NodeId} from "../network/subnets/index.js"; import {kzg} from "./kzg.js"; +import {dataColumnMatrixRecovery} from "./blobs.js"; +import {BlockInputColumns} from "../chain/blocks/blockInput/blockInput.js"; +import {Metrics} from "../metrics/metrics.js"; +import {IClock} from "./clock.js"; +import {BlockInputSource} from "../chain/blocks/blockInput/types.js"; export enum RecoverResult { // the recover is not attempted because we have less than `NUMBER_OF_COLUMNS / 2` columns @@ -331,3 +336,89 @@ export function getDataColumnSidecarsFromColumnSidecar( cellsAndKzgProofs ); } + +/** + * If we receive at least half of NUMBER_OF_COLUMNS (64 columns) we should recover all remaining columns + */ +export async function recoverDataColumnSidecars( + blockInput: BlockInputColumns, + clock: IClock, + metrics: Metrics | null +): Promise<void> { + const existingColumns = blockInput.getAllColumns(); + const columnCount = existingColumns.length; + if (columnCount >= NUMBER_OF_COLUMNS) { + // We have all columns + throw new DataColumnReconstructionError({code: DataColumnReconstructionCode.NotAttemptedAlreadyFull}); + } + + if (columnCount < NUMBER_OF_COLUMNS / 2) { + // We don't have enough columns to recover + throw new DataColumnReconstructionError({code: DataColumnReconstructionCode.NotAttemptedHaveLessThanHalf}); + } + + metrics?.recoverDataColumnSidecars.custodyBeforeReconstruction.set(columnCount); + const partialSidecars = new Map(); + for (const columnSidecar of existingColumns) { + // the more columns we pass in, the slower the recovery + if (partialSidecars.size >= NUMBER_OF_COLUMNS / 2) { + break; + } + partialSidecars.set(columnSidecar.index, columnSidecar); + } + + const timer = metrics?.recoverDataColumnSidecars.recoverTime.startTimer(); + // if this function throws, the error is caught at the consumer side + const fullSidecars = await dataColumnMatrixRecovery(partialSidecars); + timer?.(); + if (fullSidecars == null) { + throw new DataColumnReconstructionError( + {code: DataColumnReconstructionCode.ReconstructionFailed}, + "No sidecars rebuilt via dataColumnMatrixRecovery" + ); + } + + const firstDataColumn = existingColumns.at(0); + if (firstDataColumn) { + const slot = firstDataColumn.signedBlockHeader.message.slot; + const secFromSlot = clock.secFromSlot(slot); + metrics?.recoverDataColumnSidecars.elapsedTimeTillReconstructed.observe(secFromSlot); + } + + if (blockInput.getAllColumns().length === NUMBER_OF_COLUMNS) { + // either gossip or getBlobsV2 resolved availability while we were recovering + throw new
DataColumnReconstructionError({code: DataColumnReconstructionCode.ReceivedAllDuringReconstruction}); + } + + // We successfully recovered the data columns, update the cache + for (const columnSidecar of fullSidecars) { + if (!blockInput.hasColumn(columnSidecar.index)) { + blockInput.addColumn({ + blockRootHex: blockInput.blockRootHex, + columnSidecar, + seenTimestampSec: Date.now(), + source: BlockInputSource.recovery, + }); + } + } + + metrics?.recoverDataColumnSidecars.reconstructionResult.inc({result: DataColumnReconstructionCode.Success}); +} + +export enum DataColumnReconstructionCode { + NotAttemptedAlreadyFull = "DATA_COLUMN_RECONSTRUCTION_NOT_ATTEMPTED_ALREADY_FULL", + NotAttemptedHaveLessThanHalf = "DATA_COLUMN_RECONSTRUCTION_NOT_ATTEMPTED_HAVE_LESS_THAN_HALF", + ReconstructionFailed = "DATA_COLUMN_RECONSTRUCTION_RECONSTRUCTION_FAILED", + ReceivedAllDuringReconstruction = "DATA_COLUMN_RECONSTRUCTION_RECEIVED_ALL_DURING_RECONSTRUCTION", + Success = "DATA_COLUMN_RECONSTRUCTION_SUCCESS", +} + +type DataColumnReconstructionErrorType = { + code: + | DataColumnReconstructionCode.NotAttemptedAlreadyFull + | DataColumnReconstructionCode.NotAttemptedHaveLessThanHalf + | DataColumnReconstructionCode.ReceivedAllDuringReconstruction + | DataColumnReconstructionCode.ReconstructionFailed; +}; + +export class DataColumnReconstructionError extends LodestarError<DataColumnReconstructionErrorType> {} diff --git a/packages/beacon-node/test/unit/util/kzg.test.ts b/packages/beacon-node/test/unit/util/kzg.test.ts index ac242ee03819..d008c1f246d0 100644 --- a/packages/beacon-node/test/unit/util/kzg.test.ts +++ b/packages/beacon-node/test/unit/util/kzg.test.ts @@ -4,7 +4,7 @@ import {signedBlockToSignedHeader} from "@lodestar/state-transition"; import {deneb, fulu, ssz} from "@lodestar/types"; import {afterEach, describe, expect, it} from "vitest"; import {validateBlockBlobSidecars, validateGossipBlobSidecar} from "../../../src/chain/validation/blobSidecar.js"; -import {getBlobSidecars, recoverDataColumnSidecars} from "../../../src/util/blobs.js"; +import {getBlobSidecars, dataColumnMatrixRecovery} from "../../../src/util/blobs.js"; import {getDataColumnSidecarsFromBlock} from "../../../src/util/dataColumns.js"; import {kzg} from "../../../src/util/kzg.js"; import {shuffle} from "../../../src/util/shuffle.js"; @@ -137,7 +137,7 @@ describe("KZG", () => { } } - const recoveredSidecars = await recoverDataColumnSidecars(shuffledPartial); + const recoveredSidecars = await dataColumnMatrixRecovery(shuffledPartial); expect(recoveredSidecars !== null).toBeTruthy(); if (recoveredSidecars == null) { // should not happen From d48201669bd2369d50b11c43c6256e8832472cb5 Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Wed, 10 Sep 2025 04:19:20 +0700 Subject: [PATCH 140/173] chore: lint and check-types --- .../beacon-node/src/execution/engine/http.ts | 14 +- .../src/execution/engine/interface.ts | 12 +- .../beacon-node/src/execution/engine/types.ts | 1 - packages/beacon-node/src/sync/unknownBlock.ts | 2 +- .../src/sync/utils/downloadByRoot.ts | 14 +- packages/beacon-node/src/util/execution.ts | 16 +- .../unit/sync/utils/downloadByRoot.test.ts | 340 +----------------- .../test/unit/util/execution.test.ts | 230 ++++++++++++ 8 files changed, 269 insertions(+), 360 deletions(-) create mode 100644 packages/beacon-node/test/unit/util/execution.test.ts diff --git a/packages/beacon-node/src/execution/engine/http.ts b/packages/beacon-node/src/execution/engine/http.ts index 33a1b66c39ab..e4e69f9d5fab 100644 --- a/packages/beacon-node/src/execution/engine/http.ts
+++ b/packages/beacon-node/src/execution/engine/http.ts @@ -491,8 +491,16 @@ export class ExecutionEngineHttp implements IExecutionEngine { return response.map(deserializeExecutionPayloadBody); } - async getBlobs(fork: ForkPostFulu, versionedHashes: VersionedHashes, buffers?: Uint8Array[]): Promise; - async getBlobs(fork: ForkPreFulu, versionedHashes: VersionedHashes, buffers?: Uint8Array[]): Promise<(BlobAndProof | null)[]>; + async getBlobs( + fork: ForkPostFulu, + versionedHashes: VersionedHashes, + buffers?: Uint8Array[] + ): Promise; + async getBlobs( + fork: ForkPreFulu, + versionedHashes: VersionedHashes, + buffers?: Uint8Array[] + ): Promise<(BlobAndProof | null)[]>; async getBlobs( fork: ForkName, versionedHashes: VersionedHashes @@ -539,7 +547,7 @@ export class ExecutionEngineHttp implements IExecutionEngine { throw Error(`Invalid buffer[${i}] length=${buffer.length} expected=${BLOB_AND_PROOF_V2_RPC_BYTES}`); } } - } + } const response = await this.rpc.fetchWithRetries< EngineApiRpcReturnTypes["engine_getBlobsV2"], diff --git a/packages/beacon-node/src/execution/engine/interface.ts b/packages/beacon-node/src/execution/engine/interface.ts index 76f262610b06..db4901956ccd 100644 --- a/packages/beacon-node/src/execution/engine/interface.ts +++ b/packages/beacon-node/src/execution/engine/interface.ts @@ -187,6 +187,14 @@ export interface IExecutionEngine { getPayloadBodiesByRange(fork: ForkName, start: number, count: number): Promise<(ExecutionPayloadBody | null)[]>; - getBlobs(fork: ForkPostFulu, versionedHashes: VersionedHashes, buffers?: Uint8Array[]): Promise; - getBlobs(fork: ForkPreFulu, versionedHashes: VersionedHashes, buffers?: Uint8Array[]): Promise<(BlobAndProof | null)[]>; + getBlobs( + fork: ForkPostFulu, + versionedHashes: VersionedHashes, + buffers?: Uint8Array[] + ): Promise; + getBlobs( + fork: ForkPreFulu, + versionedHashes: VersionedHashes, + buffers?: Uint8Array[] + ): Promise<(BlobAndProof | null)[]>; } diff --git a/packages/beacon-node/src/execution/engine/types.ts b/packages/beacon-node/src/execution/engine/types.ts index a8396410964b..c58b766d2e4d 100644 --- a/packages/beacon-node/src/execution/engine/types.ts +++ b/packages/beacon-node/src/execution/engine/types.ts @@ -640,4 +640,3 @@ export function assertReqSizeLimit(blockHashesReqCount: number, count: number): } return; } - diff --git a/packages/beacon-node/src/sync/unknownBlock.ts b/packages/beacon-node/src/sync/unknownBlock.ts index 1da4167f1772..7803745d8a03 100644 --- a/packages/beacon-node/src/sync/unknownBlock.ts +++ b/packages/beacon-node/src/sync/unknownBlock.ts @@ -25,7 +25,7 @@ import { getBlockInputSyncCacheItemSlot, isPendingBlockInput, } from "./types.js"; -import {DownloadByRootError, DownloadByRootErrorCode, downloadByRoot} from "./utils/downloadByRoot.js"; +import {DownloadByRootError, downloadByRoot} from "./utils/downloadByRoot.js"; import {getAllDescendantBlocks, getDescendantBlocks, getUnknownAndAncestorBlocks} from "./utils/pendingBlocksTree.js"; import {RequestError} from "@lodestar/reqresp"; diff --git a/packages/beacon-node/src/sync/utils/downloadByRoot.ts b/packages/beacon-node/src/sync/utils/downloadByRoot.ts index 68ba9df29ab2..25c9f960f29a 100644 --- a/packages/beacon-node/src/sync/utils/downloadByRoot.ts +++ b/packages/beacon-node/src/sync/utils/downloadByRoot.ts @@ -289,19 +289,17 @@ export async function fetchAndValidateBlock({ } export async function fetchAndValidateBlobs({ - config, network, - forkName, peerIdStr, blockRoot, block, blobMeta, }: 
FetchByRootAndValidateBlobsProps): Promise { const blobSidecars: deneb.BlobSidecars = await fetchBlobsByRoot({ - network, - peerIdStr, - blobMeta, - }); + network, + peerIdStr, + blobMeta, + }); await validateBlockBlobSidecars(block.message.slot, blockRoot, blobMeta.length, blobSidecars); @@ -326,9 +324,7 @@ export async function fetchBlobsByRoot({ } export async function fetchAndValidateColumns({ - config, network, - forkName, peerMeta, block, blockRoot, @@ -355,7 +351,7 @@ export async function fetchAndValidateColumns({ code: DownloadByRootErrorCode.NOT_ENOUGH_SIDECARS_RECEIVED, peer: prettyPrintPeerIdStr(peerIdStr), blockRoot: prettyBytes(blockRoot), - missingIndices: prettyPrintIndices(requestedColumns.filter(c => !returnedColumns.has(c))), + missingIndices: prettyPrintIndices(requestedColumns.filter((c) => !returnedColumns.has(c))), }, "Did not receive all of the requested columnSidecars" ); diff --git a/packages/beacon-node/src/util/execution.ts b/packages/beacon-node/src/util/execution.ts index 8920f5a69399..76b45666f31d 100644 --- a/packages/beacon-node/src/util/execution.ts +++ b/packages/beacon-node/src/util/execution.ts @@ -11,7 +11,7 @@ import {Metrics} from "../metrics/index.js"; import {fulu} from "@lodestar/types"; import {isBlockInputColumns} from "../chain/blocks/blockInput/blockInput.js"; import {ForkPostFulu} from "@lodestar/params"; -import { BLOB_AND_PROOF_V2_RPC_BYTES } from "../execution/engine/types.js"; +import {BLOB_AND_PROOF_V2_RPC_BYTES} from "../execution/engine/types.js"; let running = false; // Preallocate buffers for getBlobsV2 RPC calls @@ -56,13 +56,17 @@ export async function getDataColumnSidecarsFromExecution( metrics?.peerDas.getBlobsV2Requests.inc(); const timer = metrics?.peerDas.getBlobsV2RequestDuration.startTimer(); if (blobAndProofBuffers) { - for (let i = 0; i < versionedHashes.length; i++) { - if (blobAndProofBuffers[i] === undefined) { - blobAndProofBuffers[i] = new Uint8Array(BLOB_AND_PROOF_V2_RPC_BYTES); + for (let i = 0; i < versionedHashes.length; i++) { + if (blobAndProofBuffers[i] === undefined) { + blobAndProofBuffers[i] = new Uint8Array(BLOB_AND_PROOF_V2_RPC_BYTES); + } } } - } - const blobs = await executionEngine.getBlobs(blockInput.forkName as ForkPostFulu, versionedHashes, blobAndProofBuffers); + const blobs = await executionEngine.getBlobs( + blockInput.forkName as ForkPostFulu, + versionedHashes, + blobAndProofBuffers + ); timer?.(); // Execution engine was unable to find one or more blobs diff --git a/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts b/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts index e42c94e85b16..11ab17d9f929 100644 --- a/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts +++ b/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts @@ -1,15 +1,10 @@ import {randomBytes} from "node:crypto"; -import {BYTES_PER_BLOB, BYTES_PER_COMMITMENT, BYTES_PER_PROOF} from "@crate-crypto/node-eth-kzg"; import {ForkName, NUMBER_OF_COLUMNS} from "@lodestar/params"; -import {deneb, fulu, ssz} from "@lodestar/types"; -import {BlobAndProof} from "@lodestar/types/lib/deneb/types.js"; +import {ssz} from "@lodestar/types"; import {prettyBytes} from "@lodestar/utils"; import {afterAll, afterEach, beforeAll, beforeEach, describe, expect, it, vi} from "vitest"; import {BlobMeta, MissingColumnMeta} from "../../../../src/chain/blocks/blockInput/types.js"; import {BlobSidecarValidationError} from "../../../../src/chain/errors/blobSidecarError.js"; -import 
{validateBlockBlobSidecars} from "../../../../src/chain/validation/blobSidecar.js"; -import {validateBlockDataColumnSidecars} from "../../../../src/chain/validation/dataColumnSidecar.js"; -import {IExecutionEngine} from "../../../../src/execution/index.js"; import {INetwork} from "../../../../src/network/index.js"; import { DownloadByRootError, @@ -18,12 +13,9 @@ import { fetchAndValidateColumns, fetchBlobsByRoot, fetchColumnsByRoot, - fetchGetBlobsV1AndBuildSidecars, - fetchGetBlobsV2AndBuildSidecars, } from "../../../../src/sync/utils/downloadByRoot.js"; import {kzgCommitmentToVersionedHash} from "../../../../src/util/blobs.js"; import {CustodyConfig} from "../../../../src/util/dataColumns.js"; -import {kzg} from "../../../../src/util/kzg.js"; import {ROOT_SIZE} from "../../../../src/util/sszBytes.js"; import { config, @@ -42,7 +34,6 @@ describe("downloadByRoot.ts", () => { earliestAvailableSlot: 0, }; let network: INetwork; - let executionEngine: IExecutionEngine; describe("fetchAndValidateBlock", () => { let capellaBlock: ReturnType; @@ -104,12 +95,10 @@ describe("downloadByRoot.ts", () => { describe("fetchAndValidateBlobs", () => { const forkName = ForkName.deneb; let denebBlockWithBlobs: ReturnType; - let blobsAndProofs: deneb.BlobAndProof[]; let blobMeta: BlobMeta[]; beforeEach(() => { denebBlockWithBlobs = generateBlockWithBlobSidecars({forkName, count: 6}); - blobsAndProofs = denebBlockWithBlobs.blobSidecars.map(({blob, kzgProof}) => ({blob, proof: kzgProof})); blobMeta = denebBlockWithBlobs.versionedHashes.map((versionedHash, index) => ({ index, blockRoot: denebBlockWithBlobs.blockRoot, @@ -127,15 +116,9 @@ describe("downloadByRoot.ts", () => { sendBlobSidecarsByRoot: sendBlobSidecarsByRootMock, } as unknown as INetwork; - const getBlobsMock = vi.fn(() => Promise.resolve(blobsAndProofs)); - executionEngine = { - getBlobs: getBlobsMock, - } as unknown as IExecutionEngine; - const response = await fetchAndValidateBlobs({ config, network, - executionEngine, forkName, peerIdStr, blockRoot: denebBlockWithBlobs.blockRoot, @@ -152,15 +135,9 @@ describe("downloadByRoot.ts", () => { sendBlobSidecarsByRoot: sendBlobSidecarsByRootMock, } as unknown as INetwork; - const getBlobsMock = vi.fn(() => Promise.resolve([])); - executionEngine = { - getBlobs: getBlobsMock, - } as unknown as IExecutionEngine; - const response = await fetchAndValidateBlobs({ config, network, - executionEngine, forkName, peerIdStr, blockRoot: denebBlockWithBlobs.blockRoot, @@ -172,13 +149,6 @@ describe("downloadByRoot.ts", () => { }); it("should fetch remaining blobs from network when execution engine is incomplete", async () => { - const getBlobsMock = vi.fn(() => - Promise.resolve([blobsAndProofs[0], null, blobsAndProofs[2], null, blobsAndProofs[4], null]) - ); - executionEngine = { - getBlobs: getBlobsMock, - } as unknown as IExecutionEngine; - const sendBlobSidecarsByRootMock = vi.fn(() => Promise.resolve([ denebBlockWithBlobs.blobSidecars[1], @@ -193,7 +163,6 @@ describe("downloadByRoot.ts", () => { const response = await fetchAndValidateBlobs({ config, network, - executionEngine, forkName, peerIdStr, blockRoot: denebBlockWithBlobs.blockRoot, @@ -201,10 +170,6 @@ describe("downloadByRoot.ts", () => { blobMeta, }); - expect(getBlobsMock).toHaveBeenCalledExactlyOnceWith( - forkName, - blobMeta.map(({versionedHash}) => versionedHash) - ); expect(sendBlobSidecarsByRootMock).toHaveBeenCalledExactlyOnceWith(peerIdStr, [ {blockRoot: denebBlockWithBlobs.blockRoot, index: 1}, {blockRoot: denebBlockWithBlobs.blockRoot, 
index: 3}, @@ -218,10 +183,6 @@ describe("downloadByRoot.ts", () => { it("should gracefully handle getBlobsV1 failure", async () => { const rejectedError = new Error("TESTING_ERROR"); - const getBlobsMock = vi.fn(() => Promise.reject(rejectedError)); - executionEngine = { - getBlobs: getBlobsMock, - } as unknown as IExecutionEngine; const sendBlobSidecarsByRootMock = vi.fn(() => Promise.resolve(denebBlockWithBlobs.blobSidecars)); const loggerMock = { @@ -235,17 +196,13 @@ describe("downloadByRoot.ts", () => { const response = await fetchAndValidateBlobs({ config, network, - executionEngine, forkName, peerIdStr, blockRoot: denebBlockWithBlobs.blockRoot, block: denebBlockWithBlobs.block, blobMeta, }); - expect(getBlobsMock).toHaveBeenCalledExactlyOnceWith( - forkName, - blobMeta.map(({versionedHash}) => versionedHash) - ); + expect(loggerMock.error).toHaveBeenCalledExactlyOnceWith( `error fetching/building blobSidecars for blockRoot=${prettyBytes(denebBlockWithBlobs.blockRoot)} via getBlobsV1`, {}, @@ -267,18 +224,12 @@ describe("downloadByRoot.ts", () => { sendBlobSidecarsByRoot: sendBlobSidecarsByRootMock, } as unknown as INetwork; - const getBlobsMock = vi.fn(() => Promise.resolve(blobsAndProofs)); - executionEngine = { - getBlobs: getBlobsMock, - } as unknown as IExecutionEngine; - const requestedBlockRoot = randomBytes(ROOT_SIZE); await expect( fetchAndValidateBlobs({ config, network, - executionEngine, forkName, peerIdStr, blockRoot: requestedBlockRoot, @@ -289,138 +240,6 @@ describe("downloadByRoot.ts", () => { }); }); - describe("fetchGetBlobsV1AndBuildSidecars", () => { - let denebBlockWithBlobs: ReturnType; - let blobsAndProofs: deneb.BlobAndProof[]; - let blobMeta: BlobMeta[]; - const forkName = ForkName.deneb; - - beforeEach(() => { - denebBlockWithBlobs = generateBlockWithBlobSidecars({forkName, count: 6}); - blobsAndProofs = denebBlockWithBlobs.blobSidecars.map(({blob, kzgProof}) => ({blob, proof: kzgProof})); - blobMeta = denebBlockWithBlobs.versionedHashes.map( - (versionedHash, index) => ({index, versionedHash}) as BlobMeta - ); - }); - - afterEach(() => { - vi.resetAllMocks(); - }); - - it("should call getBlobs with the correct arguments", async () => { - const getBlobsMock = vi.fn(() => Promise.resolve(blobsAndProofs)); - executionEngine = { - getBlobs: getBlobsMock, - } as unknown as IExecutionEngine; - - await fetchGetBlobsV1AndBuildSidecars({ - config, - forkName, - executionEngine, - block: denebBlockWithBlobs.block, - blobMeta: blobMeta, - }); - - expect(getBlobsMock).toHaveBeenCalledOnce(); - expect(getBlobsMock).toHaveBeenCalledWith(forkName, denebBlockWithBlobs.versionedHashes); - }); - - it("should return empty array when execution engine returns no blobs", async () => { - const getBlobsMock = vi.fn(() => Promise.resolve([])); - executionEngine = { - getBlobs: getBlobsMock, - } as unknown as IExecutionEngine; - - const response = await fetchGetBlobsV1AndBuildSidecars({ - config, - forkName, - executionEngine, - block: denebBlockWithBlobs.block, - blobMeta: blobMeta, - }); - expect(response).toEqual([]); - }); - - it("should build valid blob sidecars from execution engine response", async () => { - const getBlobsMock = vi.fn(() => Promise.resolve(blobsAndProofs)); - executionEngine = { - getBlobs: getBlobsMock, - } as unknown as IExecutionEngine; - - const response = await fetchGetBlobsV1AndBuildSidecars({ - config, - forkName, - executionEngine, - block: denebBlockWithBlobs.block, - blobMeta: blobMeta, - }); - - expect(getBlobsMock).toHaveBeenCalledOnce(); - 
expect(response).toBeDefined(); - expect(response).toBeInstanceOf(Array); - expect(response.length).toEqual(blobsAndProofs.length); - for (const blobSidecar of response) { - blobSidecar.kzgCommitmentInclusionProof; - expect(blobSidecar).toHaveProperty("index"); - expect(blobSidecar.index).toBeTypeOf("number"); - - expect(blobSidecar).toHaveProperty("blob"); - expect(blobSidecar.blob).toBeInstanceOf(Uint8Array); - expect(blobSidecar.blob.length).toEqual(BYTES_PER_BLOB); - - expect(blobSidecar).toHaveProperty("kzgProof"); - expect(blobSidecar.kzgProof).toBeInstanceOf(Uint8Array); - expect(blobSidecar.kzgProof.length).toEqual(BYTES_PER_PROOF); - - expect(blobSidecar).toHaveProperty("kzgCommitment"); - expect(blobSidecar.kzgCommitment).toBeInstanceOf(Uint8Array); - expect(blobSidecar.kzgCommitment.length).toEqual(BYTES_PER_COMMITMENT); - - expect(blobSidecar).toHaveProperty("kzgCommitmentInclusionProof"); - expect(blobSidecar.kzgCommitmentInclusionProof).toBeInstanceOf(Array); - blobSidecar.kzgCommitmentInclusionProof.map((proof) => expect(proof).toBeInstanceOf(Uint8Array)); - - expect(blobSidecar).toHaveProperty("signedBlockHeader"); - expect(blobSidecar.signedBlockHeader.message.slot).toBe(denebBlockWithBlobs.block.message.slot); - expect(blobSidecar.signedBlockHeader.message.proposerIndex).toBe( - denebBlockWithBlobs.block.message.proposerIndex - ); - expect(blobSidecar.signedBlockHeader.message.parentRoot).toEqual(denebBlockWithBlobs.block.message.parentRoot); - expect(blobSidecar.signedBlockHeader.message.stateRoot).toEqual(denebBlockWithBlobs.block.message.stateRoot); - } - - await expect( - validateBlockBlobSidecars( - denebBlockWithBlobs.block.message.slot, - denebBlockWithBlobs.blockRoot, - denebBlockWithBlobs.block.message.body.blobKzgCommitments.length, - response - ) - ).resolves.toBeUndefined(); - }); - - it("should handle partial blob response from execution engine", async () => { - const engineResponse: (BlobAndProof | null)[] = [...blobsAndProofs]; - engineResponse[2] = null; - engineResponse[4] = null; - const getBlobsMock = vi.fn(() => Promise.resolve(engineResponse)); - executionEngine = { - getBlobs: getBlobsMock, - } as unknown as IExecutionEngine; - - const response = await fetchGetBlobsV1AndBuildSidecars({ - config, - forkName, - executionEngine, - block: denebBlockWithBlobs.block, - blobMeta: blobMeta, - }); - - expect(response.length).toEqual(4); - expect(response.map(({index}) => index)).toEqual([0, 1, 3, 5]); - }); - }); - describe("fetchBlobsByRoot", () => { let denebBlockWithColumns: ReturnType; let blockRoot: Uint8Array; @@ -475,18 +294,12 @@ describe("downloadByRoot.ts", () => { describe("fetchAndValidateColumns", () => { const forkName = ForkName.fulu; let fuluBlockWithColumns: ReturnType; - let blobAndProofs: fulu.BlobAndProofV2[]; let columnMeta: MissingColumnMeta; let versionedHashes: Uint8Array[]; let custodyConfig: CustodyConfig; beforeEach(() => { fuluBlockWithColumns = generateBlockWithColumnSidecars({forkName, returnBlobs: true}); - // biome-ignore lint/style/noNonNullAssertion: returnBlobs = true - const blobs = fuluBlockWithColumns.blobs!; - blobAndProofs = blobs - .map((b) => kzg.computeCellsAndKzgProofs(b)) - .map(({proofs}, i) => ({proofs, blob: blobs[i]})); versionedHashes = fuluBlockWithColumns.block.message.body.blobKzgCommitments.map((c) => kzgCommitmentToVersionedHash(c) ); @@ -516,15 +329,9 @@ describe("downloadByRoot.ts", () => { }, } as unknown as INetwork; - const getBlobsMock = vi.fn(() => Promise.resolve(blobAndProofs)); - executionEngine 
= { - getBlobs: getBlobsMock, - } as unknown as IExecutionEngine; - const response = await fetchAndValidateColumns({ config, network, - executionEngine, forkName, peerMeta, blockRoot: fuluBlockWithColumns.blockRoot, @@ -532,7 +339,6 @@ describe("downloadByRoot.ts", () => { columnMeta, }); - expect(getBlobsMock).toHaveBeenCalledExactlyOnceWith(forkName, versionedHashes); expect(sendDataColumnSidecarsByRootMock).not.toHaveBeenCalled(); // Should only return the columns we need (missing) expect(response.map((c) => c.index)).toEqual(columnMeta.missing); @@ -551,11 +357,6 @@ describe("downloadByRoot.ts", () => { }, } as unknown as INetwork; - const getBlobsMock = vi.fn(() => Promise.resolve(blobAndProofs)); - executionEngine = { - getBlobs: getBlobsMock, - } as unknown as IExecutionEngine; - // Columns 0, 1 are already published (not in missing) // Columns 2, 3, 4, 5, 6, 7 are missing sampledColumns and need to be fetched // After reconstruction, we should publish columns 2, 3 (we custody them and they weren't published) @@ -568,7 +369,6 @@ describe("downloadByRoot.ts", () => { await fetchAndValidateColumns({ config, network, - executionEngine, forkName, peerMeta, blockRoot: fuluBlockWithColumns.blockRoot, @@ -594,11 +394,6 @@ describe("downloadByRoot.ts", () => { }, } as unknown as INetwork; - const getBlobsMock = vi.fn(() => Promise.resolve(blobAndProofs)); - executionEngine = { - getBlobs: getBlobsMock, - } as unknown as IExecutionEngine; - const missing = [0, 4, 6, 10, 12]; const testColumnMeta = { missing, // Only need these columns @@ -608,7 +403,6 @@ describe("downloadByRoot.ts", () => { const response = await fetchAndValidateColumns({ config, network, - executionEngine, forkName, peerMeta, blockRoot: fuluBlockWithColumns.blockRoot, @@ -636,15 +430,9 @@ describe("downloadByRoot.ts", () => { }, } as unknown as INetwork; - const getBlobsMock = vi.fn(() => Promise.resolve(null)); // No blobs from execution engine - executionEngine = { - getBlobs: getBlobsMock, - } as unknown as IExecutionEngine; - const response = await fetchAndValidateColumns({ config, network, - executionEngine, forkName, peerMeta, blockRoot: fuluBlockWithColumns.blockRoot, @@ -652,7 +440,6 @@ describe("downloadByRoot.ts", () => { columnMeta, }); - expect(getBlobsMock).toHaveBeenCalledExactlyOnceWith(forkName, versionedHashes); expect(sendDataColumnSidecarsByRootMock).toHaveBeenCalledExactlyOnceWith(peerIdStr, [ {blockRoot: fuluBlockWithColumns.blockRoot, columns: columnMeta.missing}, ]); @@ -661,10 +448,6 @@ describe("downloadByRoot.ts", () => { it("should gracefully handle getBlobsV2 failure", async () => { const rejectedError = new Error("TESTING_ERROR"); - const getBlobsMock = vi.fn(() => Promise.reject(rejectedError)); - executionEngine = { - getBlobs: getBlobsMock, - } as unknown as IExecutionEngine; const neededColumns = fuluBlockWithColumns.columnSidecars.filter((c) => columnMeta.missing.includes(c.index)); const sendDataColumnSidecarsByRootMock = vi.fn(() => Promise.resolve(neededColumns)); @@ -684,7 +467,6 @@ describe("downloadByRoot.ts", () => { const response = await fetchAndValidateColumns({ config, network, - executionEngine, forkName, peerMeta, blockRoot: fuluBlockWithColumns.blockRoot, @@ -692,7 +474,6 @@ describe("downloadByRoot.ts", () => { columnMeta, }); - expect(getBlobsMock).toHaveBeenCalledExactlyOnceWith(forkName, versionedHashes); expect(loggerMock.error).toHaveBeenCalledExactlyOnceWith( "error building columnSidecars via getBlobsV2", { @@ -732,16 +513,10 @@ describe("downloadByRoot.ts", () => { 
}, } as unknown as INetwork; - const getBlobsMock = vi.fn(() => Promise.resolve([])); - executionEngine = { - getBlobs: getBlobsMock, - } as unknown as IExecutionEngine; - await expect( fetchAndValidateColumns({ config, network, - executionEngine, forkName, peerMeta, blockRoot: fuluBlockWithColumns.blockRoot, @@ -770,15 +545,9 @@ describe("downloadByRoot.ts", () => { logger: loggerMock, } as unknown as INetwork; - const getBlobsMock = vi.fn(() => Promise.resolve(blobAndProofs)); - executionEngine = { - getBlobs: getBlobsMock, - } as unknown as IExecutionEngine; - const response = await fetchAndValidateColumns({ config, network, - executionEngine, forkName, peerMeta, blockRoot: fuluBlockWithColumns.blockRoot, @@ -833,111 +602,6 @@ describe("downloadByRoot.ts", () => { }); }); - describe("fetchGetBlobsV2AndBuildSidecars", () => { - let fuluBlockWithColumns: ReturnType; - let blobAndProofs: fulu.BlobAndProofV2[]; - let versionedHashes: Uint8Array[]; - - beforeEach(() => { - fuluBlockWithColumns = generateBlockWithColumnSidecars({forkName: ForkName.fulu, returnBlobs: true}); - // biome-ignore lint/style/noNonNullAssertion: returnBlobs = true - const blobs = fuluBlockWithColumns.blobs!; - blobAndProofs = blobs - .map((b) => kzg.computeCellsAndKzgProofs(b)) - .map(({proofs}, i) => ({proofs, blob: blobs[i]})); - versionedHashes = fuluBlockWithColumns.block.message.body.blobKzgCommitments.map((c) => - kzgCommitmentToVersionedHash(c) - ); - }); - - afterEach(() => { - vi.resetAllMocks(); - }); - - it("should call getBlobs with the correct arguments", async () => { - const getBlobsMock = vi.fn(() => Promise.resolve(blobAndProofs)); - executionEngine = { - getBlobs: getBlobsMock, - } as unknown as IExecutionEngine; - - const columnMeta = { - missing: fuluBlockWithColumns.columnSidecars.map((c) => c.index), - versionedHashes, - }; - - await fetchGetBlobsV2AndBuildSidecars({ - config, - executionEngine, - forkName: ForkName.fulu, - block: fuluBlockWithColumns.block, - columnMeta, - }); - - expect(getBlobsMock).toHaveBeenCalledOnce(); - expect(getBlobsMock).toHaveBeenCalledWith(ForkName.fulu, versionedHashes); - }); - - it("should return empty array when execution engine returns no response", async () => { - const getBlobsMock = vi.fn(() => Promise.resolve(null)); - executionEngine = { - getBlobs: getBlobsMock, - } as unknown as IExecutionEngine; - - const columnMeta = { - missing: fuluBlockWithColumns.columnSidecars.map((c) => c.index), - versionedHashes, - }; - - const result = await fetchGetBlobsV2AndBuildSidecars({ - config, - executionEngine, - forkName: ForkName.fulu, - block: fuluBlockWithColumns.block, - columnMeta, - }); - - expect(getBlobsMock).toHaveBeenCalledOnce(); - expect(result).toEqual([]); - }); - - it("should build valid columnSidecars from execution engine blobs", async () => { - const getBlobsMock = vi.fn(() => Promise.resolve(blobAndProofs)); - executionEngine = { - getBlobs: getBlobsMock, - } as unknown as IExecutionEngine; - - const columnMeta = { - missing: fuluBlockWithColumns.columnSidecars.map((c) => c.index), - versionedHashes, - }; - - const result = await fetchGetBlobsV2AndBuildSidecars({ - config, - executionEngine, - forkName: ForkName.fulu, - block: fuluBlockWithColumns.block, - columnMeta, - }); - - expect(getBlobsMock).toHaveBeenCalledOnce(); - expect(result).toBeDefined(); - expect(result).toBeInstanceOf(Array); - expect(result.length).toEqual(NUMBER_OF_COLUMNS); - - // Verify the structure of the returned column sidecars - for (const [_, columnSidecar] of 
Object.entries(result)) { - expect( - validateBlockDataColumnSidecars( - columnSidecar.signedBlockHeader.message.slot, - fuluBlockWithColumns.blockRoot, - fuluBlockWithColumns.block.message.body.blobKzgCommitments.length, - [columnSidecar] - ) - ).resolves.toBeUndefined(); - } - }); - }); - describe("fetchColumnsByRoot", () => { let fuluBlockWithColumns: ReturnType; beforeAll(() => { diff --git a/packages/beacon-node/test/unit/util/execution.test.ts b/packages/beacon-node/test/unit/util/execution.test.ts new file mode 100644 index 000000000000..cbdfbef7f1f6 --- /dev/null +++ b/packages/beacon-node/test/unit/util/execution.test.ts @@ -0,0 +1,230 @@ +// describe("fetchGetBlobsV1AndBuildSidecars", () => { +// let denebBlockWithBlobs: ReturnType; +// let blobsAndProofs: deneb.BlobAndProof[]; +// let blobMeta: BlobMeta[]; +// const forkName = ForkName.deneb; + +// beforeEach(() => { +// denebBlockWithBlobs = generateBlockWithBlobSidecars({forkName, count: 6}); +// blobsAndProofs = denebBlockWithBlobs.blobSidecars.map(({blob, kzgProof}) => ({blob, proof: kzgProof})); +// blobMeta = denebBlockWithBlobs.versionedHashes.map((versionedHash, index) => ({index, versionedHash}) as BlobMeta); +// }); + +// afterEach(() => { +// vi.resetAllMocks(); +// }); + +// it("should call getBlobs with the correct arguments", async () => { +// const getBlobsMock = vi.fn(() => Promise.resolve(blobsAndProofs)); +// executionEngine = { +// getBlobs: getBlobsMock, +// } as unknown as IExecutionEngine; + +// await fetchGetBlobsV1AndBuildSidecars({ +// config, +// forkName, +// executionEngine, +// block: denebBlockWithBlobs.block, +// blobMeta: blobMeta, +// }); + +// expect(getBlobsMock).toHaveBeenCalledOnce(); +// expect(getBlobsMock).toHaveBeenCalledWith(forkName, denebBlockWithBlobs.versionedHashes); +// }); + +// it("should return empty array when execution engine returns no blobs", async () => { +// const getBlobsMock = vi.fn(() => Promise.resolve([])); +// executionEngine = { +// getBlobs: getBlobsMock, +// } as unknown as IExecutionEngine; + +// const response = await fetchGetBlobsV1AndBuildSidecars({ +// config, +// forkName, +// executionEngine, +// block: denebBlockWithBlobs.block, +// blobMeta: blobMeta, +// }); +// expect(response).toEqual([]); +// }); + +// it("should build valid blob sidecars from execution engine response", async () => { +// const getBlobsMock = vi.fn(() => Promise.resolve(blobsAndProofs)); +// executionEngine = { +// getBlobs: getBlobsMock, +// } as unknown as IExecutionEngine; + +// const response = await fetchGetBlobsV1AndBuildSidecars({ +// config, +// forkName, +// executionEngine, +// block: denebBlockWithBlobs.block, +// blobMeta: blobMeta, +// }); + +// expect(getBlobsMock).toHaveBeenCalledOnce(); +// expect(response).toBeDefined(); +// expect(response).toBeInstanceOf(Array); +// expect(response.length).toEqual(blobsAndProofs.length); +// for (const blobSidecar of response) { +// blobSidecar.kzgCommitmentInclusionProof; +// expect(blobSidecar).toHaveProperty("index"); +// expect(blobSidecar.index).toBeTypeOf("number"); + +// expect(blobSidecar).toHaveProperty("blob"); +// expect(blobSidecar.blob).toBeInstanceOf(Uint8Array); +// expect(blobSidecar.blob.length).toEqual(BYTES_PER_BLOB); + +// expect(blobSidecar).toHaveProperty("kzgProof"); +// expect(blobSidecar.kzgProof).toBeInstanceOf(Uint8Array); +// expect(blobSidecar.kzgProof.length).toEqual(BYTES_PER_PROOF); + +// expect(blobSidecar).toHaveProperty("kzgCommitment"); +// 
expect(blobSidecar.kzgCommitment).toBeInstanceOf(Uint8Array); +// expect(blobSidecar.kzgCommitment.length).toEqual(BYTES_PER_COMMITMENT); + +// expect(blobSidecar).toHaveProperty("kzgCommitmentInclusionProof"); +// expect(blobSidecar.kzgCommitmentInclusionProof).toBeInstanceOf(Array); +// blobSidecar.kzgCommitmentInclusionProof.map((proof) => expect(proof).toBeInstanceOf(Uint8Array)); + +// expect(blobSidecar).toHaveProperty("signedBlockHeader"); +// expect(blobSidecar.signedBlockHeader.message.slot).toBe(denebBlockWithBlobs.block.message.slot); +// expect(blobSidecar.signedBlockHeader.message.proposerIndex).toBe(denebBlockWithBlobs.block.message.proposerIndex); +// expect(blobSidecar.signedBlockHeader.message.parentRoot).toEqual(denebBlockWithBlobs.block.message.parentRoot); +// expect(blobSidecar.signedBlockHeader.message.stateRoot).toEqual(denebBlockWithBlobs.block.message.stateRoot); +// } + +// await expect( +// validateBlockBlobSidecars( +// denebBlockWithBlobs.block.message.slot, +// denebBlockWithBlobs.blockRoot, +// denebBlockWithBlobs.block.message.body.blobKzgCommitments.length, +// response +// ) +// ).resolves.toBeUndefined(); +// }); + +// it("should handle partial blob response from execution engine", async () => { +// const engineResponse: (BlobAndProof | null)[] = [...blobsAndProofs]; +// engineResponse[2] = null; +// engineResponse[4] = null; +// const getBlobsMock = vi.fn(() => Promise.resolve(engineResponse)); +// executionEngine = { +// getBlobs: getBlobsMock, +// } as unknown as IExecutionEngine; + +// const response = await fetchGetBlobsV1AndBuildSidecars({ +// config, +// forkName, +// executionEngine, +// block: denebBlockWithBlobs.block, +// blobMeta: blobMeta, +// }); + +// expect(response.length).toEqual(4); +// expect(response.map(({index}) => index)).toEqual([0, 1, 3, 5]); +// }); +// }); + +// describe("fetchGetBlobsV2AndBuildSidecars", () => { +// let fuluBlockWithColumns: ReturnType; +// let blobAndProofs: fulu.BlobAndProofV2[]; +// let versionedHashes: Uint8Array[]; + +// beforeEach(() => { +// fuluBlockWithColumns = generateBlockWithColumnSidecars({forkName: ForkName.fulu, returnBlobs: true}); +// // biome-ignore lint/style/noNonNullAssertion: returnBlobs = true +// const blobs = fuluBlockWithColumns.blobs!; +// blobAndProofs = blobs.map((b) => kzg.computeCellsAndKzgProofs(b)).map(({proofs}, i) => ({proofs, blob: blobs[i]})); +// versionedHashes = fuluBlockWithColumns.block.message.body.blobKzgCommitments.map((c) => +// kzgCommitmentToVersionedHash(c) +// ); +// }); + +// afterEach(() => { +// vi.resetAllMocks(); +// }); + +// it("should call getBlobs with the correct arguments", async () => { +// const getBlobsMock = vi.fn(() => Promise.resolve(blobAndProofs)); +// executionEngine = { +// getBlobs: getBlobsMock, +// } as unknown as IExecutionEngine; + +// const columnMeta = { +// missing: fuluBlockWithColumns.columnSidecars.map((c) => c.index), +// versionedHashes, +// }; + +// await fetchGetBlobsV2AndBuildSidecars({ +// config, +// executionEngine, +// forkName: ForkName.fulu, +// block: fuluBlockWithColumns.block, +// columnMeta, +// }); + +// expect(getBlobsMock).toHaveBeenCalledOnce(); +// expect(getBlobsMock).toHaveBeenCalledWith(ForkName.fulu, versionedHashes); +// }); + +// it("should return empty array when execution engine returns no response", async () => { +// const getBlobsMock = vi.fn(() => Promise.resolve(null)); +// executionEngine = { +// getBlobs: getBlobsMock, +// } as unknown as IExecutionEngine; + +// const columnMeta = { +// missing: 
fuluBlockWithColumns.columnSidecars.map((c) => c.index), +// versionedHashes, +// }; + +// const result = await fetchGetBlobsV2AndBuildSidecars({ +// config, +// executionEngine, +// forkName: ForkName.fulu, +// block: fuluBlockWithColumns.block, +// columnMeta, +// }); + +// expect(getBlobsMock).toHaveBeenCalledOnce(); +// expect(result).toEqual([]); +// }); + +// it("should build valid columnSidecars from execution engine blobs", async () => { +// const getBlobsMock = vi.fn(() => Promise.resolve(blobAndProofs)); +// executionEngine = { +// getBlobs: getBlobsMock, +// } as unknown as IExecutionEngine; + +// const columnMeta = { +// missing: fuluBlockWithColumns.columnSidecars.map((c) => c.index), +// versionedHashes, +// }; + +// const result = await fetchGetBlobsV2AndBuildSidecars({ +// config, +// executionEngine, +// forkName: ForkName.fulu, +// block: fuluBlockWithColumns.block, +// columnMeta, +// }); + +// expect(getBlobsMock).toHaveBeenCalledOnce(); +// expect(result).toBeDefined(); +// expect(result).toBeInstanceOf(Array); +// expect(result.length).toEqual(NUMBER_OF_COLUMNS); + +// // Verify the structure of the returned column sidecars +// for (const [_, columnSidecar] of Object.entries(result)) { +// expect( +// validateBlockDataColumnSidecars( +// columnSidecar.signedBlockHeader.message.slot, +// fuluBlockWithColumns.blockRoot, +// fuluBlockWithColumns.block.message.body.blobKzgCommitments.length, +// [columnSidecar] +// ) +// ).resolves.toBeUndefined(); +// } +// }); +// }); From 3b972266ec80c21f3895294e657b24dcc0d24b34 Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Wed, 10 Sep 2025 04:23:38 +0700 Subject: [PATCH 141/173] feat: dont check for listener twice --- .../src/api/impl/beacon/blocks/index.ts | 20 +++++++++---------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/packages/beacon-node/src/api/impl/beacon/blocks/index.ts b/packages/beacon-node/src/api/impl/beacon/blocks/index.ts index 0e35f81f872f..abef77ef9823 100644 --- a/packages/beacon-node/src/api/impl/beacon/blocks/index.ts +++ b/packages/beacon-node/src/api/impl/beacon/blocks/index.ts @@ -329,17 +329,15 @@ export function getBeaconBlockApi({ } else if (isBlockInputBlobs(blockForImport) && chain.emitter.listenerCount(routes.events.EventType.blobSidecar)) { const blobSidecars = blockForImport.getBlobs(); - if (chain.emitter.listenerCount(routes.events.EventType.blobSidecar)) { - for (const blobSidecar of blobSidecars) { - const {index, kzgCommitment} = blobSidecar; - chain.emitter.emit(routes.events.EventType.blobSidecar, { - blockRoot, - slot, - index, - kzgCommitment: toHex(kzgCommitment), - versionedHash: toHex(kzgCommitmentToVersionedHash(kzgCommitment)), - }); - } + for (const blobSidecar of blobSidecars) { + const {index, kzgCommitment} = blobSidecar; + chain.emitter.emit(routes.events.EventType.blobSidecar, { + blockRoot, + slot, + index, + kzgCommitment: toHex(kzgCommitment), + versionedHash: toHex(kzgCommitmentToVersionedHash(kzgCommitment)), + }); } } }; From cfe8ba2194697bc671cf561327cf0ef406f84e64 Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Wed, 10 Sep 2025 04:25:04 +0700 Subject: [PATCH 142/173] feat: getLogMeta to print slot first --- .../beacon-node/src/chain/blocks/blockInput/blockInput.ts | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts b/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts index 6f4a46d7554c..d083d1776483 100644 --- 
a/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts +++ b/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts @@ -142,8 +142,8 @@ abstract class AbstractBlockInput Date: Wed, 10 Sep 2025 04:25:58 +0700 Subject: [PATCH 143/173] fix: dont destructure logMeta in debug call --- packages/beacon-node/src/network/processor/gossipHandlers.ts | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/packages/beacon-node/src/network/processor/gossipHandlers.ts b/packages/beacon-node/src/network/processor/gossipHandlers.ts index 764b9e4ddaf1..8612ea4499e7 100644 --- a/packages/beacon-node/src/network/processor/gossipHandlers.ts +++ b/packages/beacon-node/src/network/processor/gossipHandlers.ts @@ -385,9 +385,7 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand metrics?.gossipBlock.elapsedTimeTillReceived.observe({source: OpSource.gossip}, delaySec); chain.validatorMonitor?.registerBeaconBlock(OpSource.gossip, delaySec, signedBlock.message); if (!blockInput.hasBlockAndAllData()) { - chain.logger.debug("Received gossip block, attempting fetch of unavailable data", { - ...blockInput.getLogMeta(), - }); + chain.logger.debug("Received gossip block, attempting fetch of unavailable data", blockInput.getLogMeta()); // The data is not yet fully available, immediately trigger an aggressive pull via unknown block sync chain.emitter.emit(ChainEvent.incompleteBlockInput, { blockInput, From 02696de0e74c1e34dc875aa8333dc23e7fdf123a Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Wed, 10 Sep 2025 04:29:51 +0700 Subject: [PATCH 144/173] fix: update log comment --- packages/beacon-node/src/network/processor/gossipHandlers.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/beacon-node/src/network/processor/gossipHandlers.ts b/packages/beacon-node/src/network/processor/gossipHandlers.ts index 8612ea4499e7..644899775c37 100644 --- a/packages/beacon-node/src/network/processor/gossipHandlers.ts +++ b/packages/beacon-node/src/network/processor/gossipHandlers.ts @@ -459,7 +459,7 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand logLevel = LogLevel.error; } metrics?.gossipBlock.processBlockErrors.inc({error: e instanceof BlockError ? e.type.code : "NOT_BLOCK_ERROR"}); - logger[logLevel]("Error receiving block", {slot, peer: peerIdStr}, e as Error); + logger[logLevel]("Error processing block", {slot, peer: peerIdStr}, e as Error); // TODO(fulu): Revisit when we prune block inputs chain.seenBlockInputCache.prune(blockInput.blockRootHex); }); From 92a992d0033e5baa8407f7f4f52109631171ca1c Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Wed, 10 Sep 2025 04:31:20 +0700 Subject: [PATCH 145/173] docs: add comment --- packages/beacon-node/src/network/processor/gossipHandlers.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/packages/beacon-node/src/network/processor/gossipHandlers.ts b/packages/beacon-node/src/network/processor/gossipHandlers.ts index 644899775c37..77fe2a3345c8 100644 --- a/packages/beacon-node/src/network/processor/gossipHandlers.ts +++ b/packages/beacon-node/src/network/processor/gossipHandlers.ts @@ -549,6 +549,7 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand dataColumnIndex: index, ...blockInput.getLogMeta(), }); + // do not await here to not delay gossip validation blockInput.waitForAllData(cutoffTimeMs).catch((_e) => { chain.logger.debug( "Waited for data after receiving gossip column. 
Cut-off reached so attempting to fetch remainder of BlockInput", From 7827f144c100fb9d1054a0601d455aa00ccc5c26 Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Wed, 10 Sep 2025 04:59:24 +0700 Subject: [PATCH 146/173] fix: replace extra validation check error with log statement --- packages/beacon-node/src/chain/blocks/writeBlockInputToDb.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/beacon-node/src/chain/blocks/writeBlockInputToDb.ts b/packages/beacon-node/src/chain/blocks/writeBlockInputToDb.ts index b86cd0e0d953..44396e562b85 100644 --- a/packages/beacon-node/src/chain/blocks/writeBlockInputToDb.ts +++ b/packages/beacon-node/src/chain/blocks/writeBlockInputToDb.ts @@ -55,7 +55,7 @@ export async function writeBlockInputToDb(this: BeaconChain, blocksInputs: IBloc const dataColumnSidecars = blockInput.getCustodyColumns(); if (dataColumnSidecars.length !== dataColumnsLen) { - throw Error( + this.logger.debug( `Invalid dataColumnSidecars=${dataColumnSidecars.length} for custody expected custodyColumnsLen=${dataColumnsLen}` ); } From 4f7d53c2c700c6c525cfce6ec77c9c5238666e1c Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Wed, 10 Sep 2025 23:10:25 +0700 Subject: [PATCH 147/173] feat: prune BlockInputs for ByRange --- packages/beacon-node/src/sync/range/batch.ts | 2 +- packages/beacon-node/src/sync/range/chain.ts | 9 +++++++++ packages/beacon-node/src/sync/range/range.ts | 8 ++++++++ 3 files changed, 18 insertions(+), 1 deletion(-) diff --git a/packages/beacon-node/src/sync/range/batch.ts b/packages/beacon-node/src/sync/range/batch.ts index 60bdb284b9bd..771fea2fa0f2 100644 --- a/packages/beacon-node/src/sync/range/batch.ts +++ b/packages/beacon-node/src/sync/range/batch.ts @@ -252,7 +252,7 @@ export class Batch { switch (this.state.status) { case BatchStatus.AwaitingValidation: case BatchStatus.Processing: - throw new BatchError(this.wrongStatusErrorType(BatchStatus.AwaitingDownload)); + return []; } return this.state.blocks; } diff --git a/packages/beacon-node/src/sync/range/chain.ts b/packages/beacon-node/src/sync/range/chain.ts index 82697b8e5175..f4ff3b3984a8 100644 --- a/packages/beacon-node/src/sync/range/chain.ts +++ b/packages/beacon-node/src/sync/range/chain.ts @@ -51,6 +51,8 @@ export type SyncChainFns = { getConnectedPeerSyncMeta: (peerId: string) => PeerSyncMeta; /** Hook called when Chain state completes */ onEnd: (err: Error | null, target: ChainTarget | null) => void; + /** Deletes an array of BlockInputs from the BlockInputCache */ + pruneBlockInputs: (blockInputs: IBlockInput[]) => void; }; /** @@ -118,6 +120,8 @@ export class SyncChain { private readonly downloadByRange: SyncChainFns["downloadByRange"]; private readonly reportPeer: SyncChainFns["reportPeer"]; private readonly getConnectedPeerSyncMeta: SyncChainFns["getConnectedPeerSyncMeta"]; + private readonly pruneBlockInputs: SyncChainFns["pruneBlockInputs"]; + /** AsyncIterable that guarantees processChainSegment is run only at once at anytime */ private readonly batchProcessor = new ItTrigger(); /** Sorted map of batches undergoing some kind of processing. 
*/ @@ -308,6 +312,10 @@ export class SyncChain { return; // Ignore } + for (const batch of this.batches.values()) { + this.pruneBlockInputs(batch.getBlocks()); + } + this.status = SyncChainStatus.Error; this.logger.verbose("SyncChain Error", {id: this.logId}, e as Error); @@ -546,6 +554,7 @@ export class SyncChain { if (!res.err) { batch.processingSuccess(); + this.pruneBlockInputs(batch.getBlocks()); // If the processed batch is not empty, validate previous AwaitingValidation blocks. if (blocks.length > 0) { diff --git a/packages/beacon-node/src/sync/range/range.ts b/packages/beacon-node/src/sync/range/range.ts index 370c6643111a..238ef12b1765 100644 --- a/packages/beacon-node/src/sync/range/range.ts +++ b/packages/beacon-node/src/sync/range/range.ts @@ -13,6 +13,7 @@ import {cacheByRangeResponses, downloadByRange} from "../utils/downloadByRange.j import {RangeSyncType, getRangeSyncTarget, rangeSyncTypes} from "../utils/remoteSyncType.js"; import {ChainTarget, SyncChain, SyncChainDebugState, SyncChainFns} from "./chain.js"; import {updateChains} from "./utils/index.js"; +import {IBlockInput} from "../../chain/blocks/blockInput/types.js"; export enum RangeSyncEvent { completedChain = "RangeSync-completedChain", @@ -218,6 +219,12 @@ export class RangeSync extends (EventEmitter as {new (): RangeSyncEmitter}) { return cached; }; + private pruneBlockInputs: SyncChainFns["pruneBlockInputs"] = (blocks: IBlockInput[]) => { + for (const block of blocks) { + this.chain.seenBlockInputCache.prune(block.blockRootHex); + } + }; + /** Convenience method for `SyncChain` */ private reportPeer: SyncChainFns["reportPeer"] = (peer, action, actionName) => { this.network.reportPeer(peer, action, actionName); @@ -249,6 +256,7 @@ export class RangeSync extends (EventEmitter as {new (): RangeSyncEmitter}) { downloadByRange: this.downloadByRange, reportPeer: this.reportPeer, getConnectedPeerSyncMeta: this.getConnectedPeerSyncMeta, + pruneBlockInputs: this.pruneBlockInputs, onEnd: this.onSyncChainEnd, }, {config: this.config, logger: this.logger, custodyConfig: this.chain.custodyConfig, metrics: this.metrics} From 84c589526ed6ff903eec1b061a6498584f90b0c6 Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Wed, 10 Sep 2025 23:13:33 +0700 Subject: [PATCH 148/173] feat: prune BlockInputs for ByRoot --- packages/beacon-node/src/sync/unknownBlock.ts | 2 ++ 1 file changed, 2 insertions(+) diff --git a/packages/beacon-node/src/sync/unknownBlock.ts b/packages/beacon-node/src/sync/unknownBlock.ts index 1da4167f1772..efa38576212e 100644 --- a/packages/beacon-node/src/sync/unknownBlock.ts +++ b/packages/beacon-node/src/sync/unknownBlock.ts @@ -425,6 +425,7 @@ export class BlockInputSync { if (!res.err) { // no need to update status to "processed", delete anyway this.pendingBlocks.delete(pendingBlock.blockInput.blockRootHex); + this.chain.seenBlockInputCache.prune(pendingBlock.blockInput.blockRootHex); // Send child blocks to the processor for (const descendantBlock of getDescendantBlocks(pendingBlock.blockInput.blockRootHex, this.pendingBlocks)) { @@ -610,6 +611,7 @@ export class BlockInputSync { for (const block of badPendingBlocks) { const rootHex = getBlockInputSyncCacheItemRootHex(block); this.pendingBlocks.delete(rootHex); + this.chain.seenBlockInputCache.prune(rootHex); this.logger.debug("Removing bad/unknown/incomplete BlockInputSyncCacheItem", { blockRoot: rootHex, }); From 07a214ca4d5bee284593e64260bc49076bdd3cf0 Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Wed, 10 Sep 2025 23:21:07 +0700 Subject: [PATCH 149/173] 
fix: build error from missing pruneBlockInputs assignment in SyncChain

---
 packages/beacon-node/src/sync/range/chain.ts | 1 +
 1 file changed, 1 insertion(+)

diff --git a/packages/beacon-node/src/sync/range/chain.ts b/packages/beacon-node/src/sync/range/chain.ts
index f4ff3b3984a8..85284bbc277d 100644
--- a/packages/beacon-node/src/sync/range/chain.ts
+++ b/packages/beacon-node/src/sync/range/chain.ts
@@ -147,6 +147,7 @@ export class SyncChain {
     this.processChainSegment = fns.processChainSegment;
     this.downloadByRange = fns.downloadByRange;
     this.reportPeer = fns.reportPeer;
+    this.pruneBlockInputs = fns.pruneBlockInputs;
     this.getConnectedPeerSyncMeta = fns.getConnectedPeerSyncMeta;
     this.config = config;
     this.custodyConfig = custodyConfig;

From d46bf59929e23f8cc73f673baacab6ff77a039c2 Mon Sep 17 00:00:00 2001
From: matthewkeil
Date: Thu, 11 Sep 2025 00:50:47 +0700
Subject: [PATCH 150/173] feat: log number of pruned BlockInputs

---
 .../beacon-node/src/chain/seenCache/seenGossipBlockInput.ts | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/packages/beacon-node/src/chain/seenCache/seenGossipBlockInput.ts b/packages/beacon-node/src/chain/seenCache/seenGossipBlockInput.ts
index 7be4c5e5c578..8e0616620a84 100644
--- a/packages/beacon-node/src/chain/seenCache/seenGossipBlockInput.ts
+++ b/packages/beacon-node/src/chain/seenCache/seenGossipBlockInput.ts
@@ -126,21 +126,27 @@ export class SeenBlockInput {
   prune(rootHex: RootHex): void {
     let blockInput = this.blockInputs.get(rootHex);
     let parentRootHex = blockInput?.parentRootHex;
+    let deletedCount = 0;
     while (blockInput) {
+      deletedCount++;
       this.blockInputs.delete(blockInput.blockRootHex);
       blockInput = this.blockInputs.get(parentRootHex ?? "");
       parentRootHex = blockInput?.parentRootHex;
     }
+    this.logger?.debug(`BlockInputCache.prune deleted ${deletedCount} cached BlockInputs`);
     this.pruneToMaxSize();
   }

   onFinalized = (checkpoint: CheckpointWithHex) => {
+    let deletedCount = 0;
     const cutoffSlot = computeStartSlotAtEpoch(checkpoint.epoch);
     for (const [rootHex, blockInput] of this.blockInputs) {
       if (blockInput.slot < cutoffSlot) {
+        deletedCount++;
         this.blockInputs.delete(rootHex);
       }
     }
+    this.logger?.debug(`BlockInputCache.onFinalized deleted ${deletedCount} cached BlockInputs`);
     this.pruneToMaxSize();
   };

From 1a7b1e45104e85b7d702b37a76871b5fd15c06a9 Mon Sep 17 00:00:00 2001
From: matthewkeil
Date: Thu, 11 Sep 2025 01:09:19 +0700
Subject: [PATCH 151/173] chore: fix check-types

---
 packages/beacon-node/test/unit/sync/range/chain.test.ts | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/packages/beacon-node/test/unit/sync/range/chain.test.ts b/packages/beacon-node/test/unit/sync/range/chain.test.ts
index e0a144e0afc6..9053161d43c9 100644
--- a/packages/beacon-node/test/unit/sync/range/chain.test.ts
+++ b/packages/beacon-node/test/unit/sync/range/chain.test.ts
@@ -76,6 +76,7 @@ describe("sync / range / chain", () => {
       custodyGroups: [],
     };
   };
+  const pruneBlockInputs: SyncChainFns["pruneBlockInputs"] = (_) => {};

   afterEach(() => {
     if (interval !== null) clearInterval(interval);
@@ -132,6 +133,7 @@ describe("sync / range / chain", () => {
             downloadByRange,
             getConnectedPeerSyncMeta,
             reportPeer,
+            pruneBlockInputs,
             onEnd,
           }),
           {config, logger, custodyConfig, metrics: null}
@@ -184,6 +186,7 @@ describe("sync / range / chain", () => {
            processChainSegment,
            downloadByRange,
            reportPeer,
+           pruneBlockInputs,
            getConnectedPeerSyncMeta,
            onEnd,
          }),
@@ -228,6 +231,10 @@ function logSyncChainFns(logger: Logger, fns: SyncChainFns): SyncChainFns {
logger.debug("mock reportPeer", {peer: peer.toString(), action, actionName}); return fns.reportPeer(peer, action, actionName); }, + pruneBlockInputs(blockInputs) { + logger.debug("mock pruneBlockInputs", {blockInputsLength: blockInputs.length}); + return fns.pruneBlockInputs(blockInputs); + }, onEnd(err, target) { logger.debug("mock onEnd", {target: target?.slot}, err ?? undefined); return fns.onEnd(err, target); From ee00704e360d28602025d031edbd10cbae9c05c5 Mon Sep 17 00:00:00 2001 From: twoeths <10568965+twoeths@users.noreply.github.com> Date: Thu, 11 Sep 2025 10:28:20 +0700 Subject: [PATCH 152/173] fix: add blocks to all BatchState (#8369) **Motivation** - there is OOM error in https://github.com/ChainSafe/lodestar/issues/8331#issuecomment-3274347065 - after latest fixes by @matthewkeil I found that we cannot get blocks after debug Screenshot 2025-09-11 at 09 34 44 the reason is at `AwaitingValidation` state there is no blocks at all **Description** - add blocks to all BatchState and transfer it over life cycle Closes #8331 **Test result on devnet node 0** - this shows that memory is stable now, also the "cache size" metric Screenshot 2025-09-11 at 10 12 13 Co-authored-by: Tuyen Nguyen --- packages/beacon-node/src/sync/range/batch.ts | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/packages/beacon-node/src/sync/range/batch.ts b/packages/beacon-node/src/sync/range/batch.ts index 771fea2fa0f2..f1647bb73d13 100644 --- a/packages/beacon-node/src/sync/range/batch.ts +++ b/packages/beacon-node/src/sync/range/batch.ts @@ -55,8 +55,8 @@ export type BatchState = | AwaitingDownloadState | {status: BatchStatus.Downloading; peer: PeerIdStr; blocks: IBlockInput[]} | DownloadSuccessState - | {status: BatchStatus.Processing; attempt: Attempt} - | {status: BatchStatus.AwaitingValidation; attempt: Attempt}; + | {status: BatchStatus.Processing; blocks: IBlockInput[]; attempt: Attempt} + | {status: BatchStatus.AwaitingValidation; blocks: IBlockInput[]; attempt: Attempt}; export type BatchMetadata = { startEpoch: Epoch; @@ -249,11 +249,6 @@ export class Batch { } getBlocks(): IBlockInput[] { - switch (this.state.status) { - case BatchStatus.AwaitingValidation: - case BatchStatus.Processing: - return []; - } return this.state.blocks; } @@ -339,7 +334,7 @@ export class Batch { // that the data came from will be handled by the Attempt that goes for processing const peers = this.goodPeers; this.goodPeers = []; - this.state = {status: BatchStatus.Processing, attempt: {peers, hash}}; + this.state = {status: BatchStatus.Processing, blocks, attempt: {peers, hash}}; return blocks; } @@ -351,7 +346,7 @@ export class Batch { throw new BatchError(this.wrongStatusErrorType(BatchStatus.Processing)); } - this.state = {status: BatchStatus.AwaitingValidation, attempt: this.state.attempt}; + this.state = {status: BatchStatus.AwaitingValidation, blocks: this.state.blocks, attempt: this.state.attempt}; } /** From 1d1fbe1ea5c82ac63004c6b45fb7a4ea701e59c3 Mon Sep 17 00:00:00 2001 From: twoeths <10568965+twoeths@users.noreply.github.com> Date: Thu, 11 Sep 2025 18:39:58 +0700 Subject: [PATCH 153/173] feat: track downloadByRange errors in metrics (#8372) **Motivation** - there are a lot of DownloadByRange errors that we want to track in metrics to investigate **Description** - define new metrics and track in `SyncChain` class see also #8352 --------- Co-authored-by: Tuyen Nguyen Co-authored-by: Cayman Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com> 
---
 packages/beacon-node/src/metrics/metrics/lodestar.ts | 11 +++++++++++
 packages/beacon-node/src/sync/range/chain.ts          |  4 ++++
 2 files changed, 15 insertions(+)

diff --git a/packages/beacon-node/src/metrics/metrics/lodestar.ts b/packages/beacon-node/src/metrics/metrics/lodestar.ts
index 70424b8b21dd..cdb7ba8d7d61 100644
--- a/packages/beacon-node/src/metrics/metrics/lodestar.ts
+++ b/packages/beacon-node/src/metrics/metrics/lodestar.ts
@@ -496,6 +496,17 @@ export function createLodestarMetrics(
         help: "Count of finalized sync peers by group index",
         labelNames: ["columnIndex"],
       }),
+      downloadByRange: {
+        success: register.gauge({
+          name: "lodestar_sync_range_download_by_range_success_total",
+          help: "Total number of successful downloadByRange calls",
+        }),
+        error: register.gauge<{code: string; client: string}>({
+          name: "lodestar_sync_range_download_by_range_error_total",
+          help: "Total number of errored downloadByRange calls",
+          labelNames: ["code", "client"],
+        }),
+      },
     },

     blockInputSync: {

diff --git a/packages/beacon-node/src/sync/range/chain.ts b/packages/beacon-node/src/sync/range/chain.ts
index 85284bbc277d..c784d9fea347 100644
--- a/packages/beacon-node/src/sync/range/chain.ts
+++ b/packages/beacon-node/src/sync/range/chain.ts
@@ -130,6 +130,7 @@ export class SyncChain {
   private readonly logger: Logger;
   private readonly config: ChainForkConfig;
+  private readonly metrics: Metrics | null;
   private readonly custodyConfig: CustodyConfig;

   constructor(
@@ -150,6 +151,7 @@ export class SyncChain {
     this.pruneBlockInputs = fns.pruneBlockInputs;
     this.getConnectedPeerSyncMeta = fns.getConnectedPeerSyncMeta;
     this.config = config;
+    this.metrics = metrics;
     this.custodyConfig = custodyConfig;
     this.logger = logger;
     this.logId = `${syncType}-${nextChainId++}`;
@@ -462,6 +464,7 @@ export class SyncChain {
     if (res.err) {
       // There's several known error cases where we want to take action on the peer
       const errCode = (res.err as LodestarError<{code: string}>).type?.code;
+      this.metrics?.syncRange.downloadByRange.error.inc({client: peer.client, code: errCode ?? "UNKNOWN"});
       if (this.syncType === RangeSyncType.Finalized) {
         // For finalized sync, we are stricter with peers as there is no ambiguity about which chain we're syncing.
         // The below cases indicate the peer may be on a different chain, so are not penalized during head sync.
@@ -498,6 +501,7 @@ export class SyncChain {
       );
       batch.downloadingError(peer.peerId); // Throws after MAX_DOWNLOAD_ATTEMPTS
     } else {
+      this.metrics?.syncRange.downloadByRange.success.inc();
       const downloadSuccessOutput = batch.downloadingSuccess(peer.peerId, res.result);
       const logMeta: Record<string, unknown> = {
         blockCount: downloadSuccessOutput.blocks.length,

From 71269d339ee786b5780b7a38a44385afe4f3e790 Mon Sep 17 00:00:00 2001
From: Cayman
Date: Thu, 11 Sep 2025 10:35:12 -0400
Subject: [PATCH 154/173] fix: track getBlobsV2 calls via GetBlobsTracker (#8381)

**Motivation**

- Lots of getBlobsV2 calls, causing bad sidecar validation times

[4 screenshots omitted, 2025-09-11 08:38 to 08:45]

**Description**

- Add a simple class to track when getBlobsV2 is called and for which block, so duplicate and concurrent calls are avoided.
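A minimal usage sketch of the intended dedup behavior (illustrative only, not part of the diff; `config`, `logger`, `emitter`, `executionEngine`, and `blockInput` are assumed to be test stubs like those used in the existing unit tests):

```ts
import {GetBlobsTracker} from "./GetBlobsTracker.js";

// Assumed fixtures: config/logger/emitter/executionEngine/blockInput stubs.
const tracker = new GetBlobsTracker({config, logger, emitter, metrics: null, executionEngine});

tracker.triggerGetBlobs(blockInput); // kicks off a single getBlobsV2 call
tracker.triggerGetBlobs(blockInput); // no-op: a call for this block root is already in flight
// Once the call settles, only a new block root triggers another engine call.
```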
--- .../beacon-node/src/chain/GetBlobsTracker.ts | 72 +++++++++ packages/beacon-node/src/chain/chain.ts | 11 ++ packages/beacon-node/src/chain/interface.ts | 3 + .../src/network/processor/gossipHandlers.ts | 5 +- packages/beacon-node/src/util/execution.ts | 142 ++++++++---------- 5 files changed, 152 insertions(+), 81 deletions(-) create mode 100644 packages/beacon-node/src/chain/GetBlobsTracker.ts diff --git a/packages/beacon-node/src/chain/GetBlobsTracker.ts b/packages/beacon-node/src/chain/GetBlobsTracker.ts new file mode 100644 index 000000000000..cae981000f88 --- /dev/null +++ b/packages/beacon-node/src/chain/GetBlobsTracker.ts @@ -0,0 +1,72 @@ +import {Logger} from "@lodestar/utils"; +import {IExecutionEngine} from "../execution/index.js"; +import {ChainEventEmitter} from "./emitter.js"; +import {Metrics} from "../metrics/metrics.js"; +import {ChainForkConfig} from "@lodestar/config"; +import {IBlockInput} from "./blocks/blockInput/index.js"; +import {getDataColumnSidecarsFromExecution} from "../util/execution.js"; + +export type GetBlobsTrackerInit = { + logger: Logger; + executionEngine: IExecutionEngine; + emitter: ChainEventEmitter; + metrics: Metrics | null; + config: ChainForkConfig; +}; + +/** + * Tracks getBlobsV2 calls to the execution engine to avoid duplicate and multiple in-flight calls + */ +export class GetBlobsTracker { + logger: Logger; + executionEngine: IExecutionEngine; + emitter: ChainEventEmitter; + metrics: Metrics | null; + config: ChainForkConfig; + + /** + * Track last attempted block root + * + * This is sufficient to avoid duplicate calls since we only call this + * function when we see a new block or data column sidecar from gossip. + */ + lastBlockRootHex: string | null = null; + /** Track if a getBlobsV2 call is in-flight */ + running = false; + // Preallocate buffers for getBlobsV2 RPC calls + // See https://github.com/ChainSafe/lodestar/pull/8282 for context + blobAndProofBuffers: Uint8Array[] = []; + + constructor(init: GetBlobsTrackerInit) { + this.logger = init.logger; + this.executionEngine = init.executionEngine; + this.emitter = init.emitter; + this.metrics = init.metrics; + this.config = init.config; + } + + triggerGetBlobs(blockInput: IBlockInput): void { + if (this.running) { + return; + } + + if (this.lastBlockRootHex === blockInput.blockRootHex) { + return; + } + + // We don't care about the outcome of this call, + // just that it has been triggered for this block root. + this.running = true; + this.lastBlockRootHex = blockInput.blockRootHex; + getDataColumnSidecarsFromExecution( + this.config, + this.executionEngine, + this.emitter, + blockInput, + this.metrics, + this.blobAndProofBuffers + ).finally(() => { + this.running = false; + }); + } +} diff --git a/packages/beacon-node/src/chain/chain.ts b/packages/beacon-node/src/chain/chain.ts index 3f9f1051c898..24fef2a54b3d 100644 --- a/packages/beacon-node/src/chain/chain.ts +++ b/packages/beacon-node/src/chain/chain.ts @@ -100,6 +100,7 @@ import {FIFOBlockStateCache} from "./stateCache/fifoBlockStateCache.js"; import {InMemoryCheckpointStateCache} from "./stateCache/inMemoryCheckpointsCache.js"; import {PersistentCheckpointStateCache} from "./stateCache/persistentCheckpointsCache.js"; import {ValidatorMonitor} from "./validatorMonitor.js"; +import {GetBlobsTracker} from "./GetBlobsTracker.js"; /** * The maximum number of cached produced results to keep in memory. 
@@ -173,6 +174,8 @@ export class BeaconChain implements IBeaconChain { readonly serializedCache: SerializedCache; + readonly getBlobsTracker: GetBlobsTracker; + readonly opts: IChainOptions; protected readonly blockProcessor: BlockProcessor; @@ -394,6 +397,14 @@ export class BeaconChain implements IBeaconChain { this.serializedCache = new SerializedCache(); + this.getBlobsTracker = new GetBlobsTracker({ + logger, + executionEngine: this.executionEngine, + emitter, + metrics, + config, + }); + this.archiveStore = new ArchiveStore( {db, chain: this, logger: logger as LoggerNode, metrics}, {...opts, dbName, anchorState: {finalizedCheckpoint: anchorState.finalizedCheckpoint}}, diff --git a/packages/beacon-node/src/chain/interface.ts b/packages/beacon-node/src/chain/interface.ts index 1b51deda8719..9b8c9e239401 100644 --- a/packages/beacon-node/src/chain/interface.ts +++ b/packages/beacon-node/src/chain/interface.ts @@ -63,6 +63,7 @@ import {SeenBlockAttesters} from "./seenCache/seenBlockAttesters.js"; import {SeenBlockInput} from "./seenCache/seenGossipBlockInput.js"; import {ShufflingCache} from "./shufflingCache.js"; import {ValidatorMonitor} from "./validatorMonitor.js"; +import {GetBlobsTracker} from "./GetBlobsTracker.js"; export {BlockType, type AssembledBlockType}; export {type ProposerPreparationData}; @@ -140,6 +141,8 @@ export interface IBeaconChain { // Cache for serialized objects readonly serializedCache: SerializedCache; + readonly getBlobsTracker: GetBlobsTracker; + readonly opts: IChainOptions; /** Start the processing of chain and load state from disk and related actions */ diff --git a/packages/beacon-node/src/network/processor/gossipHandlers.ts b/packages/beacon-node/src/network/processor/gossipHandlers.ts index 77fe2a3345c8..5e2aed1aeb17 100644 --- a/packages/beacon-node/src/network/processor/gossipHandlers.ts +++ b/packages/beacon-node/src/network/processor/gossipHandlers.ts @@ -73,7 +73,6 @@ import {sszDeserialize} from "../gossip/topic.js"; import {INetwork} from "../interface.js"; import {PeerAction} from "../peers/index.js"; import {AggregatorTracker} from "./aggregatorTracker.js"; -import {getDataColumnSidecarsFromExecution} from "../../util/execution.js"; import {DataColumnReconstructionError, recoverDataColumnSidecars} from "../../util/dataColumns.js"; import {callInNextEventLoop} from "../../util/eventLoop.js"; @@ -393,7 +392,7 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand source: BlockInputSource.gossip, }); // immediately attempt fetch of data columns from execution engine - getDataColumnSidecarsFromExecution(config, chain.executionEngine, chain.emitter, blockInput, metrics); + chain.getBlobsTracker.triggerGetBlobs(blockInput); } else { metrics?.blockInputFetchStats.totalDataAvailableBlockInputs.inc(); metrics?.blockInputFetchStats.totalDataAvailableBlockInputBlobs.inc( @@ -565,7 +564,7 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand }); }); // immediately attempt fetch of data columns from execution engine - getDataColumnSidecarsFromExecution(config, chain.executionEngine, chain.emitter, blockInput, metrics); + chain.getBlobsTracker.triggerGetBlobs(blockInput); } }, diff --git a/packages/beacon-node/src/util/execution.ts b/packages/beacon-node/src/util/execution.ts index 76b45666f31d..ba3e484fab11 100644 --- a/packages/beacon-node/src/util/execution.ts +++ b/packages/beacon-node/src/util/execution.ts @@ -13,104 +13,90 @@ import {isBlockInputColumns} from 
"../chain/blocks/blockInput/blockInput.js"; import {ForkPostFulu} from "@lodestar/params"; import {BLOB_AND_PROOF_V2_RPC_BYTES} from "../execution/engine/types.js"; -let running = false; -// Preallocate buffers for getBlobsV2 RPC calls -// See https://github.com/ChainSafe/lodestar/pull/8282 for context -const blobAndProofBuffers: Uint8Array[] = []; - /** * Post fulu, call getBlobsV2 from execution engine once per slot whenever we see either beacon_block or data_column_sidecar gossip message - * Only a single call can be in-flight at a time, subsequent calls are ignored */ export async function getDataColumnSidecarsFromExecution( config: ChainForkConfig, executionEngine: IExecutionEngine, emitter: ChainEventEmitter, blockInput: IBlockInput, - metrics: Metrics | null + metrics: Metrics | null, + blobAndProofBuffers?: Uint8Array[] ): Promise { - try { - if (running) { - return; - } - running = true; - - // If its not a column block input, exit - if (!isBlockInputColumns(blockInput)) { - return; - } + // If its not a column block input, exit + if (!isBlockInputColumns(blockInput)) { + return; + } - // If already have all columns, exit - if (blockInput.hasAllData()) { - return; - } + // If already have all columns, exit + if (blockInput.hasAllData()) { + return; + } - const versionedHashes = blockInput.getVersionedHashes(); + const versionedHashes = blockInput.getVersionedHashes(); - // If there are no blobs in this block, exit - if (versionedHashes.length === 0) { - return; - } + // If there are no blobs in this block, exit + if (versionedHashes.length === 0) { + return; + } - // Get blobs from execution engine - metrics?.peerDas.getBlobsV2Requests.inc(); - const timer = metrics?.peerDas.getBlobsV2RequestDuration.startTimer(); - if (blobAndProofBuffers) { - for (let i = 0; i < versionedHashes.length; i++) { - if (blobAndProofBuffers[i] === undefined) { - blobAndProofBuffers[i] = new Uint8Array(BLOB_AND_PROOF_V2_RPC_BYTES); - } + // Get blobs from execution engine + metrics?.peerDas.getBlobsV2Requests.inc(); + const timer = metrics?.peerDas.getBlobsV2RequestDuration.startTimer(); + if (blobAndProofBuffers) { + for (let i = 0; i < versionedHashes.length; i++) { + if (blobAndProofBuffers[i] === undefined) { + blobAndProofBuffers[i] = new Uint8Array(BLOB_AND_PROOF_V2_RPC_BYTES); } } - const blobs = await executionEngine.getBlobs( - blockInput.forkName as ForkPostFulu, - versionedHashes, - blobAndProofBuffers - ); - timer?.(); - - // Execution engine was unable to find one or more blobs - if (blobs === null) { - return; - } - metrics?.peerDas.getBlobsV2Responses.inc(); + } + const blobs = await executionEngine.getBlobs( + blockInput.forkName as ForkPostFulu, + versionedHashes, + blobAndProofBuffers + ); + timer?.(); - // Return if we received all data columns while waiting for getBlobs - if (blockInput.hasAllData()) { - return; - } + // Execution engine was unable to find one or more blobs + if (blobs === null) { + return; + } + metrics?.peerDas.getBlobsV2Responses.inc(); - let dataColumnSidecars: fulu.DataColumnSidecars; - const cellsAndProofs = await getCellsAndProofs(blobs); - if (blockInput.hasBlock()) { - dataColumnSidecars = getDataColumnSidecarsFromBlock( - config, - blockInput.getBlock() as fulu.SignedBeaconBlock, - cellsAndProofs - ); - } else { - const firstSidecar = blockInput.getAllColumns()[0]; - dataColumnSidecars = getDataColumnSidecarsFromColumnSidecar(firstSidecar, cellsAndProofs); - } + // Return if we received all data columns while waiting for getBlobs + if 
(blockInput.hasAllData()) { + return; + } - // Publish columns if and only if subscribed to them - const previouslyMissingColumns = blockInput.getMissingSampledColumnMeta().missing; - const sampledColumns = previouslyMissingColumns.map((columnIndex) => dataColumnSidecars[columnIndex]); + let dataColumnSidecars: fulu.DataColumnSidecars; + const cellsAndProofs = await getCellsAndProofs(blobs); + if (blockInput.hasBlock()) { + dataColumnSidecars = getDataColumnSidecarsFromBlock( + config, + blockInput.getBlock() as fulu.SignedBeaconBlock, + cellsAndProofs + ); + } else { + const firstSidecar = blockInput.getAllColumns()[0]; + dataColumnSidecars = getDataColumnSidecarsFromColumnSidecar(firstSidecar, cellsAndProofs); + } - // for columns that we already seen, it will be ignored through `ignoreDuplicatePublishError` gossip option - emitter.emit(ChainEvent.publishDataColumns, sampledColumns); + // Publish columns if and only if subscribed to them + const previouslyMissingColumns = blockInput.getMissingSampledColumnMeta().missing; + const sampledColumns = previouslyMissingColumns.map((columnIndex) => dataColumnSidecars[columnIndex]); - // add all sampled columns to the block input, even if we didn't sample them - const seenTimestampSec = Date.now() / 1000; - for (const columnSidecar of sampledColumns) { - blockInput.addColumn( - {columnSidecar, blockRootHex: blockInput.blockRootHex, source: BlockInputSource.engine, seenTimestampSec}, - {throwOnDuplicateAdd: false} // columns may have been added while waiting - ); - } + // for columns that we already seen, it will be ignored through `ignoreDuplicatePublishError` gossip option + emitter.emit(ChainEvent.publishDataColumns, sampledColumns); - metrics?.dataColumns.bySource.inc({source: BlockInputSource.engine}, previouslyMissingColumns.length); - } finally { - running = false; + // add all sampled columns to the block input, even if we didn't sample them + const seenTimestampSec = Date.now() / 1000; + for (const columnSidecar of sampledColumns) { + blockInput.addColumn( + {columnSidecar, blockRootHex: blockInput.blockRootHex, source: BlockInputSource.engine, seenTimestampSec}, + {throwOnDuplicateAdd: false} // columns may have been added while waiting + ); } + + metrics?.dataColumns.bySource.inc({source: BlockInputSource.engine}, previouslyMissingColumns.length); } From 369a160a902c30a927154140bfcecd752f2bc2a4 Mon Sep 17 00:00:00 2001 From: Cayman Date: Thu, 11 Sep 2025 10:35:27 -0400 Subject: [PATCH 155/173] feat: add imported columns by source metric (#8380) **Motivation** - Investigation of #8377 **Description** - add `BlockInputColumns.getSampledColumnsWithSource` so we can only measure the sources of column sidecars which contributed to our waiting to import blocks - add `lodestar_import_columns_by_source_total` to track the source of imported column sidecars --- .../src/chain/blocks/blockInput/blockInput.ts | 11 +++++++++++ packages/beacon-node/src/chain/blocks/importBlock.ts | 8 +++++++- packages/beacon-node/src/metrics/metrics/lodestar.ts | 5 +++++ 3 files changed, 23 insertions(+), 1 deletion(-) diff --git a/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts b/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts index d083d1776483..8f40b8a5d636 100644 --- a/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts +++ b/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts @@ -799,6 +799,17 @@ export class BlockInputColumns extends AbstractBlockInput({ + name: "lodestar_import_columns_by_source_total", + 
help: "Total number of imported columns (sampled columns) by source", + labelNames: ["source"], + }), notOverrideFcuReason: register.counter<{reason: NotReorgedReason}>({ name: "lodestar_import_block_not_override_fcu_reason_total", help: "Reason why the fcu call is not suppressed during block import", From 83fcfec89eae1153b621d3eaa7801927d0335ea2 Mon Sep 17 00:00:00 2001 From: Nazar Hussain Date: Thu, 11 Sep 2025 17:40:41 +0200 Subject: [PATCH 156/173] fix: add valid request window check for each fork (#8379) **Motivation** Add valid request window check for each fork **Description** - Add check for request window - Add unit tests Closes #8370 **Steps to test or reproduce** - Run all tests --- .../src/chain/blocks/blockInput/utils.ts | 13 ++- packages/beacon-node/src/sync/range/batch.ts | 21 +++-- packages/beacon-node/src/sync/range/chain.ts | 8 +- packages/beacon-node/src/sync/range/range.ts | 8 +- .../test/unit/sync/range/batch.test.ts | 80 +++++++++++++++---- .../test/unit/sync/range/chain.test.ts | 7 +- .../unit/sync/range/utils/batches.test.ts | 3 +- .../sync/range/utils/peerBalancer.test.ts | 13 +-- .../beacon-node/test/utils/blocksAndData.ts | 8 ++ 9 files changed, 125 insertions(+), 36 deletions(-) diff --git a/packages/beacon-node/src/chain/blocks/blockInput/utils.ts b/packages/beacon-node/src/chain/blocks/blockInput/utils.ts index c02911ad0b83..9e85b41c05e9 100644 --- a/packages/beacon-node/src/chain/blocks/blockInput/utils.ts +++ b/packages/beacon-node/src/chain/blocks/blockInput/utils.ts @@ -1,5 +1,5 @@ import {ChainForkConfig} from "@lodestar/config"; -import {ForkName, isForkPostDeneb} from "@lodestar/params"; +import {ForkName, isForkPostDeneb, isForkPostFulu} from "@lodestar/params"; import {computeEpochAtSlot} from "@lodestar/state-transition"; import {Epoch, Slot} from "@lodestar/types"; @@ -9,8 +9,13 @@ export function isDaOutOfRange( blockSlot: Slot, currentEpoch: Epoch ): boolean { - if (!isForkPostDeneb(forkName)) { - return true; + if (isForkPostFulu(forkName)) { + return computeEpochAtSlot(blockSlot) < currentEpoch - config.MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS; } - return computeEpochAtSlot(blockSlot) < currentEpoch - config.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS; + + if (isForkPostDeneb(forkName)) { + return computeEpochAtSlot(blockSlot) < currentEpoch - config.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS; + } + + return true; } diff --git a/packages/beacon-node/src/sync/range/batch.ts b/packages/beacon-node/src/sync/range/batch.ts index f1647bb73d13..724778cfaa8b 100644 --- a/packages/beacon-node/src/sync/range/batch.ts +++ b/packages/beacon-node/src/sync/range/batch.ts @@ -11,6 +11,8 @@ import {MAX_BATCH_DOWNLOAD_ATTEMPTS, MAX_BATCH_PROCESSING_ATTEMPTS} from "../con import {DownloadByRangeRequests} from "../utils/downloadByRange.js"; import {getBatchSlotRange, hashBlocks} from "./utils/index.js"; import {PeerSyncMeta} from "../../network/peers/peersData.js"; +import {IClock} from "../../util/clock.js"; +import {isDaOutOfRange} from "../../chain/blocks/blockInput/utils.js"; /** * Current state of a batch @@ -93,10 +95,12 @@ export class Batch { /** The number of download retries this batch has undergone due to a failed request. 
*/ private readonly failedDownloadAttempts: PeerIdStr[] = []; private readonly config: ChainForkConfig; + private readonly clock: IClock; private readonly custodyConfig: CustodyConfig; - constructor(startEpoch: Epoch, config: ChainForkConfig, custodyConfig: CustodyConfig) { + constructor(startEpoch: Epoch, config: ChainForkConfig, clock: IClock, custodyConfig: CustodyConfig) { this.config = config; + this.clock = clock; this.custodyConfig = custodyConfig; const {startSlot, count} = getBatchSlotRange(startEpoch); @@ -111,6 +115,13 @@ export class Batch { * Builds ByRange requests for block, blobs and columns */ private getRequests(blocks: IBlockInput[]): DownloadByRangeRequests { + const withinValidRequestWindow = !isDaOutOfRange( + this.config, + this.forkName, + this.startSlot, + this.clock.currentEpoch + ); + // fresh request where no blocks have started to be pulled yet if (!blocks.length) { const blocksRequest: phase0.BeaconBlocksByRangeRequest = { @@ -118,7 +129,7 @@ export class Batch { count: this.count, step: 1, }; - if (isForkPostFulu(this.forkName)) { + if (isForkPostFulu(this.forkName) && withinValidRequestWindow) { return { blocksRequest, columnsRequest: { @@ -128,7 +139,7 @@ export class Batch { }, }; } - if (isForkPostDeneb(this.forkName)) { + if (isForkPostDeneb(this.forkName) && withinValidRequestWindow) { return { blocksRequest, blobsRequest: { @@ -190,13 +201,13 @@ export class Batch { if (dataStartSlot <= endSlot) { // range of 40 - 63, startSlot will be inclusive but subtraction will exclusive so need to + 1 const count = endSlot - dataStartSlot + 1; - if (isForkPostFulu(this.forkName)) { + if (isForkPostFulu(this.forkName) && withinValidRequestWindow) { requests.columnsRequest = { count, startSlot: dataStartSlot, columns: Array.from(neededColumns), }; - } else if (isForkPostDeneb(this.forkName)) { + } else if (isForkPostDeneb(this.forkName) && withinValidRequestWindow) { requests.blobsRequest = { count, startSlot: dataStartSlot, diff --git a/packages/beacon-node/src/sync/range/chain.ts b/packages/beacon-node/src/sync/range/chain.ts index c784d9fea347..76fbe6859b3a 100644 --- a/packages/beacon-node/src/sync/range/chain.ts +++ b/packages/beacon-node/src/sync/range/chain.ts @@ -29,9 +29,11 @@ import { toBeDownloadedStartEpoch, validateBatchesStatus, } from "./utils/index.js"; +import {IClock} from "../../util/clock.js"; export type SyncChainModules = { config: ChainForkConfig; + clock: IClock; custodyConfig: CustodyConfig; logger: Logger; metrics: Metrics | null; @@ -130,6 +132,7 @@ export class SyncChain { private readonly logger: Logger; private readonly config: ChainForkConfig; + private readonly clock: IClock; private readonly metrics: Metrics | null; private readonly custodyConfig: CustodyConfig; @@ -140,7 +143,7 @@ export class SyncChain { fns: SyncChainFns, modules: SyncChainModules ) { - const {config, custodyConfig, logger, metrics} = modules; + const {config, clock, custodyConfig, logger, metrics} = modules; this.firstBatchEpoch = initialBatchEpoch; this.lastEpochWithProcessBlocks = initialBatchEpoch; this.target = initialTarget; @@ -151,6 +154,7 @@ export class SyncChain { this.pruneBlockInputs = fns.pruneBlockInputs; this.getConnectedPeerSyncMeta = fns.getConnectedPeerSyncMeta; this.config = config; + this.clock = clock; this.metrics = metrics; this.custodyConfig = custodyConfig; this.logger = logger; @@ -441,7 +445,7 @@ export class SyncChain { return null; } - const batch = new Batch(startEpoch, this.config, this.custodyConfig); + const batch = new 
Batch(startEpoch, this.config, this.clock, this.custodyConfig); this.batches.set(startEpoch, batch); return batch; } diff --git a/packages/beacon-node/src/sync/range/range.ts b/packages/beacon-node/src/sync/range/range.ts index 238ef12b1765..270db6e8924b 100644 --- a/packages/beacon-node/src/sync/range/range.ts +++ b/packages/beacon-node/src/sync/range/range.ts @@ -259,7 +259,13 @@ export class RangeSync extends (EventEmitter as {new (): RangeSyncEmitter}) { pruneBlockInputs: this.pruneBlockInputs, onEnd: this.onSyncChainEnd, }, - {config: this.config, logger: this.logger, custodyConfig: this.chain.custodyConfig, metrics: this.metrics} + { + config: this.config, + clock: this.chain.clock, + logger: this.logger, + custodyConfig: this.chain.custodyConfig, + metrics: this.metrics, + } ); this.chains.set(syncType, syncChain); diff --git a/packages/beacon-node/test/unit/sync/range/batch.test.ts b/packages/beacon-node/test/unit/sync/range/batch.test.ts index 3b989f5108e6..7a2b19a6c930 100644 --- a/packages/beacon-node/test/unit/sync/range/batch.test.ts +++ b/packages/beacon-node/test/unit/sync/range/batch.test.ts @@ -1,13 +1,13 @@ import {generateKeyPair} from "@libp2p/crypto/keys"; import {ForkName} from "@lodestar/params"; import {ssz} from "@lodestar/types"; -import {beforeEach, describe, expect, it} from "vitest"; +import {afterEach, beforeEach, describe, expect, it, vi} from "vitest"; import {BlockInputPreData} from "../../../../src/chain/blocks/blockInput/blockInput.js"; import {BlockInputSource} from "../../../../src/chain/blocks/blockInput/types.js"; import {computeNodeIdFromPrivateKey} from "../../../../src/network/subnets/index.js"; import {Batch, BatchError, BatchErrorCode, BatchStatus} from "../../../../src/sync/range/batch.js"; import {CustodyConfig} from "../../../../src/util/dataColumns.js"; -import {config} from "../../../utils/blocksAndData.js"; +import {clock, config} from "../../../utils/blocksAndData.js"; import {expectThrowsLodestarError} from "../../../utils/errors.js"; import {validPeerIdStr} from "../../../utils/peer.js"; @@ -124,13 +124,17 @@ describe("sync / range / batch", async () => { const custodyConfig = new CustodyConfig({config, nodeId}); const peer = validPeerIdStr; + afterEach(() => { + vi.restoreAllMocks(); + }); + describe("getRequests", () => { describe("PreDeneb", () => { let batch: Batch; const startEpoch = config.CAPELLA_FORK_EPOCH + 1; it("should make default pre-deneb requests if no existing blocks are passed", () => { - batch = new Batch(startEpoch, config, custodyConfig); + batch = new Batch(startEpoch, config, clock, custodyConfig); expect(batch.requests.blocksRequest).toEqual({startSlot: batch.startSlot, count: batch.count, step: 1}); expect(batch.requests.blobsRequest).toBeUndefined(); expect(batch.requests.columnsRequest).toBeUndefined(); @@ -143,15 +147,35 @@ describe("sync / range / batch", async () => { let batch: Batch; const startEpoch = config.DENEB_FORK_EPOCH + 1; - beforeEach(() => { - batch = new Batch(startEpoch, config, custodyConfig); + it("should make default ForkDABlobs requests if no existing blocks are passed", () => { + batch = new Batch(startEpoch, config, clock, custodyConfig); + + expect(batch.requests.blocksRequest).toEqual({startSlot: batch.startSlot, count: batch.count, step: 1}); + expect(batch.requests.blobsRequest).toEqual({startSlot: batch.startSlot, count: batch.count}); + expect(batch.requests.columnsRequest).toBeUndefined(); }); - it("should make default ForkDABlobs requests if no existing blocks are passed", () => { 
+ it("should make default ForkDABlobs requests if current epoch is the last in request range", () => { + vi.spyOn(clock, "currentEpoch", "get").mockReturnValue( + startEpoch + config.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS + ); + batch = new Batch(startEpoch, config, clock, custodyConfig); + expect(batch.requests.blocksRequest).toEqual({startSlot: batch.startSlot, count: batch.count, step: 1}); expect(batch.requests.blobsRequest).toEqual({startSlot: batch.startSlot, count: batch.count}); expect(batch.requests.columnsRequest).toBeUndefined(); }); + + it("should not make ForkDABlobs requests if current epoch is ahead of request range", () => { + vi.spyOn(clock, "currentEpoch", "get").mockReturnValue( + startEpoch + config.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS + 1 + ); + batch = new Batch(startEpoch, config, clock, custodyConfig); + + expect(batch.requests.blocksRequest).toEqual({startSlot: batch.startSlot, count: batch.count, step: 1}); + expect(batch.requests.blobsRequest).toBeUndefined(); + expect(batch.requests.columnsRequest).toBeUndefined(); + }); }); describe("ForkDAColumns", () => { @@ -159,10 +183,10 @@ describe("sync / range / batch", async () => { const startEpoch = config.FULU_FORK_EPOCH + 1; beforeEach(() => { - batch = new Batch(startEpoch, config, custodyConfig); + batch = new Batch(startEpoch, config, clock, custodyConfig); }); - it("should make default pre-deneb requests if no existing blocks are passed", () => { + it("should make ForkDAColumns requests if no existing blocks are passed", () => { expect(batch.requests.blocksRequest).toEqual({startSlot: batch.startSlot, count: batch.count, step: 1}); expect(batch.requests.blobsRequest).toBeUndefined(); expect(batch.requests.columnsRequest).toEqual({ @@ -171,11 +195,37 @@ describe("sync / range / batch", async () => { columns: custodyConfig.sampledColumns, }); }); + + it("should make ForkDAColumns requests if current epoch is the last in request range", () => { + vi.spyOn(clock, "currentEpoch", "get").mockReturnValue( + startEpoch + config.MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS + ); + batch = new Batch(startEpoch, config, clock, custodyConfig); + + expect(batch.requests.blocksRequest).toEqual({startSlot: batch.startSlot, count: batch.count, step: 1}); + expect(batch.requests.blobsRequest).toBeUndefined(); + expect(batch.requests.columnsRequest).toEqual({ + startSlot: batch.startSlot, + count: batch.count, + columns: custodyConfig.sampledColumns, + }); + }); + + it("should not make ForkDAColumns if current epoch is the last in request range", () => { + vi.spyOn(clock, "currentEpoch", "get").mockReturnValue( + startEpoch + config.MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS + 1 + ); + batch = new Batch(startEpoch, config, clock, custodyConfig); + + expect(batch.requests.blocksRequest).toEqual({startSlot: batch.startSlot, count: batch.count, step: 1}); + expect(batch.requests.blobsRequest).toBeUndefined(); + expect(batch.requests.columnsRequest).toBeUndefined(); + }); }); it("should not request data pre-deneb", () => { const startEpoch = config.CAPELLA_FORK_EPOCH - 1; - const batch = new Batch(startEpoch, config, custodyConfig); + const batch = new Batch(startEpoch, config, clock, custodyConfig); expect(batch.requests.blocksRequest).toEqual({startSlot: batch.startSlot, count: batch.count, step: 1}); expect(batch.requests.blobsRequest).toBeUndefined(); expect(batch.requests.columnsRequest).toBeUndefined(); @@ -191,7 +241,7 @@ describe("sync / range / batch", async () => { it("should request columns post-fulu", () => { const 
startEpoch = config.FULU_FORK_EPOCH + 1; - const batch = new Batch(startEpoch, config, custodyConfig); + const batch = new Batch(startEpoch, config, clock, custodyConfig); expect(batch.requests.blocksRequest).toEqual({startSlot: batch.startSlot, count: batch.count, step: 1}); expect(batch.requests.blobsRequest).toBeUndefined(); expect(batch.requests.columnsRequest).toEqual({ @@ -203,7 +253,7 @@ describe("sync / range / batch", async () => { it("should have same start slot and count for blocks and data requests", () => { const startEpoch = config.FULU_FORK_EPOCH + 1; - const batch = new Batch(startEpoch, config, custodyConfig); + const batch = new Batch(startEpoch, config, clock, custodyConfig); expect(batch.requests.blocksRequest?.startSlot).toEqual(batch.requests.columnsRequest?.startSlot); expect(batch.requests.blocksRequest?.count).toEqual(batch.requests.columnsRequest?.count); }); @@ -215,7 +265,7 @@ describe("sync / range / batch", async () => { it("Complete state flow", () => { const startEpoch = 0; - const batch = new Batch(startEpoch, config, custodyConfig); + const batch = new Batch(startEpoch, config, clock, custodyConfig); // Instantion: AwaitingDownload expect(batch.state.status).toBe(BatchStatus.AwaitingDownload); @@ -281,7 +331,7 @@ describe("sync / range / batch", async () => { it("Should throw on inconsistent state - downloadingSuccess", () => { const startEpoch = 0; - const batch = new Batch(startEpoch, config, custodyConfig); + const batch = new Batch(startEpoch, config, clock, custodyConfig); expectThrowsLodestarError( () => batch.downloadingSuccess(peer, []), @@ -296,7 +346,7 @@ describe("sync / range / batch", async () => { it("Should throw on inconsistent state - startProcessing", () => { const startEpoch = 0; - const batch = new Batch(startEpoch, config, custodyConfig); + const batch = new Batch(startEpoch, config, clock, custodyConfig); expectThrowsLodestarError( () => batch.startProcessing(), @@ -311,7 +361,7 @@ describe("sync / range / batch", async () => { it("Should throw on inconsistent state - processingSuccess", () => { const startEpoch = 0; - const batch = new Batch(startEpoch, config, custodyConfig); + const batch = new Batch(startEpoch, config, clock, custodyConfig); expectThrowsLodestarError( () => batch.processingSuccess(), diff --git a/packages/beacon-node/test/unit/sync/range/chain.test.ts b/packages/beacon-node/test/unit/sync/range/chain.test.ts index 9053161d43c9..76b07e360aea 100644 --- a/packages/beacon-node/test/unit/sync/range/chain.test.ts +++ b/packages/beacon-node/test/unit/sync/range/chain.test.ts @@ -13,6 +13,7 @@ import {CustodyConfig} from "../../../../src/util/dataColumns.js"; import {linspace} from "../../../../src/util/numpy.js"; import {testLogger} from "../../../utils/logger.js"; import {validPeerIdStr} from "../../../utils/peer.js"; +import {Clock} from "../../../../src/util/clock.js"; describe("sync / range / chain", () => { const testCases: { @@ -124,6 +125,7 @@ describe("sync / range / chain", () => { await new Promise((resolve, reject) => { const onEnd: SyncChainFns["onEnd"] = (err) => (err ? 
reject(err) : resolve()); + const clock = new Clock({config, genesisTime: 0, signal: new AbortController().signal}); const initialSync = new SyncChain( startEpoch, target, @@ -136,7 +138,7 @@ describe("sync / range / chain", () => { pruneBlockInputs, onEnd, }), - {config, logger, custodyConfig, metrics: null} + {config, logger, clock, custodyConfig, metrics: null} ); const peers = [peer]; @@ -178,6 +180,7 @@ describe("sync / range / chain", () => { await new Promise((resolve, reject) => { const onEnd: SyncChainFns["onEnd"] = (err) => (err ? reject(err) : resolve()); + const clock = new Clock({config, genesisTime: 0, signal: new AbortController().signal}); const initialSync = new SyncChain( startEpoch, target, @@ -190,7 +193,7 @@ describe("sync / range / chain", () => { getConnectedPeerSyncMeta, onEnd, }), - {config, logger, custodyConfig, metrics: null} + {config, logger, clock, custodyConfig, metrics: null} ); // Add peers after some time diff --git a/packages/beacon-node/test/unit/sync/range/utils/batches.test.ts b/packages/beacon-node/test/unit/sync/range/utils/batches.test.ts index b4bfe1b03b1c..66c484998bd7 100644 --- a/packages/beacon-node/test/unit/sync/range/utils/batches.test.ts +++ b/packages/beacon-node/test/unit/sync/range/utils/batches.test.ts @@ -11,6 +11,7 @@ import { } from "../../../../../src/sync/range/utils/batches.js"; import {CustodyConfig} from "../../../../../src/util/dataColumns.js"; import {validPeerIdStr} from "../../../../utils/peer.js"; +import {clock} from "../../../../utils/blocksAndData.js"; describe("sync / range / batches", () => { const peer = validPeerIdStr; @@ -221,7 +222,7 @@ describe("sync / range / batches", () => { }); function createBatch(status: BatchStatus, startEpoch = 0): Batch { - const batch = new Batch(startEpoch, config, new CustodyConfig({config, nodeId: Buffer.alloc(32)})); + const batch = new Batch(startEpoch, config, clock, new CustodyConfig({config, nodeId: Buffer.alloc(32)})); if (status === BatchStatus.AwaitingDownload) return batch; diff --git a/packages/beacon-node/test/unit/sync/range/utils/peerBalancer.test.ts b/packages/beacon-node/test/unit/sync/range/utils/peerBalancer.test.ts index 8c0301e1cbe6..08306f53ab42 100644 --- a/packages/beacon-node/test/unit/sync/range/utils/peerBalancer.test.ts +++ b/packages/beacon-node/test/unit/sync/range/utils/peerBalancer.test.ts @@ -13,6 +13,7 @@ import {RangeSyncType} from "../../../../../src/sync/utils/remoteSyncType.js"; import {CustodyConfig} from "../../../../../src/util/dataColumns.js"; import {PeerIdStr} from "../../../../../src/util/peerId.js"; import {getRandPeerSyncMeta} from "../../../../utils/peer.js"; +import {clock} from "../../../../utils/blocksAndData.js"; describe("sync / range / peerBalancer", () => { const custodyConfig = {sampledColumns: [0, 1, 2, 3]} as CustodyConfig; @@ -144,8 +145,8 @@ describe("sync / range / peerBalancer", () => { ? 
createChainForkConfig({...chainConfig, FULU_FORK_EPOCH: 0}) : createChainForkConfig(chainConfig); - const batch0 = new Batch(1, config, custodyConfig); - const batch1 = new Batch(2, config, custodyConfig); + const batch0 = new Batch(1, config, clock, custodyConfig); + const batch1 = new Batch(2, config, clock, custodyConfig); // Batch zero has a failedDownloadAttempt with peer1 batch0.startDownloading(peer1.peerId); @@ -167,7 +168,7 @@ describe("sync / range / peerBalancer", () => { it("should not retry the batch with a not as up-to-date peer", async () => { const config = createChainForkConfig({...chainConfig, FULU_FORK_EPOCH: 0}); - const batch0 = new Batch(1, config, custodyConfig); + const batch0 = new Batch(1, config, clock, custodyConfig); const blocksRequest = batch0.requests.blocksRequest as {startSlot: number; count: number}; // Batch zero has a failedDownloadAttempt with peer1 batch0.startDownloading(peer1.peerId); @@ -304,13 +305,13 @@ describe("sync / range / peerBalancer", () => { ? createChainForkConfig({...chainConfig, FULU_FORK_EPOCH: 0}) : createChainForkConfig(chainConfig); - const batch0 = new Batch(1, config, custodyConfig); - const batch1 = new Batch(2, config, custodyConfig); + const batch0 = new Batch(1, config, clock, custodyConfig); + const batch1 = new Batch(2, config, clock, custodyConfig); // peer1 and peer2 are busy downloading batch0.startDownloading(peer1.peerId); batch1.startDownloading(peer2.peerId); - const newBatch = new Batch(3, config, custodyConfig); + const newBatch = new Batch(3, config, clock, custodyConfig); const peerBalancer = new ChainPeersBalancer(peerInfos, [batch0, batch1], custodyConfig, RangeSyncType.Head); const idlePeer = peerBalancer.idlePeerForBatch(newBatch); expect(idlePeer?.peerId).toBe(expected); diff --git a/packages/beacon-node/test/utils/blocksAndData.ts b/packages/beacon-node/test/utils/blocksAndData.ts index c1e2463b60bb..fa90e0a25aea 100644 --- a/packages/beacon-node/test/utils/blocksAndData.ts +++ b/packages/beacon-node/test/utils/blocksAndData.ts @@ -8,6 +8,7 @@ import { ForkPostDeneb, ForkPostFulu, NUMBER_OF_COLUMNS, + SLOTS_PER_EPOCH, isForkPostDeneb, isForkPostFulu, } from "@lodestar/params"; @@ -20,6 +21,7 @@ import {getBlobSidecars, kzgCommitmentToVersionedHash} from "../../src/util/blobs.js"; import {CustodyConfig, computePostFuluKzgCommitmentsInclusionProof} from "../../src/util/dataColumns.js"; import {kzg} from "../../src/util/kzg.js"; import {ROOT_SIZE} from "../../src/util/sszBytes.js"; +import {Clock} from "../../src/util/clock.js"; export const CAPELLA_FORK_EPOCH = 0; export const DENEB_FORK_EPOCH = 10; @@ -34,6 +36,12 @@ export const config = createChainForkConfig({ FULU_FORK_EPOCH, GLOAS_FORK_EPOCH, }); +export const clock = new Clock({ + config, + // For our testing we want the clock to be at the head of the latest fork + genesisTime: Date.now() / 1000 - SLOTS_PER_EPOCH * GLOAS_FORK_EPOCH * config.SECONDS_PER_SLOT, + signal: new AbortController().signal, +}); export const privateKey = await generateKeyPair("secp256k1"); export const nodeId = computeNodeIdFromPrivateKey(privateKey); export const custodyConfig = new CustodyConfig({config, nodeId}); From 70d969e1765730e38cf4913d34e7c0ba6ae9fa59 Mon Sep 17 00:00:00 2001 From: Cayman Date: Thu, 11 Sep 2025 14:58:40 -0400 Subject: [PATCH 157/173] feat: add gossip data column sidecar verification skip (#8382) **Motivation** - investigating the block input refactor **Description** - Add the possibility of skipping processing if the column already exists (will monitor metrics to see if this happens)
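For reference, the shape of the skip is roughly the following (a condensed sketch of the gossip handler change in the diff below; `seenBlockInputCache`, `isBlockInputColumns`, and `hasColumn` are the names used there):

```typescript
// before paying for gossip verification, check the block input cache
const blockInput = chain.seenBlockInputCache.get(blockRootHex);
if (blockInput && isBlockInputColumns(blockInput) && blockInput.hasColumn(dataColumnSidecar.index)) {
  metrics?.peerDas.dataColumnSidecarProcessingSkip.inc();
  return blockInput; // column already known, e.g. fetched via getBlobsV2
}
```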
--- .../beacon-node/src/metrics/metrics/beacon.ts | 4 ++++ .../src/network/processor/gossipHandlers.ts | 19 ++++++++++++++++--- 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/packages/beacon-node/src/metrics/metrics/beacon.ts b/packages/beacon-node/src/metrics/metrics/beacon.ts index 00aaa7f75680..b3d7b58ddeb0 100644 --- a/packages/beacon-node/src/metrics/metrics/beacon.ts +++ b/packages/beacon-node/src/metrics/metrics/beacon.ts @@ -293,6 +293,10 @@ export function createBeaconMetrics(register: RegistryMetricCreator) { name: "beacon_data_column_sidecar_processing_requests_total", help: "Number of data column sidecars submitted for processing", }), + dataColumnSidecarProcessingSkip: register.counter({ + name: "beacon_data_column_sidecar_processing_skip_total", + help: "Number of data column sidecars with processing skipped for gossip", + }), dataColumnSidecarProcessingSuccesses: register.counter({ name: "beacon_data_column_sidecar_processing_successes_total", help: "Number of data column sidecars verified for gossip", diff --git a/packages/beacon-node/src/network/processor/gossipHandlers.ts b/packages/beacon-node/src/network/processor/gossipHandlers.ts index 5e2aed1aeb17..3abf68688a29 100644 --- a/packages/beacon-node/src/network/processor/gossipHandlers.ts +++ b/packages/beacon-node/src/network/processor/gossipHandlers.ts @@ -22,7 +22,7 @@ import { sszTypesFor, } from "@lodestar/types"; import {LogLevel, Logger, prettyBytes, toHex, toRootHex} from "@lodestar/utils"; -import {BlockInput, BlockInputSource, IBlockInput} from "../../chain/blocks/blockInput/index.js"; +import {BlockInput, BlockInputSource, IBlockInput, isBlockInputColumns} from "../../chain/blocks/blockInput/index.js"; import {BlobSidecarValidation} from "../../chain/blocks/types.js"; import {ChainEvent} from "../../chain/emitter.js"; import { @@ -284,12 +284,25 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand seenTimestampSec: number ): Promise { metrics?.peerDas.dataColumnSidecarProcessingRequests.inc(); - const verificationTimer = metrics?.peerDas.dataColumnSidecarGossipVerificationTime.startTimer(); - const dataColumnBlockHeader = dataColumnSidecar.signedBlockHeader.message; const slot = dataColumnBlockHeader.slot; const blockRootHex = toRootHex(ssz.phase0.BeaconBlockHeader.hashTreeRoot(dataColumnBlockHeader)); + // first check if we should even process this column (we may have already processed it via getBlobsV2) + { + const blockInput = chain.seenBlockInputCache.get(blockRootHex); + if (blockInput && isBlockInputColumns(blockInput) && blockInput.hasColumn(dataColumnSidecar.index)) { + metrics?.peerDas.dataColumnSidecarProcessingSkip.inc(); + logger.debug("Already have column sidecar, skipping processing", { + ...blockInput.getLogMeta(), + index: dataColumnSidecar.index, + }); + return blockInput; + } + } + + const verificationTimer = metrics?.peerDas.dataColumnSidecarGossipVerificationTime.startTimer(); + const delaySec = chain.clock.secFromSlot(slot, seenTimestampSec); const recvToValLatency = Date.now() / 1000 - seenTimestampSec; From 0dc23bf4f19149d1e34ec7a83fe5da661ddb6eeb Mon Sep 17 00:00:00 2001 From: Cayman Date: Thu, 11 Sep 2025 16:54:46 -0400 Subject: [PATCH 158/173] feat: add ColumnReconstructionTracker (#8383) **Motivation** - investigation of block input metrics **Description** - Add ColumnReconstructionTracker (to avoid duplicate and multiple in-flight calls) - Add delay before reconstruction (configurable via file constants) - Publish
all newly seen columns from reconstruction - Track in metrics when reconstruction is skipped due to having all columns --- .../src/chain/ColumnReconstructionTracker.ts | 72 +++++++++++++++++++ packages/beacon-node/src/chain/chain.ts | 8 +++ packages/beacon-node/src/chain/interface.ts | 2 + .../src/metrics/metrics/lodestar.ts | 5 -- .../src/network/processor/gossipHandlers.ts | 37 ++++------ packages/beacon-node/src/util/dataColumns.ts | 49 ++++++++----- 6 files changed, 124 insertions(+), 49 deletions(-) create mode 100644 packages/beacon-node/src/chain/ColumnReconstructionTracker.ts diff --git a/packages/beacon-node/src/chain/ColumnReconstructionTracker.ts b/packages/beacon-node/src/chain/ColumnReconstructionTracker.ts new file mode 100644 index 000000000000..88e7d3508514 --- /dev/null +++ b/packages/beacon-node/src/chain/ColumnReconstructionTracker.ts @@ -0,0 +1,72 @@ +import {Logger, sleep} from "@lodestar/utils"; +import {ChainEventEmitter} from "./emitter.js"; +import {Metrics} from "../metrics/metrics.js"; +import {ChainForkConfig} from "@lodestar/config"; +import {BlockInputColumns} from "./blocks/blockInput/index.js"; +import {recoverDataColumnSidecars} from "../util/dataColumns.js"; + +/** + * Minimum time to wait before attempting reconstruction + */ +const RECONSTRUCTION_DELAY_MIN_MS = 800; + +/** + * Maximum time to wait before attempting reconstruction + */ +const RECONSTRUCTION_DELAY_MAX_MS = 1200; + +export type ColumnReconstructionTrackerInit = { + logger: Logger; + emitter: ChainEventEmitter; + metrics: Metrics | null; + config: ChainForkConfig; +}; + +/** + * Tracks column reconstruction attempts to avoid duplicate and multiple in-flight calls + */ +export class ColumnReconstructionTracker { + logger: Logger; + emitter: ChainEventEmitter; + metrics: Metrics | null; + config: ChainForkConfig; + + /** + * Track last attempted block root + * + * This is sufficient to avoid duplicate calls since we only call this + * function when we see a new data column sidecar from gossip. + */ + lastBlockRootHex: string | null = null; + /** Track if a reconstruction attempt is in-flight */ + running = false; + + constructor(init: ColumnReconstructionTrackerInit) { + this.logger = init.logger; + this.emitter = init.emitter; + this.metrics = init.metrics; + this.config = init.config; + } + + triggerColumnReconstruction(blockInput: BlockInputColumns): void { + if (this.running) { + return; + } + + if (this.lastBlockRootHex === blockInput.blockRootHex) { + return; + } + + // We don't care about the outcome of this call, + // just that it has been triggered for this block root. 
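+ // NOTE: `running` and `lastBlockRootHex` act as a single-flight guard; a trigger that + // arrives while a reconstruction is in-flight is dropped rather than queued.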
+ this.running = true; + this.lastBlockRootHex = blockInput.blockRootHex; + const delay = + RECONSTRUCTION_DELAY_MIN_MS + Math.random() * (RECONSTRUCTION_DELAY_MAX_MS - RECONSTRUCTION_DELAY_MIN_MS); + sleep(delay).then(() => { + recoverDataColumnSidecars(blockInput, this.emitter, this.metrics).finally(() => { + this.running = false; + }); + }); + } +} diff --git a/packages/beacon-node/src/chain/chain.ts b/packages/beacon-node/src/chain/chain.ts index 24fef2a54b3d..3d8f0371b6c8 100644 --- a/packages/beacon-node/src/chain/chain.ts +++ b/packages/beacon-node/src/chain/chain.ts @@ -101,6 +101,7 @@ import {InMemoryCheckpointStateCache} from "./stateCache/inMemoryCheckpointsCach import {PersistentCheckpointStateCache} from "./stateCache/persistentCheckpointsCache.js"; import {ValidatorMonitor} from "./validatorMonitor.js"; import {GetBlobsTracker} from "./GetBlobsTracker.js"; +import {ColumnReconstructionTracker} from "./ColumnReconstructionTracker.js"; /** * The maximum number of cached produced results to keep in memory. @@ -175,6 +176,7 @@ export class BeaconChain implements IBeaconChain { readonly serializedCache: SerializedCache; readonly getBlobsTracker: GetBlobsTracker; + readonly columnReconstructionTracker: ColumnReconstructionTracker; readonly opts: IChainOptions; @@ -404,6 +406,12 @@ export class BeaconChain implements IBeaconChain { metrics, config, }); + this.columnReconstructionTracker = new ColumnReconstructionTracker({ + logger, + emitter, + metrics, + config, + }); this.archiveStore = new ArchiveStore( {db, chain: this, logger: logger as LoggerNode, metrics}, diff --git a/packages/beacon-node/src/chain/interface.ts b/packages/beacon-node/src/chain/interface.ts index 9b8c9e239401..dfbf64e674b1 100644 --- a/packages/beacon-node/src/chain/interface.ts +++ b/packages/beacon-node/src/chain/interface.ts @@ -64,6 +64,7 @@ import {SeenBlockInput} from "./seenCache/seenGossipBlockInput.js"; import {ShufflingCache} from "./shufflingCache.js"; import {ValidatorMonitor} from "./validatorMonitor.js"; import {GetBlobsTracker} from "./GetBlobsTracker.js"; +import {ColumnReconstructionTracker} from "./ColumnReconstructionTracker.js"; export {BlockType, type AssembledBlockType}; export {type ProposerPreparationData}; @@ -142,6 +143,7 @@ export interface IBeaconChain { readonly serializedCache: SerializedCache; readonly getBlobsTracker: GetBlobsTracker; + readonly columnReconstructionTracker: ColumnReconstructionTracker; readonly opts: IChainOptions; diff --git a/packages/beacon-node/src/metrics/metrics/lodestar.ts b/packages/beacon-node/src/metrics/metrics/lodestar.ts index d199e3f74d40..dfddc517b510 100644 --- a/packages/beacon-node/src/metrics/metrics/lodestar.ts +++ b/packages/beacon-node/src/metrics/metrics/lodestar.ts @@ -769,11 +769,6 @@ export function createLodestarMetrics( }), }, recoverDataColumnSidecars: { - elapsedTimeTillReconstructed: register.histogram({ - name: "lodestar_data_column_sidecar_elapsed_time_till_reconstructed_seconds", - help: "Time elapsed between block slot time and the time data column sidecar reconstructed", - buckets: [2, 4, 6, 8, 10, 12], - }), recoverTime: register.histogram({ name: "lodestar_recover_data_column_sidecar_recover_time_seconds", help: "Time elapsed to recover data column sidecar", diff --git a/packages/beacon-node/src/network/processor/gossipHandlers.ts b/packages/beacon-node/src/network/processor/gossipHandlers.ts index 3abf68688a29..92e1c9e57c2e 100644 --- a/packages/beacon-node/src/network/processor/gossipHandlers.ts +++ 
b/packages/beacon-node/src/network/processor/gossipHandlers.ts @@ -22,7 +22,13 @@ import { sszTypesFor, } from "@lodestar/types"; import {LogLevel, Logger, prettyBytes, toHex, toRootHex} from "@lodestar/utils"; -import {BlockInput, BlockInputSource, IBlockInput, isBlockInputColumns} from "../../chain/blocks/blockInput/index.js"; +import { + BlockInput, + BlockInputColumns, + BlockInputSource, + IBlockInput, + isBlockInputColumns, +} from "../../chain/blocks/blockInput/index.js"; import {BlobSidecarValidation} from "../../chain/blocks/types.js"; import {ChainEvent} from "../../chain/emitter.js"; import { @@ -73,8 +79,6 @@ import {sszDeserialize} from "../gossip/topic.js"; import {INetwork} from "../interface.js"; import {PeerAction} from "../peers/index.js"; import {AggregatorTracker} from "./aggregatorTracker.js"; -import {DataColumnReconstructionError, recoverDataColumnSidecars} from "../../util/dataColumns.js"; -import {callInNextEventLoop} from "../../util/eventLoop.js"; /** * Gossip handler options as part of network options @@ -282,7 +286,7 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand gossipSubnet: SubnetID, peerIdStr: string, seenTimestampSec: number - ): Promise { + ): Promise { metrics?.peerDas.dataColumnSidecarProcessingRequests.inc(); const dataColumnBlockHeader = dataColumnSidecar.signedBlockHeader.message; const slot = dataColumnBlockHeader.slot; @@ -316,27 +320,6 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand peerIdStr, }); - // only triggers reconstruction on the 64th column to deduplicate the expensive request - if (blockInput.columnCount === NUMBER_OF_COLUMNS / 2) { - // do not await to block gossip handler - callInNextEventLoop(() => { - recoverDataColumnSidecars(blockInput, chain.clock, metrics).catch((err) => { - if (err instanceof DataColumnReconstructionError) { - metrics?.recoverDataColumnSidecars.reconstructionResult.inc({ - result: err.type.code, - }); - } - logger.debug( - "Error recovering column sidecars", - { - blockRoot: blockRootHex, - }, - err - ); - }); - }); - } - const recvToValidation = Date.now() / 1000 - seenTimestampSec; const validationTime = recvToValidation - recvToValLatency; @@ -578,6 +561,10 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand }); // immediately attempt fetch of data columns from execution engine chain.getBlobsTracker.triggerGetBlobs(blockInput); + // if we've received at least half of the columns, trigger reconstruction of the rest + if (blockInput.columnCount >= NUMBER_OF_COLUMNS / 2) { + chain.columnReconstructionTracker.triggerColumnReconstruction(blockInput); + } } }, diff --git a/packages/beacon-node/src/util/dataColumns.ts b/packages/beacon-node/src/util/dataColumns.ts index 4e2da079801f..f35dd7f4d444 100644 --- a/packages/beacon-node/src/util/dataColumns.ts +++ b/packages/beacon-node/src/util/dataColumns.ts @@ -20,8 +20,8 @@ import {kzg} from "./kzg.js"; import {dataColumnMatrixRecovery} from "./blobs.js"; import {BlockInputColumns} from "../chain/blocks/blockInput/blockInput.js"; import {Metrics} from "../metrics/metrics.js"; -import {IClock} from "./clock.js"; import {BlockInputSource} from "../chain/blocks/blockInput/types.js"; +import {ChainEvent, ChainEventEmitter} from "../chain/emitter.js"; export enum RecoverResult { // the recover is not attempted because we have less than `NUMBER_OF_COLUMNS / 2` columns @@ -342,19 +342,25 @@ export function getDataColumnSidecarsFromColumnSidecar( */ export async function 
recoverDataColumnSidecars( blockInput: BlockInputColumns, - clock: IClock, + emitter: ChainEventEmitter, metrics: Metrics | null ): Promise<void> { const existingColumns = blockInput.getAllColumns(); const columnCount = existingColumns.length; if (columnCount >= NUMBER_OF_COLUMNS) { // We have all columns - throw new DataColumnReconstructionError({code: DataColumnReconstructionCode.NotAttemptedAlreadyFull}); + metrics?.recoverDataColumnSidecars.reconstructionResult.inc({ + result: DataColumnReconstructionCode.NotAttemptedAlreadyFull, + }); + return; } if (columnCount < NUMBER_OF_COLUMNS / 2) { // We don't have enough columns to recover - throw new DataColumnReconstructionError({code: DataColumnReconstructionCode.NotAttemptedHaveLessThanHalf}); + metrics?.recoverDataColumnSidecars.reconstructionResult.inc({ + result: DataColumnReconstructionCode.NotAttemptedHaveLessThanHalf, + }); + return; } metrics?.recoverDataColumnSidecars.custodyBeforeReconstruction.set(columnCount); @@ -369,28 +375,32 @@ export async function recoverDataColumnSidecars( const timer = metrics?.recoverDataColumnSidecars.recoverTime.startTimer(); - // if this function throws, we catch at the consumer side - const fullSidecars = await dataColumnMatrixRecovery(partialSidecars); + // a recovery failure is caught here and recorded below as a failed reconstruction + const fullSidecars = await dataColumnMatrixRecovery(partialSidecars).catch(() => null); timer?.(); if (fullSidecars == null) { - throw new DataColumnReconstructionError( - {code: DataColumnReconstructionCode.ReconstructionFailed}, - "No sidecars rebuilt via dataColumnMatrixRecovery" - ); - } - - const firstDataColumn = existingColumns.at(0); - if (firstDataColumn) { - const slot = firstDataColumn.signedBlockHeader.message.slot; - const secFromSlot = clock.secFromSlot(slot); - metrics?.recoverDataColumnSidecars.elapsedTimeTillReconstructed.observe(secFromSlot); + metrics?.recoverDataColumnSidecars.reconstructionResult.inc({ + result: DataColumnReconstructionCode.ReconstructionFailed, + }); + return; } if (blockInput.getAllColumns().length === NUMBER_OF_COLUMNS) { // either gossip or getBlobsV2 resolved availability while we were recovering - throw new DataColumnReconstructionError({code: DataColumnReconstructionCode.ReceivedAllDuringReconstruction}); + metrics?.recoverDataColumnSidecars.reconstructionResult.inc({ + result: DataColumnReconstructionCode.ReceivedAllDuringReconstruction, + }); + return; } - // We successfully recovered the data columns, update the cache + // Once the node obtains a column through reconstruction, + // the node MUST expose the new column as if it had received it over the network. + // If the node is subscribed to the subnet corresponding to the column, + // it MUST send the reconstructed DataColumnSidecar to its topic mesh neighbors. + // If instead the node is not subscribed to the corresponding subnet, + // it SHOULD still expose the availability of the DataColumnSidecar as part of the gossip emission process. + // After exposing the reconstructed DataColumnSidecar to the network, + // the node MAY delete the DataColumnSidecar if it is not part of the node's custody requirement. 
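+ // Publish only the sidecars that were actually missing, so columns that arrived via + // gossip or getBlobsV2 while we were recovering are not re-published.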
+ const sidecarsToPublish = []; for (const columnSidecar of fullSidecars) { if (!blockInput.hasColumn(columnSidecar.index)) { blockInput.addColumn({ @@ -399,8 +409,10 @@ export async function recoverDataColumnSidecars( seenTimestampSec: Date.now(), source: BlockInputSource.recovery, }); + sidecarsToPublish.push(columnSidecar); } } + emitter.emit(ChainEvent.publishDataColumns, sidecarsToPublish); metrics?.recoverDataColumnSidecars.reconstructionResult.inc({result: DataColumnReconstructionCode.Success}); } @@ -415,7 +427,6 @@ export enum DataColumnReconstructionCode { type DataColumnReconstructionErrorType = { code: - | DataColumnReconstructionCode.NotAttemptedAlreadyFull | DataColumnReconstructionCode.NotAttemptedHaveLessThanHalf | DataColumnReconstructionCode.ReceivedAllDuringReconstruction | DataColumnReconstructionCode.ReconstructionFailed; From dbe9b746f9ac63e1441c284f678758fd8ec6a309 Mon Sep 17 00:00:00 2001 From: Cayman Date: Thu, 11 Sep 2025 16:58:56 -0400 Subject: [PATCH 159/173] fix: wait until next event loop for getBlobsV2 (#8384) **Motivation** - investigation of block-input branch **Description** - simple change --- .../beacon-node/src/chain/GetBlobsTracker.ts | 21 +++++++++++-------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/packages/beacon-node/src/chain/GetBlobsTracker.ts b/packages/beacon-node/src/chain/GetBlobsTracker.ts index cae981000f88..db3b7454c191 100644 --- a/packages/beacon-node/src/chain/GetBlobsTracker.ts +++ b/packages/beacon-node/src/chain/GetBlobsTracker.ts @@ -5,6 +5,7 @@ import {Metrics} from "../metrics/metrics.js"; import {ChainForkConfig} from "@lodestar/config"; import {IBlockInput} from "./blocks/blockInput/index.js"; import {getDataColumnSidecarsFromExecution} from "../util/execution.js"; +import {callInNextEventLoop} from "../util/eventLoop.js"; export type GetBlobsTrackerInit = { logger: Logger; @@ -58,15 +59,17 @@ export class GetBlobsTracker { // just that it has been triggered for this block root. 
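+ // Defer the engine call to the next event loop so the gossip handler that + // triggered this getBlobsV2 request is not blocked.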
this.running = true; this.lastBlockRootHex = blockInput.blockRootHex; - getDataColumnSidecarsFromExecution( - this.config, - this.executionEngine, - this.emitter, - blockInput, - this.metrics, - this.blobAndProofBuffers - ).finally(() => { - this.running = false; + callInNextEventLoop(() => { + getDataColumnSidecarsFromExecution( + this.config, + this.executionEngine, + this.emitter, + blockInput, + this.metrics, + this.blobAndProofBuffers + ).finally(() => { + this.running = false; + }); }); } } From f8e36f566887f2585437f7d021df868c2dc2e210 Mon Sep 17 00:00:00 2001 From: twoeths <10568965+twoeths@users.noreply.github.com> Date: Fri, 12 Sep 2025 20:36:04 +0700 Subject: [PATCH 160/173] feat: relaxed data column response handler (#8387) **Motivation** - the DownloadByRoot and DownloadByRange are quite strict: all DataColumnSidecars have to be present, and no extra DataColumnSidecars may be received, even if they would pass validation - the reality is that a node should not expect such perfection, otherwise we'll never be able to fulfill downloading a BlockInput, see https://github.com/ChainSafe/lodestar/issues/8375#issuecomment-3283482077 - also we may evict good downloaded blocks, which causes a performance issue **Description** - accept DataColumnSidecars responses with missing columns or extra columns - we previously only had functions that either throw an error or return a result; introduce a new `WarnResult`, which is a result accompanied by some warnings (typed as Error) ```typescript export type WarnResult<T, E extends Error> = {result: T; warnings: E[] | null}; ``` - new `warn` metrics in DownloadByRoot and DownloadByRange to track these warnings - handle the warned result in `BlockInputSync` and `SyncChain`: - log - track in the new metrics --------- Co-authored-by: Tuyen Nguyen Co-authored-by: Cayman --- .../src/metrics/metrics/lodestar.ts | 10 ++ packages/beacon-node/src/sync/range/chain.ts | 30 +++- packages/beacon-node/src/sync/range/range.ts | 6 +- packages/beacon-node/src/sync/unknownBlock.ts | 34 +++-- .../src/sync/utils/downloadByRange.ts | 118 +++++++++------ .../src/sync/utils/downloadByRoot.ts | 143 ++++++++++++------ packages/beacon-node/src/util/wrapError.ts | 5 + 7 files changed, 231 insertions(+), 115 deletions(-) diff --git a/packages/beacon-node/src/metrics/metrics/lodestar.ts b/packages/beacon-node/src/metrics/metrics/lodestar.ts index dfddc517b510..818a9c899354 100644 --- a/packages/beacon-node/src/metrics/metrics/lodestar.ts +++ b/packages/beacon-node/src/metrics/metrics/lodestar.ts @@ -506,6 +506,11 @@ export function createLodestarMetrics( help: "Total number of errored downloadByRange calls", labelNames: ["code", "client"], }), + warn: register.gauge<{code: string; client: string}>({ + name: "lodestar_sync_range_download_by_range_warn_total", + help: "Total number of downloadByRange call warnings", + labelNames: ["code", "client"], + }), }, }, @@ -573,6 +578,11 @@ export function createLodestarMetrics( help: "Total number of errored downloadByRoot calls", labelNames: ["code", "client"], }), + warn: register.gauge<{code: string; client: string}>({ + name: "lodestar_sync_unknown_block_download_by_root_warn_total", + help: "Total number of downloadByRoot call warnings", + labelNames: ["code", "client"], + }), }, peerBalancer: { peersMetaCount: register.gauge({ diff --git a/packages/beacon-node/src/sync/range/chain.ts b/packages/beacon-node/src/sync/range/chain.ts index 76fbe6859b3a..48fdee091bec 100644 --- a/packages/beacon-node/src/sync/range/chain.ts +++ b/packages/beacon-node/src/sync/range/chain.ts @@ -12,9 +12,9 @@ import {PeerSyncMeta} from
"../../network/peers/peersData.js"; import {CustodyConfig} from "../../util/dataColumns.js"; import {ItTrigger} from "../../util/itTrigger.js"; import {PeerIdStr} from "../../util/peerId.js"; -import {wrapError} from "../../util/wrapError.js"; +import {WarnResult, wrapError} from "../../util/wrapError.js"; import {BATCH_BUFFER_SIZE, EPOCHS_PER_BATCH, MAX_LOOK_AHEAD_EPOCHS} from "../constants.js"; -import {DownloadByRangeErrorCode} from "../utils/downloadByRange.js"; +import {DownloadByRangeError, DownloadByRangeErrorCode} from "../utils/downloadByRange.js"; import {RangeSyncType} from "../utils/remoteSyncType.js"; import {Batch, BatchError, BatchErrorCode, BatchMetadata, BatchStatus} from "./batch.js"; import { @@ -46,7 +46,11 @@ export type SyncChainFns = { */ processChainSegment: (blocks: IBlockInput[], syncType: RangeSyncType) => Promise; /** Must download blocks, and validate their range */ - downloadByRange: (peer: PeerSyncMeta, batch: Batch, syncType: RangeSyncType) => Promise; + downloadByRange: ( + peer: PeerSyncMeta, + batch: Batch, + syncType: RangeSyncType + ) => Promise>; /** Report peer for negative actions. Decouples from the full network instance */ reportPeer: (peer: PeerIdStr, action: PeerAction, actionName: string) => void; /** Gets current peer custodyColumns and earliestAvailableSlot */ @@ -505,11 +509,29 @@ export class SyncChain { ); batch.downloadingError(peer.peerId); // Throws after MAX_DOWNLOAD_ATTEMPTS } else { + this.logger.verbose("Batch download success", { + id: this.logId, + ...batch.getMetadata(), + peer: prettyPrintPeerIdStr(peer.peerId), + }); this.metrics?.syncRange.downloadByRange.success.inc(); - const downloadSuccessOutput = batch.downloadingSuccess(peer.peerId, res.result); + const {warnings, result} = res.result; + const downloadSuccessOutput = batch.downloadingSuccess(peer.peerId, result); const logMeta: Record = { blockCount: downloadSuccessOutput.blocks.length, }; + + if (warnings && warnings.length > 0) { + for (const warning of warnings) { + this.metrics?.syncRange.downloadByRange.warn.inc({client: peer.client, code: warning.type.code}); + this.logger.debug( + "Batch downloaded with warning", + {id: this.logId, epoch: batch.startEpoch, ...logMeta, peer: prettyPrintPeerIdStr(peer.peerId)}, + warning + ); + } + } + for (const block of downloadSuccessOutput.blocks) { if (isBlockInputBlobs(block)) { const blockLogMeta = block.getLogMeta(); diff --git a/packages/beacon-node/src/sync/range/range.ts b/packages/beacon-node/src/sync/range/range.ts index 270db6e8924b..f6bbb28e09a9 100644 --- a/packages/beacon-node/src/sync/range/range.ts +++ b/packages/beacon-node/src/sync/range/range.ts @@ -202,7 +202,7 @@ export class RangeSync extends (EventEmitter as {new (): RangeSyncEmitter}) { private downloadByRange: SyncChainFns["downloadByRange"] = async (peer, batch) => { const batchBlocks = batch.getBlocks(); - const responses = await downloadByRange({ + const {result, warnings} = await downloadByRange({ config: this.config, network: this.network, logger: this.logger, @@ -213,10 +213,10 @@ export class RangeSync extends (EventEmitter as {new (): RangeSyncEmitter}) { const cached = cacheByRangeResponses({ cache: this.chain.seenBlockInputCache, peerIdStr: peer.peerId, - responses, + responses: result, batchBlocks, }); - return cached; + return {result: cached, warnings}; }; private pruneBlockInputs: SyncChainFns["pruneBlockInputs"] = (blocks: IBlockInput[]) => { diff --git a/packages/beacon-node/src/sync/unknownBlock.ts 
b/packages/beacon-node/src/sync/unknownBlock.ts index 21f1c97d6800..ffa01206b7e1 100644 --- a/packages/beacon-node/src/sync/unknownBlock.ts +++ b/packages/beacon-node/src/sync/unknownBlock.ts @@ -307,9 +307,9 @@ export class BlockInputSync { const rootHex = getBlockInputSyncCacheItemRootHex(block); const logCtx = { - blockRoot: prettyBytes(rootHex), - pendingBlocks: this.pendingBlocks.size, slot: getBlockInputSyncCacheItemSlot(block), + blockRoot: rootHex, + pendingBlocks: this.pendingBlocks.size, }; this.logger.verbose("BlockInputSync.downloadBlock()", logCtx); @@ -328,16 +328,17 @@ export class BlockInputSync { this.metrics?.blockInputSync.elapsedTimeTillReceived.observe(delaySec); const parentInForkChoice = this.chain.forkChoice.hasBlockHex(pending.blockInput.parentRootHex); - this.logger.verbose("Downloaded unknown block", { - blockRoot: rootHex, - pendingBlocks: this.pendingBlocks.size, + const logCtx2 = { + ...logCtx, + slot: blockSlot, parentInForkChoice, - }); + }; + this.logger.verbose("Downloaded unknown block", logCtx2); if (parentInForkChoice) { // Bingo! Process block. Add to pending blocks anyway for recycle the cache that prevents duplicate processing this.processBlock(pending).catch((e) => { - this.logger.debug("Unexpected error - process newly downloaded block", {}, e); + this.logger.debug("Unexpected error - process newly downloaded block", logCtx2, e); }); } else if (blockSlot <= finalizedSlot) { // the common ancestor of the downloading chain and canonical chain should be at least the finalized slot and @@ -346,9 +347,8 @@ export class BlockInputSync { // \ // parent 1 - parent 2 - ... - unknownParent block this.logger.debug("Downloaded block is before finalized slot", { + ...logCtx2, finalizedSlot, - blockSlot, - blockRoot: pending.blockInput.blockRootHex, }); this.removeAndDownScoreAllDescendants(block); } else { @@ -356,7 +356,7 @@ export class BlockInputSync { } } else { this.metrics?.blockInputSync.downloadedBlocksError.inc(); - this.logger.debug("Ignoring unknown block root after many failed downloads", {blockRoot: rootHex}, res.err); + this.logger.debug("Ignoring unknown block root after many failed downloads", logCtx, res.err); this.removeAndDownScoreAllDescendants(block); } } @@ -510,14 +510,25 @@ export class BlockInputSync { cacheItem.peerIdStrings.add(peerId); try { - cacheItem = await downloadByRoot({ + const downloadResult = await downloadByRoot({ config: this.config, network: this.network, seenCache: this.chain.seenBlockInputCache, peerMeta, cacheItem, }); + cacheItem = downloadResult.result; + const logCtx = {slot: cacheItem.blockInput.slot, rootHex, peerId, peerClient}; + this.logger.verbose("BlockInputSync.fetchBlockInput: successful download", logCtx); this.metrics?.blockInputSync.downloadByRoot.success.inc(); + const warnings = downloadResult.warnings; + if (warnings) { + for (const warning of warnings) { + this.logger.debug("BlockInputSync.fetchBlockInput: downloaded with warning", logCtx, warning); + this.metrics?.blockInputSync.downloadByRoot.warn.inc({code: warning.type.code, client: peerClient}); + } + // TODO: penalize peer? + } } catch (e) { this.logger.debug( "Error downloading in BlockInputSync.fetchBlockInput", @@ -525,6 +536,7 @@ export class BlockInputSync { e as Error ); const downloadByRootMetrics = this.metrics?.blockInputSync.downloadByRoot; + // TODO: penalize peer? 
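+ // for now, failed downloads are only classified by error code for the metrics below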
if (e instanceof DownloadByRootError) { const errorCode = e.type.code; downloadByRootMetrics?.error.inc({code: errorCode, client: peerClient}); diff --git a/packages/beacon-node/src/sync/utils/downloadByRange.ts b/packages/beacon-node/src/sync/utils/downloadByRange.ts index 0fbf8f5a8d59..e0fad8f63664 100644 --- a/packages/beacon-node/src/sync/utils/downloadByRange.ts +++ b/packages/beacon-node/src/sync/utils/downloadByRange.ts @@ -1,7 +1,7 @@ import {ChainForkConfig} from "@lodestar/config"; import {ForkPostDeneb, ForkPostFulu} from "@lodestar/params"; import {SignedBeaconBlock, Slot, deneb, fulu, phase0} from "@lodestar/types"; -import {LodestarError, Logger, fromHex, prettyBytes, toRootHex} from "@lodestar/utils"; +import {LodestarError, Logger, fromHex, prettyBytes, prettyPrintIndices, toRootHex} from "@lodestar/utils"; import { BlockInputSource, DAType, @@ -15,6 +15,7 @@ import {validateBlockDataColumnSidecars} from "../../chain/validation/dataColumn import {INetwork} from "../../network/index.js"; import {PeerIdStr} from "../../util/peerId.js"; import {DownloadByRootErrorCode} from "./downloadByRoot.js"; +import {WarnResult} from "../../util/wrapError.js"; export type DownloadByRangeRequests = { blocksRequest?: phase0.BeaconBlocksByRangeRequest; @@ -184,7 +185,7 @@ export async function downloadByRange({ blocksRequest, blobsRequest, columnsRequest, -}: Omit): Promise { +}: Omit): Promise> { let response: DownloadByRangeResponses; try { response = await requestByRange({ @@ -282,7 +283,7 @@ export async function validateResponses({ DownloadByRangeResponses & { config: ChainForkConfig; batchBlocks?: IBlockInput[]; - }): Promise { + }): Promise> { // Blocks are always required for blob/column validation // If a blocksRequest is provided, blocks have just been downloaded // If no blocksRequest is provided, batchBlocks must have been provided from cache @@ -297,6 +298,7 @@ export async function validateResponses({ } const validatedResponses: ValidatedResponses = {}; + let warnings: DownloadByRangeError[] | null = null; if (blocksRequest) { validatedResponses.validatedBlocks = validateBlockByRangeResponse(config, blocksRequest, blocks ?? []); @@ -304,7 +306,7 @@ export async function validateResponses({ const dataRequest = blobsRequest ?? 
columnsRequest; if (!dataRequest) { - return validatedResponses; + return {result: validatedResponses, warnings: null}; } const dataRequestBlocks = getBlocksForDataValidation( @@ -348,14 +350,16 @@ export async function validateResponses({ ); } - validatedResponses.validatedColumnSidecars = await validateColumnsByRangeResponse( + const validatedColumnSidecarsResult = await validateColumnsByRangeResponse( columnsRequest, dataRequestBlocks, columnSidecars ); + validatedResponses.validatedColumnSidecars = validatedColumnSidecarsResult.result; + warnings = validatedColumnSidecarsResult.warnings; } - return validatedResponses; + return {result: validatedResponses, warnings}; } /** @@ -526,7 +530,7 @@ export async function validateColumnsByRangeResponse( request: fulu.DataColumnSidecarsByRangeRequest, dataRequestBlocks: ValidatedBlock[], columnSidecars: fulu.DataColumnSidecars -): Promise { +): Promise> { // Expected column count considering currently-validated batch blocks const expectedColumnCount = dataRequestBlocks.reduce((acc, {block}) => { return (block as SignedBeaconBlock).message.body.blobKzgCommitments.length > 0 @@ -543,74 +547,84 @@ export async function validateColumnsByRangeResponse( const maxColumnCount = expectedColumnCount + possiblyMissingBlocks * request.columns.length; if (columnSidecars.length > maxColumnCount) { + // this never happens on devnet, so throw error for now throw new DownloadByRangeError( { - code: DownloadByRangeErrorCode.EXTRA_COLUMNS, + code: DownloadByRangeErrorCode.OVER_COLUMNS, max: maxColumnCount, actual: columnSidecars.length, }, "Extra data columns received in DataColumnSidecarsByRange response" ); } - if (columnSidecars.length < expectedColumnCount) { - throw new DownloadByRangeError( - { - code: DownloadByRangeErrorCode.MISSING_COLUMNS, - expected: expectedColumnCount, - actual: columnSidecars.length, - }, - "Missing data columns in DataColumnSidecarsByRange response" - ); - } + const warnings: DownloadByRangeError[] = []; + // no need to check for columnSidecars.length vs expectedColumnCount here, will be checked per-block below + const requestedColumns = new Set(request.columns); const validateSidecarsPromises: Promise[] = []; for (let blockIndex = 0, columnSidecarIndex = 0; blockIndex < dataRequestBlocks.length; blockIndex++) { const {block, blockRoot} = dataRequestBlocks[blockIndex]; + const slot = block.message.slot; + const blockRootHex = toRootHex(blockRoot); const blockKzgCommitments = (block as SignedBeaconBlock).message.body.blobKzgCommitments; const expectedColumns = blockKzgCommitments.length ? 
request.columns.length : 0; if (expectedColumns === 0) { continue; } - const blockColumnSidecars = columnSidecars.slice(columnSidecarIndex, columnSidecarIndex + expectedColumns); - columnSidecarIndex += expectedColumns; + const blockColumnSidecars: fulu.DataColumnSidecar[] = []; + while (columnSidecarIndex < columnSidecars.length) { + const columnSidecar = columnSidecars[columnSidecarIndex]; + if (columnSidecar.signedBlockHeader.message.slot !== block.message.slot) { + // We've reached columns for the next block + break; + } + blockColumnSidecars.push(columnSidecar); + columnSidecarIndex++; + } - // Validate that all requested columns are present and in order - if (blockColumnSidecars.length !== expectedColumns) { - throw new DownloadByRangeError( - { - code: DownloadByRangeErrorCode.MISSING_COLUMNS, - expected: expectedColumns, - actual: blockColumnSidecars.length, - }, - "Missing data columns in DataColumnSidecarsByRange response" + const returnedColumns = new Set(blockColumnSidecars.map((c) => c.index)); + const missingIndices = request.columns.filter((i) => !returnedColumns.has(i)); + if (missingIndices.length > 0) { + warnings.push( + new DownloadByRangeError( + { + code: DownloadByRangeErrorCode.MISSING_COLUMNS, + slot, + blockRoot: blockRootHex, + missingIndices: prettyPrintIndices(missingIndices), + }, + "Missing data columns in DataColumnSidecarsByRange response" + ) ); } - for (let i = 0; i < blockColumnSidecars.length; i++) { - if (blockColumnSidecars[i].index !== request.columns[i]) { - throw new DownloadByRangeError( + + const extraIndices = [...returnedColumns].filter((i) => !requestedColumns.has(i)); + if (extraIndices.length > 0) { + warnings.push( + new DownloadByRangeError( { - code: DownloadByRangeErrorCode.MISSING_COLUMNS, - expected: expectedColumns, - actual: blockColumnSidecars.length, + code: DownloadByRangeErrorCode.EXTRA_COLUMNS, + slot, + blockRoot: blockRootHex, + invalidIndices: prettyPrintIndices(extraIndices), }, - "Data columns not in order or do not match requested columns in DataColumnSidecarsByRange response" - ); - } + "Data column is not in requested columns in DataColumnSidecarsByRange response" + ) + ); } validateSidecarsPromises.push( - validateBlockDataColumnSidecars( - block.message.slot, + validateBlockDataColumnSidecars(slot, blockRoot, blockKzgCommitments.length, blockColumnSidecars).then(() => ({ blockRoot, - blockKzgCommitments.length, - blockColumnSidecars - ).then(() => ({blockRoot, columnSidecars: blockColumnSidecars})) + columnSidecars: blockColumnSidecars, + })) ); } // Await all sidecar validations in parallel - return Promise.all(validateSidecarsPromises); + const result = await Promise.all(validateSidecarsPromises); + return {result, warnings: warnings.length ? 
warnings : null}; } /** @@ -696,6 +710,7 @@ export enum DownloadByRangeErrorCode { EXTRA_BLOBS = "DOWNLOAD_BY_RANGE_ERROR_EXTRA_BLOBS", MISSING_COLUMNS = "DOWNLOAD_BY_RANGE_ERROR_MISSING_COLUMNS", + OVER_COLUMNS = "DOWNLOAD_BY_RANGE_ERROR_OVER_COLUMNS", EXTRA_COLUMNS = "DOWNLOAD_BY_RANGE_ERROR_EXTRA_COLUMNS", /** Cached block input type mismatches new data */ @@ -766,14 +781,21 @@ export type DownloadByRangeErrorType = actual: number; } | { - code: DownloadByRangeErrorCode.MISSING_COLUMNS; - expected: number; + code: DownloadByRangeErrorCode.OVER_COLUMNS; + max: number; actual: number; } + | { + code: DownloadByRangeErrorCode.MISSING_COLUMNS; + slot: Slot; + blockRoot: string; + missingIndices: string; + } | { code: DownloadByRangeErrorCode.EXTRA_COLUMNS; - max: number; - actual: number; + slot: Slot; + blockRoot: string; + invalidIndices: string; } | { code: DownloadByRangeErrorCode.MISMATCH_BLOCK_INPUT_TYPE; diff --git a/packages/beacon-node/src/sync/utils/downloadByRoot.ts b/packages/beacon-node/src/sync/utils/downloadByRoot.ts index 25c9f960f29a..c50a28300443 100644 --- a/packages/beacon-node/src/sync/utils/downloadByRoot.ts +++ b/packages/beacon-node/src/sync/utils/downloadByRoot.ts @@ -1,6 +1,6 @@ import {ChainForkConfig} from "@lodestar/config"; import {ForkPostDeneb, ForkPostFulu, ForkPreFulu, isForkPostDeneb, isForkPostFulu} from "@lodestar/params"; -import {SignedBeaconBlock, deneb, fulu} from "@lodestar/types"; +import {SignedBeaconBlock, Slot, deneb, fulu} from "@lodestar/types"; import {LodestarError, fromHex, prettyBytes, prettyPrintIndices, toRootHex} from "@lodestar/utils"; import {isBlockInputBlobs, isBlockInputColumns} from "../../chain/blocks/blockInput/blockInput.js"; import {BlobMeta, BlockInputSource, IBlockInput, MissingColumnMeta} from "../../chain/blocks/blockInput/types.js"; @@ -20,6 +20,7 @@ import { } from "../types.js"; import {PeerSyncMeta} from "../../network/peers/peersData.js"; import {PeerIdStr} from "../../util/peerId.js"; +import {WarnResult} from "../../util/wrapError.js"; export type FetchByRootCoreProps = { config: ChainForkConfig; @@ -61,12 +62,15 @@ export async function downloadByRoot({ network, peerMeta, cacheItem, -}: DownloadByRootProps): Promise { +}: DownloadByRootProps): Promise> { const rootHex = getBlockInputSyncCacheItemRootHex(cacheItem); const blockRoot = fromHex(rootHex); const {peerId: peerIdStr} = peerMeta; - const {block, blobSidecars, columnSidecars} = await fetchByRoot({ + const { + result: {block, blobSidecars, columnSidecars}, + warnings, + } = await fetchByRoot({ config, network, cacheItem, @@ -152,11 +156,14 @@ export async function downloadByRoot({ } return { - status, - blockInput, - timeSyncedSec, - timeAddedSec: cacheItem.timeAddedSec, - peerIdStrings: cacheItem.peerIdStrings, + result: { + status, + blockInput, + timeSyncedSec, + timeAddedSec: cacheItem.timeAddedSec, + peerIdStrings: cacheItem.peerIdStrings, + }, + warnings, }; } @@ -166,10 +173,10 @@ export async function fetchByRoot({ peerMeta, blockRoot, cacheItem, -}: FetchByRootProps): Promise { +}: FetchByRootProps): Promise> { let block: SignedBeaconBlock; let blobSidecars: deneb.BlobSidecars | undefined; - let columnSidecars: fulu.DataColumnSidecars | undefined; + let columnSidecarResult: WarnResult | undefined; const {peerId: peerIdStr} = peerMeta; if (isPendingBlockInput(cacheItem)) { @@ -198,7 +205,7 @@ export async function fetchByRoot({ }); } if (isBlockInputColumns(cacheItem.blockInput)) { - columnSidecars = await fetchAndValidateColumns({ + 
columnSidecarResult = await fetchAndValidateColumns({ config, network, peerMeta, @@ -218,7 +225,7 @@ export async function fetchByRoot({ }); const forkName = config.getForkName(block.message.slot); if (isForkPostFulu(forkName)) { - columnSidecars = await fetchAndValidateColumns({ + columnSidecarResult = await fetchAndValidateColumns({ config, network, peerMeta, @@ -252,9 +259,12 @@ export async function fetchByRoot({ } return { - block, - blobSidecars, - columnSidecars, + result: { + block, + blobSidecars, + columnSidecars: columnSidecarResult?.result, + }, + warnings: columnSidecarResult?.warnings ?? null, }; } @@ -329,52 +339,73 @@ export async function fetchAndValidateColumns({ block, blockRoot, columnMeta, -}: FetchByRootAndValidateColumnsProps): Promise { +}: FetchByRootAndValidateColumnsProps): Promise> { const {peerId: peerIdStr} = peerMeta; const slot = block.message.slot; const blobCount = block.message.body.blobKzgCommitments.length; if (blobCount === 0) { - return []; + return {result: [], warnings: null}; } + const blockRootHex = toRootHex(blockRoot); const peerColumns = new Set(peerMeta.custodyGroups ?? []); const requestedColumns = columnMeta.missing.filter((c) => peerColumns.has(c)); const columnSidecars = await network.sendDataColumnSidecarsByRoot(peerIdStr, [ {blockRoot, columns: requestedColumns}, ]); - // sanity check if peer returned correct number of columnSidecars - if (columnSidecars.length < requestedColumns.length) { - const returnedColumns = new Set(columnSidecars.map((c) => c.index)); - throw new DownloadByRootError( - { - code: DownloadByRootErrorCode.NOT_ENOUGH_SIDECARS_RECEIVED, - peer: prettyPrintPeerIdStr(peerIdStr), - blockRoot: prettyBytes(blockRoot), - missingIndices: prettyPrintIndices(requestedColumns.filter((c) => !returnedColumns.has(c))), - }, - "Did not receive all of the requested columnSidecars" + const warnings: DownloadByRootError[] = []; + + // it's not acceptable if no sidecar is returned with >0 blobCount + if (columnSidecars.length === 0) { + throw new DownloadByRootError({ + code: DownloadByRootErrorCode.NO_SIDECAR_RECEIVED, + peer: prettyPrintPeerIdStr(peerIdStr), + slot, + blockRoot: blockRootHex, + }); + } + + // it's ok if only some sidecars are returned, we will try to get the rest from other peers + const requestedColumnsSet = new Set(requestedColumns); + const returnedColumns = columnSidecars.map((c) => c.index); + const returnedColumnsSet = new Set(returnedColumns); + const missingIndices = requestedColumns.filter((c) => !returnedColumnsSet.has(c)); + if (missingIndices.length > 0) { + warnings.push( + new DownloadByRootError( + { + code: DownloadByRootErrorCode.NOT_ENOUGH_SIDECARS_RECEIVED, + peer: prettyPrintPeerIdStr(peerIdStr), + slot, + blockRoot: blockRootHex, + missingIndices: prettyPrintIndices(missingIndices), + }, + "Did not receive all of the requested columnSidecars" + ) ); } - // check each returned columnSidecar - for (let i = 0; i < requestedColumns.length; i++) { - const columnSidecar = columnSidecars[i]; - if (columnSidecar.index !== requestedColumns[i]) { - throw new DownloadByRootError( + // check extra returned columnSidecar + const extraIndices = returnedColumns.filter((c) => !requestedColumnsSet.has(c)); + if (extraIndices.length > 0) { + warnings.push( + new DownloadByRootError( { code: DownloadByRootErrorCode.EXTRA_SIDECAR_RECEIVED, peer: prettyPrintPeerIdStr(peerIdStr), - blockRoot: prettyBytes(blockRoot), - invalidIndex: columnSidecar.index, + slot, + blockRoot: blockRootHex, + invalidIndices: 
prettyPrintIndices(extraIndices), }, - "Received a columnSidecar that was not requested" + "Received columnSidecars that were not requested" + ) + ); } + await validateBlockDataColumnSidecars(slot, blockRoot, blobCount, columnSidecars); - return columnSidecars; + return {result: columnSidecars, warnings: warnings.length > 0 ? warnings : null}; } // TODO(fulu) not in use, remove? @@ -412,25 +443,31 @@ export async function validateColumnSidecars({ needToPublish = [], }: ValidateColumnSidecarsProps): Promise<void> { const requestedIndices = columnMeta.missing; + const extraIndices: number[] = []; for (const columnSidecar of needed) { if (!requestedIndices.includes(columnSidecar.index)) { - throw new DownloadByRootError( - { - code: DownloadByRootErrorCode.EXTRA_SIDECAR_RECEIVED, - peer: prettyPrintPeerIdStr(peerMeta.peerId), - blockRoot: prettyBytes(blockRoot), - invalidIndex: columnSidecar.index, - }, - "Received a columnSidecar that was not requested" - ); + extraIndices.push(columnSidecar.index); } } + if (extraIndices.length > 0) { + throw new DownloadByRootError( + { + code: DownloadByRootErrorCode.EXTRA_SIDECAR_RECEIVED, + peer: prettyPrintPeerIdStr(peerMeta.peerId), + slot, + blockRoot: prettyBytes(blockRoot), + invalidIndices: prettyPrintIndices(extraIndices), + }, + "Received columnSidecars that were not requested" + ); + } await validateBlockDataColumnSidecars(slot, blockRoot, blobCount, [...needed, ...needToPublish]); } export enum DownloadByRootErrorCode { MISMATCH_BLOCK_ROOT = "DOWNLOAD_BY_ROOT_ERROR_MISMATCH_BLOCK_ROOT", EXTRA_SIDECAR_RECEIVED = "DOWNLOAD_BY_ROOT_ERROR_EXTRA_SIDECAR_RECEIVED", + NO_SIDECAR_RECEIVED = "DOWNLOAD_BY_ROOT_ERROR_NO_SIDECAR_RECEIVED", NOT_ENOUGH_SIDECARS_RECEIVED = "DOWNLOAD_BY_ROOT_ERROR_NOT_ENOUGH_SIDECARS_RECEIVED", INVALID_INCLUSION_PROOF = "DOWNLOAD_BY_ROOT_ERROR_INVALID_INCLUSION_PROOF", INVALID_KZG_PROOF = "DOWNLOAD_BY_ROOT_ERROR_INVALID_KZG_PROOF", @@ -449,12 +486,20 @@ export type DownloadByRootErrorType = | { code: DownloadByRootErrorCode.EXTRA_SIDECAR_RECEIVED; peer: string; + slot: Slot; + blockRoot: string; + invalidIndices: string; + } + | { + code: DownloadByRootErrorCode.NO_SIDECAR_RECEIVED; + peer: string; + slot: Slot; blockRoot: string; - invalidIndex: number; } | { code: DownloadByRootErrorCode.NOT_ENOUGH_SIDECARS_RECEIVED; peer: string; + slot: Slot; blockRoot: string; missingIndices: string; } diff --git a/packages/beacon-node/src/util/wrapError.ts b/packages/beacon-node/src/util/wrapError.ts index 3b25da203c47..61abb9ed0dad 100644 --- a/packages/beacon-node/src/util/wrapError.ts +++ b/packages/beacon-node/src/util/wrapError.ts @@ -20,3 +20,8 @@ export async function wrapError<T>(promise: Promise<T>): Promise<Result<T>> { return {err: err as Error}; } } + +/** + * Some functions may want to return a result along with some warnings (typed as Error) + */ +export type WarnResult<T, E extends Error> = {result: T; warnings: E[] | null}; From 4e198eb5022fe061bbd88ab3ec4876829c4d5430 Mon Sep 17 00:00:00 2001 From: Cayman Date: Fri, 12 Sep 2025 11:48:22 -0400 Subject: [PATCH 161/173] chore: fix unit tests (#8392) **Motivation** - clean up of block-input branch **Description** - fix build and unit test issues (many tests no longer relevant) --- .../test/unit/sync/range/chain.test.ts | 4 +- .../unit/sync/utils/downloadByRoot.test.ts | 296 +----------------- 2 files changed, 11 insertions(+), 289 deletions(-) diff --git a/packages/beacon-node/test/unit/sync/range/chain.test.ts b/packages/beacon-node/test/unit/sync/range/chain.test.ts index 76b07e360aea..c9e22c493ebf 100644 --- 
a/packages/beacon-node/test/unit/sync/range/chain.test.ts +++ b/packages/beacon-node/test/unit/sync/range/chain.test.ts @@ -117,7 +117,7 @@ describe("sync / range / chain", () => { }) ); } - return blocks; + return {result: blocks, warnings: null}; }; const target: ChainTarget = {slot: computeStartSlotAtEpoch(targetEpoch), root: ZERO_HASH}; @@ -172,7 +172,7 @@ describe("sync / range / chain", () => { }) ); } - return blocks; + return {result: blocks, warnings: null}; }; const target: ChainTarget = {slot: computeStartSlotAtEpoch(targetEpoch), root: ZERO_HASH}; diff --git a/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts b/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts index 11ab17d9f929..f2cf578912f9 100644 --- a/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts +++ b/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts @@ -1,7 +1,6 @@ import {randomBytes} from "node:crypto"; import {ForkName, NUMBER_OF_COLUMNS} from "@lodestar/params"; import {ssz} from "@lodestar/types"; -import {prettyBytes} from "@lodestar/utils"; import {afterAll, afterEach, beforeAll, beforeEach, describe, expect, it, vi} from "vitest"; import {BlobMeta, MissingColumnMeta} from "../../../../src/chain/blocks/blockInput/types.js"; import {BlobSidecarValidationError} from "../../../../src/chain/errors/blobSidecarError.js"; @@ -15,7 +14,6 @@ import { fetchColumnsByRoot, } from "../../../../src/sync/utils/downloadByRoot.js"; import {kzgCommitmentToVersionedHash} from "../../../../src/util/blobs.js"; -import {CustodyConfig} from "../../../../src/util/dataColumns.js"; import {ROOT_SIZE} from "../../../../src/util/sszBytes.js"; import { config, @@ -24,6 +22,7 @@ import { generateBlockWithColumnSidecars, } from "../../../utils/blocksAndData.js"; import {PeerSyncMeta} from "../../../../src/network/peers/peersData.js"; +import {DataColumnSidecarValidationError} from "../../../../src/chain/errors/dataColumnSidecarError.js"; describe("downloadByRoot.ts", () => { const peerIdStr = "1234567890abcdef1234567890abcdef"; @@ -110,25 +109,6 @@ describe("downloadByRoot.ts", () => { vi.resetAllMocks(); }); - it("should successfully fetch blobs from execution engine only", async () => { - const sendBlobSidecarsByRootMock = vi.fn(() => Promise.resolve([])); - network = { - sendBlobSidecarsByRoot: sendBlobSidecarsByRootMock, - } as unknown as INetwork; - - const response = await fetchAndValidateBlobs({ - config, - network, - forkName, - peerIdStr, - blockRoot: denebBlockWithBlobs.blockRoot, - block: denebBlockWithBlobs.block, - blobMeta, - }); - - expect(response.map((b) => b.index)).toEqual(denebBlockWithBlobs.blobSidecars.map((b) => b.index)); - }); - it("should successfully fetch blobs from network only", async () => { const sendBlobSidecarsByRootMock = vi.fn(() => Promise.resolve(denebBlockWithBlobs.blobSidecars)); network = { @@ -148,7 +128,7 @@ describe("downloadByRoot.ts", () => { expect(response).toEqual(denebBlockWithBlobs.blobSidecars); }); - it("should fetch remaining blobs from network when execution engine is incomplete", async () => { + it("should not error if unable to fetch all blobs from network", async () => { const sendBlobSidecarsByRootMock = vi.fn(() => Promise.resolve([ denebBlockWithBlobs.blobSidecars[1], @@ -170,55 +150,16 @@ describe("downloadByRoot.ts", () => { blobMeta, }); - expect(sendBlobSidecarsByRootMock).toHaveBeenCalledExactlyOnceWith(peerIdStr, [ - {blockRoot: denebBlockWithBlobs.blockRoot, index: 1}, - {blockRoot: denebBlockWithBlobs.blockRoot, 
index: 3}, - {blockRoot: denebBlockWithBlobs.blockRoot, index: 5}, - ]); - - const returnedIndices = response.map((b) => b.index); - expect(returnedIndices).toEqual(returnedIndices.sort()); - expect(returnedIndices).toEqual(denebBlockWithBlobs.blobSidecars.map((b) => b.index)); - }); - - it("should gracefully handle getBlobsV1 failure", async () => { - const rejectedError = new Error("TESTING_ERROR"); - - const sendBlobSidecarsByRootMock = vi.fn(() => Promise.resolve(denebBlockWithBlobs.blobSidecars)); - const loggerMock = { - error: vi.fn(), - }; - network = { - logger: loggerMock, - sendBlobSidecarsByRoot: sendBlobSidecarsByRootMock, - } as unknown as INetwork; - - const response = await fetchAndValidateBlobs({ - config, - network, - forkName, - peerIdStr, - blockRoot: denebBlockWithBlobs.blockRoot, - block: denebBlockWithBlobs.block, - blobMeta, - }); - - expect(loggerMock.error).toHaveBeenCalledExactlyOnceWith( - `error fetching/building blobSidecars for blockRoot=${prettyBytes(denebBlockWithBlobs.blockRoot)} via getBlobsV1`, - {}, - rejectedError - ); expect(sendBlobSidecarsByRootMock).toHaveBeenCalledExactlyOnceWith( peerIdStr, - denebBlockWithBlobs.blobSidecars.map((b) => ({ - blockRoot: denebBlockWithBlobs.blockRoot, - index: b.index, - })) + denebBlockWithBlobs.blobSidecars.map(({index}) => ({blockRoot: denebBlockWithBlobs.blockRoot, index})) ); - expect(response).toEqual(denebBlockWithBlobs.blobSidecars); + + const returnedIndices = response.map((b) => b.index); + expect(returnedIndices).toEqual([1, 3, 5]); }); - it("should throw error if blob validation fails", async () => { + it.todo("should throw error if no blobs are returned", async () => { const sendBlobSidecarsByRootMock = vi.fn(() => Promise.resolve([])); network = { sendBlobSidecarsByRoot: sendBlobSidecarsByRootMock, @@ -296,7 +237,6 @@ describe("downloadByRoot.ts", () => { let fuluBlockWithColumns: ReturnType; let columnMeta: MissingColumnMeta; let versionedHashes: Uint8Array[]; - let custodyConfig: CustodyConfig; beforeEach(() => { fuluBlockWithColumns = generateBlockWithColumnSidecars({forkName, returnBlobs: true}); @@ -307,120 +247,17 @@ describe("downloadByRoot.ts", () => { missing: [0, 1, 2, 3, 4, 5, 6, 7], // Sample a subset of columns versionedHashes, }; - custodyConfig = { - custodyColumns: [0, 1, 2, 3], - sampledColumns: [0, 1, 2, 3, 4, 5, 6, 7], - } as CustodyConfig; }); afterEach(() => { vi.resetAllMocks(); }); - it("should successfully fetch columns from execution engine only", async () => { - const sendDataColumnSidecarsByRootMock = vi.fn(() => Promise.resolve([])); - const publishDataColumnSidecarMock = vi.fn(() => Promise.resolve()); - network = { - sendDataColumnSidecarsByRoot: sendDataColumnSidecarsByRootMock, - publishDataColumnSidecar: publishDataColumnSidecarMock, - custodyConfig, - logger: { - error: vi.fn(), - }, - } as unknown as INetwork; - - const response = await fetchAndValidateColumns({ - config, - network, - forkName, - peerMeta, - blockRoot: fuluBlockWithColumns.blockRoot, - block: fuluBlockWithColumns.block, - columnMeta, - }); - - expect(sendDataColumnSidecarsByRootMock).not.toHaveBeenCalled(); - // Should only return the columns we need (missing) - expect(response.map((c) => c.index)).toEqual(columnMeta.missing); - // Should publish columns we custody that weren't already published - expect(publishDataColumnSidecarMock).toHaveBeenCalled(); - }); - - it("should only publish columns that have not already been published", async () => { - const publishDataColumnSidecarMock = vi.fn(() => 
Promise.resolve()); - network = { - sendDataColumnSidecarsByRoot: vi.fn(() => Promise.resolve([])), - publishDataColumnSidecar: publishDataColumnSidecarMock, - custodyConfig, - logger: { - error: vi.fn(), - }, - } as unknown as INetwork; - - // Columns 0, 1 are already published (not in missing) - // Columns 2, 3, 4, 5, 6, 7 are missing sampledColumns and need to be fetched - // After reconstruction, we should publish columns 2, 3 (we custody them and they weren't published) - // Column 5, 6, 7 we sample but do not custody so we don't need to publish - const testColumnMeta = { - missing: [2, 3, 4, 5, 6, 7], - versionedHashes, - }; - - await fetchAndValidateColumns({ - config, - network, - forkName, - peerMeta, - blockRoot: fuluBlockWithColumns.blockRoot, - block: fuluBlockWithColumns.block, - columnMeta: testColumnMeta, - }); - - // Should publish columns 2, 3, 4 (custody and were missing) - const publishedIndices = publishDataColumnSidecarMock.mock.calls.map((call) => (call as any)[0]?.index); - expect(publishedIndices).toEqual([2, 3]); - }); - - it("should only return columns that are needed from reconstruction", async () => { - network = { - sendDataColumnSidecarsByRoot: vi.fn(() => Promise.resolve([])), - publishDataColumnSidecar: vi.fn(() => Promise.resolve()), - custodyConfig: { - custodyColumns: [0, 2, 4, 6], - sampledColumns: [0, 2, 4, 6, 8, 10, 12], - }, - logger: { - error: vi.fn(), - }, - } as unknown as INetwork; - - const missing = [0, 4, 6, 10, 12]; - const testColumnMeta = { - missing, // Only need these columns - versionedHashes, - }; - - const response = await fetchAndValidateColumns({ - config, - network, - forkName, - peerMeta, - blockRoot: fuluBlockWithColumns.blockRoot, - block: fuluBlockWithColumns.block, - columnMeta: testColumnMeta, - }); - - // Even though reconstruction produces all columns, we should only return what we need - expect(response.length).toBe(5); - expect(response.map((c) => c.index)).toEqual(missing); - }); - it("should successfully fetch columns from network only", async () => { const neededColumns = fuluBlockWithColumns.columnSidecars.filter((c) => columnMeta.missing.includes(c.index)); const sendDataColumnSidecarsByRootMock = vi.fn(() => Promise.resolve(neededColumns)); network = { sendDataColumnSidecarsByRoot: sendDataColumnSidecarsByRootMock, - publishDataColumnSidecar: vi.fn(() => Promise.resolve()), custodyConfig: { custodyColumns: [0, 1, 2, 3, 4, 5], sampledColumns: columnMeta.missing, @@ -443,49 +280,7 @@ describe("downloadByRoot.ts", () => { expect(sendDataColumnSidecarsByRootMock).toHaveBeenCalledExactlyOnceWith(peerIdStr, [ {blockRoot: fuluBlockWithColumns.blockRoot, columns: columnMeta.missing}, ]); - expect(response.map((c) => c.index)).toEqual(columnMeta.missing); - }); - - it("should gracefully handle getBlobsV2 failure", async () => { - const rejectedError = new Error("TESTING_ERROR"); - - const neededColumns = fuluBlockWithColumns.columnSidecars.filter((c) => columnMeta.missing.includes(c.index)); - const sendDataColumnSidecarsByRootMock = vi.fn(() => Promise.resolve(neededColumns)); - const loggerMock = { - error: vi.fn(), - }; - network = { - logger: loggerMock, - sendDataColumnSidecarsByRoot: sendDataColumnSidecarsByRootMock, - publishDataColumnSidecar: vi.fn(() => Promise.resolve()), - custodyConfig: { - custodyColumns: [0, 1, 2, 3, 4, 5], - sampledColumns: columnMeta.missing, - }, - } as unknown as INetwork; - - const response = await fetchAndValidateColumns({ - config, - network, - forkName, - peerMeta, - blockRoot: 
fuluBlockWithColumns.blockRoot, - block: fuluBlockWithColumns.block, - columnMeta, - }); - - expect(loggerMock.error).toHaveBeenCalledExactlyOnceWith( - "error building columnSidecars via getBlobsV2", - { - blockRoot: prettyBytes(fuluBlockWithColumns.blockRoot), - slot: fuluBlockWithColumns.block.message.slot, - }, - rejectedError - ); - expect(sendDataColumnSidecarsByRootMock).toHaveBeenCalledExactlyOnceWith(peerIdStr, [ - {blockRoot: fuluBlockWithColumns.blockRoot, columns: columnMeta.missing}, - ]); - expect(response.map((c) => c.index)).toEqual(columnMeta.missing); + expect(response.result.map((c) => c.index)).toEqual(columnMeta.missing); }); it("should throw error if column validation fails", async () => { @@ -503,7 +298,6 @@ describe("downloadByRoot.ts", () => { ); network = { sendDataColumnSidecarsByRoot: sendDataColumnSidecarsByRootMock, - publishDataColumnSidecar: vi.fn(() => Promise.resolve()), custodyConfig: { custodyColumns: [0, 1, 2, 3, 4, 5], sampledColumns: [0, 1, 2, 3, 4, 5], @@ -526,79 +320,7 @@ describe("downloadByRoot.ts", () => { versionedHashes, }, }) - ).rejects.toThrow(DownloadByRootError); - }); - - it("should handle error when publishing reconstructed columns", async () => { - const publishError = new Error("PUBLISH_ERROR"); - const publishDataColumnSidecarMock = vi.fn(() => Promise.reject(publishError)); - const loggerMock = { - error: vi.fn(), - }; - network = { - sendDataColumnSidecarsByRoot: vi.fn(() => Promise.resolve([])), - publishDataColumnSidecar: publishDataColumnSidecarMock, - custodyConfig: { - custodyColumns: [0, 1, 2, 3], - sampledColumns: [0, 1, 2, 3, 4, 5, 6, 7], - }, - logger: loggerMock, - } as unknown as INetwork; - - const response = await fetchAndValidateColumns({ - config, - network, - forkName, - peerMeta, - blockRoot: fuluBlockWithColumns.blockRoot, - block: fuluBlockWithColumns.block, - columnMeta: { - missing: [0, 1, 2, 3, 4, 5, 6, 7], - versionedHashes, - }, - }); - - // Should still return the columns even if publishing fails - expect(response.map((c) => c.index)).toEqual([0, 1, 2, 3, 4, 5, 6, 7]); - - // Should log the publishing error - expect(loggerMock.error).toHaveBeenCalledTimes(4); - expect(loggerMock.error).toHaveBeenNthCalledWith( - 1, - "Error publishing column after getBlobsV2 reconstruct", - { - index: 0, - blockRoot: prettyBytes(fuluBlockWithColumns.blockRoot), - }, - publishError - ); - expect(loggerMock.error).toHaveBeenNthCalledWith( - 2, - "Error publishing column after getBlobsV2 reconstruct", - { - index: 1, - blockRoot: prettyBytes(fuluBlockWithColumns.blockRoot), - }, - publishError - ); - expect(loggerMock.error).toHaveBeenNthCalledWith( - 3, - "Error publishing column after getBlobsV2 reconstruct", - { - index: 2, - blockRoot: prettyBytes(fuluBlockWithColumns.blockRoot), - }, - publishError - ); - expect(loggerMock.error).toHaveBeenNthCalledWith( - 4, - "Error publishing column after getBlobsV2 reconstruct", - { - index: 3, - blockRoot: prettyBytes(fuluBlockWithColumns.blockRoot), - }, - publishError - ); + ).rejects.toThrow(DataColumnSidecarValidationError); }); }); From b041e321878e6cb50a6173373907a15f2b134010 Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Mon, 15 Sep 2025 13:18:20 +0700 Subject: [PATCH 162/173] fix: pass testCaseName to spec testFunction to allow for case specific logic in a test case --- packages/beacon-node/test/spec/utils/types.ts | 2 +- packages/spec-test-util/src/single.ts | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git 
a/packages/beacon-node/test/spec/utils/types.ts b/packages/beacon-node/test/spec/utils/types.ts index 00bebffd7d7e..f148674c7bca 100644 --- a/packages/beacon-node/test/spec/utils/types.ts +++ b/packages/beacon-node/test/spec/utils/types.ts @@ -11,7 +11,7 @@ export type TestRunnerFn = ( testHandler: string, testSuite: string ) => { - testFunction: (testCase: TestCase, directoryName: string) => Result | Promise; + testFunction: (testCase: TestCase, directoryName: string, testCaseName: string) => Result | Promise; options: Partial>; }; diff --git a/packages/spec-test-util/src/single.ts b/packages/spec-test-util/src/single.ts index 645567e2ae65..ad131bf94f18 100644 --- a/packages/spec-test-util/src/single.ts +++ b/packages/spec-test-util/src/single.ts @@ -87,7 +87,7 @@ const defaultOptions: SpecTestOptions = { export function describeDirectorySpecTest( name: string, testCaseDirectoryPath: string, - testFunction: (testCase: TestCase, directoryName: string) => Result | Promise, + testFunction: (testCase: TestCase, directoryName: string, testCaseName: string) => Result | Promise, options: Partial> ): void { options = {...defaultOptions, ...options}; @@ -124,12 +124,12 @@ export function describeDirectorySpecTest if (options.shouldError?.(testCase)) { try { - await testFunction(testCase, name); + await testFunction(testCase, name, testSubDirname); } catch (_e) { return; } } else { - const result = await testFunction(testCase, name); + const result = await testFunction(testCase, name, testSubDirname); if (!options.getExpected) throw Error("getExpected is not defined"); if (!options.expectFunc) throw Error("expectFunc is not defined"); const expected = options.getExpected(testCase); From d2b90db1048ce06e20306aa7be94bc9d82782f67 Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Mon, 15 Sep 2025 13:19:08 +0700 Subject: [PATCH 163/173] fix: add default custody to test for unavailable peerDas data --- .../test/spec/presets/fork_choice.test.ts | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/packages/beacon-node/test/spec/presets/fork_choice.test.ts b/packages/beacon-node/test/spec/presets/fork_choice.test.ts index 8aa890bf1cee..ff4cc3181c35 100644 --- a/packages/beacon-node/test/spec/presets/fork_choice.test.ts +++ b/packages/beacon-node/test/spec/presets/fork_choice.test.ts @@ -63,7 +63,7 @@ const forkChoiceTest = (opts: {onlyPredefinedResponses: boolean}): TestRunnerFn => (fork) => { return { - testFunction: async (testcase) => { + testFunction: async (testcase, _directoryName, testCaseName) => { const {steps, anchorState} = testcase; const currentSlot = anchorState.slot; const config = getConfig(fork); @@ -228,8 +228,12 @@ const forkChoiceTest = forkName: fork, block: signedBlock as SignedBeaconBlock, blockRootHex, - custodyColumns: columns.map((c) => c.index), - sampledColumns: columns.map((c) => c.index), + custodyColumns: + testCaseName !== "on_block_peerdas__not_available" ? columns.map((c) => c.index) : [2, 4, 6, 8], + sampledColumns: + testCaseName !== "on_block_peerdas__not_available" + ? 
columns.map((c) => c.index) + : [2, 4, 6, 8, 10, 12, 14, 16], source: BlockInputSource.gossip, seenTimestampSec: 0, daOutOfRange: false, @@ -489,7 +493,7 @@ const forkChoiceTest = attesterSlashings, }; }, - timeout: 10000, + timeout: 15000, expectFunc: () => {}, // Do not manually skip tests here, do it in packages/beacon-node/test/spec/presets/index.test.ts // EXCEPTION : this test skipped here because prefix match can't be don't for this particular test From d090d5457413a9e80c6d9263b7960b1a74d498af Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Mon, 15 Sep 2025 13:23:17 +0700 Subject: [PATCH 164/173] docs: add not to spec test about test case for future us to know what is going on --- packages/beacon-node/test/spec/presets/fork_choice.test.ts | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/packages/beacon-node/test/spec/presets/fork_choice.test.ts b/packages/beacon-node/test/spec/presets/fork_choice.test.ts index ff4cc3181c35..f04e8289c51e 100644 --- a/packages/beacon-node/test/spec/presets/fork_choice.test.ts +++ b/packages/beacon-node/test/spec/presets/fork_choice.test.ts @@ -229,6 +229,10 @@ const forkChoiceTest = block: signedBlock as SignedBeaconBlock, blockRootHex, custodyColumns: + // in most test case instances we do not want to assign any custody as there are no columns provided + // with the test case. For on_block_peerdas__not_available the exact situation that is being tested + // is no availability so block processing should fail. For this one test case add some default + // custody so that the await will fail in verifyBlocksDataAvailability.ts testCaseName !== "on_block_peerdas__not_available" ? columns.map((c) => c.index) : [2, 4, 6, 8], sampledColumns: testCaseName !== "on_block_peerdas__not_available" From 14a8ae3ac6451bff4f62397251bd5d427ae59f79 Mon Sep 17 00:00:00 2001 From: matthewkeil Date: Mon, 15 Sep 2025 13:24:40 +0700 Subject: [PATCH 165/173] docs: add note to spec test about test case for future us to know what is going on --- packages/beacon-node/test/spec/presets/fork_choice.test.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/packages/beacon-node/test/spec/presets/fork_choice.test.ts b/packages/beacon-node/test/spec/presets/fork_choice.test.ts index f04e8289c51e..a80fc26f2c7f 100644 --- a/packages/beacon-node/test/spec/presets/fork_choice.test.ts +++ b/packages/beacon-node/test/spec/presets/fork_choice.test.ts @@ -497,6 +497,7 @@ const forkChoiceTest = attesterSlashings, }; }, + // timeout needs to be set longer than BLOB_AVAILABILITY_TIMEOUT so that on_block_peerdas__not_available fails timeout: 15000, expectFunc: () => {}, // Do not manually skip tests here, do it in packages/beacon-node/test/spec/presets/index.test.ts From b8294842210589661c6d5125712ea4aee9dab82a Mon Sep 17 00:00:00 2001 From: twoeths <10568965+twoeths@users.noreply.github.com> Date: Mon, 15 Sep 2025 13:51:17 +0700 Subject: [PATCH 166/173] fix: e2e tests for blockinput refactor branch (#8394) **Motivation** - make e2e CI green for #8200 **Description** - fix e2e for unknownBlockSync - support fulu for other e2e tests --------- Co-authored-by: Tuyen Nguyen --- .../src/sync/utils/downloadByRoot.ts | 8 ++--- .../test/e2e/api/lodestar/lodestar.test.ts | 30 +++++++++++++------ .../test/e2e/chain/proposerBoostReorg.test.ts | 23 +++++++++++--- .../stateCache/nHistoricalStates.test.ts | 27 +++++++++++++---- 4 files changed, 65 insertions(+), 23 deletions(-) diff --git a/packages/beacon-node/src/sync/utils/downloadByRoot.ts b/packages/beacon-node/src/sync/utils/downloadByRoot.ts index 
c50a28300443..8c9260f077c2 100644 --- a/packages/beacon-node/src/sync/utils/downloadByRoot.ts +++ b/packages/beacon-node/src/sync/utils/downloadByRoot.ts @@ -100,9 +100,9 @@ export async function downloadByRoot({ }); } - const hasAllData = blockInput.hasBlockAndAllData(); + const hasAllDataPreDownload = blockInput.hasBlockAndAllData(); - if (isBlockInputBlobs(blockInput) && !hasAllData) { + if (isBlockInputBlobs(blockInput) && !hasAllDataPreDownload) { // blobSidecars could be undefined if gossip resulted in full block+blobs so we don't download any if (!blobSidecars) { throw new DownloadByRootError({ @@ -122,7 +122,7 @@ export async function downloadByRoot({ } } - if (isBlockInputColumns(blockInput) && !hasAllData) { + if (isBlockInputColumns(blockInput) && !hasAllDataPreDownload) { // columnSidecars could be undefined if gossip resulted in full block+columns so we don't download any if (!columnSidecars) { throw new DownloadByRootError({ @@ -148,7 +148,7 @@ export async function downloadByRoot({ let status: PendingBlockInputStatus; let timeSyncedSec: number | undefined; - if (hasAllData) { + if (blockInput.hasBlockAndAllData()) { status = PendingBlockInputStatus.downloaded; timeSyncedSec = Date.now() / 1000; } else { diff --git a/packages/beacon-node/test/e2e/api/lodestar/lodestar.test.ts b/packages/beacon-node/test/e2e/api/lodestar/lodestar.test.ts index 7501460779f3..310143202dfb 100644 --- a/packages/beacon-node/test/e2e/api/lodestar/lodestar.test.ts +++ b/packages/beacon-node/test/e2e/api/lodestar/lodestar.test.ts @@ -15,23 +15,35 @@ describe("api / impl / validator", () => { describe("getLiveness endpoint", () => { let bn: BeaconNode | undefined; - const SECONDS_PER_SLOT = 2; - const ALTAIR_FORK_EPOCH = 0; - const validatorCount = 8; const restPort = 9596; - const testParams: Pick = { - SECONDS_PER_SLOT: SECONDS_PER_SLOT, - ALTAIR_FORK_EPOCH: ALTAIR_FORK_EPOCH, + const validatorCount = 8; + const ELECTRA_FORK_EPOCH = 0; + const FULU_FORK_EPOCH = 1; + const SECONDS_PER_SLOT = 2; + const testParams: Partial = { + SECONDS_PER_SLOT, + ALTAIR_FORK_EPOCH: ELECTRA_FORK_EPOCH, + BELLATRIX_FORK_EPOCH: ELECTRA_FORK_EPOCH, + CAPELLA_FORK_EPOCH: ELECTRA_FORK_EPOCH, + DENEB_FORK_EPOCH: ELECTRA_FORK_EPOCH, + ELECTRA_FORK_EPOCH: ELECTRA_FORK_EPOCH, + FULU_FORK_EPOCH: FULU_FORK_EPOCH, + BLOB_SCHEDULE: [ + { + EPOCH: 1, + MAX_BLOBS_PER_BLOCK: 3, + }, + ], }; const genesisSlotsDelay = 5; - const timeout = (SLOTS_PER_EPOCH + genesisSlotsDelay) * testParams.SECONDS_PER_SLOT * 1000; + const timeout = (SLOTS_PER_EPOCH + genesisSlotsDelay) * SECONDS_PER_SLOT * 1000; afterEach(async () => { if (bn) await bn.close(); }); it("Should return validator indices that are live", async () => { - const chainConfig: ChainConfig = {...chainConfigDef, SECONDS_PER_SLOT, ALTAIR_FORK_EPOCH}; + const chainConfig: ChainConfig = {...chainConfigDef, ...testParams}; const genesisValidatorsRoot = Buffer.alloc(32, 0xaa); const config = createBeaconConfig(chainConfig, genesisValidatorsRoot); @@ -72,7 +84,7 @@ describe("api / impl / validator", () => { }); it("Should return only for previous, current and next epoch", async () => { - const chainConfig: ChainConfig = {...chainConfigDef, SECONDS_PER_SLOT, ALTAIR_FORK_EPOCH}; + const chainConfig: ChainConfig = {...chainConfigDef, ...testParams}; const genesisValidatorsRoot = Buffer.alloc(32, 0xaa); const config = createBeaconConfig(chainConfig, genesisValidatorsRoot); diff --git a/packages/beacon-node/test/e2e/chain/proposerBoostReorg.test.ts 
b/packages/beacon-node/test/e2e/chain/proposerBoostReorg.test.ts index 70f2d87e2456..313c9bc718ee 100644 --- a/packages/beacon-node/test/e2e/chain/proposerBoostReorg.test.ts +++ b/packages/beacon-node/test/e2e/chain/proposerBoostReorg.test.ts @@ -16,12 +16,27 @@ describe("proposer boost reorg", () => { vi.setConfig({testTimeout: 60000}); const validatorCount = 8; - const testParams: Pick = { - SECONDS_PER_SLOT: 2, + const ELECTRA_FORK_EPOCH = 0; + const FULU_FORK_EPOCH = 1; + const SECONDS_PER_SLOT = 2; + const testParams: Partial = { + SECONDS_PER_SLOT, + ALTAIR_FORK_EPOCH: ELECTRA_FORK_EPOCH, + BELLATRIX_FORK_EPOCH: ELECTRA_FORK_EPOCH, + CAPELLA_FORK_EPOCH: ELECTRA_FORK_EPOCH, + DENEB_FORK_EPOCH: ELECTRA_FORK_EPOCH, + ELECTRA_FORK_EPOCH: ELECTRA_FORK_EPOCH, + FULU_FORK_EPOCH: FULU_FORK_EPOCH, // need this to make block `reorgSlot - 1` strong enough REORG_PARENT_WEIGHT_THRESHOLD: 80, // need this to make block `reorgSlot + 1` to become the head PROPOSER_SCORE_BOOST: 120, + BLOB_SCHEDULE: [ + { + EPOCH: 1, + MAX_BLOBS_PER_BLOCK: 3, + }, + ], }; const afterEachCallbacks: (() => Promise | void)[] = []; @@ -46,14 +61,14 @@ describe("proposer boost reorg", () => { it(`should reorg a late block at slot ${reorgSlot}`, async () => { // the node needs time to transpile/initialize bls worker threads const genesisSlotsDelay = 7; - const genesisTime = Math.floor(Date.now() / 1000) + genesisSlotsDelay * testParams.SECONDS_PER_SLOT; + const genesisTime = Math.floor(Date.now() / 1000) + genesisSlotsDelay * SECONDS_PER_SLOT; const testLoggerOpts: TestLoggerOpts = { level: LogLevel.debug, timestampFormat: { format: TimestampFormatCode.EpochSlot, genesisTime, slotsPerEpoch: SLOTS_PER_EPOCH, - secondsPerSlot: testParams.SECONDS_PER_SLOT, + secondsPerSlot: SECONDS_PER_SLOT, }, }; const logger = testLogger("BeaconNode", testLoggerOpts); diff --git a/packages/beacon-node/test/e2e/chain/stateCache/nHistoricalStates.test.ts b/packages/beacon-node/test/e2e/chain/stateCache/nHistoricalStates.test.ts index 875e3fe13b03..c8fb59ad68b5 100644 --- a/packages/beacon-node/test/e2e/chain/stateCache/nHistoricalStates.test.ts +++ b/packages/beacon-node/test/e2e/chain/stateCache/nHistoricalStates.test.ts @@ -23,8 +23,23 @@ describe("regen/reload states with n-historical states configuration", () => { vi.setConfig({testTimeout: 96_000}); const validatorCount = 8; - const testParams: Pick = { - SECONDS_PER_SLOT: 2, + const ELECTRA_FORK_EPOCH = 0; + const FULU_FORK_EPOCH = 1; + const SECONDS_PER_SLOT = 2; + const testParams: Partial = { + SECONDS_PER_SLOT, + ALTAIR_FORK_EPOCH: ELECTRA_FORK_EPOCH, + BELLATRIX_FORK_EPOCH: ELECTRA_FORK_EPOCH, + CAPELLA_FORK_EPOCH: ELECTRA_FORK_EPOCH, + DENEB_FORK_EPOCH: ELECTRA_FORK_EPOCH, + ELECTRA_FORK_EPOCH: ELECTRA_FORK_EPOCH, + FULU_FORK_EPOCH: FULU_FORK_EPOCH, + BLOB_SCHEDULE: [ + { + EPOCH: 1, + MAX_BLOBS_PER_BLOCK: 3, + }, + ], }; const afterEachCallbacks: (() => Promise | void)[] = []; @@ -269,14 +284,14 @@ describe("regen/reload states with n-historical states configuration", () => { wrappedIt(`${name} reorgedSlot=${reorgedSlot} reorgDistance=${reorgDistance}`, async () => { // the node needs time to transpile/initialize bls worker threads const genesisSlotsDelay = 7; - const genesisTime = Math.floor(Date.now() / 1000) + genesisSlotsDelay * testParams.SECONDS_PER_SLOT; + const genesisTime = Math.floor(Date.now() / 1000) + genesisSlotsDelay * SECONDS_PER_SLOT; const testLoggerOpts: TestLoggerOpts = { level: LogLevel.debug, timestampFormat: { format: TimestampFormatCode.EpochSlot, 
genesisTime, slotsPerEpoch: SLOTS_PER_EPOCH, - secondsPerSlot: testParams.SECONDS_PER_SLOT, + secondsPerSlot: SECONDS_PER_SLOT, }, }; @@ -354,7 +369,7 @@ describe("regen/reload states with n-historical states configuration", () => { waitForEvent( bn.chain.emitter, ChainEvent.checkpoint, - (cpSlot + genesisSlotsDelay + 1) * testParams.SECONDS_PER_SLOT * 1000, + (cpSlot + genesisSlotsDelay + 1) * SECONDS_PER_SLOT * 1000, (cp) => cp.epoch === cpEpoch ) ) @@ -377,7 +392,7 @@ describe("regen/reload states with n-historical states configuration", () => { bn.chain.emitter, routes.events.EventType.chainReorg, // reorged event happens at reorgedSlot + 1 - (reorgedSlot + 1 - cpSlot + 1) * testParams.SECONDS_PER_SLOT * 1000, + (reorgedSlot + 1 - cpSlot + 1) * SECONDS_PER_SLOT * 1000, (reorgData) => reorgData.slot === reorgedSlot + 1 ) ) From a6dd9e7e02377335c5347b9ce71c251011e75f6d Mon Sep 17 00:00:00 2001 From: Nico Flaig Date: Mon, 15 Sep 2025 13:54:35 +0100 Subject: [PATCH 167/173] chore: review block input refactor (#8398) Review https://github.com/ChainSafe/lodestar/pull/8200 --- .../beacon-node/src/api/impl/beacon/blocks/index.ts | 12 ++++++++---- packages/beacon-node/src/chain/blocks/importBlock.ts | 4 +--- packages/beacon-node/src/network/interface.ts | 2 -- packages/beacon-node/src/network/network.ts | 2 +- packages/beacon-node/src/sync/unknownBlock.ts | 5 ++--- .../beacon-node/src/sync/utils/pendingBlocksTree.ts | 1 - 6 files changed, 12 insertions(+), 14 deletions(-) diff --git a/packages/beacon-node/src/api/impl/beacon/blocks/index.ts b/packages/beacon-node/src/api/impl/beacon/blocks/index.ts index abef77ef9823..a1045ac66e80 100644 --- a/packages/beacon-node/src/api/impl/beacon/blocks/index.ts +++ b/packages/beacon-node/src/api/impl/beacon/blocks/index.ts @@ -123,7 +123,7 @@ export function getBeaconBlockApi({ dataColumnSidecars = []; } - if (dataColumnSidecars.length > 0 && isBlockInputColumns(blockForImport)) { + if (isBlockInputColumns(blockForImport)) { for (const dataColumnSidecar of dataColumnSidecars) { blockForImport.addColumn({ blockRootHex: blockRoot, @@ -132,10 +132,14 @@ export function getBeaconBlockApi({ seenTimestampSec, }); } - } - if (blobSidecars.length > 0 && isBlockInputBlobs(blockForImport)) { + } else if (isBlockInputBlobs(blockForImport)) { for (const blobSidecar of blobSidecars) { - blockForImport.addBlob({blockRootHex: blockRoot, blobSidecar, source: BlockInputSource.api, seenTimestampSec}); + blockForImport.addBlob({ + blockRootHex: blockRoot, + blobSidecar, + source: BlockInputSource.api, + seenTimestampSec, + }); } } diff --git a/packages/beacon-node/src/chain/blocks/importBlock.ts b/packages/beacon-node/src/chain/blocks/importBlock.ts index 46efad286255..934c215d6ca9 100644 --- a/packages/beacon-node/src/chain/blocks/importBlock.ts +++ b/packages/beacon-node/src/chain/blocks/importBlock.ts @@ -516,9 +516,7 @@ export async function importBlock( for (const {source} of blockInput.getSampledColumnsWithSource()) { this.metrics?.importBlock.columnsBySource.inc({source}); } - } - - if (isBlockInputBlobs(blockInput)) { + } else if (isBlockInputBlobs(blockInput)) { for (const {source} of blockInput.getAllBlobsWithSource()) { this.metrics?.importBlock.blobsBySource.inc({blobsSource: source}); } diff --git a/packages/beacon-node/src/network/interface.ts b/packages/beacon-node/src/network/interface.ts index 70887c9f61c9..4b69deae4a01 100644 --- a/packages/beacon-node/src/network/interface.ts +++ b/packages/beacon-node/src/network/interface.ts @@ -15,7 +15,6 @@ import { 
Upgrader, } from "@libp2p/interface"; import type {AddressManager, ConnectionManager, Registrar, TransportManager} from "@libp2p/interface-internal"; -import {LoggerNode} from "@lodestar/logger/node"; import { AttesterSlashing, LightClientFinalityUpdate, @@ -59,7 +58,6 @@ export interface INetwork extends INetworkCorePublic { readonly peerId: PeerId; readonly custodyConfig: CustodyConfig; readonly closed: boolean; - readonly logger: LoggerNode; events: INetworkEventBus; getConnectedPeers(): PeerIdStr[]; diff --git a/packages/beacon-node/src/network/network.ts b/packages/beacon-node/src/network/network.ts index 457e464ce9cb..c0251dde2b83 100644 --- a/packages/beacon-node/src/network/network.ts +++ b/packages/beacon-node/src/network/network.ts @@ -97,10 +97,10 @@ export type NetworkInitModules = { export class Network implements INetwork { readonly peerId: PeerId; readonly custodyConfig: CustodyConfig; - readonly logger: LoggerNode; // TODO: Make private readonly events: INetworkEventBus; + private readonly logger: LoggerNode; private readonly config: BeaconConfig; private readonly clock: IClock; private readonly chain: IBeaconChain; diff --git a/packages/beacon-node/src/sync/unknownBlock.ts b/packages/beacon-node/src/sync/unknownBlock.ts index ffa01206b7e1..85102d645f24 100644 --- a/packages/beacon-node/src/sync/unknownBlock.ts +++ b/packages/beacon-node/src/sync/unknownBlock.ts @@ -20,7 +20,6 @@ import { PendingBlockInput, PendingBlockInputStatus, PendingBlockType, - PendingRootHex, getBlockInputSyncCacheItemRootHex, getBlockInputSyncCacheItemSlot, isPendingBlockInput, @@ -178,7 +177,7 @@ export class BlockInputSync { }; private addByRootHex = (rootHex: RootHex, peerIdStr?: PeerIdStr): void => { - let pendingBlock = this.pendingBlocks.get(rootHex) as PendingRootHex; + let pendingBlock = this.pendingBlocks.get(rootHex); if (!pendingBlock) { pendingBlock = { status: PendingBlockInputStatus.pending, @@ -207,7 +206,7 @@ export class BlockInputSync { }; private addByBlockInput = (blockInput: IBlockInput, peerIdStr?: string): void => { - let pendingBlock = this.pendingBlocks.get(blockInput.blockRootHex) as PendingBlockInput; + let pendingBlock = this.pendingBlocks.get(blockInput.blockRootHex); // if entry is missing or was added via rootHex and now we have more complete information overwrite // the existing information with the more complete cache entry if (!pendingBlock || !isPendingBlockInput(pendingBlock)) { diff --git a/packages/beacon-node/src/sync/utils/pendingBlocksTree.ts b/packages/beacon-node/src/sync/utils/pendingBlocksTree.ts index dadb05861205..04c4d1346a3c 100644 --- a/packages/beacon-node/src/sync/utils/pendingBlocksTree.ts +++ b/packages/beacon-node/src/sync/utils/pendingBlocksTree.ts @@ -1,6 +1,5 @@ import {RootHex} from "@lodestar/types"; import {MapDef} from "@lodestar/utils"; -// import {DownloadedBlock, PendingBlock, PendingBlockStatus, UnknownBlock} from "../interface.js"; import { BlockInputSyncCacheItem, PendingBlockInput, From b982584b0ead5b48d57b74ddc76449a9c6137144 Mon Sep 17 00:00:00 2001 From: Cayman Date: Tue, 16 Sep 2025 08:01:16 -0400 Subject: [PATCH 168/173] feat: allow block import after NUMBER_OF_COLUMNS / 2 --- .../src/chain/ColumnReconstructionTracker.ts | 18 +++++++----------- .../src/chain/blocks/blockInput/blockInput.ts | 10 ++++++++-- .../src/network/processor/gossipHandlers.ts | 6 +++++- 3 files changed, 20 insertions(+), 14 deletions(-) diff --git a/packages/beacon-node/src/chain/ColumnReconstructionTracker.ts 
b/packages/beacon-node/src/chain/ColumnReconstructionTracker.ts index 88e7d3508514..738977cdba4b 100644 --- a/packages/beacon-node/src/chain/ColumnReconstructionTracker.ts +++ b/packages/beacon-node/src/chain/ColumnReconstructionTracker.ts @@ -6,14 +6,12 @@ import {BlockInputColumns} from "./blocks/blockInput/index.js"; import {recoverDataColumnSidecars} from "../util/dataColumns.js"; /** - * Minimum time to wait before attempting reconstruction + * Maximum added delay before attempting reconstruction + * + * From the spec: + * If delaying reconstruction, nodes may use a random delay in order to desynchronize reconstruction among nodes, thus reducing overall CPU load. */ -const RECONSTRUCTION_DELAY_MIN_MS = 800; - -/** - * Maximum time to wait before attempting reconstruction - */ -const RECONSTRUCTION_DELAY_MAX_MS = 1200; +const RECONSTRUCTION_RANDOM_DELAY_MAX_MS = 200; export type ColumnReconstructionTrackerInit = { logger: Logger; @@ -48,7 +46,7 @@ export class ColumnReconstructionTracker { this.config = init.config; } - triggerColumnReconstruction(blockInput: BlockInputColumns): void { + triggerColumnReconstruction(delay: number, blockInput: BlockInputColumns): void { if (this.running) { return; } @@ -61,9 +59,7 @@ export class ColumnReconstructionTracker { // just that it has been triggered for this block root. this.running = true; this.lastBlockRootHex = blockInput.blockRootHex; - const delay = - RECONSTRUCTION_DELAY_MIN_MS + Math.random() * (RECONSTRUCTION_DELAY_MAX_MS - RECONSTRUCTION_DELAY_MIN_MS); - sleep(delay).then(() => { + sleep(delay + Math.random() * RECONSTRUCTION_RANDOM_DELAY_MAX_MS).then(() => { recoverDataColumnSidecars(blockInput, this.emitter, this.metrics).finally(() => { this.running = false; }); diff --git a/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts b/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts index 8f40b8a5d636..7654c0bc275c 100644 --- a/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts +++ b/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts @@ -1,4 +1,4 @@ -import {ForkName, ForkPreDeneb} from "@lodestar/params"; +import {ForkName, ForkPreDeneb, NUMBER_OF_COLUMNS} from "@lodestar/params"; import {BlobIndex, ColumnIndex, SignedBeaconBlock, Slot, deneb, fulu} from "@lodestar/types"; import {fromHex, prettyBytes, toRootHex, withTimeout} from "@lodestar/utils"; import {VersionedHashes} from "../../../execution/index.js"; @@ -767,7 +767,13 @@ export class BlockInputColumns extends AbstractBlockInput= NUMBER_OF_COLUMNS / 2; this.state = { ...this.state, diff --git a/packages/beacon-node/src/network/processor/gossipHandlers.ts b/packages/beacon-node/src/network/processor/gossipHandlers.ts index 92e1c9e57c2e..0c983fc6767b 100644 --- a/packages/beacon-node/src/network/processor/gossipHandlers.ts +++ b/packages/beacon-node/src/network/processor/gossipHandlers.ts @@ -563,7 +563,11 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand chain.getBlobsTracker.triggerGetBlobs(blockInput); // if we've received at least half of the columns, trigger reconstruction of the rest if (blockInput.columnCount >= NUMBER_OF_COLUMNS / 2) { - chain.columnReconstructionTracker.triggerColumnReconstruction(blockInput); + chain.columnReconstructionTracker.triggerColumnReconstruction( + // wait to reconstruct until after head vote + getCutoffTimeMs(chain, dataColumnSlot, 4000), + blockInput + ); } } }, From 42042c1e08a3b1b60bbb625510209c10ccce36fd Mon Sep 17 00:00:00 2001 From: Cayman Date: Tue, 16 Sep 
2025 08:15:41 -0400 Subject: [PATCH 169/173] Update packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com> --- packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts b/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts index 7654c0bc275c..59e4e042996c 100644 --- a/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts +++ b/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts @@ -773,7 +773,7 @@ export class BlockInputColumns extends AbstractBlockInput= NUMBER_OF_COLUMNS / 2; + this.columnsCache.size >= NUMBER_OF_COLUMNS / 2; this.state = { ...this.state, From a1af740a40de21450bce374aa6e08be4ad228b6e Mon Sep 17 00:00:00 2001 From: Cayman Date: Tue, 16 Sep 2025 08:17:18 -0400 Subject: [PATCH 170/173] chore: use params to configure delay --- packages/beacon-node/src/network/processor/gossipHandlers.ts | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/packages/beacon-node/src/network/processor/gossipHandlers.ts b/packages/beacon-node/src/network/processor/gossipHandlers.ts index 0c983fc6767b..c194be145fc3 100644 --- a/packages/beacon-node/src/network/processor/gossipHandlers.ts +++ b/packages/beacon-node/src/network/processor/gossipHandlers.ts @@ -5,6 +5,7 @@ import { ForkPostElectra, ForkPreElectra, ForkSeq, + INTERVALS_PER_SLOT, isForkPostElectra, NUMBER_OF_COLUMNS, } from "@lodestar/params"; @@ -565,7 +566,7 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand if (blockInput.columnCount >= NUMBER_OF_COLUMNS / 2) { chain.columnReconstructionTracker.triggerColumnReconstruction( // wait to reconstruct until after head vote - getCutoffTimeMs(chain, dataColumnSlot, 4000), + getCutoffTimeMs(chain, dataColumnSlot, config.SECONDS_PER_SLOT / INTERVALS_PER_SLOT), blockInput ); } From 7ffe6b0c1c3a0d91b03743631abe90c24f4665aa Mon Sep 17 00:00:00 2001 From: Cayman Date: Tue, 16 Sep 2025 09:09:48 -0400 Subject: [PATCH 171/173] Update packages/beacon-node/src/network/processor/gossipHandlers.ts Co-authored-by: Nico Flaig --- packages/beacon-node/src/network/processor/gossipHandlers.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/beacon-node/src/network/processor/gossipHandlers.ts b/packages/beacon-node/src/network/processor/gossipHandlers.ts index c194be145fc3..1a632bdb5f54 100644 --- a/packages/beacon-node/src/network/processor/gossipHandlers.ts +++ b/packages/beacon-node/src/network/processor/gossipHandlers.ts @@ -566,7 +566,7 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand if (blockInput.columnCount >= NUMBER_OF_COLUMNS / 2) { chain.columnReconstructionTracker.triggerColumnReconstruction( // wait to reconstruct until after head vote - getCutoffTimeMs(chain, dataColumnSlot, config.SECONDS_PER_SLOT / INTERVALS_PER_SLOT), + getCutoffTimeMs(chain, dataColumnSlot, (config.SECONDS_PER_SLOT / INTERVALS_PER_SLOT) * 1000), blockInput ); } From 22ee657909f6f4926cf91d76aab214a7fe89d246 Mon Sep 17 00:00:00 2001 From: Cayman Date: Tue, 16 Sep 2025 16:32:09 -0400 Subject: [PATCH 172/173] chore: add additional promise to BlockInputColumns --- .../src/chain/blocks/blockInput/blockInput.ts | 37 +++++++++++++++++++ .../src/chain/blocks/writeBlockInputToDb.ts | 8 ++++ 2 files changed, 45 insertions(+) diff --git 
a/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts b/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts index 59e4e042996c..aa66e9de2573 100644 --- a/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts +++ b/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts @@ -557,6 +557,7 @@ type BlockInputColumnsState = | { hasBlock: true; hasAllData: true; + hasComputedAllData: boolean; versionedHashes: VersionedHashes; block: SignedBeaconBlock; source: SourceMeta; @@ -565,6 +566,7 @@ type BlockInputColumnsState = | { hasBlock: true; hasAllData: false; + hasComputedAllData: false; versionedHashes: VersionedHashes; block: SignedBeaconBlock; source: SourceMeta; @@ -572,11 +574,13 @@ type BlockInputColumnsState = | { hasBlock: false; hasAllData: true; + hasComputedAllData: boolean; versionedHashes: VersionedHashes; } | { hasBlock: false; hasAllData: false; + hasComputedAllData: false; versionedHashes: VersionedHashes; }; /** @@ -594,6 +598,12 @@ export class BlockInputColumns extends AbstractBlockInput(); private readonly sampledColumns: ColumnIndex[]; private readonly custodyColumns: ColumnIndex[]; + /** + * This promise resolves when all sampled columns are available + * + * This is different from `dataPromise` which resolves when all data is available or could become available (e.g. through reconstruction) + */ + protected computedDataPromise = createPromise(); private constructor( init: BlockInputInit, @@ -622,6 +632,7 @@ export class BlockInputColumns extends AbstractBlockInput= NUMBER_OF_COLUMNS / 2; + const hasComputedAllData = + // already hasAllData + this.state.hasAllData || + // has all sampled columns + sampledColumns.length === this.sampledColumns.length; + this.state = { ...this.state, hasAllData: hasAllData || this.state.hasAllData, + hasComputedAllData: hasComputedAllData, timeCompleteSec: hasAllData ? seenTimestampSec : undefined, } as BlockInputColumnsState; if (hasAllData && sampledColumns !== null) { this.dataPromise.resolve(sampledColumns); } + + if (hasComputedAllData && sampledColumns !== null) { + this.computedDataPromise.resolve(sampledColumns); + } } hasColumn(columnIndex: number): boolean { @@ -854,4 +880,15 @@ export class BlockInputColumns extends AbstractBlockInput { + if (!this.state.hasComputedAllData) { + return withTimeout(() => this.computedDataPromise.promise, timeout, signal); + } + return Promise.resolve(this.getSampledColumns()); + } } diff --git a/packages/beacon-node/src/chain/blocks/writeBlockInputToDb.ts b/packages/beacon-node/src/chain/blocks/writeBlockInputToDb.ts index 44396e562b85..2251a7d617d4 100644 --- a/packages/beacon-node/src/chain/blocks/writeBlockInputToDb.ts +++ b/packages/beacon-node/src/chain/blocks/writeBlockInputToDb.ts @@ -44,6 +44,14 @@ export async function writeBlockInputToDb(this: BeaconChain, blocksInputs: IBloc // NOTE: Old data is pruned on archive if (isBlockInputColumns(blockInput)) { + if (!blockInput.hasComputedAllData()) { + // Supernodes may only have a subset of the data columns by the time the block begins to be imported + // because full data availability can be assumed after NUMBER_OF_COLUMNS / 2 columns are available. + // Here, however, all data columns must be fully available/reconstructed before persisting to the DB. 
+ this.columnReconstructionTracker.triggerColumnReconstruction(0, blockInput); + await blockInput.waitForComputedAllData(BLOB_AVAILABILITY_TIMEOUT); + } + const {custodyColumns} = this.custodyConfig; const blobsLen = (block.message as fulu.BeaconBlock).body.blobKzgCommitments.length; let dataColumnsLen: number; From f7f36e6d1e70350858f87f7539b1ce4a2bb1fbb0 Mon Sep 17 00:00:00 2001 From: Cayman Date: Tue, 16 Sep 2025 17:15:32 -0400 Subject: [PATCH 173/173] chore: no eager block import --- .../src/chain/blocks/importBlock.ts | 6 +++++- .../src/chain/blocks/writeBlockInputToDb.ts | 6 +++++- .../src/network/processor/gossipHandlers.ts | 19 ++++--------------- packages/beacon-node/src/sync/unknownBlock.ts | 3 ++- packages/beacon-node/src/util/clock.ts | 13 ++++++++++++- 5 files changed, 28 insertions(+), 19 deletions(-) diff --git a/packages/beacon-node/src/chain/blocks/importBlock.ts b/packages/beacon-node/src/chain/blocks/importBlock.ts index 934c215d6ca9..0edd165ad427 100644 --- a/packages/beacon-node/src/chain/blocks/importBlock.ts +++ b/packages/beacon-node/src/chain/blocks/importBlock.ts @@ -95,8 +95,9 @@ export async function importBlock( // 1. Persist block to hot DB (pre-emptively) // If eagerPersistBlock = true we do that in verifyBlocksInEpoch to batch all I/O operations to save block time to head + let writeBlockInputPromise: Promise | undefined; if (!opts.eagerPersistBlock) { - await writeBlockInputToDb.call(this, [blockInput]); + writeBlockInputPromise = writeBlockInputToDb.call(this, [blockInput]); } // 2. Import block to fork choice @@ -512,6 +513,9 @@ export async function importBlock( ); } + // Await writeBlockInputToDb if it was started above + await writeBlockInputPromise; + if (isBlockInputColumns(blockInput)) { for (const {source} of blockInput.getSampledColumnsWithSource()) { this.metrics?.importBlock.columnsBySource.inc({source}); diff --git a/packages/beacon-node/src/chain/blocks/writeBlockInputToDb.ts b/packages/beacon-node/src/chain/blocks/writeBlockInputToDb.ts index 2251a7d617d4..8b2040b8e577 100644 --- a/packages/beacon-node/src/chain/blocks/writeBlockInputToDb.ts +++ b/packages/beacon-node/src/chain/blocks/writeBlockInputToDb.ts @@ -3,6 +3,8 @@ import {prettyPrintIndices, toRootHex} from "@lodestar/utils"; import {BeaconChain} from "../chain.js"; import {IBlockInput, isBlockInputBlobs, isBlockInputColumns} from "./blockInput/index.js"; import {BLOB_AVAILABILITY_TIMEOUT} from "./verifyBlocksDataAvailability.js"; +import {getCutoffTimeMs} from "../../util/clock.js"; +import {INTERVALS_PER_SLOT} from "@lodestar/params"; /** * Persists block input data to DB. This operation must be eventually completed if a block is imported to the fork-choice. @@ -48,7 +50,9 @@ export async function writeBlockInputToDb(this: BeaconChain, blocksInputs: IBloc // Supernodes may only have a subset of the data columns by the time the block begins to be imported // because full data availability can be assumed after NUMBER_OF_COLUMNS / 2 columns are available. // Here, however, all data columns must be fully available/reconstructed before persisting to the DB. - this.columnReconstructionTracker.triggerColumnReconstruction(0, blockInput); + // Wait for normal gossip to received any missing columns and attempt reconstruction after this delay. 
+ const delay = getCutoffTimeMs(this, slot, (this.config.SECONDS_PER_SLOT / INTERVALS_PER_SLOT) * 1000); + this.columnReconstructionTracker.triggerColumnReconstruction(delay, blockInput); await blockInput.waitForComputedAllData(BLOB_AVAILABILITY_TIMEOUT); } diff --git a/packages/beacon-node/src/network/processor/gossipHandlers.ts b/packages/beacon-node/src/network/processor/gossipHandlers.ts index 1a632bdb5f54..05addcf776bb 100644 --- a/packages/beacon-node/src/network/processor/gossipHandlers.ts +++ b/packages/beacon-node/src/network/processor/gossipHandlers.ts @@ -1,5 +1,5 @@ import {routes} from "@lodestar/api"; -import {BeaconConfig, ChainForkConfig} from "@lodestar/config"; +import {BeaconConfig} from "@lodestar/config"; import { ForkName, ForkPostElectra, @@ -9,14 +9,12 @@ import { isForkPostElectra, NUMBER_OF_COLUMNS, } from "@lodestar/params"; -import {computeTimeAtSlot} from "@lodestar/state-transition"; import { Root, SignedBeaconBlock, SingleAttestation, Slot, SubnetID, - UintNum64, deneb, fulu, ssz, @@ -80,6 +78,7 @@ import {sszDeserialize} from "../gossip/topic.js"; import {INetwork} from "../interface.js"; import {PeerAction} from "../peers/index.js"; import {AggregatorTracker} from "./aggregatorTracker.js"; +import {getCutoffTimeMs} from "../../util/clock.js"; /** * Gossip handler options as part of network options @@ -415,7 +414,8 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand // to track block process steps seenTimestampSec, // gossip block is validated, we want to process it asap - eagerPersistBlock: true, + // however, due to other optimizations, we don't eagerly persist the block + eagerPersistBlock: false, isGossipBlock: true, }) .then(() => { @@ -924,14 +924,3 @@ export async function validateGossipFnRetryUnknownRoot( } } } - -function getCutoffTimeMs( - chain: {config: ChainForkConfig; genesisTime: UintNum64; logger: Logger}, - blockSlot: Slot, - cutoffMsFromSlotStart: number -): number { - return Math.max( - computeTimeAtSlot(chain.config, blockSlot, chain.genesisTime) * 1000 + cutoffMsFromSlotStart - Date.now(), - 0 - ); -} diff --git a/packages/beacon-node/src/sync/unknownBlock.ts b/packages/beacon-node/src/sync/unknownBlock.ts index 85102d645f24..c0d462398f10 100644 --- a/packages/beacon-node/src/sync/unknownBlock.ts +++ b/packages/beacon-node/src/sync/unknownBlock.ts @@ -414,7 +414,8 @@ export class BlockInputSync { ignoreIfFinalized: true, blsVerifyOnMainThread: true, // block is validated with correct root, we want to process it as soon as possible - eagerPersistBlock: true, + // however, due to other optimizations, we don't eagerly persist the block + eagerPersistBlock: false, }) ); diff --git a/packages/beacon-node/src/util/clock.ts b/packages/beacon-node/src/util/clock.ts index 36eb6f6f7f2c..8304db8b36e6 100644 --- a/packages/beacon-node/src/util/clock.ts +++ b/packages/beacon-node/src/util/clock.ts @@ -1,5 +1,5 @@ import EventEmitter from "node:events"; -import {ChainForkConfig} from "@lodestar/config"; +import {ChainConfig, ChainForkConfig} from "@lodestar/config"; import {computeEpochAtSlot, computeTimeAtSlot, getCurrentSlot} from "@lodestar/state-transition"; import type {Epoch, Slot} from "@lodestar/types"; import {ErrorAborted} from "@lodestar/utils"; @@ -202,3 +202,14 @@ export class Clock extends EventEmitter implements IClock { return milliSecondsPerSlot - (diffInMilliSeconds % milliSecondsPerSlot); } } + +export function getCutoffTimeMs( + chain: {config: ChainConfig; genesisTime: number}, + blockSlot: Slot, + 
cutoffMsFromSlotStart: number +): number { + return Math.max( + computeTimeAtSlot(chain.config, blockSlot, chain.genesisTime) * 1000 + cutoffMsFromSlotStart - Date.now(), + 0 + ); +}