diff --git a/packages/beacon-node/src/api/impl/beacon/blocks/index.ts b/packages/beacon-node/src/api/impl/beacon/blocks/index.ts index 3201fd1c2d57..a1045ac66e80 100644 --- a/packages/beacon-node/src/api/impl/beacon/blocks/index.ts +++ b/packages/beacon-node/src/api/impl/beacon/blocks/index.ts @@ -28,20 +28,11 @@ import { sszTypesFor, } from "@lodestar/types"; import {fromAsync, fromHex, sleep, toHex, toRootHex} from "@lodestar/utils"; -import { - BlobsSource, - BlockInput, - BlockInputAvailableData, - BlockInputBlobs, - BlockInputDataColumns, - BlockInputType, - BlockSource, - DataColumnsSource, - ImportBlockOpts, - getBlockInput, -} from "../../../../chain/blocks/types.js"; +import {BlockInputSource, isBlockInputBlobs, isBlockInputColumns} from "../../../../chain/blocks/blockInput/index.js"; +import {ImportBlockOpts} from "../../../../chain/blocks/types.js"; import {verifyBlocksInEpoch} from "../../../../chain/blocks/verifyBlock.js"; import {BeaconChain} from "../../../../chain/chain.js"; +import {ChainEvent} from "../../../../chain/emitter.js"; import {BlockError, BlockErrorCode, BlockGossipError} from "../../../../chain/errors/index.js"; import { BlockType, @@ -51,7 +42,6 @@ import { } from "../../../../chain/produceBlock/index.js"; import {validateGossipBlock} from "../../../../chain/validation/block.js"; import {OpSource} from "../../../../chain/validatorMonitor.js"; -import {NetworkEvent} from "../../../../network/index.js"; import {getBlobSidecars, kzgCommitmentToVersionedHash, reconstructBlobs} from "../../../../util/blobs.js"; import {getDataColumnSidecarsFromBlock} from "../../../../util/dataColumns.js"; import {isOptimisticBlock} from "../../../../util/forkChoice.js"; @@ -95,10 +85,15 @@ export function getBeaconBlockApi({ const fork = config.getForkName(slot); const blockRoot = toRootHex(chain.config.getForkTypes(slot).BeaconBlock.hashTreeRoot(signedBlock.message)); - let blockForImport: BlockInput, blobSidecars: deneb.BlobSidecars, dataColumnSidecars: fulu.DataColumnSidecars; + const blockForImport = chain.seenBlockInputCache.getByBlock({ + block: signedBlock, + source: BlockInputSource.api, + seenTimestampSec, + blockRootHex: blockRoot, + }); + let blobSidecars: deneb.BlobSidecars, dataColumnSidecars: fulu.DataColumnSidecars; if (isDenebBlockContents(signedBlockContents)) { - let blockData: BlockInputAvailableData; if (isForkPostFulu(fork)) { const timer = metrics?.peerDas.dataColumnSidecarComputationTime.startTimer(); // If the block was produced by this node, we will already have computed cells @@ -116,30 +111,36 @@ export function getBeaconBlockApi({ cellsAndProofs ); timer?.(); - blockData = { - fork, - dataColumns: dataColumnSidecars, - dataColumnsBytes: dataColumnSidecars.map(() => null), - dataColumnsSource: DataColumnsSource.api, - } as BlockInputDataColumns; blobSidecars = []; } else if (isForkPostDeneb(fork)) { blobSidecars = getBlobSidecars(config, signedBlock, signedBlockContents.blobs, signedBlockContents.kzgProofs); - blockData = { - fork, - blobs: blobSidecars, - blobsSource: BlobsSource.api, - } as BlockInputBlobs; dataColumnSidecars = []; } else { throw Error(`Invalid data fork=${fork} for publish`); } - - blockForImport = getBlockInput.availableData(config, signedBlock, BlockSource.api, blockData); } else { blobSidecars = []; dataColumnSidecars = []; - blockForImport = getBlockInput.preData(config, signedBlock, BlockSource.api); + } + + if (isBlockInputColumns(blockForImport)) { + for (const dataColumnSidecar of dataColumnSidecars) { + 
blockForImport.addColumn({ + blockRootHex: blockRoot, + columnSidecar: dataColumnSidecar, + source: BlockInputSource.api, + seenTimestampSec, + }); + } + } else if (isBlockInputBlobs(blockForImport)) { + for (const blobSidecar of blobSidecars) { + blockForImport.addBlob({ + blockRootHex: blockRoot, + blobSidecar, + source: BlockInputSource.api, + seenTimestampSec, + }); + } } // check what validations have been requested before broadcasting and publishing the block @@ -184,9 +185,10 @@ export function getBeaconBlockApi({ if (!blockLocallyProduced) { const parentBlock = chain.forkChoice.getBlock(signedBlock.message.parentRoot); if (parentBlock === null) { - network.events.emit(NetworkEvent.unknownBlockParent, { + chain.emitter.emit(ChainEvent.unknownParent, { blockInput: blockForImport, peer: IDENTITY_PEER_ID, + source: BlockInputSource.api, }); chain.persistInvalidSszValue( chain.config.getForkTypes(slot).SignedBeaconBlock, @@ -247,18 +249,16 @@ export function getBeaconBlockApi({ // Simple implementation of a pending block queue. Keeping the block here recycles the API logic, and keeps the // REST request promise without any extra infrastructure. - const msToBlockSlot = - computeTimeAtSlot(config, blockForImport.block.message.slot, chain.genesisTime) * 1000 - Date.now(); + const msToBlockSlot = computeTimeAtSlot(config, slot, chain.genesisTime) * 1000 - Date.now(); if (msToBlockSlot <= MAX_API_CLOCK_DISPARITY_MS && msToBlockSlot > 0) { // If block is a bit early, hold it in a promise. Equivalent to a pending queue. await sleep(msToBlockSlot); } // TODO: Validate block - const delaySec = - seenTimestampSec - (chain.genesisTime + blockForImport.block.message.slot * config.SECONDS_PER_SLOT); + const delaySec = seenTimestampSec - (chain.genesisTime + slot * config.SECONDS_PER_SLOT); metrics?.gossipBlock.elapsedTimeTillReceived.observe({source: OpSource.api}, delaySec); - chain.validatorMonitor?.registerBeaconBlock(OpSource.api, delaySec, blockForImport.block.message); + chain.validatorMonitor?.registerBeaconBlock(OpSource.api, delaySec, signedBlock.message); chain.logger.info("Publishing block", valLogMeta); const publishPromises = [ @@ -280,9 +280,10 @@ export function getBeaconBlockApi({ .processBlock(blockForImport, {...opts, eagerPersistBlock: false}) .catch((e) => { if (e instanceof BlockError && e.type.code === BlockErrorCode.PARENT_UNKNOWN) { - network.events.emit(NetworkEvent.unknownBlockParent, { + chain.emitter.emit(ChainEvent.unknownParent, { blockInput: blockForImport, peer: IDENTITY_PEER_ID, + source: BlockInputSource.api, }); } throw e; @@ -315,38 +316,33 @@ export function getBeaconBlockApi({ chain.emitter.emit(routes.events.EventType.blockGossip, {slot, block: blockRoot}); - if (blockForImport.type === BlockInputType.availableData) { - if (isForkPostFulu(blockForImport.blockData.fork)) { - const {dataColumns} = blockForImport.blockData as BlockInputDataColumns; - metrics?.dataColumns.bySource.inc({source: DataColumnsSource.api}, dataColumns.length); - - if (chain.emitter.listenerCount(routes.events.EventType.dataColumnSidecar)) { - for (const dataColumnSidecar of dataColumns) { - chain.emitter.emit(routes.events.EventType.dataColumnSidecar, { - blockRoot, - slot, - index: dataColumnSidecar.index, - kzgCommitments: dataColumnSidecar.kzgCommitments.map(toHex), - }); - } - } - } else if ( - isForkPostDeneb(blockForImport.blockData.fork) && - chain.emitter.listenerCount(routes.events.EventType.blobSidecar) - ) { - const {blobs} = blockForImport.blockData as BlockInputBlobs; - - 
for (const blobSidecar of blobs) { - const {index, kzgCommitment} = blobSidecar; - chain.emitter.emit(routes.events.EventType.blobSidecar, { + if (isBlockInputColumns(blockForImport)) { + const dataColumns = blockForImport.getAllColumns(); + metrics?.dataColumns.bySource.inc({source: BlockInputSource.api}, dataColumns.length); + + if (chain.emitter.listenerCount(routes.events.EventType.dataColumnSidecar)) { + for (const dataColumnSidecar of dataColumns) { + chain.emitter.emit(routes.events.EventType.dataColumnSidecar, { blockRoot, slot, - index, - kzgCommitment: toHex(kzgCommitment), - versionedHash: toHex(kzgCommitmentToVersionedHash(kzgCommitment)), + index: dataColumnSidecar.index, + kzgCommitments: dataColumnSidecar.kzgCommitments.map(toHex), }); } } + } else if (isBlockInputBlobs(blockForImport) && chain.emitter.listenerCount(routes.events.EventType.blobSidecar)) { + const blobSidecars = blockForImport.getBlobs(); + + for (const blobSidecar of blobSidecars) { + const {index, kzgCommitment} = blobSidecar; + chain.emitter.emit(routes.events.EventType.blobSidecar, { + blockRoot, + slot, + index, + kzgCommitment: toHex(kzgCommitment), + versionedHash: toHex(kzgCommitmentToVersionedHash(kzgCommitment)), + }); + } } }; diff --git a/packages/beacon-node/src/api/impl/lodestar/index.ts b/packages/beacon-node/src/api/impl/lodestar/index.ts index 8393a68e7347..95175fcdd20e 100644 --- a/packages/beacon-node/src/api/impl/lodestar/index.ts +++ b/packages/beacon-node/src/api/impl/lodestar/index.ts @@ -115,7 +115,7 @@ export function getLodestarApi({ data: (chain as BeaconChain)["blockProcessor"].jobQueue.getItems().map((item) => { const [blockInputs, opts] = item.args; return { - blockSlots: blockInputs.map((blockInput) => blockInput.block.message.slot), + blockSlots: blockInputs.map((blockInput) => blockInput.slot), jobOpts: opts, addedTimeMs: item.addedTimeMs, }; diff --git a/packages/beacon-node/src/api/impl/validator/index.ts b/packages/beacon-node/src/api/impl/validator/index.ts index bff64a95084d..d31bd1ed939f 100644 --- a/packages/beacon-node/src/api/impl/validator/index.ts +++ b/packages/beacon-node/src/api/impl/validator/index.ts @@ -57,6 +57,7 @@ import { toRootHex, } from "@lodestar/utils"; import {MAX_BUILDER_BOOST_FACTOR} from "@lodestar/validator"; +import {BlockInputSource} from "../../../chain/blocks/blockInput/types.js"; import { AttestationError, AttestationErrorCode, @@ -978,7 +979,7 @@ export function getValidatorApi( // see https://github.com/ChainSafe/lodestar/issues/5063 if (!chain.forkChoice.hasBlock(beaconBlockRoot)) { const rootHex = toRootHex(beaconBlockRoot); - network.searchUnknownSlotRoot({slot, root: rootHex}); + network.searchUnknownSlotRoot({slot, root: rootHex}, BlockInputSource.api); // if result of this call is false, i.e. 
block hasn't seen after 1 slot then the below notOnOptimisticBlockRoot call will throw error await chain.waitForBlock(slot, rootHex); } diff --git a/packages/beacon-node/src/chain/ColumnReconstructionTracker.ts b/packages/beacon-node/src/chain/ColumnReconstructionTracker.ts new file mode 100644 index 000000000000..738977cdba4b --- /dev/null +++ b/packages/beacon-node/src/chain/ColumnReconstructionTracker.ts @@ -0,0 +1,68 @@ +import {Logger, sleep} from "@lodestar/utils"; +import {ChainEventEmitter} from "./emitter.js"; +import {Metrics} from "../metrics/metrics.js"; +import {ChainForkConfig} from "@lodestar/config"; +import {BlockInputColumns} from "./blocks/blockInput/index.js"; +import {recoverDataColumnSidecars} from "../util/dataColumns.js"; + +/** + * Maximum added delay before attempting reconstruction + * + * From the spec: + * If delaying reconstruction, nodes may use a random delay in order to desynchronize reconstruction among nodes, thus reducing overall CPU load. + */ +const RECONSTRUCTION_RANDOM_DELAY_MAX_MS = 200; + +export type ColumnReconstructionTrackerInit = { + logger: Logger; + emitter: ChainEventEmitter; + metrics: Metrics | null; + config: ChainForkConfig; +}; + +/** + * Tracks column reconstruction attempts to avoid duplicate and multiple in-flight calls + */ +export class ColumnReconstructionTracker { + logger: Logger; + emitter: ChainEventEmitter; + metrics: Metrics | null; + config: ChainForkConfig; + + /** + * Track last attempted block root + * + * This is sufficient to avoid duplicate calls since we only call this + * function when we see a new data column sidecar from gossip. + */ + lastBlockRootHex: string | null = null; + /** Track if a reconstruction attempt is in-flight */ + running = false; + + constructor(init: ColumnReconstructionTrackerInit) { + this.logger = init.logger; + this.emitter = init.emitter; + this.metrics = init.metrics; + this.config = init.config; + } + + triggerColumnReconstruction(delay: number, blockInput: BlockInputColumns): void { + if (this.running) { + return; + } + + if (this.lastBlockRootHex === blockInput.blockRootHex) { + return; + } + + // We don't care about the outcome of this call, + // just that it has been triggered for this block root. 
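+    // Flip both guards synchronously before scheduling the delayed work so a
+    // re-entrant trigger (e.g. another gossip column for the same root) bails
+    // out immediately; `running` is only released in the finally() once
+    // recoverDataColumnSidecars settles, whether it succeeded or not.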
+ this.running = true; + this.lastBlockRootHex = blockInput.blockRootHex; + sleep(delay + Math.random() * RECONSTRUCTION_RANDOM_DELAY_MAX_MS).then(() => { + recoverDataColumnSidecars(blockInput, this.emitter, this.metrics).finally(() => { + this.running = false; + }); + }); + } +} diff --git a/packages/beacon-node/src/chain/GetBlobsTracker.ts b/packages/beacon-node/src/chain/GetBlobsTracker.ts new file mode 100644 index 000000000000..db3b7454c191 --- /dev/null +++ b/packages/beacon-node/src/chain/GetBlobsTracker.ts @@ -0,0 +1,75 @@ +import {Logger} from "@lodestar/utils"; +import {IExecutionEngine} from "../execution/index.js"; +import {ChainEventEmitter} from "./emitter.js"; +import {Metrics} from "../metrics/metrics.js"; +import {ChainForkConfig} from "@lodestar/config"; +import {IBlockInput} from "./blocks/blockInput/index.js"; +import {getDataColumnSidecarsFromExecution} from "../util/execution.js"; +import {callInNextEventLoop} from "../util/eventLoop.js"; + +export type GetBlobsTrackerInit = { + logger: Logger; + executionEngine: IExecutionEngine; + emitter: ChainEventEmitter; + metrics: Metrics | null; + config: ChainForkConfig; +}; + +/** + * Tracks getBlobsV2 calls to the execution engine to avoid duplicate and multiple in-flight calls + */ +export class GetBlobsTracker { + logger: Logger; + executionEngine: IExecutionEngine; + emitter: ChainEventEmitter; + metrics: Metrics | null; + config: ChainForkConfig; + + /** + * Track last attempted block root + * + * This is sufficient to avoid duplicate calls since we only call this + * function when we see a new block or data column sidecar from gossip. + */ + lastBlockRootHex: string | null = null; + /** Track if a getBlobsV2 call is in-flight */ + running = false; + // Preallocate buffers for getBlobsV2 RPC calls + // See https://github.com/ChainSafe/lodestar/pull/8282 for context + blobAndProofBuffers: Uint8Array[] = []; + + constructor(init: GetBlobsTrackerInit) { + this.logger = init.logger; + this.executionEngine = init.executionEngine; + this.emitter = init.emitter; + this.metrics = init.metrics; + this.config = init.config; + } + + triggerGetBlobs(blockInput: IBlockInput): void { + if (this.running) { + return; + } + + if (this.lastBlockRootHex === blockInput.blockRootHex) { + return; + } + + // We don't care about the outcome of this call, + // just that it has been triggered for this block root. 
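+    // Same guard pattern as ColumnReconstructionTracker: set the flags before
+    // deferring the work. callInNextEventLoop keeps the caller's hot path
+    // synchronous while the getBlobsV2 call runs fire-and-forget; the finally()
+    // releases `running` regardless of the outcome.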
+ this.running = true; + this.lastBlockRootHex = blockInput.blockRootHex; + callInNextEventLoop(() => { + getDataColumnSidecarsFromExecution( + this.config, + this.executionEngine, + this.emitter, + blockInput, + this.metrics, + this.blobAndProofBuffers + ).finally(() => { + this.running = false; + }); + }); + } +} diff --git a/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts b/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts index 907a0939e6ee..aa66e9de2573 100644 --- a/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts +++ b/packages/beacon-node/src/chain/blocks/blockInput/blockInput.ts @@ -1,4 +1,4 @@ -import {ForkName, ForkPreDeneb} from "@lodestar/params"; +import {ForkName, ForkPreDeneb, NUMBER_OF_COLUMNS} from "@lodestar/params"; import {BlobIndex, ColumnIndex, SignedBeaconBlock, Slot, deneb, fulu} from "@lodestar/types"; import {fromHex, prettyBytes, toRootHex, withTimeout} from "@lodestar/utils"; import {VersionedHashes} from "../../../execution/index.js"; @@ -11,7 +11,6 @@ import { BlobMeta, BlobWithSource, BlockInputInit, - ColumnMeta, ColumnWithSource, CreateBlockInputMeta, DAData, @@ -20,6 +19,7 @@ import { LogMetaBasic, LogMetaBlobs, LogMetaColumns, + MissingColumnMeta, PromiseParts, SourceMeta, } from "./types.js"; @@ -142,8 +142,8 @@ abstract class AbstractBlockInput { private constructor(init: BlockInputInit, state: BlockInputPreDataState) { super(init); this.state = state; + this.dataPromise.resolve(null); + this.blockPromise.resolve(state.block); } static createFromBlock(props: AddBlock & CreateBlockInputMeta): BlockInputPreData { const init: BlockInputInit = { daOutOfRange: props.daOutOfRange, - timeCreated: props.source.seenTimestampSec, + timeCreated: props.seenTimestampSec, forkName: props.forkName, slot: props.block.message.slot, blockRootHex: props.blockRootHex, @@ -216,20 +218,26 @@ export class BlockInputPreData extends AbstractBlockInput { hasBlock: true, hasAllData: true, block: props.block, - source: props.source, - timeCompleteSec: props.source.seenTimestampSec, + source: { + source: props.source, + seenTimestampSec: props.seenTimestampSec, + peerIdStr: props.peerIdStr, + }, + timeCompleteSec: props.seenTimestampSec, }; return new BlockInputPreData(init, state); } - addBlock(_: AddBlock): void { - throw new BlockInputError( - { - code: BlockInputErrorCode.INVALID_CONSTRUCTION, - blockRoot: this.blockRootHex, - }, - "Cannot addBlock to BlockInputPreData" - ); + addBlock(_: AddBlock, opts = {throwOnDuplicateAdd: true}): void { + if (opts.throwOnDuplicateAdd) { + throw new BlockInputError( + { + code: BlockInputErrorCode.INVALID_CONSTRUCTION, + blockRoot: this.blockRootHex, + }, + "Cannot addBlock to BlockInputPreData" + ); + } } } @@ -283,12 +291,16 @@ export class BlockInputBlobs extends AbstractBlockInput): void { - if (this.state.hasBlock) { - throw new BlockInputError( - { - code: BlockInputErrorCode.INVALID_CONSTRUCTION, - blockRoot: this.blockRootHex, - }, - "Cannot addBlock to BlockInputBlobs after it already has a block" - ); - } - + addBlock( + {blockRootHex, block, source, seenTimestampSec, peerIdStr}: AddBlock, + opts = {throwOnDuplicateAdd: true} + ): void { // this check suffices for checking slot, parentRoot, and forkName if (blockRootHex !== this.blockRootHex) { throw new BlockInputError( @@ -353,13 +358,27 @@ export class BlockInputBlobs extends AbstractBlockInput; source: SourceMeta; @@ -543,6 +566,7 @@ type BlockInputColumnsState = | { hasBlock: true; hasAllData: false; + hasComputedAllData: false; 
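+      // Unlike hasAllData, hasComputedAllData only flips once every sampled
+      // column is physically present; data that is merely recoverable (e.g. via
+      // reconstruction once half of all columns are seen) does not count.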
versionedHashes: VersionedHashes; block: SignedBeaconBlock; source: SourceMeta; @@ -550,11 +574,13 @@ type BlockInputColumnsState = | { hasBlock: false; hasAllData: true; + hasComputedAllData: boolean; versionedHashes: VersionedHashes; } | { hasBlock: false; hasAllData: false; + hasComputedAllData: false; versionedHashes: VersionedHashes; }; /** @@ -572,6 +598,12 @@ export class BlockInputColumns extends AbstractBlockInput(); private readonly sampledColumns: ColumnIndex[]; private readonly custodyColumns: ColumnIndex[]; + /** + * This promise resolves when all sampled columns are available + * + * This is different from `dataPromise` which resolves when all data is available or could become available (e.g. through reconstruction) + */ + protected computedDataPromise = createPromise(); private constructor( init: BlockInputInit, @@ -585,6 +617,10 @@ export class BlockInputColumns extends AbstractBlockInput & CreateBlockInputMeta & {sampledColumns: ColumnIndex[]; custodyColumns: ColumnIndex[]} @@ -596,15 +632,20 @@ export class BlockInputColumns extends AbstractBlockInput): void { - if (this.state.hasBlock) { - throw new BlockInputError( - { - code: BlockInputErrorCode.INVALID_CONSTRUCTION, - blockRoot: this.blockRootHex, - }, - "Cannot addBlock to BlockInputColumns after it already has a block" - ); - } - + addBlock(props: AddBlock, opts = {throwOnDuplicateAdd: true}): void { if (props.blockRootHex !== this.blockRootHex) { throw new BlockInputError( { code: BlockInputErrorCode.MISMATCHED_ROOT_HEX, blockInputRoot: this.blockRootHex, mismatchedRoot: props.blockRootHex, - source: props.source.source, - peerId: `${props.source.peerIdStr}`, + source: props.source, + peerId: `${props.peerIdStr}`, }, "addBlock blockRootHex does not match BlockInput.blockRootHex" ); } - for (const {columnSidecar} of this.columnsCache.values()) { - if (!blockAndColumnArePaired(props.block, columnSidecar)) { - this.columnsCache.delete(columnSidecar.index); - // this.logger?.error(`Removing columnIndex=${columnSidecar.index} from BlockInput`, {}, err); - } + if (!opts.throwOnDuplicateAdd) { + return; + } + + if (this.state.hasBlock) { + throw new BlockInputError( + { + code: BlockInputErrorCode.INVALID_CONSTRUCTION, + blockRoot: this.blockRootHex, + }, + "Cannot addBlock to BlockInputColumns after it already has a block" + ); } const hasAllData = props.block.message.body.blobKzgCommitments.length === 0 || this.state.hasAllData; + const hasComputedAllData = + props.block.message.body.blobKzgCommitments.length === 0 || this.state.hasComputedAllData; this.state = { ...this.state, hasBlock: true, hasAllData, + hasComputedAllData, block: props.block, - source: props.source, - timeCompleteSec: hasAllData ? props.source.seenTimestampSec : undefined, + source: { + source: props.source, + seenTimestampSec: props.seenTimestampSec, + peerIdStr: props.peerIdStr, + }, + timeCompleteSec: hasAllData ? 
props.seenTimestampSec : undefined, } as BlockInputColumnsState; this.blockPromise.resolve(props.block); } - addColumn({blockRootHex, columnSidecar, source, seenTimestampSec, peerIdStr}: AddColumn): void { + addColumn( + {blockRootHex, columnSidecar, source, seenTimestampSec, peerIdStr}: AddColumn, + opts = {throwOnDuplicateAdd: true} + ): void { if (blockRootHex !== this.blockRootHex) { throw new BlockInputError( { @@ -715,24 +764,52 @@ export class BlockInputColumns extends AbstractBlockInput= NUMBER_OF_COLUMNS / 2; + + const hasComputedAllData = + // already hasAllData + this.state.hasAllData || + // has all sampled columns + sampledColumns.length === this.sampledColumns.length; this.state = { ...this.state, hasAllData: hasAllData || this.state.hasAllData, + hasComputedAllData: hasComputedAllData, timeCompleteSec: hasAllData ? seenTimestampSec : undefined, } as BlockInputColumnsState; if (hasAllData && sampledColumns !== null) { this.dataPromise.resolve(sampledColumns); } + + if (hasComputedAllData && sampledColumns !== null) { + this.computedDataPromise.resolve(sampledColumns); + } } hasColumn(columnIndex: number): boolean { @@ -754,6 +831,17 @@ export class BlockInputColumns extends AbstractBlockInput columnSidecar); } - getMissingSampledColumnMeta(): ColumnMeta[] { + getMissingSampledColumnMeta(): MissingColumnMeta { if (this.state.hasAllData) { - return []; + return { + missing: [], + versionedHashes: this.state.versionedHashes, + }; } - const needed: ColumnMeta[] = []; - const blockRoot = fromHex(this.blockRootHex); + const missing: number[] = []; for (const index of this.sampledColumns) { if (!this.columnsCache.has(index)) { - needed.push({index, blockRoot}); + missing.push(index); } } - return needed; + return { + missing, + versionedHashes: this.state.versionedHashes, + }; } -} -function blockAndColumnArePaired( - block: SignedBeaconBlock, - columnSidecar: fulu.DataColumnSidecar -): boolean { - return ( - block.message.body.blobKzgCommitments.length === columnSidecar.kzgCommitments.length && - block.message.body.blobKzgCommitments.every((commitment, index) => - Buffer.compare(commitment, columnSidecar.kzgCommitments[index]) - ) - ); -} + hasComputedAllData(): boolean { + return this.state.hasComputedAllData; + } -function assertBlockAndColumnArePaired( - blockRootHex: string, - block: SignedBeaconBlock, - columnSidecar: fulu.DataColumnSidecar -): void { - if (!blockAndColumnArePaired(block, columnSidecar)) { - throw new BlockInputError( - { - code: BlockInputErrorCode.MISMATCHED_KZG_COMMITMENT, - blockRoot: blockRootHex, - slot: block.message.slot, - sidecarIndex: columnSidecar.index, - }, - "DataColumnsSidecar kzgCommitment does not match block kzgCommitment" - ); + waitForComputedAllData(timeout: number, signal?: AbortSignal): Promise { + if (!this.state.hasComputedAllData) { + return withTimeout(() => this.computedDataPromise.promise, timeout, signal); + } + return Promise.resolve(this.getSampledColumns()); } } diff --git a/packages/beacon-node/src/chain/blocks/blockInput/types.ts b/packages/beacon-node/src/chain/blocks/blockInput/types.ts index caed7e6caf43..252457113321 100644 --- a/packages/beacon-node/src/chain/blocks/blockInput/types.ts +++ b/packages/beacon-node/src/chain/blocks/blockInput/types.ts @@ -1,5 +1,6 @@ import {ForkName} from "@lodestar/params"; -import {RootHex, SignedBeaconBlock, Slot, deneb, fulu} from "@lodestar/types"; +import {ColumnIndex, RootHex, SignedBeaconBlock, Slot, deneb, fulu} from "@lodestar/types"; +import {VersionedHashes} from 
"../../../execution/index.js"; export enum DAType { PreData = "pre-data", @@ -19,6 +20,7 @@ export enum BlockInputSource { engine = "engine", byRange = "req_resp_by_range", byRoot = "req_resp_by_root", + recovery = "recovery", } export type PromiseParts = { @@ -49,6 +51,8 @@ export type SourceMeta = { peerIdStr?: string; }; +export type BlockWithSource = SourceMeta & {block: SignedBeaconBlock; blockRootHex: RootHex}; + export type BlobWithSource = SourceMeta & {blobSidecar: deneb.BlobSidecar}; export type ColumnWithSource = SourceMeta & {columnSidecar: fulu.DataColumnSidecar}; @@ -71,10 +75,9 @@ export type BlockInputInit = BlockHeaderMeta & { timeCreated: number; }; -export type AddBlock = { +export type AddBlock = SourceMeta & { block: SignedBeaconBlock; blockRootHex: string; - source: SourceMeta; }; export type AddBlob = BlobWithSource & { @@ -85,11 +88,15 @@ export type AddColumn = ColumnWithSource & { blockRootHex: RootHex; }; -export type BlobMeta = ColumnMeta & {versionHash: Uint8Array}; - -export type ColumnMeta = { - blockRoot: Uint8Array; +export type BlobMeta = { index: number; + blockRoot: Uint8Array; + versionedHash: Uint8Array; +}; + +export type MissingColumnMeta = { + missing: ColumnIndex[]; + versionedHashes: VersionedHashes; }; /** @@ -111,7 +118,7 @@ export interface IBlockInput): void; + addBlock(props: AddBlock, opts?: {throwOnDuplicateAdd: boolean}): void; /** Whether the block has been seen and validated. If true, `getBlock` is guaranteed to not throw */ hasBlock(): boolean; getBlock(): SignedBeaconBlock; diff --git a/packages/beacon-node/src/chain/blocks/blockInput/utils.ts b/packages/beacon-node/src/chain/blocks/blockInput/utils.ts index 26a9b0dc0957..9e85b41c05e9 100644 --- a/packages/beacon-node/src/chain/blocks/blockInput/utils.ts +++ b/packages/beacon-node/src/chain/blocks/blockInput/utils.ts @@ -1,9 +1,7 @@ import {ChainForkConfig} from "@lodestar/config"; -import {ForkName, isForkPostDeneb} from "@lodestar/params"; +import {ForkName, isForkPostDeneb, isForkPostFulu} from "@lodestar/params"; import {computeEpochAtSlot} from "@lodestar/state-transition"; import {Epoch, Slot} from "@lodestar/types"; -import {BlobsSource, BlockSource as BlockSourceOld} from "../types.js"; -import {BlockInputSource as BlockSource} from "./types.js"; export function isDaOutOfRange( config: ChainForkConfig, @@ -11,34 +9,13 @@ export function isDaOutOfRange( blockSlot: Slot, currentEpoch: Epoch ): boolean { - if (!isForkPostDeneb(forkName)) { - return true; + if (isForkPostFulu(forkName)) { + return computeEpochAtSlot(blockSlot) < currentEpoch - config.MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS; } - return computeEpochAtSlot(blockSlot) < currentEpoch - config.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS; -} -export function convertNewToOldBlockSource(source: BlockSource): BlockSourceOld { - switch (source) { - case BlockSource.api: - return BlockSourceOld.api; - case BlockSource.byRoot: - return BlockSourceOld.byRoot; - case BlockSource.byRange: - return BlockSourceOld.byRange; - default: - return BlockSourceOld.gossip; + if (isForkPostDeneb(forkName)) { + return computeEpochAtSlot(blockSlot) < currentEpoch - config.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS; } -} -export function convertNewToOldBlobSource(source: BlockSource): BlobsSource { - switch (source) { - case BlockSource.api: - return BlobsSource.api; - case BlockSource.byRoot: - return BlobsSource.byRoot; - case BlockSource.byRange: - return BlobsSource.byRange; - default: - return BlobsSource.gossip; - } + return true; } diff --git 
a/packages/beacon-node/src/chain/blocks/importBlock.ts b/packages/beacon-node/src/chain/blocks/importBlock.ts index ca4d0a63b03b..0edd165ad427 100644 --- a/packages/beacon-node/src/chain/blocks/importBlock.ts +++ b/packages/beacon-node/src/chain/blocks/importBlock.ts @@ -8,7 +8,6 @@ import { NotReorgedReason, } from "@lodestar/fork-choice"; import { - ForkName, ForkPostAltair, ForkPostElectra, ForkSeq, @@ -37,7 +36,8 @@ import {ChainEvent, ReorgEventData} from "../emitter.js"; import {ForkchoiceCaller} from "../forkChoice/index.js"; import {REPROCESS_MIN_TIME_TO_NEXT_SLOT_SEC} from "../reprocess.js"; import {toCheckpointHex} from "../stateCache/index.js"; -import {AttestationImportOpt, BlockInputType, FullyVerifiedBlock, ImportBlockOpts} from "./types.js"; +import {isBlockInputBlobs, isBlockInputColumns} from "./blockInput/blockInput.js"; +import {AttestationImportOpt, FullyVerifiedBlock, ImportBlockOpts} from "./types.js"; import {getCheckpointFromState} from "./utils/checkpoint.js"; import {writeBlockInputToDb} from "./writeBlockInputToDb.js"; @@ -76,7 +76,8 @@ export async function importBlock( opts: ImportBlockOpts ): Promise { const {blockInput, postState, parentBlockSlot, executionStatus, dataAvailabilityStatus} = fullyVerifiedBlock; - const {block, source} = blockInput; + const block = blockInput.getBlock(); + const source = blockInput.getBlockSource(); const {slot: blockSlot} = block.message; const blockRoot = this.config.getForkTypes(blockSlot).BeaconBlock.hashTreeRoot(block.message); const blockRootHex = toRootHex(blockRoot); @@ -88,14 +89,15 @@ export async function importBlock( const fork = this.config.getForkSeq(blockSlot); // this is just a type assertion since blockinput with dataPromise type will not end up here - if (blockInput.type === BlockInputType.dataPromise) { + if (!blockInput.hasAllData) { throw Error("Unavailable block can not be imported in forkchoice"); } // 1. Persist block to hot DB (pre-emptively) // If eagerPersistBlock = true we do that in verifyBlocksInEpoch to batch all I/O operations to save block time to head + let writeBlockInputPromise: Promise | undefined; if (!opts.eagerPersistBlock) { - await writeBlockInputToDb.call(this, [blockInput]); + writeBlockInputPromise = writeBlockInputToDb.call(this, [blockInput]); } // 2. Import block to fork choice @@ -115,7 +117,7 @@ export async function importBlock( // Some block event handlers require state being in state cache so need to do this before emitting EventType.block this.regen.processState(blockRootHex, postState); - this.metrics?.importBlock.bySource.inc({source}); + this.metrics?.importBlock.bySource.inc({source: source.source}); this.logger.verbose("Added block to forkchoice and state cache", {slot: blockSlot, root: blockRootHex}); // 3. Import attestations to fork choice @@ -510,15 +512,18 @@ export async function importBlock( fullyVerifiedBlock.postState.epochCtx.currentSyncCommitteeIndexed.validatorIndices ); } - // dataPromise will not end up here, but preDeneb could. 
In future we might also allow syncing - // out of data range blocks and import then in forkchoice although one would not be able to - // attest and propose with such head similar to optimistic sync - if ( - blockInput.type === BlockInputType.availableData && - (blockInput.blockData.fork === ForkName.deneb || blockInput.blockData.fork === ForkName.electra) - ) { - const {blobsSource} = blockInput.blockData; - this.metrics?.importBlock.blobsBySource.inc({blobsSource}); + + // Await writeBlockInputToDb if it was started above + await writeBlockInputPromise; + + if (isBlockInputColumns(blockInput)) { + for (const {source} of blockInput.getSampledColumnsWithSource()) { + this.metrics?.importBlock.columnsBySource.inc({source}); + } + } else if (isBlockInputBlobs(blockInput)) { + for (const {source} of blockInput.getAllBlobsWithSource()) { + this.metrics?.importBlock.blobsBySource.inc({blobsSource: source}); + } } const advancedSlot = this.clock.slotWithFutureTolerance(REPROCESS_MIN_TIME_TO_NEXT_SLOT_SEC); diff --git a/packages/beacon-node/src/chain/blocks/index.ts b/packages/beacon-node/src/chain/blocks/index.ts index 0ed2cfbecb88..0a08dfaa8874 100644 --- a/packages/beacon-node/src/chain/blocks/index.ts +++ b/packages/beacon-node/src/chain/blocks/index.ts @@ -5,8 +5,9 @@ import {JobItemQueue, isQueueErrorAborted} from "../../util/queue/index.js"; import type {BeaconChain} from "../chain.js"; import {BlockError, BlockErrorCode, isBlockErrorAborted} from "../errors/index.js"; import {BlockProcessOpts} from "../options.js"; +import {IBlockInput} from "./blockInput/types.js"; import {importBlock} from "./importBlock.js"; -import {BlockInput, FullyVerifiedBlock, ImportBlockOpts} from "./types.js"; +import {FullyVerifiedBlock, ImportBlockOpts} from "./types.js"; import {assertLinearChainSegment} from "./utils/chainSegment.js"; import {verifyBlocksInEpoch} from "./verifyBlock.js"; import {verifyBlocksSanityChecks} from "./verifyBlocksSanityChecks.js"; @@ -19,10 +20,10 @@ const QUEUE_MAX_LENGTH = 256; * BlockProcessor processes block jobs in a queued fashion, one after the other. */ export class BlockProcessor { - readonly jobQueue: JobItemQueue<[BlockInput[], ImportBlockOpts], void>; + readonly jobQueue: JobItemQueue<[IBlockInput[], ImportBlockOpts], void>; constructor(chain: BeaconChain, metrics: Metrics | null, opts: BlockProcessOpts, signal: AbortSignal) { - this.jobQueue = new JobItemQueue<[BlockInput[], ImportBlockOpts], void>( + this.jobQueue = new JobItemQueue<[IBlockInput[], ImportBlockOpts], void>( (job, importOpts) => { return processBlocks.call(chain, job, {...opts, ...importOpts}); }, @@ -31,7 +32,7 @@ export class BlockProcessor { ); } - async processBlocksJob(job: BlockInput[], opts: ImportBlockOpts = {}): Promise { + async processBlocksJob(job: IBlockInput[], opts: ImportBlockOpts = {}): Promise { await this.jobQueue.push(job, opts); } } @@ -48,7 +49,7 @@ export class BlockProcessor { */ export async function processBlocks( this: BeaconChain, - blocks: BlockInput[], + blocks: IBlockInput[], opts: BlockProcessOpts & ImportBlockOpts ): Promise { if (blocks.length === 0) { @@ -70,7 +71,7 @@ export async function processBlocks( // Fully verify a block to be imported immediately after. Does not produce any side-effects besides adding intermediate // states in the state cache through regen. 
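    // Note: data availability is awaited inside verifyBlocksInEpoch now, roughly
    // (sketch, per verifyBlocksDataAvailability in this diff):
    //   await Promise.all(blocks.map((b) => b.waitForAllData(BLOB_AVAILABILITY_TIMEOUT, signal)));
    // so a separate availableBlockInputs array is no longer returned.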
- const {postStates, dataAvailabilityStatuses, proposerBalanceDeltas, segmentExecStatus, availableBlockInputs} = + const {postStates, dataAvailabilityStatuses, proposerBalanceDeltas, segmentExecStatus} = await verifyBlocksInEpoch.call(this, parentBlock, relevantBlocks, opts); // If segmentExecStatus has lvhForkchoice then, the entire segment should be invalid @@ -83,7 +84,7 @@ export async function processBlocks( } const {executionStatuses} = segmentExecStatus; - const fullyVerifiedBlocks = availableBlockInputs.map( + const fullyVerifiedBlocks = relevantBlocks.map( (block, i): FullyVerifiedBlock => ({ blockInput: block, postState: postStates[i], @@ -108,7 +109,7 @@ export async function processBlocks( } // above functions should only throw BlockError - const err = getBlockError(e, blocks[0].block); + const err = getBlockError(e, blocks[0].getBlock()); // TODO: De-duplicate with logic above // ChainEvent.errorBlock @@ -152,7 +153,7 @@ export async function processBlocks( await removeEagerlyPersistedBlockInputs.call(this, blocks).catch((e) => { this.logger.warn( "Error pruning eagerly imported block inputs, DB may grow in size if this error happens frequently", - {slot: blocks.map((block) => block.block.message.slot).join(",")}, + {slot: blocks.map((block) => block.getBlock().message.slot).join(",")}, e ); }); diff --git a/packages/beacon-node/src/chain/blocks/types.ts b/packages/beacon-node/src/chain/blocks/types.ts index 87131429dbda..ff21450f9b40 100644 --- a/packages/beacon-node/src/chain/blocks/types.ts +++ b/packages/beacon-node/src/chain/blocks/types.ts @@ -1,25 +1,9 @@ import type {ChainForkConfig} from "@lodestar/config"; import {MaybeValidExecutionStatus} from "@lodestar/fork-choice"; -import {type ForkPostDeneb, ForkPostFulu, ForkPreFulu, ForkSeq} from "@lodestar/params"; +import {ForkSeq} from "@lodestar/params"; import {CachedBeaconStateAllForks, DataAvailabilityStatus, computeEpochAtSlot} from "@lodestar/state-transition"; -import type {ColumnIndex, RootHex, SignedBeaconBlock, Slot, deneb, fulu} from "@lodestar/types"; - -export enum BlockInputType { - // preData is preDeneb - preData = "preData", - // data is out of available window, can be used to sync forward and keep adding to forkchoice - outOfRangeData = "outOfRangeData", - availableData = "availableData", - dataPromise = "dataPromise", -} - -/** Enum to represent where blocks come from */ -export enum BlockSource { - gossip = "gossip", - api = "api", - byRange = "req_resp_by_range", - byRoot = "req_resp_by_root", -} +import type {Slot, fulu} from "@lodestar/types"; +import {IBlockInput} from "./blockInput/types.js"; export enum GossipedInputType { block = "block", @@ -27,103 +11,11 @@ export enum GossipedInputType { dataColumn = "data_column", } -interface CachedDataItem { - cacheId: number; -} -type Availability = { - availabilityPromise: Promise; - resolveAvailability: (data: T) => void; -}; - -/** - * - * Deneb Blob Format Types - * - */ -/** Enum to represent where blobs come from */ -export enum BlobsSource { - gossip = "gossip", - api = "api", - byRange = "req_resp_by_range", - byRoot = "req_resp_by_root", -} -type ForkBlobsInfo = { - fork: ForkPostDeneb & ForkPreFulu; -}; -export type BlockInputBlobs = ForkBlobsInfo & { - blobs: deneb.BlobSidecars; - blobsSource: BlobsSource; -}; -export type BlobsCacheMap = Map; -export type CachedBlobs = CachedDataItem & - ForkBlobsInfo & - Availability & { - blobsCache: BlobsCacheMap; - }; - -/** - * - * PeerDAS Column Format Types - * - */ - -export enum DataColumnsSource { - 
gossip = "gossip", - api = "api", - engine = "engine", - byRange = "req_resp_by_range", - byRoot = "req_resp_by_root", -} -type ForkDataColumnsInfo = { - fork: ForkPostFulu; -}; type DataColumnData = { dataColumn: fulu.DataColumnSidecar; dataColumnBytes: Uint8Array | null; }; export type DataColumnsCacheMap = Map; -export type BlockInputDataColumns = ForkDataColumnsInfo & { - // marker of that columns are to be custodied - dataColumns: fulu.DataColumnSidecars; - dataColumnsBytes: (Uint8Array | null)[]; - dataColumnsSource: DataColumnsSource; -}; -export type CachedDataColumns = CachedDataItem & - ForkDataColumnsInfo & - Availability & { - dataColumnsCache: DataColumnsCacheMap; - calledRecover: boolean; - }; - -/** - * - * Cross-Fork Data Types - * - */ - -export type BlockInputAvailableData = BlockInputBlobs | BlockInputDataColumns; -export type CachedData = CachedBlobs | CachedDataColumns; - -export type BlockInput = { - block: SignedBeaconBlock; - source: BlockSource; -} & ( - | {type: BlockInputType.preData | BlockInputType.outOfRangeData} - | ({type: BlockInputType.availableData} & { - blockData: BlockInputAvailableData; - }) - // the blobsSource here is added to BlockInputBlobs when availability is resolved - | ({type: BlockInputType.dataPromise} & { - cachedData: CachedData; - }) -); -export type NullBlockInput = { - block: null; - blockRootHex: RootHex; - blockInputPromise: Promise; -} & { - cachedData: CachedData; -}; export function blockRequiresBlobs(config: ChainForkConfig, blockSlot: Slot, clockSlot: Slot): boolean { return ( @@ -133,103 +25,6 @@ export function blockRequiresBlobs(config: ChainForkConfig, blockSlot: Slot, clo ); } -export const getBlockInput = { - preData(config: ChainForkConfig, block: SignedBeaconBlock, source: BlockSource): BlockInput { - if (config.getForkSeq(block.message.slot) >= ForkSeq.deneb) { - throw Error(`Post Deneb block slot ${block.message.slot}`); - } - return { - type: BlockInputType.preData, - block, - source, - }; - }, - - // This isn't used right now but we might enable importing blobs into forkchoice from a point - // where data is not guaranteed to be available to hopefully reach a point where we have - // available data. Hence the validator duties can't be performed on outOfRangeData - // - // This can help with some of the requests of syncing without data for some use cases for e.g. 
- // building states or where importing data isn't important if valid child exists like ILs - outOfRangeData(config: ChainForkConfig, block: SignedBeaconBlock, source: BlockSource): BlockInput { - if (config.getForkSeq(block.message.slot) < ForkSeq.deneb) { - throw Error(`Pre Deneb block slot ${block.message.slot}`); - } - return { - type: BlockInputType.outOfRangeData, - block, - source, - }; - }, - - availableData( - config: ChainForkConfig, - block: SignedBeaconBlock, - source: BlockSource, - blockData: BlockInputAvailableData - ): BlockInput { - if (config.getForkSeq(block.message.slot) < ForkSeq.deneb) { - throw Error(`Pre Deneb block slot ${block.message.slot}`); - } - return { - type: BlockInputType.availableData, - block, - source, - blockData, - }; - }, - - dataPromise( - config: ChainForkConfig, - block: SignedBeaconBlock, - source: BlockSource, - cachedData: CachedData - ): BlockInput { - if (config.getForkSeq(block.message.slot) < ForkSeq.deneb) { - throw Error(`Pre Deneb block slot ${block.message.slot}`); - } - return { - type: BlockInputType.dataPromise, - block, - source, - cachedData, - }; - }, -}; - -export function getBlockInputBlobs(blobsCache: BlobsCacheMap): Omit { - const blobs = []; - - for (let index = 0; index < blobsCache.size; index++) { - const blobSidecar = blobsCache.get(index); - if (blobSidecar === undefined) { - throw Error(`Missing blobSidecar at index=${index}`); - } - blobs.push(blobSidecar); - } - return {blobs}; -} - -export function getBlockInputDataColumns( - dataColumnsCache: DataColumnsCacheMap, - columnIndexes: ColumnIndex[] -): Omit { - const dataColumns = []; - const dataColumnsBytes = []; - - for (const index of columnIndexes) { - const dataColumnCache = dataColumnsCache.get(index); - if (dataColumnCache === undefined) { - // check if the index is correct as per the custody columns - throw Error(`Missing dataColumnCache at index=${index}`); - } - const {dataColumn: dataColumnSidecar, dataColumnBytes} = dataColumnCache; - dataColumns.push(dataColumnSidecar); - dataColumnsBytes.push(dataColumnBytes); - } - return {dataColumns, dataColumnsBytes}; -} - export enum AttestationImportOpt { Skip, Force, @@ -293,7 +88,7 @@ export type ImportBlockOpts = { * A wrapper around a `SignedBeaconBlock` that indicates that this block is fully verified and ready to import */ export type FullyVerifiedBlock = { - blockInput: BlockInput; + blockInput: IBlockInput; postState: CachedBeaconStateAllForks; parentBlockSlot: Slot; proposerBalanceDelta: number; diff --git a/packages/beacon-node/src/chain/blocks/utils/chainSegment.ts b/packages/beacon-node/src/chain/blocks/utils/chainSegment.ts index eb6a5f622dcd..5c9b4d8b9d56 100644 --- a/packages/beacon-node/src/chain/blocks/utils/chainSegment.ts +++ b/packages/beacon-node/src/chain/blocks/utils/chainSegment.ts @@ -1,15 +1,16 @@ import {ChainForkConfig} from "@lodestar/config"; import {ssz} from "@lodestar/types"; import {BlockError, BlockErrorCode} from "../../errors/index.js"; -import {BlockInput} from "../types.js"; +import {IBlockInput} from "../blockInput/types.js"; /** * Assert this chain segment of blocks is linear with slot numbers and hashes */ -export function assertLinearChainSegment(config: ChainForkConfig, blocks: BlockInput[]): void { + +export function assertLinearChainSegment(config: ChainForkConfig, blocks: IBlockInput[]): void { for (let i = 0; i < blocks.length - 1; i++) { - const block = blocks[i].block; - const child = blocks[i + 1].block; + const block = blocks[i].getBlock(); + const child = blocks[i 
+ 1].getBlock(); // If this block has a child in this chain segment, ensure that its parent root matches // the root of this block. if ( diff --git a/packages/beacon-node/src/chain/blocks/verifyBlock.ts b/packages/beacon-node/src/chain/blocks/verifyBlock.ts index a7d0e80e5ef2..cb3f8fb8df43 100644 --- a/packages/beacon-node/src/chain/blocks/verifyBlock.ts +++ b/packages/beacon-node/src/chain/blocks/verifyBlock.ts @@ -13,7 +13,8 @@ import type {BeaconChain} from "../chain.js"; import {BlockError, BlockErrorCode} from "../errors/index.js"; import {BlockProcessOpts} from "../options.js"; import {RegenCaller} from "../regen/index.js"; -import {BlockInput, BlockInputType, ImportBlockOpts} from "./types.js"; +import {DAType, IBlockInput} from "./blockInput/index.js"; +import {ImportBlockOpts} from "./types.js"; import {DENEB_BLOWFISH_BANNER} from "./utils/blowfishBanner.js"; import {ELECTRA_GIRAFFE_BANNER} from "./utils/giraffeBanner.js"; import {CAPELLA_OWL_BANNER} from "./utils/ownBanner.js"; @@ -39,16 +40,15 @@ import {writeBlockInputToDb} from "./writeBlockInputToDb.js"; export async function verifyBlocksInEpoch( this: BeaconChain, parentBlock: ProtoBlock, - blocksInput: BlockInput[], + blocksInput: IBlockInput[], opts: BlockProcessOpts & ImportBlockOpts ): Promise<{ postStates: CachedBeaconStateAllForks[]; proposerBalanceDeltas: number[]; segmentExecStatus: SegmentExecStatus; dataAvailabilityStatuses: DataAvailabilityStatus[]; - availableBlockInputs: BlockInput[]; }> { - const blocks = blocksInput.map(({block}) => block); + const blocks = blocksInput.map((blockInput) => blockInput.getBlock()); const lastBlock = blocks.at(-1); if (!lastBlock) { throw Error("Empty partiallyVerifiedBlocks"); @@ -95,7 +95,7 @@ export async function verifyBlocksInEpoch( // batch all I/O operations to reduce overhead const [ segmentExecStatus, - {dataAvailabilityStatuses, availableTime, availableBlockInputs}, + {dataAvailabilityStatuses, availableTime}, {postStates, proposerBalanceDeltas, verifyStateTime}, {verifySignaturesTime}, ] = await Promise.all([ @@ -109,7 +109,7 @@ export async function verifyBlocksInEpoch( } as SegmentExecStatus), // data availability for the blobs - verifyBlocksDataAvailability(this, blocksInput, abortController.signal, opts), + verifyBlocksDataAvailability(blocksInput, abortController.signal), // Run state transition only // TODO: Ensure it yields to allow flushing to workers and engine API @@ -193,7 +193,7 @@ export async function verifyBlocksInEpoch( blocksInput.length === 1 && // gossip blocks have seenTimestampSec opts.seenTimestampSec !== undefined && - blocksInput[0].type !== BlockInputType.preData && + blocksInput[0].type !== DAType.PreData && executionStatuses[0] === ExecutionStatus.Valid ) { // Find the max time when the block was actually verified @@ -202,11 +202,12 @@ export async function verifyBlocksInEpoch( this.metrics?.gossipBlock.receivedToFullyVerifiedTime.observe(recvTofullyVerifedTime); const verifiedToBlobsAvailabiltyTime = Math.max(availableTime - fullyVerifiedTime, 0) / 1000; - const numBlobs = (blocksInput[0].block as deneb.SignedBeaconBlock).message.body.blobKzgCommitments.length; + const block = blocksInput[0].getBlock() as deneb.SignedBeaconBlock; + const numBlobs = block.message.body.blobKzgCommitments.length; this.metrics?.gossipBlock.verifiedToBlobsAvailabiltyTime.observe({numBlobs}, verifiedToBlobsAvailabiltyTime); this.logger.verbose("Verified blockInput fully with blobs availability", { - slot: blocksInput[0].block.message.slot, + slot: 
block.message.slot, recvTofullyVerifedTime, verifiedToBlobsAvailabiltyTime, type: blocksInput[0].type, @@ -221,7 +222,7 @@ export async function verifyBlocksInEpoch( ); } - return {postStates, dataAvailabilityStatuses, proposerBalanceDeltas, segmentExecStatus, availableBlockInputs}; + return {postStates, dataAvailabilityStatuses, proposerBalanceDeltas, segmentExecStatus}; } finally { abortController.abort(); } diff --git a/packages/beacon-node/src/chain/blocks/verifyBlocksDataAvailability.ts b/packages/beacon-node/src/chain/blocks/verifyBlocksDataAvailability.ts index da754d1f1245..31f89e6f8362 100644 --- a/packages/beacon-node/src/chain/blocks/verifyBlocksDataAvailability.ts +++ b/packages/beacon-node/src/chain/blocks/verifyBlocksDataAvailability.ts @@ -1,177 +1,34 @@ -import {ChainForkConfig} from "@lodestar/config"; -import {isForkPostDeneb, isForkPostFulu} from "@lodestar/params"; -import {DataAvailabilityStatus, computeTimeAtSlot} from "@lodestar/state-transition"; -import {UintNum64, deneb} from "@lodestar/types"; -import {ErrorAborted, Logger} from "@lodestar/utils"; -import {Metrics} from "../../metrics/metrics.js"; -import {BlockError, BlockErrorCode} from "../errors/index.js"; -import {validateBlobSidecars} from "../validation/blobSidecar.js"; -import {validateDataColumnsSidecars} from "../validation/dataColumnSidecar.js"; -import { - BlobSidecarValidation, - BlockInput, - BlockInputAvailableData, - BlockInputBlobs, - BlockInputDataColumns, - BlockInputType, - ImportBlockOpts, - getBlockInput, -} from "./types.js"; +import {DataAvailabilityStatus} from "@lodestar/state-transition"; +import {DAType, IBlockInput} from "./blockInput/index.js"; // we can now wait for full 12 seconds because unavailable block sync will try pulling // the blobs from the network anyway after 500ms of seeing the block -const BLOB_AVAILABILITY_TIMEOUT = 12_000; +export const BLOB_AVAILABILITY_TIMEOUT = 12_000; /** - * Verifies some early cheap sanity checks on the block before running the full state transition. - * - * - Parent is known to the fork-choice - * - Check skipped slots limit - * - check_block_relevancy() - * - Block not in the future - * - Not genesis block - * - Block's slot is < Infinity - * - Not finalized slot - * - Not already known + * Verifies that all block inputs have data available. + * - Waits a max of BLOB_AVAILABILITY_TIMEOUT for all data to be available + * - Returns the time at which all data was available + * - Returns the data availability status for each block input */ export async function verifyBlocksDataAvailability( - chain: {config: ChainForkConfig; genesisTime: UintNum64; logger: Logger; metrics: Metrics | null}, - blocks: BlockInput[], - signal: AbortSignal, - opts: ImportBlockOpts + blocks: IBlockInput[], + signal: AbortSignal ): Promise<{ dataAvailabilityStatuses: DataAvailabilityStatus[]; availableTime: number; - availableBlockInputs: BlockInput[]; }> { - const lastBlock = blocks.at(-1); - if (!lastBlock) { - throw Error("Empty partiallyVerifiedBlocks"); - } - - const dataAvailabilityStatuses: DataAvailabilityStatus[] = []; - const seenTime = opts.seenTimestampSec !== undefined ? 
opts.seenTimestampSec * 1000 : Date.now(); - - const availableBlockInputs: BlockInput[] = []; - - for (const blockInput of blocks) { - if (signal.aborted) { - throw new ErrorAborted("verifyBlocksDataAvailability"); + await Promise.all(blocks.map((blockInput) => blockInput.waitForAllData(BLOB_AVAILABILITY_TIMEOUT, signal))); + const availableTime = Math.max(0, Math.max(...blocks.map((blockInput) => blockInput.getTimeComplete()))); + const dataAvailabilityStatuses: DataAvailabilityStatus[] = blocks.map((blockInput) => { + if (blockInput.type === DAType.PreData) { + return DataAvailabilityStatus.PreData; } - // Validate status of only not yet finalized blocks, we don't need yet to propogate the status - // as it is not used upstream anywhere - const {dataAvailabilityStatus, availableBlockInput} = await maybeValidateBlobs(chain, blockInput, signal, opts); - dataAvailabilityStatuses.push(dataAvailabilityStatus); - availableBlockInputs.push(availableBlockInput); - } - - const availableTime = lastBlock.type === BlockInputType.dataPromise ? Date.now() : seenTime; - if (blocks.length === 1 && opts.seenTimestampSec !== undefined && blocks[0].type !== BlockInputType.preData) { - const recvToAvailableTime = availableTime / 1000 - opts.seenTimestampSec; - const numBlobs = (blocks[0].block as deneb.SignedBeaconBlock).message.body.blobKzgCommitments.length; - - chain.metrics?.gossipBlock.receivedToBlobsAvailabilityTime.observe({numBlobs}, recvToAvailableTime); - chain.logger.verbose("Verified blobs availability", { - slot: blocks[0].block.message.slot, - recvToAvailableTime, - type: blocks[0].type, - }); - } - - return {dataAvailabilityStatuses, availableTime, availableBlockInputs}; -} - -async function maybeValidateBlobs( - chain: {config: ChainForkConfig; genesisTime: UintNum64; metrics: Metrics | null; logger: Logger}, - blockInput: BlockInput, - signal: AbortSignal, - opts: ImportBlockOpts -): Promise<{dataAvailabilityStatus: DataAvailabilityStatus; availableBlockInput: BlockInput}> { - switch (blockInput.type) { - case BlockInputType.preData: - return {dataAvailabilityStatus: DataAvailabilityStatus.PreData, availableBlockInput: blockInput}; - - case BlockInputType.outOfRangeData: - return {dataAvailabilityStatus: DataAvailabilityStatus.OutOfRange, availableBlockInput: blockInput}; - - // biome-ignore lint/suspicious/noFallthroughSwitchClause: We need fall-through behavior here - case BlockInputType.availableData: - if (opts.validBlobSidecars === BlobSidecarValidation.Full) { - return {dataAvailabilityStatus: DataAvailabilityStatus.Available, availableBlockInput: blockInput}; - } - - case BlockInputType.dataPromise: { - // run full validation - const {block} = blockInput; - const blockSlot = block.message.slot; - const {blobKzgCommitments} = (block as deneb.SignedBeaconBlock).message.body; - const beaconBlockRoot = chain.config.getForkTypes(blockSlot).BeaconBlock.hashTreeRoot(block.message); - const blockData = - blockInput.type === BlockInputType.availableData - ? 
blockInput.blockData - : await raceWithCutoff( - chain, - blockInput, - blockInput.cachedData.availabilityPromise as Promise, - signal - ); - - if (isForkPostFulu(blockData.fork)) { - const {dataColumns} = blockData as BlockInputDataColumns; - const skipProofsCheck = opts.validBlobSidecars === BlobSidecarValidation.Individual; - await validateDataColumnsSidecars(blockSlot, beaconBlockRoot, blobKzgCommitments, dataColumns, chain.metrics, { - skipProofsCheck, - }); - } else if (isForkPostDeneb(blockData.fork)) { - const {blobs} = blockData as BlockInputBlobs; - - // if the blob sidecars have been individually verified then we can skip kzg proof check - // but other checks to match blobs with block data still need to be performed - const skipProofsCheck = opts.validBlobSidecars === BlobSidecarValidation.Individual; - await validateBlobSidecars(blockSlot, beaconBlockRoot, blobKzgCommitments, blobs, {skipProofsCheck}); - } - - const availableBlockInput = getBlockInput.availableData( - chain.config, - blockInput.block, - blockInput.source, - blockData - ); - return {dataAvailabilityStatus: DataAvailabilityStatus.Available, availableBlockInput: availableBlockInput}; + if (blockInput.daOutOfRange) { + return DataAvailabilityStatus.OutOfRange; } - } -} - -/** - * Wait for blobs to become available with a cutoff time. If fails then throw DATA_UNAVAILABLE error - * which may try unknownblock/blobs fill (by root). - */ -async function raceWithCutoff( - chain: {config: ChainForkConfig; genesisTime: UintNum64; logger: Logger}, - blockInput: BlockInput, - availabilityPromise: Promise, - signal: AbortSignal -): Promise { - const {block} = blockInput; - const blockSlot = block.message.slot; - - const cutoffTime = - computeTimeAtSlot(chain.config, blockSlot, chain.genesisTime) * 1000 + BLOB_AVAILABILITY_TIMEOUT - Date.now(); - const cutoffTimeout = - cutoffTime > 0 - ? new Promise((_resolve, reject) => { - setTimeout(() => reject(new Error("Timeout exceeded")), cutoffTime); - signal.addEventListener("abort", () => reject(signal.reason)); - }) - : Promise.reject(new Error("Cutoff time must be greater than 0")); - chain.logger.debug("Racing for blob availabilityPromise", {blockSlot, cutoffTime}); + return DataAvailabilityStatus.Available; + }); - try { - await Promise.race([availabilityPromise, cutoffTimeout]); - } catch (_e) { - // throw unavailable so that the unknownblock/blobs can be triggered to pull the block - throw new BlockError(block, {code: BlockErrorCode.DATA_UNAVAILABLE}); - } - // we can only be here if availabilityPromise has resolved else an error will be thrown - return availabilityPromise; + return {dataAvailabilityStatuses, availableTime}; } diff --git a/packages/beacon-node/src/chain/blocks/verifyBlocksSanityChecks.ts b/packages/beacon-node/src/chain/blocks/verifyBlocksSanityChecks.ts index 7d52b506bfda..63e5c7b471c5 100644 --- a/packages/beacon-node/src/chain/blocks/verifyBlocksSanityChecks.ts +++ b/packages/beacon-node/src/chain/blocks/verifyBlocksSanityChecks.ts @@ -6,7 +6,8 @@ import {toRootHex} from "@lodestar/utils"; import {IClock} from "../../util/clock.js"; import {BlockError, BlockErrorCode} from "../errors/index.js"; import {IChainOptions} from "../options.js"; -import {BlockInput, ImportBlockOpts} from "./types.js"; +import {IBlockInput} from "./blockInput/types.js"; +import {ImportBlockOpts} from "./types.js"; /** * Verifies some early cheap sanity checks on the block before running the full state transition. 
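 * (Blocks arrive as IBlockInput wrappers now, so the checks below read the
 * SignedBeaconBlock via getBlock() rather than a `.block` property.)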
@@ -28,10 +29,10 @@ export function verifyBlocksSanityChecks( opts: IChainOptions; blacklistedBlocks: Map; }, - blocks: BlockInput[], + blocks: IBlockInput[], opts: ImportBlockOpts ): { - relevantBlocks: BlockInput[]; + relevantBlocks: IBlockInput[]; parentSlots: Slot[]; parentBlock: ProtoBlock | null; } { @@ -39,12 +40,12 @@ export function verifyBlocksSanityChecks( throw Error("Empty partiallyVerifiedBlocks"); } - const relevantBlocks: BlockInput[] = []; + const relevantBlocks: IBlockInput[] = []; const parentSlots: Slot[] = []; let parentBlock: ProtoBlock | null = null; for (const blockInput of blocks) { - const {block} = blockInput; + const block = blockInput.getBlock(); const blockSlot = block.message.slot; const blockHash = toRootHex(chain.config.getForkTypes(block.message.slot).BeaconBlock.hashTreeRoot(block.message)); @@ -85,7 +86,7 @@ export function verifyBlocksSanityChecks( let parentBlockSlot: Slot; if (relevantLastBlock) { - parentBlockSlot = relevantLastBlock.block.message.slot; + parentBlockSlot = relevantLastBlock.getBlock().message.slot; } else { // When importing a block segment, only the first NON-IGNORED block must be known to the fork-choice. const parentRoot = toRootHex(block.message.parentRoot); diff --git a/packages/beacon-node/src/chain/blocks/verifyBlocksStateTransitionOnly.ts b/packages/beacon-node/src/chain/blocks/verifyBlocksStateTransitionOnly.ts index 6e526f4bcde7..b877dfa0910b 100644 --- a/packages/beacon-node/src/chain/blocks/verifyBlocksStateTransitionOnly.ts +++ b/packages/beacon-node/src/chain/blocks/verifyBlocksStateTransitionOnly.ts @@ -12,7 +12,8 @@ import {nextEventLoop} from "../../util/eventLoop.js"; import {BlockError, BlockErrorCode} from "../errors/index.js"; import {BlockProcessOpts} from "../options.js"; import {ValidatorMonitor} from "../validatorMonitor.js"; -import {BlockInput, ImportBlockOpts} from "./types.js"; +import {IBlockInput} from "./blockInput/index.js"; +import {ImportBlockOpts} from "./types.js"; /** * Verifies 1 or more blocks are fully valid running the full state transition; from a linear sequence of blocks. @@ -24,7 +25,7 @@ import {BlockInput, ImportBlockOpts} from "./types.js"; */ export async function verifyBlocksStateTransitionOnly( preState0: CachedBeaconStateAllForks, - blocks: BlockInput[], + blocks: IBlockInput[], dataAvailabilityStatuses: DataAvailabilityStatus[], logger: Logger, metrics: Metrics | null, @@ -38,7 +39,7 @@ export async function verifyBlocksStateTransitionOnly( for (let i = 0; i < blocks.length; i++) { const {validProposerSignature, validSignatures} = opts; - const {block} = blocks[i]; + const block = blocks[i].getBlock(); const preState = i === 0 ? 
preState0 : postStates[i - 1];
     const dataAvailabilityStatus = dataAvailabilityStatuses[i];
 
@@ -99,7 +100,7 @@ export async function verifyBlocksStateTransitionOnly(
   const verifyStateTime = Date.now();
   if (blocks.length === 1 && opts.seenTimestampSec !== undefined) {
-    const slot = blocks[0].block.message.slot;
+    const slot = blocks[0].getBlock().message.slot;
     const recvToValidation = verifyStateTime / 1000 - opts.seenTimestampSec;
     const validationTime = recvToValidation - recvToValLatency;
 
diff --git a/packages/beacon-node/src/chain/blocks/writeBlockInputToDb.ts b/packages/beacon-node/src/chain/blocks/writeBlockInputToDb.ts
index 62e089e458d8..8b2040b8e577 100644
--- a/packages/beacon-node/src/chain/blocks/writeBlockInputToDb.ts
+++ b/packages/beacon-node/src/chain/blocks/writeBlockInputToDb.ts
@@ -1,8 +1,10 @@
-import {ForkName, isForkPostDeneb, isForkPostFulu} from "@lodestar/params";
+import {INTERVALS_PER_SLOT} from "@lodestar/params";
 import {fulu} from "@lodestar/types";
-import {prettyPrintIndices, toHex, toRootHex} from "@lodestar/utils";
+import {prettyPrintIndices, toRootHex} from "@lodestar/utils";
+import {getCutoffTimeMs} from "../../util/clock.js";
 import {BeaconChain} from "../chain.js";
-import {BlockInput, BlockInputBlobs, BlockInputDataColumns, BlockInputType} from "./types.js";
+import {IBlockInput, isBlockInputBlobs, isBlockInputColumns} from "./blockInput/index.js";
+import {BLOB_AVAILABILITY_TIMEOUT} from "./verifyBlocksDataAvailability.js";
 
 /**
  * Persists block input data to DB. This operation must be eventually completed if a block is imported to the fork-choice.
@@ -11,11 +13,15 @@ import {BlockInput, BlockInputBlobs, BlockInputDataColumns, BlockInputType} from
  * This operation may be performed before, during or after importing to the fork-choice. As long as errors
  * are handled properly for eventual consistency.
  */
-export async function writeBlockInputToDb(this: BeaconChain, blocksInput: BlockInput[]): Promise<void> {
+export async function writeBlockInputToDb(this: BeaconChain, blocksInputs: IBlockInput[]): Promise<void> {
   const fnPromises: Promise<void>[] = [];
+  // track slots for logging
+  const slots: number[] = [];
 
-  for (const blockInput of blocksInput) {
-    const {block} = blockInput;
+  for (const blockInput of blocksInputs) {
+    const block = blockInput.getBlock();
+    const slot = block.message.slot;
+    slots.push(slot);
     const blockRoot = this.config.getForkTypes(block.message.slot).BeaconBlock.hashTreeRoot(block.message);
     const blockRootHex = toRootHex(blockRoot);
     const blockBytes = this.serializedCache.get(block);
@@ -27,104 +33,100 @@ export async function writeBlockInputToDb(this: BeaconChain, blocksInput: BlockI
       this.metrics?.importBlock.persistBlockNoSerializedDataCount.inc();
       fnPromises.push(this.db.block.add(block));
     }
+
     this.logger.debug("Persist block to hot DB", {
       slot: block.message.slot,
       root: blockRootHex,
       inputType: blockInput.type,
     });
 
-    if (blockInput.type === BlockInputType.availableData || blockInput.type === BlockInputType.dataPromise) {
-      const blockData =
-        blockInput.type === BlockInputType.availableData
-          ? blockInput.blockData
-          : await blockInput.cachedData.availabilityPromise;
+    if (!blockInput.hasAllData()) {
+      await blockInput.waitForAllData(BLOB_AVAILABILITY_TIMEOUT);
+    }
 
-      // NOTE: Old data is pruned on archive
-      if (isForkPostFulu(blockData.fork)) {
-        const {custodyConfig} = this;
-        const {custodyColumns} = custodyConfig;
-        const blobsLen = (block.message as fulu.BeaconBlock).body.blobKzgCommitments.length;
-        let dataColumnsLen: number;
-        if (blobsLen === 0) {
-          dataColumnsLen = 0;
-        } else {
-          dataColumnsLen = custodyColumns.length;
-        }
+    // NOTE: Old data is pruned on archive
+    if (isBlockInputColumns(blockInput)) {
+      if (!blockInput.hasComputedAllData()) {
+        // Supernodes may only have a subset of the data columns by the time the block begins to be imported
+        // because full data availability can be assumed after NUMBER_OF_COLUMNS / 2 columns are available.
+        // Here, however, all data columns must be fully available/reconstructed before persisting to the DB.
+        // Wait for normal gossip to receive any missing columns and attempt reconstruction after this delay.
+        const delay = getCutoffTimeMs(this, slot, (this.config.SECONDS_PER_SLOT / INTERVALS_PER_SLOT) * 1000);
+        this.columnReconstructionTracker.triggerColumnReconstruction(delay, blockInput);
+        await blockInput.waitForComputedAllData(BLOB_AVAILABILITY_TIMEOUT);
+      }
 
-        const blockDataColumns = (blockData as BlockInputDataColumns).dataColumns;
-        const dataColumnSidecars = blockDataColumns.filter((dataColumnSidecar) =>
-          custodyColumns.includes(dataColumnSidecar.index)
-        );
-        if (dataColumnSidecars.length !== dataColumnsLen) {
-          throw Error(
-            `Invalid dataColumnSidecars=${dataColumnSidecars.length} for custody expected custodyColumnsLen=${dataColumnsLen}`
-          );
-        }
+      const {custodyColumns} = this.custodyConfig;
+      const blobsLen = (block.message as fulu.BeaconBlock).body.blobKzgCommitments.length;
+      let dataColumnsLen: number;
+      if (blobsLen === 0) {
+        dataColumnsLen = 0;
+      } else {
+        dataColumnsLen = custodyColumns.length;
+      }
 
-        fnPromises.push(this.db.dataColumnSidecar.putMany(blockRoot, dataColumnSidecars));
-        this.logger.debug("Persisted dataColumnSidecars to hot DB", {
-          slot: block.message.slot,
-          root: blockRootHex,
-          blockDataColumns: blockDataColumns.length,
-          dataColumnSidecars: dataColumnSidecars.length,
-          numBlobs: blobsLen,
-          custodyColumns: custodyColumns.length,
-        });
-      } else if (isForkPostDeneb(blockData.fork)) {
-        const blobSidecars = (blockData as BlockInputBlobs).blobs;
-        fnPromises.push(this.db.blobSidecars.add({blockRoot, slot: block.message.slot, blobSidecars}));
-        this.logger.debug("Persisted blobSidecars to hot DB", {
-          blobsLen: blobSidecars.length,
-          slot: block.message.slot,
-          root: blockRootHex,
-        });
+      const dataColumnSidecars = blockInput.getCustodyColumns();
+      if (dataColumnSidecars.length !== dataColumnsLen) {
+        this.logger.debug(
+          `Invalid dataColumnSidecars=${dataColumnSidecars.length} for custody expected custodyColumnsLen=${dataColumnsLen}`
+        );
       }
+
+      fnPromises.push(this.db.dataColumnSidecar.putMany(blockRoot, dataColumnSidecars));
+      this.logger.debug("Persisted dataColumnSidecars to hot DB", {
+        slot: block.message.slot,
+        root: blockRootHex,
+        dataColumnSidecars: dataColumnSidecars.length,
+        numBlobs: blobsLen,
+        custodyColumns: custodyColumns.length,
+      });
+    } else if (isBlockInputBlobs(blockInput)) {
+      const blobSidecars = blockInput.getBlobs();
+      fnPromises.push(this.db.blobSidecars.add({blockRoot, slot: block.message.slot, blobSidecars}));
+      this.logger.debug("Persisted blobSidecars to hot DB", {
+        blobsLen: blobSidecars.length,
+        slot: block.message.slot,
+        root: blockRootHex,
+      });
     }
   }
 
   await Promise.all(fnPromises);
-  this.logger.debug("Persisted blocksInput to db", {
-    blocksInput: blocksInput.length,
-    slots: prettyPrintIndices(blocksInput.map((blockInput) => blockInput.block.message.slot)),
-  });
+  this.logger.debug("Persisted blocksInput to db", {
+    blocksInput: blocksInputs.length,
+    slots: prettyPrintIndices(slots),
+  });
 }
 
 /**
  * Prunes eagerly persisted block inputs only if not known to the fork-choice
 */
-export async function removeEagerlyPersistedBlockInputs(this: BeaconChain, blockInputs: BlockInput[]): Promise<void> {
+export async function removeEagerlyPersistedBlockInputs(this: BeaconChain, blockInputs: IBlockInput[]): Promise<void> {
   const blockToRemove = [];
   const blobsToRemove = [];
   const dataColumnsToRemove = [];
 
   for (const blockInput of blockInputs) {
-    const {block, type} = blockInput;
+    const block = blockInput.getBlock();
     const slot = block.message.slot;
     const blockRoot = this.config.getForkTypes(slot).BeaconBlock.hashTreeRoot(block.message);
-    const blockRootHex = toHex(blockRoot);
+    const blockRootHex = toRootHex(blockRoot);
     if (!this.forkChoice.hasBlockHex(blockRootHex)) {
       blockToRemove.push(block);
 
-      if (type === BlockInputType.availableData) {
-        const {blockData} = blockInput;
-        if (blockData.fork === ForkName.deneb || blockData.fork === ForkName.electra) {
-          const blobSidecars = blockData.blobs;
-          blobsToRemove.push({blockRoot, slot, blobSidecars});
-        } else {
-          const {custodyConfig} = this;
-          const {custodyColumns} = custodyConfig;
-          const dataColumnsLen = custodyColumns.length;
-          const dataColumnSidecars = (blockData as BlockInputDataColumns).dataColumns.filter((dataColumnSidecar) =>
-            custodyColumns.includes(dataColumnSidecar.index)
-          );
-          if (dataColumnSidecars.length !== dataColumnsLen) {
-            throw Error(
-              `Invalid dataColumnSidecars=${dataColumnSidecars.length} for custody expected custodyColumnsLen=${dataColumnsLen}`
-            );
-          }
-
-          dataColumnsToRemove.push(blockRoot);
+      if (isBlockInputColumns(blockInput)) {
+        const {custodyColumns} = this.custodyConfig;
+        const dataColumnsLen = custodyColumns.length;
+        const dataColumnSidecars = blockInput.getCustodyColumns();
+        if (dataColumnSidecars.length !== dataColumnsLen) {
+          throw Error(
+            `Invalid dataColumnSidecars=${dataColumnSidecars.length} for custody expected custodyColumnsLen=${dataColumnsLen}`
+          );
         }
+
+        dataColumnsToRemove.push(blockRoot);
+      } else if (isBlockInputBlobs(blockInput)) {
+        const blobSidecars = blockInput.getBlobs();
+        blobsToRemove.push({blockRoot, slot, blobSidecars});
       }
     }
   }
diff --git a/packages/beacon-node/src/chain/chain.ts b/packages/beacon-node/src/chain/chain.ts
index e9a9cced20dc..3d8f0371b6c8 100644
--- a/packages/beacon-node/src/chain/chain.ts
+++ b/packages/beacon-node/src/chain/chain.ts
@@ -57,8 +57,8 @@ import {SerializedCache} from "../util/serializedCache.js";
 import {ArchiveStore} from "./archiveStore/archiveStore.js";
 import {CheckpointBalancesCache} from "./balancesCache.js";
 import {BeaconProposerCache} from "./beaconProposerCache.js";
+import {IBlockInput} from "./blocks/blockInput/index.js";
 import {BlockProcessor, ImportBlockOpts} from "./blocks/index.js";
-import {BlockInput} from "./blocks/types.js";
 import {BlsMultiThreadWorkerPool, BlsSingleThreadVerifier, IBlsVerifier} from "./bls/index.js";
 import {ChainEvent, ChainEventEmitter} from "./emitter.js";
 import {ForkchoiceCaller, initializeForkChoice} from "./forkChoice/index.js";
@@ -88,11 +88,10 @@ import { SeenContributionAndProof, SeenSyncCommitteeMessages, } from "./seenCache/index.js"; -import {SeenGossipBlockInput} from "./seenCache/index.js"; import {SeenAggregatedAttestations} from "./seenCache/seenAggregateAndProof.js"; import {SeenAttestationDatas} from "./seenCache/seenAttestationData.js"; import {SeenBlockAttesters} from "./seenCache/seenBlockAttesters.js"; -import {SeenBlockInputCache} from "./seenCache/seenBlockInput.js"; +import {SeenBlockInput} from "./seenCache/seenGossipBlockInput.js"; import {ShufflingCache} from "./shufflingCache.js"; import {BlockStateCacheImpl} from "./stateCache/blockStateCacheImpl.js"; import {DbCPStateDatastore} from "./stateCache/datastore/db.js"; @@ -101,6 +100,8 @@ import {FIFOBlockStateCache} from "./stateCache/fifoBlockStateCache.js"; import {InMemoryCheckpointStateCache} from "./stateCache/inMemoryCheckpointsCache.js"; import {PersistentCheckpointStateCache} from "./stateCache/persistentCheckpointsCache.js"; import {ValidatorMonitor} from "./validatorMonitor.js"; +import {GetBlobsTracker} from "./GetBlobsTracker.js"; +import {ColumnReconstructionTracker} from "./ColumnReconstructionTracker.js"; /** * The maximum number of cached produced results to keep in memory. @@ -151,8 +152,7 @@ export class BeaconChain implements IBeaconChain { readonly seenSyncCommitteeMessages = new SeenSyncCommitteeMessages(); readonly seenContributionAndProof: SeenContributionAndProof; readonly seenAttestationDatas: SeenAttestationDatas; - readonly seenGossipBlockInput: SeenGossipBlockInput; - readonly seenBlockInputCache: SeenBlockInputCache; + readonly seenBlockInputCache: SeenBlockInput; // Seen cache for liveness checks readonly seenBlockAttesters = new SeenBlockAttesters(); @@ -175,6 +175,9 @@ export class BeaconChain implements IBeaconChain { readonly serializedCache: SerializedCache; + readonly getBlobsTracker: GetBlobsTracker; + readonly columnReconstructionTracker: ColumnReconstructionTracker; + readonly opts: IChainOptions; protected readonly blockProcessor: BlockProcessor; @@ -282,18 +285,11 @@ export class BeaconChain implements IBeaconChain { initialCustodyGroupCount, }); - this.seenGossipBlockInput = new SeenGossipBlockInput( - this.custodyConfig, - this.executionEngine, - emitter, - clock, - logger - ); - this.beaconProposerCache = new BeaconProposerCache(opts); this.checkpointBalancesCache = new CheckpointBalancesCache(); - this.seenBlockInputCache = new SeenBlockInputCache({ + this.seenBlockInputCache = new SeenBlockInput({ config, + custodyConfig: this.custodyConfig, clock, chainEvents: emitter, signal, @@ -403,6 +399,20 @@ export class BeaconChain implements IBeaconChain { this.serializedCache = new SerializedCache(); + this.getBlobsTracker = new GetBlobsTracker({ + logger, + executionEngine: this.executionEngine, + emitter, + metrics, + config, + }); + this.columnReconstructionTracker = new ColumnReconstructionTracker({ + logger, + emitter, + metrics, + config, + }); + this.archiveStore = new ArchiveStore( {db, chain: this, logger: logger as LoggerNode, metrics}, {...opts, dbName, anchorState: {finalizedCheckpoint: anchorState.finalizedCheckpoint}}, @@ -446,7 +456,7 @@ export class BeaconChain implements IBeaconChain { } seenBlock(blockRoot: RootHex): boolean { - return this.seenGossipBlockInput.hasBlock(blockRoot) || this.forkChoice.hasBlockHex(blockRoot); + return this.seenBlockInputCache.has(blockRoot) || this.forkChoice.hasBlockHex(blockRoot); } regenCanAcceptWork(): boolean { @@ -791,11 +801,11 @@ export class 
BeaconChain implements IBeaconChain {
     return {block, executionPayloadValue, consensusBlockValue: gweiToWei(proposerReward), shouldOverrideBuilder};
   }
 
-  async processBlock(block: BlockInput, opts?: ImportBlockOpts): Promise<void> {
+  async processBlock(block: IBlockInput, opts?: ImportBlockOpts): Promise<void> {
     return this.blockProcessor.processBlocksJob([block], opts);
   }
 
-  async processChainSegment(blocks: BlockInput[], opts?: ImportBlockOpts): Promise<void> {
+  async processChainSegment(blocks: IBlockInput[], opts?: ImportBlockOpts): Promise<void> {
     return this.blockProcessor.processBlocksJob(blocks, opts);
   }
 
diff --git a/packages/beacon-node/src/chain/emitter.ts b/packages/beacon-node/src/chain/emitter.ts
index 35eef5c0759a..fdaab4f005d0 100644
--- a/packages/beacon-node/src/chain/emitter.ts
+++ b/packages/beacon-node/src/chain/emitter.ts
@@ -4,7 +4,9 @@ import {StrictEventEmitter} from "strict-event-emitter-types";
 import {routes} from "@lodestar/api";
 import {CheckpointWithHex} from "@lodestar/fork-choice";
 import {CachedBeaconStateAllForks} from "@lodestar/state-transition";
-import {fulu, phase0} from "@lodestar/types";
+import {RootHex, fulu, phase0} from "@lodestar/types";
+import {PeerIdStr} from "../util/peerId.js";
+import {BlockInputSource, IBlockInput} from "./blocks/blockInput/types.js";
 
 /**
  * Important chain events that occur during normal chain operation.
@@ -47,6 +49,18 @@ export enum ChainEvent {
    * Trigger an update of status so reqresp by peers have current earliestAvailableSlot
    */
   updateStatus = "updateStatus",
+  /**
+   * A block was seen whose parent is not yet known to the fork-choice. Signals sync to fetch the
+   * missing parent by root before the block can be processed
+   */
+  unknownParent = "unknownParent",
+  /**
+   * A block root was referenced but the block itself has not been seen. Signals sync to fetch the
+   * block by root
+   */
+  unknownBlockRoot = "unknownBlockRoot",
+  /**
+   * A BlockInput was seen but is still missing its block and/or some of its DA sidecars. Signals
+   * sync to fetch the missing pieces so the BlockInput can become fully available
+   */
+  incompleteBlockInput = "incompleteBlockInput",
 }
 
 export type HeadEventData = routes.events.EventData[routes.events.EventType.head];
@@ -55,6 +69,12 @@ export type ReorgEventData = routes.events.EventData[routes.events.EventType.cha
 
 // API events are emitted through the same ChainEventEmitter for re-use internally
 type ApiEvents = {[K in routes.events.EventType]: (data: routes.events.EventData[K]) => void};
 
+export type ChainEventData = {
+  [ChainEvent.unknownParent]: {blockInput: IBlockInput; peer: PeerIdStr; source: BlockInputSource};
+  [ChainEvent.unknownBlockRoot]: {rootHex: RootHex; peer?: PeerIdStr; source: BlockInputSource};
+  [ChainEvent.incompleteBlockInput]: {blockInput: IBlockInput; peer: PeerIdStr; source: BlockInputSource};
+};
+
 export type IChainEvents = ApiEvents & {
   [ChainEvent.checkpoint]: (checkpoint: phase0.Checkpoint, state: CachedBeaconStateAllForks) => void;
 
@@ -66,6 +86,12 @@ export type IChainEvents = ApiEvents & {
   [ChainEvent.publishDataColumns]: (sidecars: fulu.DataColumnSidecar[]) => void;
 
   [ChainEvent.updateStatus]: () => void;
+
+  // Sync events that are chain->chain. They are initiated from network requests but do not cross the
+  // network barrier, so they are considered ChainEvent(s).
+  [ChainEvent.unknownParent]: (data: ChainEventData[ChainEvent.unknownParent]) => void;
+  [ChainEvent.unknownBlockRoot]: (data: ChainEventData[ChainEvent.unknownBlockRoot]) => void;
+  [ChainEvent.incompleteBlockInput]: (data: ChainEventData[ChainEvent.incompleteBlockInput]) => void;
 };
 
 /**
diff --git a/packages/beacon-node/src/chain/errors/blobSidecarError.ts b/packages/beacon-node/src/chain/errors/blobSidecarError.ts
index bf7628b27881..8bbc8063eb84 100644
--- a/packages/beacon-node/src/chain/errors/blobSidecarError.ts
+++ b/packages/beacon-node/src/chain/errors/blobSidecarError.ts
@@ -1,4 +1,5 @@
 import {RootHex, Slot, SubnetID, ValidatorIndex} from "@lodestar/types";
+import {LodestarError} from "@lodestar/utils";
 import {GossipActionError} from "./gossipValidation.js";
 
 export enum BlobSidecarErrorCode {
@@ -15,6 +16,15 @@ export enum BlobSidecarErrorCode {
   /** !bls.KeyValidate(blobs_sidecar.kzg_aggregated_proof) */
   INVALID_KZG_PROOF = "BLOBS_SIDECAR_ERROR_INVALID_KZG_PROOF",
 
+  // Validation errors when validating against an existing block
+
+  /** Block and sidecars blob count mismatch */
+  INCORRECT_SIDECAR_COUNT = "BLOBS_SIDECAR_ERROR_INCORRECT_SIDECAR_COUNT",
+  /** Sidecar doesn't match block */
+  INCORRECT_BLOCK = "BLOBS_SIDECAR_ERROR_INCORRECT_BLOCK",
+  /** Sidecar proofs not valid */
+  INVALID_KZG_PROOF_BATCH = "BLOBS_SIDECAR_ERROR_INVALID_KZG_PROOF_BATCH",
+
   // following errors are adapted from the block errors
   FUTURE_SLOT = "BLOB_SIDECAR_ERROR_FUTURE_SLOT",
   WOULD_REVERT_FINALIZED_SLOT = "BLOB_SIDECAR_ERROR_WOULD_REVERT_FINALIZED_SLOT",
@@ -34,6 +44,9 @@ export type BlobSidecarErrorType =
   | {code: BlobSidecarErrorCode.INCORRECT_SLOT; blockSlot: Slot; blobSlot: Slot; blobIdx: number}
   | {code: BlobSidecarErrorCode.INVALID_BLOB; blobIdx: number}
   | {code: BlobSidecarErrorCode.INVALID_KZG_PROOF; blobIdx: number}
+  | {code: BlobSidecarErrorCode.INCORRECT_SIDECAR_COUNT; slot: number; expected: number; actual: number}
+  | {code: BlobSidecarErrorCode.INCORRECT_BLOCK; slot: number; blobIdx: number; expected: string; actual: string}
+  | {code: BlobSidecarErrorCode.INVALID_KZG_PROOF_BATCH; slot: number; reason: string}
   | {code: BlobSidecarErrorCode.FUTURE_SLOT; blockSlot: Slot; currentSlot: Slot}
   | {code: BlobSidecarErrorCode.WOULD_REVERT_FINALIZED_SLOT; blockSlot: Slot; finalizedSlot: Slot}
   | {code: BlobSidecarErrorCode.ALREADY_KNOWN; root: RootHex}
@@ -44,3 +57,4 @@
   | {code: BlobSidecarErrorCode.INCORRECT_PROPOSER; proposerIndex: ValidatorIndex};
 
 export class BlobSidecarGossipError extends GossipActionError<BlobSidecarErrorType> {}
+export class BlobSidecarValidationError extends LodestarError<BlobSidecarErrorType> {}
diff --git a/packages/beacon-node/src/chain/errors/dataColumnSidecarError.ts b/packages/beacon-node/src/chain/errors/dataColumnSidecarError.ts
index a8715156bde8..937a56d507d7 100644
--- a/packages/beacon-node/src/chain/errors/dataColumnSidecarError.ts
+++ b/packages/beacon-node/src/chain/errors/dataColumnSidecarError.ts
@@ -1,4 +1,5 @@
 import {RootHex, Slot, SubnetID} from "@lodestar/types";
+import {LodestarError} from "@lodestar/utils";
 import {GossipActionError} from "./gossipValidation.js";
 
 export enum DataColumnSidecarErrorCode {
@@ -8,6 +9,19 @@ export enum DataColumnSidecarErrorCode {
   INVALID_SUBNET = "DATA_COLUMN_SIDECAR_ERROR_INVALID_SUBNET",
   INVALID_KZG_PROOF = "DATA_COLUMN_SIDECAR_ERROR_INVALID_KZG_PROOF",
 
+  // Validation errors when validating against an existing block
+
+  /** Block and sidecars data column count mismatch */
+  INCORRECT_SIDECAR_COUNT = "DATA_COLUMN_SIDECAR_ERROR_INCORRECT_SIDECAR_COUNT",
+  /** Sidecar doesn't match block */
+  INCORRECT_BLOCK = "DATA_COLUMN_SIDECAR_ERROR_INCORRECT_BLOCK",
+  /** Sidecar kzg commitments count not as expected */
+  INCORRECT_KZG_COMMITMENTS_COUNT = "DATA_COLUMN_SIDECAR_ERROR_INCORRECT_KZG_COMMITMENTS_COUNT",
+  /** Sidecar kzg proof count not as expected */
+  INCORRECT_KZG_PROOF_COUNT = "DATA_COLUMN_SIDECAR_ERROR_INCORRECT_KZG_PROOF_COUNT",
+  /** Sidecar proofs not valid */
+  INVALID_KZG_PROOF_BATCH = "DATA_COLUMN_SIDECAR_ERROR_INVALID_KZG_PROOF_BATCH",
+
   // following errors are adapted from the block errors
   ALREADY_KNOWN = "DATA_COLUMN_SIDECAR_ERROR_ALREADY_KNOWN",
   FUTURE_SLOT = "DATA_COLUMN_SIDECAR_ERROR_FUTURE_SLOT",
@@ -20,8 +34,8 @@
 }
 
 export type DataColumnSidecarErrorType =
-  | {code: DataColumnSidecarErrorCode.INVALID_INDEX; columnIdx: number}
-  | {code: DataColumnSidecarErrorCode.NO_COMMITMENTS; columnIdx: number}
+  | {code: DataColumnSidecarErrorCode.INVALID_INDEX; slot: Slot; columnIdx: number}
+  | {code: DataColumnSidecarErrorCode.NO_COMMITMENTS; slot: Slot; columnIdx: number}
   | {
       code: DataColumnSidecarErrorCode.MISMATCHED_LENGTHS;
       columnLength: number;
@@ -37,6 +51,30 @@
   | {code: DataColumnSidecarErrorCode.NOT_LATER_THAN_PARENT; parentSlot: Slot; slot: Slot}
   | {code: DataColumnSidecarErrorCode.INCLUSION_PROOF_INVALID; slot: Slot; columnIdx: number}
   | {code: DataColumnSidecarErrorCode.INVALID_KZG_PROOF; slot: Slot; columnIdx: number}
+  | {code: DataColumnSidecarErrorCode.INCORRECT_SIDECAR_COUNT; slot: number; expected: number; actual: number}
+  | {
+      code: DataColumnSidecarErrorCode.INCORRECT_BLOCK;
+      slot: number;
+      columnIdx: number;
+      expected: string;
+      actual: string;
+    }
+  | {
+      code: DataColumnSidecarErrorCode.INCORRECT_KZG_COMMITMENTS_COUNT;
+      slot: number;
+      columnIdx: number;
+      expected: number;
+      actual: number;
+    }
+  | {
+      code: DataColumnSidecarErrorCode.INCORRECT_KZG_PROOF_COUNT;
+      slot: number;
+      columnIdx: number;
+      expected: number;
+      actual: number;
+    }
+  | {code: DataColumnSidecarErrorCode.INVALID_KZG_PROOF_BATCH; slot: number; reason: string}
   | {code: DataColumnSidecarErrorCode.INCORRECT_PROPOSER; actualProposerIndex: number; expectedProposerIndex: number};
 
 export class DataColumnSidecarGossipError extends GossipActionError<DataColumnSidecarErrorType> {}
+export class DataColumnSidecarValidationError extends LodestarError<DataColumnSidecarErrorType> {}
diff --git a/packages/beacon-node/src/chain/interface.ts b/packages/beacon-node/src/chain/interface.ts
index eb60bec71ac7..dfbf64e674b1 100644
--- a/packages/beacon-node/src/chain/interface.ts
+++ b/packages/beacon-node/src/chain/interface.ts
@@ -35,7 +35,8 @@ import {SerializedCache} from "../util/serializedCache.js";
 import {IArchiveStore} from "./archiveStore/interface.js";
 import {CheckpointBalancesCache} from "./balancesCache.js";
 import {BeaconProposerCache, ProposerPreparationData} from "./beaconProposerCache.js";
-import {BlockInput, ImportBlockOpts} from "./blocks/types.js";
+import {IBlockInput} from "./blocks/blockInput/index.js";
+import {ImportBlockOpts} from "./blocks/types.js";
 import {IBlsVerifier} from "./bls/index.js";
 import {ChainEventEmitter} from "./emitter.js";
 import {ForkchoiceCaller} from "./forkChoice/index.js";
@@ -56,13 +57,14 @@ import {
   SeenContributionAndProof,
   SeenSyncCommitteeMessages,
 } from "./seenCache/index.js";
-import {SeenGossipBlockInput} from "./seenCache/index.js";
 import {SeenAggregatedAttestations} from "./seenCache/seenAggregateAndProof.js";
import {SeenAttestationDatas} from "./seenCache/seenAttestationData.js";
 import {SeenBlockAttesters} from "./seenCache/seenBlockAttesters.js";
-import {SeenBlockInputCache} from "./seenCache/seenBlockInput.js";
+import {SeenBlockInput} from "./seenCache/seenGossipBlockInput.js";
 import {ShufflingCache} from "./shufflingCache.js";
 import {ValidatorMonitor} from "./validatorMonitor.js";
+import {GetBlobsTracker} from "./GetBlobsTracker.js";
+import {ColumnReconstructionTracker} from "./ColumnReconstructionTracker.js";
 
 export {BlockType, type AssembledBlockType};
 export {type ProposerPreparationData};
@@ -126,8 +128,7 @@ export interface IBeaconChain {
   readonly seenSyncCommitteeMessages: SeenSyncCommitteeMessages;
   readonly seenContributionAndProof: SeenContributionAndProof;
   readonly seenAttestationDatas: SeenAttestationDatas;
-  readonly seenBlockInputCache: SeenBlockInputCache;
-  readonly seenGossipBlockInput: SeenGossipBlockInput;
+  readonly seenBlockInputCache: SeenBlockInput;
   // Seen cache for liveness checks
   readonly seenBlockAttesters: SeenBlockAttesters;
 
@@ -141,6 +142,9 @@ export interface IBeaconChain {
   // Cache for serialized objects
   readonly serializedCache: SerializedCache;
 
+  readonly getBlobsTracker: GetBlobsTracker;
+  readonly columnReconstructionTracker: ColumnReconstructionTracker;
+
   readonly opts: IChainOptions;
 
   /** Start the processing of chain and load state from disk and related actions */
@@ -212,9 +216,9 @@
   }>;
 
   /** Process a block until complete */
-  processBlock(block: BlockInput, opts?: ImportBlockOpts): Promise<void>;
+  processBlock(block: IBlockInput, opts?: ImportBlockOpts): Promise<void>;
   /** Process a chain of blocks until complete */
-  processChainSegment(blocks: BlockInput[], opts?: ImportBlockOpts): Promise<void>;
+  processChainSegment(blocks: IBlockInput[], opts?: ImportBlockOpts): Promise<void>;
 
   getStatus(): Status;
 
diff --git a/packages/beacon-node/src/chain/seenCache/index.ts b/packages/beacon-node/src/chain/seenCache/index.ts
index 250e6581c312..2aa218fc20fb 100644
--- a/packages/beacon-node/src/chain/seenCache/index.ts
+++ b/packages/beacon-node/src/chain/seenCache/index.ts
@@ -2,4 +2,4 @@ export {SeenAggregators, SeenAttesters} from "./seenAttesters.js";
 export {SeenBlockProposers} from "./seenBlockProposers.js";
 export {SeenSyncCommitteeMessages} from "./seenCommittee.js";
 export {SeenContributionAndProof} from "./seenCommitteeContribution.js";
-export {SeenGossipBlockInput} from "./seenGossipBlockInput.js";
+export {SeenBlockInput} from "./seenGossipBlockInput.js";
diff --git a/packages/beacon-node/src/chain/seenCache/seenBlockInput.ts b/packages/beacon-node/src/chain/seenCache/seenBlockInput.ts
deleted file mode 100644
index eb0ba6d98114..000000000000
--- a/packages/beacon-node/src/chain/seenCache/seenBlockInput.ts
+++ /dev/null
@@ -1,304 +0,0 @@
-import {ChainForkConfig} from "@lodestar/config";
-import {CheckpointWithHex} from "@lodestar/fork-choice";
-import {ForkName, isForkPostDeneb} from "@lodestar/params";
-import {computeStartSlotAtEpoch} from "@lodestar/state-transition";
-import {RootHex, SignedBeaconBlock, Slot, deneb} from "@lodestar/types";
-import {LodestarError, Logger, toRootHex} from "@lodestar/utils";
-import {Metrics} from "../../metrics/metrics.js";
-import {IClock} from "../../util/clock.js";
-import {
-  BlockInputBlobs,
-  BlockInputPreData,
-  DAType,
-  ForkBlobsDA,
-  IBlockInput,
-  LogMetaBasic,
-  LogMetaBlobs,
-  SourceMeta,
-  isBlockInputBlobs,
-  isDaOutOfRange,
-} from "../blocks/blockInput/index.js";
-import 
{ChainEvent, ChainEventEmitter} from "../emitter.js"; - -const MAX_BLOCK_INPUT_CACHE_SIZE = 5; - -export type SeenBlockInputCacheModules = { - config: ChainForkConfig; - clock: IClock; - chainEvents: ChainEventEmitter; - signal: AbortSignal; - // custodyConfig: CustodyConfig; - metrics: Metrics | null; - logger?: Logger; -}; - -export type GetByBlobOptions = { - throwErrorIfAlreadyKnown?: boolean; -}; - -/** - * Consumers that create BlockInputs or change types of old BlockInputs - * - * - gossipHandlers (block and blob) - * - beaconBlocksMaybeBlobsByRange - * - unavailableBeaconBlobsByRoot (beaconBlocksMaybeBlobsByRoot) - * - publishBlock in the beacon/blocks/index.ts API - * https://github.com/ChainSafe/lodestar/blob/unstable/packages/beacon-node/src/api/impl/beacon/blocks/index.ts#L62 - * - maybeValidateBlobs in verifyBlocksDataAvailability (is_data_available spec function) - * https://github.com/ChainSafe/lodestar/blob/unstable/packages/beacon-node/src/chain/blocks/verifyBlocksDataAvailability.ts#L111 - * - * - * Pruning management for SeenBlockInputCache - * ------------------------------------------ - * There are four cases for how pruning needs to be handled - * - Normal operation following head via gossip (and/or reqresp). For this situation the consumer (process pipeline or - * caller of processBlock) will call the `prune` method to remove any processed BlockInputs from the cache. This will - * also remove any ancestors of the processed BlockInput as that will also need to have been successfully processed - * for import to work correctly - * - onFinalized event handler will help to prune any non-canonical forks once the chain finalizes. Any block-slots that - * are before the finalized checkpoint will be pruned. - * - Range-sync periods. The range process uses this cache to store and sync blocks with DA data as the chain is pulled - * from peers. We pull batches, by epoch, so 32 slots are pulled at a time and several batches are pulled concurrently. - * It is important to set the MAX_BLOCK_INPUT_CACHE_SIZE high enough to support range sync activities. Currently the - * value is set for 5 batches of 32 slots. As process block is called (similar to following head) the BlockInput and - * its ancestors will be pruned. - * - Non-Finality times. This is a bit more tricky. There can be long periods of non-finality and storing everything - * will cause OOM. The pruneToMax will help ensure a hard limit on the number of stored blocks (with DA) that are held - * in memory at any one time. 
The value for MAX_BLOCK_INPUT_CACHE_SIZE is set to accommodate range-sync but in - * practice this value may need to be massaged in the future if we find issues when debugging non-finality - */ - -export class SeenBlockInputCache { - private readonly config: ChainForkConfig; - private readonly clock: IClock; - private readonly chainEvents: ChainEventEmitter; - private readonly signal: AbortSignal; - private readonly metrics: Metrics | null; - private readonly logger?: Logger; - private blockInputs = new Map(); - - constructor({config, clock, chainEvents, signal, metrics, logger}: SeenBlockInputCacheModules) { - this.config = config; - this.clock = clock; - this.chainEvents = chainEvents; - this.signal = signal; - this.metrics = metrics; - this.logger = logger; - - if (metrics) { - metrics.seenCache.blockInput.blockInputCount.addCollect(() => - metrics.seenCache.blockInput.blockInputCount.set(this.blockInputs.size) - ); - } - - this.chainEvents.on(ChainEvent.forkChoiceFinalized, this.onFinalized); - this.signal.addEventListener("abort", () => { - this.chainEvents.off(ChainEvent.forkChoiceFinalized, this.onFinalized); - }); - } - - has(rootHex: RootHex): boolean { - return this.blockInputs.has(rootHex); - } - - get(rootHex: RootHex): IBlockInput | undefined { - return this.blockInputs.get(rootHex); - } - - /** - * Removes the single BlockInput from the cache - */ - remove(rootHex: RootHex): void { - this.blockInputs.delete(rootHex); - } - - /** - * Removes a processed BlockInput from the cache and also removes any ancestors of processed blocks - */ - prune(rootHex: RootHex): void { - let blockInput = this.blockInputs.get(rootHex); - let parentRootHex = blockInput?.parentRootHex; - while (blockInput) { - this.blockInputs.delete(blockInput.blockRootHex); - blockInput = this.blockInputs.get(parentRootHex ?? ""); - parentRootHex = blockInput?.parentRootHex; - } - this.pruneToMaxSize(); - } - - onFinalized = (checkpoint: CheckpointWithHex) => { - const cutoffSlot = computeStartSlotAtEpoch(checkpoint.epoch); - for (const [rootHex, blockInput] of this.blockInputs) { - if (blockInput.slot < cutoffSlot) { - this.blockInputs.delete(rootHex); - } - } - this.pruneToMaxSize(); - }; - - getByBlock({block, source, seenTimestampSec, peerIdStr}: SourceMeta & {block: SignedBeaconBlock}): IBlockInput { - const blockRoot = this.config.getForkTypes(block.message.slot).BeaconBlock.hashTreeRoot(block.message); - const blockRootHex = toRootHex(blockRoot); - - // TODO(peerDAS): Why is it necessary to static cast this here. 
All conditional paths result in a valid value so should be defined correctly below - let blockInput = this.blockInputs.get(blockRootHex) as IBlockInput; - if (!blockInput) { - const {forkName, daOutOfRange} = this.buildCommonProps(block.message.slot); - if (!isForkPostDeneb(forkName)) { - blockInput = BlockInputPreData.createFromBlock({ - block, - blockRootHex, - daOutOfRange, - forkName, - source: { - source, - seenTimestampSec, - peerIdStr, - }, - }); - } - // else if (isForkPostFulu(forkName)) { - // blockInput = new BlockInputColumns.createFromBlock({ - // block, - // blockRootHex, - // daOutOfRange, - // forkName, - // custodyColumns: this.custodyConfig.custodyColumns, - // sampledColumns: this.custodyConfig.sampledColumns, - // source: { - // source, - // seenTimestampSec, - // peerIdStr - // } - // }) - // } - else { - blockInput = BlockInputBlobs.createFromBlock({ - block: block as SignedBeaconBlock, - blockRootHex, - daOutOfRange, - forkName, - source: { - source, - seenTimestampSec, - peerIdStr, - }, - }); - } - this.blockInputs.set(blockInput.blockRootHex, blockInput); - } - - if (!blockInput.hasBlock()) { - blockInput.addBlock({block, blockRootHex, source: {source, seenTimestampSec, peerIdStr}}); - } else { - this.logger?.debug("Attempt to cache block but is already cached on BlockInput", blockInput.getLogMeta()); - this.metrics?.seenCache.blockInput.duplicateBlockCount.inc({source}); - } - - return blockInput; - } - - getByBlob( - {blobSidecar, source, seenTimestampSec, peerIdStr}: SourceMeta & {blobSidecar: deneb.BlobSidecar}, - opts: GetByBlobOptions = {} - ): BlockInputBlobs { - const blockRoot = this.config - .getForkTypes(blobSidecar.signedBlockHeader.message.slot) - .BeaconBlockHeader.hashTreeRoot(blobSidecar.signedBlockHeader.message); - const blockRootHex = toRootHex(blockRoot); - - // TODO(peerDAS): Why is it necessary to static cast this here. 
All conditional paths result in a valid value so should be defined correctly below - let blockInput = this.blockInputs.get(blockRootHex) as IBlockInput; - let created = false; - if (!blockInput) { - created = true; - const {forkName, daOutOfRange} = this.buildCommonProps(blobSidecar.signedBlockHeader.message.slot); - blockInput = BlockInputBlobs.createFromBlob({ - blobSidecar, - blockRootHex, - daOutOfRange, - forkName, - source, - seenTimestampSec, - peerIdStr, - }); - this.metrics?.seenCache.blockInput.createdByBlob.inc(); - this.blockInputs.set(blockRootHex, blockInput); - } - - if (!isBlockInputBlobs(blockInput)) { - throw new SeenBlockInputCacheError( - { - code: SeenBlockInputCacheErrorCode.WRONG_BLOCK_INPUT_TYPE, - cachedType: blockInput.type, - requestedType: DAType.Blobs, - ...blockInput.getLogMeta(), - }, - `BlockInputType mismatch adding blobIndex=${blobSidecar.index}` - ); - } - - if (!blockInput.hasBlob(blobSidecar.index)) { - blockInput.addBlob({blobSidecar, blockRootHex, source, seenTimestampSec, peerIdStr}); - } else if (!created) { - this.logger?.debug( - `Attempt to cache blob index #${blobSidecar.index} but is already cached on BlockInput`, - blockInput.getLogMeta() - ); - this.metrics?.seenCache.blockInput.duplicateBlobCount.inc({source}); - if (opts.throwErrorIfAlreadyKnown) { - throw new SeenBlockInputCacheError({ - code: SeenBlockInputCacheErrorCode.GOSSIP_BLOB_ALREADY_KNOWN, - ...blockInput.getLogMeta(), - }); - } - } - - return blockInput; - } - - private buildCommonProps(slot: Slot): { - daOutOfRange: boolean; - forkName: ForkName; - } { - const forkName = this.config.getForkName(slot); - return { - forkName, - daOutOfRange: isDaOutOfRange(this.config, forkName, slot, this.clock.currentEpoch), - }; - } - - /** - * Use custom implementation of pruneSetToMax to allow for sorting by slot - * and deleting via key/rootHex - */ - private pruneToMaxSize() { - let itemsToDelete = this.blockInputs.size - MAX_BLOCK_INPUT_CACHE_SIZE; - - if (itemsToDelete > 0) { - const sorted = [...this.blockInputs.entries()].sort((a, b) => b[1].slot - a[1].slot); - for (const [rootHex] of sorted) { - this.blockInputs.delete(rootHex); - itemsToDelete--; - if (itemsToDelete <= 0) return; - } - } - } -} - -enum SeenBlockInputCacheErrorCode { - WRONG_BLOCK_INPUT_TYPE = "BLOCK_INPUT_CACHE_ERROR_WRONG_BLOCK_INPUT_TYPE", - GOSSIP_BLOB_ALREADY_KNOWN = "BLOCK_INPUT_CACHE_ERROR_GOSSIP_BLOB_ALREADY_KNOWN", -} - -type SeenBlockInputCacheErrorType = - | (LogMetaBasic & { - code: SeenBlockInputCacheErrorCode.WRONG_BLOCK_INPUT_TYPE; - cachedType: DAType; - requestedType: DAType; - }) - | (LogMetaBlobs & { - code: SeenBlockInputCacheErrorCode.GOSSIP_BLOB_ALREADY_KNOWN; - }); - -class SeenBlockInputCacheError extends LodestarError {} diff --git a/packages/beacon-node/src/chain/seenCache/seenGossipBlockInput.ts b/packages/beacon-node/src/chain/seenCache/seenGossipBlockInput.ts index d1e60d887cbe..8e0616620a84 100644 --- a/packages/beacon-node/src/chain/seenCache/seenGossipBlockInput.ts +++ b/packages/beacon-node/src/chain/seenCache/seenGossipBlockInput.ts @@ -1,544 +1,373 @@ -import {toHexString} from "@chainsafe/ssz"; import {ChainForkConfig} from "@lodestar/config"; -import {ForkName, NUMBER_OF_COLUMNS, isForkPostDeneb} from "@lodestar/params"; -import {RootHex, SignedBeaconBlock, deneb, fulu, ssz} from "@lodestar/types"; -import {Logger, pruneSetToMax} from "@lodestar/utils"; - -import {IExecutionEngine} from "../../execution/index.js"; -import {Metrics} from "../../metrics/index.js"; +import 
{CheckpointWithHex} from "@lodestar/fork-choice"; +import {ForkName, ForkPostFulu, isForkPostDeneb, isForkPostFulu} from "@lodestar/params"; +import {computeStartSlotAtEpoch} from "@lodestar/state-transition"; +import {RootHex, SignedBeaconBlock, Slot, deneb, fulu} from "@lodestar/types"; +import {LodestarError, Logger} from "@lodestar/utils"; +import {Metrics} from "../../metrics/metrics.js"; import {IClock} from "../../util/clock.js"; +import {CustodyConfig} from "../../util/dataColumns.js"; import { - CustodyConfig, - RecoverResult, - getDataColumnsFromExecution, - hasSampledDataColumns, - recoverDataColumnSidecars, -} from "../../util/dataColumns.js"; -import {callInNextEventLoop} from "../../util/eventLoop.js"; -import { - BlobsSource, BlockInput, BlockInputBlobs, - BlockInputDataColumns, - BlockSource, - CachedData, - CachedDataColumns, - DataColumnsSource, - GossipedInputType, - NullBlockInput, - getBlockInput, - getBlockInputBlobs, - getBlockInputDataColumns, -} from "../blocks/types.js"; + BlockInputColumns, + BlockInputPreData, + BlockWithSource, + DAType, + ForkBlobsDA, + IBlockInput, + LogMetaBasic, + LogMetaBlobs, + LogMetaColumns, + SourceMeta, + isBlockInputBlobs, + isBlockInputColumns, + isDaOutOfRange, +} from "../blocks/blockInput/index.js"; import {ChainEvent, ChainEventEmitter} from "../emitter.js"; -import {DataColumnSidecarErrorCode, DataColumnSidecarGossipError} from "../errors/dataColumnSidecarError.js"; -import {GossipAction} from "../errors/gossipValidation.js"; -export enum BlockInputAvailabilitySource { - GOSSIP = "gossip", - RECOVERED = "recovered", - UNKNOWN_SYNC = "unknown_sync", -} +const MAX_BLOCK_INPUT_CACHE_SIZE = 5; -type GossipedBlockInput = - | {type: GossipedInputType.block; signedBlock: SignedBeaconBlock} - | {type: GossipedInputType.blob; blobSidecar: deneb.BlobSidecar} - | { - type: GossipedInputType.dataColumn; - dataColumnSidecar: fulu.DataColumnSidecar; - dataColumnBytes: Uint8Array | null; - }; - -// TODO(fulu): dedup with gossipHandlers.ts -const BLOCK_AVAILABILITY_CUTOFF_MS = 3_000; - -export type BlockInputCacheType = { - fork: ForkName; - block?: SignedBeaconBlock; - cachedData?: CachedData; - // block promise and its callback cached for delayed resolution - blockInputPromise: Promise; - resolveBlockInput: (blockInput: BlockInput) => void; +export type SeenBlockInputCacheModules = { + config: ChainForkConfig; + clock: IClock; + chainEvents: ChainEventEmitter; + signal: AbortSignal; + custodyConfig: CustodyConfig; + metrics: Metrics | null; + logger?: Logger; }; -type GossipBlockInputResponseWithBlock = { - blockInput: BlockInput; - blockInputMeta: - | {pending: GossipedInputType.blob | null; haveBlobs: number; expectedBlobs: number} - | {pending: GossipedInputType.dataColumn | null; haveColumns: number; expectedColumns: number}; +export type GetByBlobOptions = { + throwErrorIfAlreadyKnown?: boolean; }; -type BlockInputPendingBlock = {pending: GossipedInputType.block}; -export type BlockInputMetaPendingBlockWithBlobs = BlockInputPendingBlock & {haveBlobs: number; expectedBlobs: null}; -type BlockInputMetaPendingBlockWithColumns = BlockInputPendingBlock & {haveColumns: number; expectedColumns: null}; - -type GossipBlockInputResponseWithNullBlock = { - blockInput: NullBlockInput; - blockInputMeta: BlockInputMetaPendingBlockWithBlobs | BlockInputMetaPendingBlockWithColumns; -}; - -type GossipBlockInputResponse = GossipBlockInputResponseWithBlock | GossipBlockInputResponseWithNullBlock; - -const MAX_GOSSIPINPUT_CACHE = 5; - /** - * For predeneb, 
SeenGossipBlockInput only tracks and caches block so that we don't need to download known block - * roots. From deneb, it serves same purpose plus tracks and caches the live blobs and blocks on the network to - * solve data availability for the blockInput. If no block has been seen yet for some already seen blobs, it - * responds will null, but on the first block or the consequent blobs it responds with blobs promise till all blobs - * become available. + * Consumers that create BlockInputs or change types of old BlockInputs + * + * - gossipHandlers (block and blob) + * - beaconBlocksMaybeBlobsByRange + * - unavailableBeaconBlobsByRoot (beaconBlocksMaybeBlobsByRoot) + * - publishBlock in the beacon/blocks/index.ts API + * https://github.com/ChainSafe/lodestar/blob/unstable/packages/beacon-node/src/api/impl/beacon/blocks/index.ts#L62 + * - maybeValidateBlobs in verifyBlocksDataAvailability (is_data_available spec function) + * https://github.com/ChainSafe/lodestar/blob/unstable/packages/beacon-node/src/chain/blocks/verifyBlocksDataAvailability.ts#L111 + * * - * One can start processing block on blobs promise blockInput response and can await on the promise before - * fully importing the block. The blobs promise is gets resolved as soon as all blobs corresponding to that - * block are seen by SeenGossipBlockInput + * Pruning management for SeenBlockInputCache + * ------------------------------------------ + * There are four cases for how pruning needs to be handled + * - Normal operation following head via gossip (and/or reqresp). For this situation the consumer (process pipeline or + * caller of processBlock) will call the `prune` method to remove any processed BlockInputs from the cache. This will + * also remove any ancestors of the processed BlockInput as that will also need to have been successfully processed + * for import to work correctly + * - onFinalized event handler will help to prune any non-canonical forks once the chain finalizes. Any block-slots that + * are before the finalized checkpoint will be pruned. + * - Range-sync periods. The range process uses this cache to store and sync blocks with DA data as the chain is pulled + * from peers. We pull batches, by epoch, so 32 slots are pulled at a time and several batches are pulled concurrently. + * It is important to set the MAX_BLOCK_INPUT_CACHE_SIZE high enough to support range sync activities. Currently the + * value is set for 5 batches of 32 slots. As process block is called (similar to following head) the BlockInput and + * its ancestors will be pruned. + * - Non-Finality times. This is a bit more tricky. There can be long periods of non-finality and storing everything + * will cause OOM. The pruneToMax will help ensure a hard limit on the number of stored blocks (with DA) that are held + * in memory at any one time. 
The value for MAX_BLOCK_INPUT_CACHE_SIZE is set to accommodate range-sync but in + * practice this value may need to be massaged in the future if we find issues when debugging non-finality */ -export class SeenGossipBlockInput { - private readonly blockInputCache = new Map(); + +export class SeenBlockInput { + private readonly config: ChainForkConfig; private readonly custodyConfig: CustodyConfig; - private readonly executionEngine: IExecutionEngine; private readonly clock: IClock; - private readonly emitter: ChainEventEmitter; - private readonly logger: Logger; - - constructor( - custodyConfig: CustodyConfig, - executionEngine: IExecutionEngine, - emitter: ChainEventEmitter, - clock: IClock, - logger: Logger - ) { + private readonly chainEvents: ChainEventEmitter; + private readonly signal: AbortSignal; + private readonly metrics: Metrics | null; + private readonly logger?: Logger; + private blockInputs = new Map(); + + constructor({config, custodyConfig, clock, chainEvents, signal, metrics, logger}: SeenBlockInputCacheModules) { + this.config = config; this.custodyConfig = custodyConfig; - this.executionEngine = executionEngine; this.clock = clock; - this.emitter = emitter; + this.chainEvents = chainEvents; + this.signal = signal; + this.metrics = metrics; this.logger = logger; + + if (metrics) { + metrics.seenCache.blockInput.blockInputCount.addCollect(() => + metrics.seenCache.blockInput.blockInputCount.set(this.blockInputs.size) + ); + } + + this.chainEvents.on(ChainEvent.forkChoiceFinalized, this.onFinalized); + this.signal.addEventListener("abort", () => { + this.chainEvents.off(ChainEvent.forkChoiceFinalized, this.onFinalized); + }); } - globalCacheId = 0; - prune(): void { - pruneSetToMax(this.blockInputCache, MAX_GOSSIPINPUT_CACHE); + has(rootHex: RootHex): boolean { + return this.blockInputs.has(rootHex); } - hasBlock(blockRoot: RootHex): boolean { - return this.blockInputCache.has(blockRoot); + get(rootHex: RootHex): IBlockInput | undefined { + return this.blockInputs.get(rootHex); } /** - * Intended to be used for gossip validation, specifically this check: - * [IGNORE] The sidecar is the first sidecar for the tuple (block_header.slot, block_header.proposer_index, - * sidecar.index) with valid header signature, sidecar inclusion proof, and kzg proof + * Removes the single BlockInput from the cache */ - hasDataColumnSidecar(sidecar: fulu.DataColumnSidecar) { - const blockRoot = ssz.phase0.BeaconBlockHeader.hashTreeRoot(sidecar.signedBlockHeader.message); - const blockRootHex = toHexString(blockRoot); + remove(rootHex: RootHex): void { + this.blockInputs.delete(rootHex); + } - const blockCache = this.blockInputCache.get(blockRootHex); - if (blockCache === undefined) { - return false; - } - if (blockCache.cachedData === undefined || blockCache.cachedData.fork !== ForkName.fulu) { - return false; - } - const existingSidecar = blockCache.cachedData.dataColumnsCache.get(sidecar.index); - if (!existingSidecar) { - return false; + /** + * Removes a processed BlockInput from the cache and also removes any ancestors of processed blocks + */ + prune(rootHex: RootHex): void { + let blockInput = this.blockInputs.get(rootHex); + let parentRootHex = blockInput?.parentRootHex; + let deletedCount = 0; + while (blockInput) { + deletedCount++; + this.blockInputs.delete(blockInput.blockRootHex); + blockInput = this.blockInputs.get(parentRootHex ?? 
""); + parentRootHex = blockInput?.parentRootHex; } - return ( - sidecar.signedBlockHeader.message.slot === existingSidecar.dataColumn.signedBlockHeader.message.slot && - sidecar.index === existingSidecar.dataColumn.index && - sidecar.signedBlockHeader.message.proposerIndex === - existingSidecar.dataColumn.signedBlockHeader.message.proposerIndex - ); + this.logger?.debug(`BlockInputCache.prune deleted ${deletedCount} cached BlockInputs`); + this.pruneToMaxSize(); } - getGossipBlockInput( - config: ChainForkConfig, - gossipedInput: GossipedBlockInput, - metrics: Metrics | null - ): GossipBlockInputResponse { - let blockHex: RootHex; - let blockCache: BlockInputCacheType; - let fork: ForkName; - - if (gossipedInput.type === GossipedInputType.block) { - const {signedBlock} = gossipedInput; - fork = config.getForkName(signedBlock.message.slot); - - blockHex = toHexString( - config.getForkTypes(signedBlock.message.slot).BeaconBlock.hashTreeRoot(signedBlock.message) - ); - blockCache = this.blockInputCache.get(blockHex) ?? getEmptyBlockInputCacheEntry(fork, ++this.globalCacheId); - - blockCache.block = signedBlock; - } else if (gossipedInput.type === GossipedInputType.blob) { - const {blobSidecar} = gossipedInput; - const blockRoot = ssz.phase0.BeaconBlockHeader.hashTreeRoot(blobSidecar.signedBlockHeader.message); - fork = config.getForkName(blobSidecar.signedBlockHeader.message.slot); - - blockHex = toHexString(blockRoot); - blockCache = this.blockInputCache.get(blockHex) ?? getEmptyBlockInputCacheEntry(fork, ++this.globalCacheId); - if (blockCache.cachedData?.fork !== ForkName.deneb && blockCache.cachedData?.fork !== ForkName.electra) { - throw Error(`blob data at non deneb/electra fork=${blockCache.fork}`); - } - - // TODO: freetheblobs check if its the same blob or a duplicate and throw/take actions - blockCache.cachedData?.blobsCache.set(blobSidecar.index, blobSidecar); - } else if (gossipedInput.type === GossipedInputType.dataColumn) { - const {dataColumnSidecar, dataColumnBytes} = gossipedInput; - const blockRoot = ssz.phase0.BeaconBlockHeader.hashTreeRoot(dataColumnSidecar.signedBlockHeader.message); - fork = config.getForkName(dataColumnSidecar.signedBlockHeader.message.slot); - - blockHex = toHexString(blockRoot); - blockCache = this.blockInputCache.get(blockHex) ?? getEmptyBlockInputCacheEntry(fork, ++this.globalCacheId); - if (blockCache.cachedData?.fork !== ForkName.fulu) { - throw Error(`data column data at non fulu fork=${blockCache.fork}`); + onFinalized = (checkpoint: CheckpointWithHex) => { + let deletedCount = 0; + const cutoffSlot = computeStartSlotAtEpoch(checkpoint.epoch); + for (const [rootHex, blockInput] of this.blockInputs) { + if (blockInput.slot < cutoffSlot) { + deletedCount++; + this.blockInputs.delete(rootHex); } - - if (this.hasDataColumnSidecar(dataColumnSidecar)) { - throw new DataColumnSidecarGossipError(GossipAction.IGNORE, { - code: DataColumnSidecarErrorCode.ALREADY_KNOWN, - slot: dataColumnSidecar.signedBlockHeader.message.slot, - columnIdx: dataColumnSidecar.index, + } + this.logger?.debug(`BlockInputCache.onFinalized deleted ${deletedCount} cached BlockInputs`); + this.pruneToMaxSize(); + }; + + getByBlock({blockRootHex, block, source, seenTimestampSec, peerIdStr}: BlockWithSource): BlockInput { + // TODO(peerDAS): Why is it necessary to static cast this here. 
All conditional paths result in a valid value so should be defined correctly below
+    let blockInput = this.blockInputs.get(blockRootHex) as IBlockInput;
+    if (!blockInput) {
+      const {forkName, daOutOfRange} = this.buildCommonProps(block.message.slot);
+      if (!isForkPostDeneb(forkName)) {
+        blockInput = BlockInputPreData.createFromBlock({
+          block,
+          blockRootHex,
+          daOutOfRange,
+          forkName,
+          source,
+          seenTimestampSec,
+          peerIdStr,
+        });
+      } else if (isForkPostFulu(forkName)) {
+        blockInput = BlockInputColumns.createFromBlock({
+          block: block as SignedBeaconBlock<ForkPostFulu>,
+          blockRootHex,
+          daOutOfRange,
+          forkName,
+          custodyColumns: this.custodyConfig.custodyColumns,
+          sampledColumns: this.custodyConfig.sampledColumns,
+          source,
+          seenTimestampSec,
+          peerIdStr,
+        });
+      } else {
+        blockInput = BlockInputBlobs.createFromBlock({
+          block: block as SignedBeaconBlock<ForkBlobsDA>,
+          blockRootHex,
+          daOutOfRange,
+          forkName,
+          source,
+          seenTimestampSec,
+          peerIdStr,
+        });
+      }
+      this.blockInputs.set(blockInput.blockRootHex, blockInput);
+    }
+
+    if (!blockInput.hasBlock()) {
+      blockInput.addBlock({block, blockRootHex, source, seenTimestampSec, peerIdStr});
+    } else {
+      this.logger?.debug("Attempt to cache block but is already cached on BlockInput", blockInput.getLogMeta());
+      this.metrics?.seenCache.blockInput.duplicateBlockCount.inc({source});
+    }
+
+    return blockInput as BlockInput;
+  }
+
+  getByBlob(
+    {
+      blockRootHex,
+      blobSidecar,
+      source,
+      seenTimestampSec,
+      peerIdStr,
+    }: SourceMeta & {blockRootHex: RootHex; blobSidecar: deneb.BlobSidecar},
+    opts: GetByBlobOptions = {}
+  ): BlockInputBlobs {
+    // TODO(peerDAS): Why is it necessary to static cast this here. 
All conditional paths result in a valid value so should be defined correctly below + let blockInput = this.blockInputs.get(blockRootHex) as IBlockInput; + let created = false; + if (!blockInput) { + created = true; + const {forkName, daOutOfRange} = this.buildCommonProps(blobSidecar.signedBlockHeader.message.slot); + blockInput = BlockInputBlobs.createFromBlob({ + blobSidecar, + blockRootHex, + daOutOfRange, + forkName, + source, + seenTimestampSec, + peerIdStr, }); + this.metrics?.seenCache.blockInput.createdByBlob.inc(); + this.blockInputs.set(blockRootHex, blockInput); } - const {block: signedBlock, blockInputPromise, resolveBlockInput, cachedData} = blockCache; - - if (signedBlock !== undefined) { - if (!isForkPostDeneb(fork)) { - return { - blockInput: getBlockInput.preData(config, signedBlock, BlockSource.gossip), - blockInputMeta: {pending: null, haveBlobs: 0, expectedBlobs: 0}, - }; - } - - if (cachedData === undefined || !isForkPostDeneb(cachedData.fork)) { - throw Error("Missing or Invalid fork cached Data for post-deneb block"); - } - - if (cachedData.fork === ForkName.deneb || cachedData.fork === ForkName.electra) { - const {blobsCache, resolveAvailability} = cachedData; - - // block is available, check if all blobs have shown up - const {slot, body} = signedBlock.message; - const {blobKzgCommitments} = body as deneb.BeaconBlockBody; - const blockInfo = `blockHex=${blockHex}, slot=${slot}`; - - if (blobKzgCommitments.length < blobsCache.size) { - throw Error( - `Received more blobs=${blobsCache.size} than commitments=${blobKzgCommitments.length} for ${blockInfo}` - ); - } - - if (blobKzgCommitments.length === blobsCache.size) { - const allBlobs = getBlockInputBlobs(blobsCache); - const {blobs} = allBlobs; - const blockData = { - fork: cachedData.fork, - ...allBlobs, - blobsSource: BlobsSource.gossip, - }; - resolveAvailability(blockData); - metrics?.syncUnknownBlock.resolveAvailabilitySource.inc({source: BlockInputAvailabilitySource.GOSSIP}); - - const blockInput = getBlockInput.availableData(config, signedBlock, BlockSource.gossip, blockData); - - resolveBlockInput(blockInput); - return { - blockInput, - blockInputMeta: {pending: null, haveBlobs: blobs.length, expectedBlobs: blobKzgCommitments.length}, - }; - } - - const blockInput = getBlockInput.dataPromise(config, signedBlock, BlockSource.gossip, cachedData); - - resolveBlockInput(blockInput); - return { - blockInput, - blockInputMeta: { - pending: GossipedInputType.blob, - haveBlobs: blobsCache.size, - expectedBlobs: blobKzgCommitments.length, - }, - }; - } - - if (cachedData.fork === ForkName.fulu) { - const {dataColumnsCache, resolveAvailability, calledRecover} = cachedData as CachedDataColumns; - - // block is available, check if all blobs have shown up - const {slot} = signedBlock.message; - const blockInfo = `blockHex=${blockHex}, slot=${slot}`; - - if (NUMBER_OF_COLUMNS < dataColumnsCache.size) { - throw Error( - `Received more dataColumns=${dataColumnsCache.size} than columns=${NUMBER_OF_COLUMNS} for ${blockInfo}` - ); - } - - // get the custody columns and see if we have got all the requisite columns - const blobKzgCommitmentsLen = (signedBlock.message.body as deneb.BeaconBlockBody).blobKzgCommitments.length; - if (blobKzgCommitmentsLen === 0) { - const blockData: BlockInputDataColumns = { - fork: cachedData.fork, - dataColumns: [], - dataColumnsBytes: [], - dataColumnsSource: DataColumnsSource.gossip, - }; - resolveAvailability(blockData); - metrics?.syncUnknownBlock.resolveAvailabilitySource.inc({source: 
BlockInputAvailabilitySource.GOSSIP}); - - const blockInput = getBlockInput.availableData(config, signedBlock, BlockSource.gossip, blockData); - - resolveBlockInput(blockInput); - return { - blockInput, - blockInputMeta: {pending: null, haveColumns: 0, expectedColumns: 0}, - }; - } - - const resolveAvailabilityAndBlockInput = (source: BlockInputAvailabilitySource) => { - const allDataColumns = getBlockInputDataColumns(dataColumnsCache, this.custodyConfig.sampledColumns); - const blockData: BlockInputDataColumns = { - fork: cachedData.fork, - ...allDataColumns, - dataColumnsSource: DataColumnsSource.gossip, - }; - resolveAvailability(blockData); - // TODO(das): should not use syncUnknownBlock metrics here - metrics?.syncUnknownBlock.resolveAvailabilitySource.inc({source}); - metrics?.dataColumns.bySource.inc({source: DataColumnsSource.gossip}); - - const blockInput = getBlockInput.availableData(config, signedBlock, BlockSource.gossip, blockData); - resolveBlockInput(blockInput); - return blockInput; - }; - - const columnCount = dataColumnsCache.size; - if ( - // only try to recover all columns with "--supernode" - this.custodyConfig.sampledColumns.length === NUMBER_OF_COLUMNS && - columnCount >= NUMBER_OF_COLUMNS / 2 && - columnCount < NUMBER_OF_COLUMNS && - !calledRecover && - // doing recover right away is not efficient because it may delay data_column_sidecar validation - this.clock.secFromSlot(slot) * 1000 >= BLOCK_AVAILABILITY_CUTOFF_MS - ) { - // should call once per slot - cachedData.calledRecover = true; - callInNextEventLoop(async () => { - const logCtx = { - blockHex, - slot, - dataColumns: dataColumnsCache.size, - }; - const recoverResult = await recoverDataColumnSidecars(dataColumnsCache, this.clock, metrics).catch((e) => { - this.logger.error("Error recovering data column sidecars", logCtx, e); - return RecoverResult.Failed; - }); - metrics?.recoverDataColumnSidecars.reconstructionResult.inc({result: recoverResult}); - switch (recoverResult) { - case RecoverResult.SuccessResolved: { - resolveAvailabilityAndBlockInput(BlockInputAvailabilitySource.RECOVERED); - // Publish columns if and only if subscribed to them - const sampledColumns = this.custodyConfig.sampledColumns.map((columnIndex) => { - const dataColumn = dataColumnsCache.get(columnIndex)?.dataColumn; - if (!dataColumn) { - throw Error(`After recover, missing data column for index=${columnIndex} in cache`); - } - return dataColumn; - }); - - // for columns that we already seen, it will be ignored through `ignoreDuplicatePublishError` gossip option - this.emitter.emit(ChainEvent.publishDataColumns, sampledColumns); - this.logger.verbose("Recovered data column sidecars and resolved availability", logCtx); - break; - } - case RecoverResult.SuccessLate: - this.logger.verbose("Recovered data column sidecars but it's late to resolve availability", logCtx); - break; - case RecoverResult.Failed: - this.logger.verbose("Failed to recover data column sidecars", logCtx); - break; - case RecoverResult.NotAttemptedFull: - this.logger.verbose("Did not attempt because we have full column sidecars", logCtx); - break; - case RecoverResult.NotAttemptedLessThanHalf: - this.logger.verbose("Did not attempt because we have too few column sidecars", logCtx); - break; - default: - break; - } - }); - } - if (hasSampledDataColumns(this.custodyConfig, dataColumnsCache)) { - const blockInput = resolveAvailabilityAndBlockInput(BlockInputAvailabilitySource.GOSSIP); - const allDataColumns = getBlockInputDataColumns(dataColumnsCache, 
this.custodyConfig.sampledColumns); -        const {dataColumns} = allDataColumns; -        return { -          blockInput, -          blockInputMeta: { -            pending: null, -            haveColumns: dataColumns.length, -            expectedColumns: this.custodyConfig.sampledColumns.length, -          }, -        }; -      } - -      const blockInput = getBlockInput.dataPromise(config, signedBlock, BlockSource.gossip, cachedData); - -      resolveBlockInput(blockInput); -      return { -        blockInput, -        blockInputMeta: { -          pending: GossipedInputType.dataColumn, -          haveColumns: dataColumnsCache.size, -          expectedColumns: this.custodyConfig.sampledColumns.length, -        }, -      }; -    } - -    throw Error(`Invalid fork=${fork}`); +    if (!isBlockInputBlobs(blockInput)) { +      throw new SeenBlockInputCacheError( +        { +          code: SeenBlockInputCacheErrorCode.WRONG_BLOCK_INPUT_TYPE, +          cachedType: blockInput.type, +          requestedType: DAType.Blobs, +          ...blockInput.getLogMeta(), +        }, +        `BlockInputType mismatch adding blobIndex=${blobSidecar.index}` +      ); } -    // will need to wait for the block to showup -    if (cachedData === undefined) { -      throw Error("Missing cachedData for deneb+ blobs"); +    if (!blockInput.hasBlob(blobSidecar.index)) { +      blockInput.addBlob({blobSidecar, blockRootHex, source, seenTimestampSec, peerIdStr}); +    } else if (!created) { +      this.logger?.debug( +        `Attempted to cache blob index #${blobSidecar.index} but it is already cached on BlockInput`, +        blockInput.getLogMeta() +      ); +      this.metrics?.seenCache.blockInput.duplicateBlobCount.inc({source}); +      if (opts.throwErrorIfAlreadyKnown) { +        throw new SeenBlockInputCacheError({ +          code: SeenBlockInputCacheErrorCode.GOSSIP_BLOB_ALREADY_KNOWN, +          ...blockInput.getLogMeta(), +        }); +      } } -    if (cachedData.fork === ForkName.deneb || cachedData.fork === ForkName.electra) { -      const {blobsCache} = cachedData; +    return blockInput; +  } -      return { -        blockInput: { -          block: null, -          blockRootHex: blockHex, -          cachedData, -          blockInputPromise, -        }, -        blockInputMeta: {pending: GossipedInputType.block, haveBlobs: blobsCache.size, expectedBlobs: null}, -      }; +  getByColumn( +    { +      blockRootHex, +      columnSidecar, +      seenTimestampSec, +      source, +      peerIdStr, +    }: SourceMeta & {blockRootHex: RootHex; columnSidecar: fulu.DataColumnSidecar}, +    opts: GetByBlobOptions = {} +  ): BlockInputColumns { +    let blockInput = this.blockInputs.get(blockRootHex); +    let created = false; +    if (!blockInput) { +      created = true; +      const {forkName, daOutOfRange} = this.buildCommonProps(columnSidecar.signedBlockHeader.message.slot); +      blockInput = BlockInputColumns.createFromColumn({ +        columnSidecar, +        blockRootHex, +        daOutOfRange, +        forkName, +        source, +        seenTimestampSec, +        peerIdStr, +        custodyColumns: this.custodyConfig.custodyColumns, +        sampledColumns: this.custodyConfig.sampledColumns, +      }); +      // TODO(fulu): should this increment a createdByColumn metric instead? createdByBlob is also used by getByBlob +      this.metrics?.seenCache.blockInput.createdByBlob.inc(); +      this.blockInputs.set(blockRootHex, blockInput); } -    if (fork === ForkName.fulu) { -      const {dataColumnsCache} = cachedData as CachedDataColumns; - -      return { -        blockInput: { -          block: null, -          blockRootHex: blockHex, -          cachedData, -          blockInputPromise, +    if (!isBlockInputColumns(blockInput)) { +      throw new SeenBlockInputCacheError( +        { +          code: SeenBlockInputCacheErrorCode.WRONG_BLOCK_INPUT_TYPE, +          cachedType: blockInput.type, +          requestedType: DAType.Columns, +          ...blockInput.getLogMeta(), +        }, -        blockInputMeta: {pending: GossipedInputType.block, haveColumns: dataColumnsCache.size, expectedColumns: null}, -      }; +        `BlockInputType mismatch adding columnIndex=${columnSidecar.index}` +      ); } -    throw Error(`invalid fork=${fork} data not implemented`); - -    /** -     * TODO: @matthewkeil 
this code was unreachable. Commented to remove lint error but need to verify the condition * again to make sure this is not necessary before deleting it * * DO NOT DELETE until verified can be removed */ -    // will need to wait for the block to showup -    // if (cachedData === undefined) { -    //   throw Error("Missing cachedData for deneb+ blobs"); -    // } -    // const {blobsCache} = cachedData as CachedBlobs; - -    // return { -    //   blockInput: { -    //     block: null, -    //     blockRootHex: blockHex, -    //     cachedData: cachedData as CachedData, -    //     blockInputPromise, -    //   }, -    //   blockInputMeta: {pending: GossipedInputType.block, haveBlobs: blobsCache.size, expectedBlobs: null}, -    // }; -  } -} +    if (!blockInput.hasColumn(columnSidecar.index)) { +      blockInput.addColumn({columnSidecar, blockRootHex, source, seenTimestampSec, peerIdStr}); +    } else if (!created) { +      this.logger?.debug( +        `Attempted to cache column index #${columnSidecar.index} but it is already cached on BlockInput`, +        blockInput.getLogMeta() +      ); +      this.metrics?.seenCache.blockInput.duplicateColumnCount.inc({source}); +      if (opts.throwErrorIfAlreadyKnown) { +        throw new SeenBlockInputCacheError({ +          code: SeenBlockInputCacheErrorCode.GOSSIP_COLUMN_ALREADY_KNOWN, +          ...blockInput.getLogMeta(), +        }); +      } +    } -export function getEmptyBlockInputCacheEntry(fork: ForkName, globalCacheId: number): BlockInputCacheType { -  // Capture both the promise and its callbacks for blockInput and final availability -  // It is not spec'ed but in tests in Firefox and NodeJS the promise constructor is run immediately -  let resolveBlockInput: ((block: BlockInput) => void) | null = null; -  const blockInputPromise = new Promise((resolveCB) => { -    resolveBlockInput = resolveCB; -  }); -  if (resolveBlockInput === null) { -    throw Error("Promise Constructor was not executed immediately"); -  } -  if (!isForkPostDeneb(fork)) { -    return {fork, blockInputPromise, resolveBlockInput}; +    return blockInput; } -  if (fork === ForkName.deneb || fork === ForkName.electra) { -    let resolveAvailability: ((blobs: BlockInputBlobs) => void) | null = null; -    const availabilityPromise = new Promise((resolveCB) => { -      resolveAvailability = resolveCB; -    }); - -    if (resolveAvailability === null) { -      throw Error("Promise Constructor was not executed immediately"); -    } - -    const blobsCache = new Map(); -    const cachedData: CachedData = { -      fork, -      blobsCache, -      availabilityPromise, -      resolveAvailability, -      cacheId: ++globalCacheId, +  private buildCommonProps(slot: Slot): { +    daOutOfRange: boolean; +    forkName: ForkName; +  } { +    const forkName = this.config.getForkName(slot); +    return { +      forkName, +      daOutOfRange: isDaOutOfRange(this.config, forkName, slot, this.clock.currentEpoch), }; -    return {fork, blockInputPromise, resolveBlockInput, cachedData}; } -  if (fork === ForkName.fulu) { -    let resolveAvailability: ((blobs: BlockInputDataColumns) => void) | null = null; -    const availabilityPromise = new Promise((resolveCB) => { -      resolveAvailability = resolveCB; -    }); - -    if (resolveAvailability === null) { -      throw Error("Promise Constructor was not executed immediately"); +  /** +   * Custom implementation of pruneSetToMax that sorts by slot +   * and deletes via key/rootHex, evicting the oldest entries first +   */ +  private pruneToMaxSize() { +    let itemsToDelete = this.blockInputs.size - MAX_BLOCK_INPUT_CACHE_SIZE; + +    if (itemsToDelete > 0) { +      // sort ascending by slot so the oldest entries are deleted first +      const sorted = [...this.blockInputs.entries()].sort((a, b) => a[1].slot - b[1].slot); +      for (const [rootHex] of sorted) { +        this.blockInputs.delete(rootHex); +        itemsToDelete--; +        if (itemsToDelete <= 0) return; +      } } - -    const dataColumnsCache = new Map(); -    const cachedData: CachedData = { -      fork, -      dataColumnsCache, -      availabilityPromise, -      resolveAvailability, -      cacheId: ++globalCacheId, -      calledRecover: false, -    }; -    return {fork, blockInputPromise, resolveBlockInput, cachedData}; } +}
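+// Illustrative usage sketch (mirroring the gossip handlers): a block and its sidecars may arrive
+// in any order, and every getBy* call converges on the same cached BlockInput, e.g.
+//
+//   const blockInput = chain.seenBlockInputCache.getByBlock({block, blockRootHex, source: BlockInputSource.gossip, seenTimestampSec});
+//   // ...later, per gossiped sidecar...
+//   chain.seenBlockInputCache.getByBlob({blockRootHex, blobSidecar, source: BlockInputSource.gossip, seenTimestampSec});
+//   if (blockInput.hasBlockAndAllData()) {
+//     // ready to hand off for block import
+//   }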
-  throw Error(`Invalid fork=${fork} for getEmptyBlockInputCacheEntry`); +enum SeenBlockInputCacheErrorCode { +  WRONG_BLOCK_INPUT_TYPE = "BLOCK_INPUT_CACHE_ERROR_WRONG_BLOCK_INPUT_TYPE", +  GOSSIP_BLOB_ALREADY_KNOWN = "BLOCK_INPUT_CACHE_ERROR_GOSSIP_BLOB_ALREADY_KNOWN", +  GOSSIP_COLUMN_ALREADY_KNOWN = "BLOCK_INPUT_CACHE_ERROR_GOSSIP_COLUMN_ALREADY_KNOWN", } + +type SeenBlockInputCacheErrorType = +  | (LogMetaBasic & { +      code: SeenBlockInputCacheErrorCode.WRONG_BLOCK_INPUT_TYPE; +      cachedType: DAType; +      requestedType: DAType; +    }) +  | (LogMetaBlobs & { +      code: SeenBlockInputCacheErrorCode.GOSSIP_BLOB_ALREADY_KNOWN; +    }) +  | (LogMetaColumns & { +      code: SeenBlockInputCacheErrorCode.GOSSIP_COLUMN_ALREADY_KNOWN; +    }); + +class SeenBlockInputCacheError extends LodestarError<SeenBlockInputCacheErrorType> {} diff --git a/packages/beacon-node/src/chain/validation/blobSidecar.ts b/packages/beacon-node/src/chain/validation/blobSidecar.ts index ef77fed3258d..8d4df35f7be7 100644 --- a/packages/beacon-node/src/chain/validation/blobSidecar.ts +++ b/packages/beacon-node/src/chain/validation/blobSidecar.ts @@ -13,9 +13,8 @@ import { import {BlobIndex, Root, Slot, SubnetID, deneb, ssz} from "@lodestar/types"; import {toRootHex, verifyMerkleBranch} from "@lodestar/utils"; -import {byteArrayEquals} from "../../util/bytes.js"; import {kzg} from "../../util/kzg.js"; -import {BlobSidecarErrorCode, BlobSidecarGossipError} from "../errors/blobSidecarError.js"; +import {BlobSidecarErrorCode, BlobSidecarGossipError, BlobSidecarValidationError} from "../errors/blobSidecarError.js"; import {GossipAction} from "../errors/gossipValidation.js"; import {IBeaconChain} from "../interface.js"; import {RegenCaller} from "../regen/index.js"; @@ -135,7 +134,7 @@ export async function validateGossipBlobSidecar( } // verify if the blob inclusion proof is correct -  if (!validateInclusionProof(blobSidecar)) { +  if (!validateBlobSidecarInclusionProof(blobSidecar)) { throw new BlobSidecarGossipError(GossipAction.REJECT, { code: BlobSidecarErrorCode.INCLUSION_PROOF_INVALID, slot: blobSidecar.signedBlockHeader.message.slot, @@ -164,7 +163,7 @@ export async function validateGossipBlobSidecar( // blob, proof and commitment as a valid BLS G1 point gets verified in batch validation try { -    await validateBlobsAndProofs([blobSidecar.kzgCommitment], [blobSidecar.blob], [blobSidecar.kzgProof]); +    await validateBlobsAndBlobProofs([blobSidecar.kzgCommitment], [blobSidecar.blob], [blobSidecar.kzgProof]); } catch (_e) { throw new BlobSidecarGossipError(GossipAction.REJECT, { code: BlobSidecarErrorCode.INVALID_KZG_PROOF, @@ -173,53 +172,102 @@ } } -// https://github.com/ethereum/consensus-specs/blob/dev/specs/eip4844/beacon-chain.md#validate_blobs_sidecar -export async function validateBlobSidecars( +/** + * Validate a subset of the blob sidecars in a block + * + * Requires the block to be known to the node + */ +export async function validateBlockBlobSidecars( blockSlot: Slot, blockRoot: Root, -  expectedKzgCommitments: deneb.BlobKzgCommitments, -  blobSidecars: deneb.BlobSidecars, -  opts: {skipProofsCheck: boolean} = {skipProofsCheck: false} +  blockBlobCount: number, +  blobSidecars: deneb.BlobSidecars ): Promise { -  // assert 
len(expected_kzg_commitments) == len(blobs) - if (expectedKzgCommitments.length !== blobSidecars.length) { - throw new Error( - `blobSidecars length to commitments length mismatch. Blob length: ${blobSidecars.length}, Expected commitments length ${expectedKzgCommitments.length}` + if (blobSidecars.length === 0) { + return; + } + + if (blockBlobCount === 0) { + throw new BlobSidecarValidationError({ + code: BlobSidecarErrorCode.INCORRECT_SIDECAR_COUNT, + slot: blockSlot, + expected: blockBlobCount, + actual: blobSidecars.length, + }); + } + + // Hash the first sidecar block header and compare the rest via (cheaper) equality + const firstSidecarBlockHeader = blobSidecars[0].signedBlockHeader.message; + const firstBlockRoot = ssz.phase0.BeaconBlockHeader.hashTreeRoot(firstSidecarBlockHeader); + if (Buffer.compare(blockRoot, firstBlockRoot) !== 0) { + throw new BlobSidecarValidationError( + { + code: BlobSidecarErrorCode.INCORRECT_BLOCK, + slot: blockSlot, + blobIdx: 0, + expected: toRootHex(blockRoot), + actual: toRootHex(firstBlockRoot), + }, + "BlobSidecar doesn't match corresponding block" ); } - // No need to verify the aggregate proof of zero blobs - if (blobSidecars.length > 0) { - // Verify the blob slot and root matches - const blobs = []; - const proofs = []; - for (let index = 0; index < blobSidecars.length; index++) { - const blobSidecar = blobSidecars[index]; - const blobBlockHeader = blobSidecar.signedBlockHeader.message; - const blobBlockRoot = ssz.phase0.BeaconBlockHeader.hashTreeRoot(blobBlockHeader); - if ( - blobBlockHeader.slot !== blockSlot || - !byteArrayEquals(blobBlockRoot, blockRoot) || - blobSidecar.index !== index || - !byteArrayEquals(expectedKzgCommitments[index], blobSidecar.kzgCommitment) - ) { - throw new Error( - `Invalid blob with slot=${blobBlockHeader.slot} blobBlockRoot=${toRootHex(blobBlockRoot)} index=${ - blobSidecar.index - } for the block blockRoot=${toRootHex(blockRoot)} slot=${blockSlot} index=${index}` - ); - } - blobs.push(blobSidecar.blob); - proofs.push(blobSidecar.kzgProof); + const commitments = []; + const blobs = []; + const proofs = []; + for (const blobSidecar of blobSidecars) { + const blobIdx = blobSidecar.index; + if (!ssz.phase0.BeaconBlockHeader.equals(blobSidecar.signedBlockHeader.message, firstSidecarBlockHeader)) { + throw new BlobSidecarValidationError( + { + code: BlobSidecarErrorCode.INCORRECT_BLOCK, + slot: blockSlot, + blobIdx, + expected: toRootHex(blockRoot), + actual: "unknown - compared via equality", + }, + "BlobSidecar doesn't match corresponding block" + ); + } + + if (!validateBlobSidecarInclusionProof(blobSidecar)) { + throw new BlobSidecarValidationError( + { + code: BlobSidecarErrorCode.INCLUSION_PROOF_INVALID, + slot: blockSlot, + blobIdx, + }, + "BlobSidecar inclusion proof invalid" + ); } - if (!opts.skipProofsCheck) { - await validateBlobsAndProofs(expectedKzgCommitments, blobs, proofs); + commitments.push(blobSidecar.kzgCommitment); + blobs.push(blobSidecar.blob); + proofs.push(blobSidecar.kzgProof); + } + + // Final batch KZG proof verification + let reason: string | undefined = undefined; + try { + if (!(await kzg.asyncVerifyBlobKzgProofBatch(blobs, commitments, proofs))) { + reason = "Invalid verifyBlobKzgProofBatch"; } + } catch (e) { + reason = (e as Error).message; + } + if (reason !== undefined) { + throw new BlobSidecarValidationError( + { + code: BlobSidecarErrorCode.INVALID_KZG_PROOF_BATCH, + slot: blockSlot, + reason, + }, + "BlobSidecar has invalid KZG proof batch" + ); } } -async function 
validateBlobsAndProofs( +export async function validateBlobsAndBlobProofs( expectedKzgCommitments: deneb.BlobKzgCommitments, blobs: deneb.Blobs, proofs: deneb.KZGProofs @@ -237,7 +285,7 @@ async function validateBlobsAndProofs( } } -function validateInclusionProof(blobSidecar: deneb.BlobSidecar): boolean { +export function validateBlobSidecarInclusionProof(blobSidecar: deneb.BlobSidecar): boolean { return verifyMerkleBranch( ssz.deneb.KZGCommitment.hashTreeRoot(blobSidecar.kzgCommitment), blobSidecar.kzgCommitmentInclusionProof, diff --git a/packages/beacon-node/src/chain/validation/dataColumnSidecar.ts b/packages/beacon-node/src/chain/validation/dataColumnSidecar.ts index cf0e3a34ea64..123e348c23d6 100644 --- a/packages/beacon-node/src/chain/validation/dataColumnSidecar.ts +++ b/packages/beacon-node/src/chain/validation/dataColumnSidecar.ts @@ -4,14 +4,17 @@ import { KZG_COMMITMENTS_SUBTREE_INDEX, NUMBER_OF_COLUMNS, } from "@lodestar/params"; -import {Root, Slot, SubnetID, deneb, fulu, ssz} from "@lodestar/types"; +import {Root, Slot, SubnetID, fulu, ssz} from "@lodestar/types"; import {toRootHex, verifyMerkleBranch} from "@lodestar/utils"; import {computeStartSlotAtEpoch, getBlockHeaderProposerSignatureSet} from "@lodestar/state-transition"; import {Metrics} from "../../metrics/metrics.js"; -import {byteArrayEquals} from "../../util/bytes.js"; import {kzg} from "../../util/kzg.js"; -import {DataColumnSidecarErrorCode, DataColumnSidecarGossipError} from "../errors/dataColumnSidecarError.js"; +import { + DataColumnSidecarErrorCode, + DataColumnSidecarGossipError, + DataColumnSidecarValidationError, +} from "../errors/dataColumnSidecarError.js"; import {GossipAction} from "../errors/gossipValidation.js"; import {IBeaconChain} from "../interface.js"; import {RegenCaller} from "../regen/interface.js"; @@ -178,90 +181,15 @@ export async function validateGossipDataColumnSidecar( // -- Handled in seenGossipBlockInput } -export async function validateDataColumnsSidecars( - blockSlot: Slot, - blockRoot: Root, - blockKzgCommitments: deneb.BlobKzgCommitments, - dataColumnSidecars: fulu.DataColumnSidecars, - metrics: Metrics | null, - opts: {skipProofsCheck: boolean} = {skipProofsCheck: false} -): Promise { - // Skip verification if there are no data columns - if (dataColumnSidecars.length === 0) { - return; - } - - const commitmentBytes: Uint8Array[] = []; - const cellIndices: number[] = []; - const cells: Uint8Array[] = []; - const proofBytes: Uint8Array[] = []; - - for (let sidecarsIndex = 0; sidecarsIndex < dataColumnSidecars.length; sidecarsIndex++) { - const columnSidecar = dataColumnSidecars[sidecarsIndex]; - const {index: columnIndex, column, kzgCommitments, kzgProofs} = columnSidecar; - const columnBlockHeader = columnSidecar.signedBlockHeader.message; - const columnBlockRoot = ssz.phase0.BeaconBlockHeader.hashTreeRoot(columnBlockHeader); - if ( - columnBlockHeader.slot !== blockSlot || - !byteArrayEquals(columnBlockRoot, blockRoot) || - kzgCommitments.length === 0 || - blockKzgCommitments.length === 0 || - blockKzgCommitments.length !== kzgCommitments.length || - blockKzgCommitments - .map((commitment, i) => byteArrayEquals(commitment, kzgCommitments[i])) - .filter((result) => result === false).length - ) { - throw new Error( - `Invalid data column sidecar slot=${columnBlockHeader.slot} columnBlockRoot=${toRootHex(columnBlockRoot)} columnIndex=${columnIndex} for the block blockRoot=${toRootHex(blockRoot)} slot=${blockSlot} sidecarsIndex=${sidecarsIndex} 
kzgCommitments=${kzgCommitments.length} blockKzgCommitments=${blockKzgCommitments.length}` - ); - } - - if (columnIndex >= NUMBER_OF_COLUMNS) { - throw new Error( - `Invalid data sidecar columnIndex=${columnIndex} in slot=${blockSlot} blockRoot=${toRootHex(blockRoot)} sidecarsIndex=${sidecarsIndex}` - ); - } - - if (column.length !== kzgCommitments.length || column.length !== kzgProofs.length) { - throw new Error( - `Invalid data sidecar array lengths for columnIndex=${columnIndex} in slot=${blockSlot} blockRoot=${toRootHex(blockRoot)}` - ); - } - - commitmentBytes.push(...kzgCommitments); - cellIndices.push(...Array.from({length: column.length}, () => columnIndex)); - cells.push(...column); - proofBytes.push(...kzgProofs); - } - - if (opts.skipProofsCheck) { - return; - } - - let valid: boolean; - try { - const timer = metrics?.peerDas.kzgVerificationDataColumnBatchTime.startTimer(); - valid = await kzg.asyncVerifyCellKzgProofBatch(commitmentBytes, cellIndices, cells, proofBytes); - timer?.(); - } catch (err) { - (err as Error).message = - `Error in verifyCellKzgProofBatch for slot=${blockSlot} blockRoot=${toRootHex(blockRoot)} commitmentBytes=${commitmentBytes.length} cellIndices=${cellIndices.length} cells=${cells.length} proofBytes=${proofBytes.length}`; - throw err; - } - - if (!valid) { - throw new Error(`Invalid data column sidecars in slot=${blockSlot} blockRoot=${toRootHex(blockRoot)}`); - } -} - /** * SPEC FUNCTION * https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.4/specs/fulu/p2p-interface.md#verify_data_column_sidecar */ -export function verifyDataColumnSidecar(dataColumnSidecar: fulu.DataColumnSidecar): void { +function verifyDataColumnSidecar(dataColumnSidecar: fulu.DataColumnSidecar): void { if (dataColumnSidecar.index >= NUMBER_OF_COLUMNS) { throw new DataColumnSidecarGossipError(GossipAction.REJECT, { code: DataColumnSidecarErrorCode.INVALID_INDEX, + slot: dataColumnSidecar.signedBlockHeader.message.slot, columnIdx: dataColumnSidecar.index, }); } @@ -269,6 +197,7 @@ export function verifyDataColumnSidecar(dataColumnSidecar: fulu.DataColumnSideca if (dataColumnSidecar.kzgCommitments.length === 0) { throw new DataColumnSidecarGossipError(GossipAction.REJECT, { code: DataColumnSidecarErrorCode.NO_COMMITMENTS, + slot: dataColumnSidecar.signedBlockHeader.message.slot, columnIdx: dataColumnSidecar.index, }); } @@ -322,6 +251,125 @@ export function verifyDataColumnSidecarInclusionProof(dataColumnSidecar: fulu.Da ); } +/** + * Validate a subset of data column sidecars in a block + * + * Requires the block to be known to the node + */ +export async function validateBlockDataColumnSidecars( + blockSlot: Slot, + blockRoot: Root, + blockBlobCount: number, + dataColumnSidecars: fulu.DataColumnSidecars +): Promise { + if (dataColumnSidecars.length === 0) { + return; + } + + if (blockBlobCount === 0) { + throw new DataColumnSidecarValidationError( + { + code: DataColumnSidecarErrorCode.INCORRECT_SIDECAR_COUNT, + slot: blockSlot, + expected: 0, + actual: dataColumnSidecars.length, + }, + "Block has no blob commitments but data column sidecars were provided" + ); + } + + // Hash the first sidecar block header and compare the rest via (cheaper) equality + const firstSidecarBlockHeader = dataColumnSidecars[0].signedBlockHeader.message; + const firstBlockRoot = ssz.phase0.BeaconBlockHeader.hashTreeRoot(firstSidecarBlockHeader); + if (Buffer.compare(blockRoot, firstBlockRoot) !== 0) { + throw new DataColumnSidecarValidationError( + { + code: 
DataColumnSidecarErrorCode.INCORRECT_BLOCK, + slot: blockSlot, + columnIdx: 0, + expected: toRootHex(blockRoot), + actual: toRootHex(firstBlockRoot), + }, + "DataColumnSidecar doesn't match corresponding block" + ); + } + + const commitments: Uint8Array[] = []; + const cellIndices: number[] = []; + const cells: Uint8Array[] = []; + const proofs: Uint8Array[] = []; + for (let i = 0; i < dataColumnSidecars.length; i++) { + const columnSidecar = dataColumnSidecars[i]; + + if (columnSidecar.index >= NUMBER_OF_COLUMNS) { + throw new DataColumnSidecarValidationError( + { + code: DataColumnSidecarErrorCode.INVALID_INDEX, + slot: blockSlot, + columnIdx: columnSidecar.index, + }, + "DataColumnSidecar has invalid index" + ); + } + + if (columnSidecar.kzgCommitments.length !== blockBlobCount) { + throw new DataColumnSidecarValidationError({ + code: DataColumnSidecarErrorCode.INCORRECT_KZG_COMMITMENTS_COUNT, + slot: blockSlot, + columnIdx: columnSidecar.index, + expected: blockBlobCount, + actual: columnSidecar.kzgCommitments.length, + }); + } + + if (columnSidecar.kzgProofs.length !== columnSidecar.kzgCommitments.length) { + throw new DataColumnSidecarValidationError({ + code: DataColumnSidecarErrorCode.INCORRECT_KZG_PROOF_COUNT, + slot: blockSlot, + columnIdx: columnSidecar.index, + expected: columnSidecar.kzgCommitments.length, + actual: columnSidecar.kzgProofs.length, + }); + } + + if (!verifyDataColumnSidecarInclusionProof(columnSidecar)) { + throw new DataColumnSidecarValidationError( + { + code: DataColumnSidecarErrorCode.INCLUSION_PROOF_INVALID, + slot: blockSlot, + columnIdx: columnSidecar.index, + }, + "DataColumnSidecar has invalid inclusion proof" + ); + } + + commitments.push(...columnSidecar.kzgCommitments); + cellIndices.push(...Array.from({length: columnSidecar.column.length}, () => columnSidecar.index)); + cells.push(...columnSidecar.column); + proofs.push(...columnSidecar.kzgProofs); + } + + let reason: string | undefined; + try { + const valid = await kzg.asyncVerifyCellKzgProofBatch(commitments, cellIndices, cells, proofs); + if (!valid) { + reason = "Invalid KZG proof batch"; + } + } catch (e) { + reason = (e as Error).message; + } + if (reason !== undefined) { + throw new DataColumnSidecarValidationError( + { + code: DataColumnSidecarErrorCode.INVALID_KZG_PROOF_BATCH, + slot: blockSlot, + reason, + }, + "DataColumnSidecar has invalid KZG proof batch" + ); + } +} + /** * SPEC FUNCTION * https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.4/specs/fulu/p2p-interface.md#compute_subnet_for_data_column_sidecar diff --git a/packages/beacon-node/src/eth1/provider/utils.ts b/packages/beacon-node/src/eth1/provider/utils.ts index 39cb9d4b1849..9b3c88c2e8a6 100644 --- a/packages/beacon-node/src/eth1/provider/utils.ts +++ b/packages/beacon-node/src/eth1/provider/utils.ts @@ -1,5 +1,5 @@ import {RootHex} from "@lodestar/types"; -import {bigIntToBytes, bytesToBigInt, fromHex, toHex} from "@lodestar/utils"; +import {bigIntToBytes, bytesToBigInt, fromHex, fromHexInto, toHex} from "@lodestar/utils"; import {ErrorParseJson} from "./jsonRpcHttpClient.js"; /** QUANTITY as defined in ethereum execution layer JSON RPC https://eth.wiki/json-rpc/API */ @@ -118,6 +118,15 @@ export function dataToBytes(hex: DATA, fixedLength: number | null): Uint8Array { } } +/** + * Convert DATA into a preallocated buffer + * fromHexInto will throw if buffer's length is not the same as the decoded hex length + */ +export function dataIntoBytes(hex: DATA, buffer: Uint8Array): Uint8Array { + fromHexInto(hex, 
buffer); + return buffer; +} + /** * DATA as defined in ethereum execution layer JSON RPC https://eth.wiki/json-rpc/API */ diff --git a/packages/beacon-node/src/execution/engine/http.ts b/packages/beacon-node/src/execution/engine/http.ts index ec6cd07c0ac2..e4e69f9d5fab 100644 --- a/packages/beacon-node/src/execution/engine/http.ts +++ b/packages/beacon-node/src/execution/engine/http.ts @@ -29,12 +29,14 @@ import { } from "./interface.js"; import {PayloadIdCache} from "./payloadIdCache.js"; import { + BLOB_AND_PROOF_V2_RPC_BYTES, EngineApiRpcParamTypes, EngineApiRpcReturnTypes, ExecutionPayloadBody, assertReqSizeLimit, deserializeBlobAndProofs, deserializeBlobAndProofsV2, + deserializeBlobAndProofsV2IntoBytes, deserializeExecutionPayloadBody, parseExecutionPayload, serializeBeaconBlockRoot, @@ -489,8 +491,16 @@ export class ExecutionEngineHttp implements IExecutionEngine { return response.map(deserializeExecutionPayloadBody); } - async getBlobs(fork: ForkPostFulu, versionedHashes: VersionedHashes): Promise; - async getBlobs(fork: ForkPreFulu, versionedHashes: VersionedHashes): Promise<(BlobAndProof | null)[]>; + async getBlobs( + fork: ForkPostFulu, + versionedHashes: VersionedHashes, + buffers?: Uint8Array[] + ): Promise; + async getBlobs( + fork: ForkPreFulu, + versionedHashes: VersionedHashes, + buffers?: Uint8Array[] + ): Promise<(BlobAndProof | null)[]>; async getBlobs( fork: ForkName, versionedHashes: VersionedHashes @@ -526,7 +536,19 @@ export class ExecutionEngineHttp implements IExecutionEngine { return response.map(deserializeBlobAndProofs); } - private async getBlobsV2(versionedHashesHex: string[]) { + private async getBlobsV2(versionedHashesHex: string[], buffers?: Uint8Array[]) { + if (buffers) { + if (buffers.length !== versionedHashesHex.length) { + throw Error(`Invalid buffers length=${buffers.length} versionedHashes=${versionedHashesHex.length}`); + } + + for (const [i, buffer] of buffers.entries()) { + if (buffer.length !== BLOB_AND_PROOF_V2_RPC_BYTES) { + throw Error(`Invalid buffer[${i}] length=${buffer.length} expected=${BLOB_AND_PROOF_V2_RPC_BYTES}`); + } + } + } + const response = await this.rpc.fetchWithRetries< EngineApiRpcReturnTypes["engine_getBlobsV2"], EngineApiRpcParamTypes["engine_getBlobsV2"] @@ -547,7 +569,16 @@ export class ExecutionEngineHttp implements IExecutionEngine { throw Error(error); } - return !response ? 
null : response.map(deserializeBlobAndProofsV2); +    if (response == null) { +      return null; +    } + +    if (buffers) { +      // getBlobsV2() is designed to be called once per slot so we expect to have buffers +      return response.map((data, i) => deserializeBlobAndProofsV2IntoBytes(data, buffers[i])); +    } + +    return response.map(deserializeBlobAndProofsV2); } private async getClientVersion(clientVersion: ClientVersion): Promise { diff --git a/packages/beacon-node/src/execution/engine/interface.ts b/packages/beacon-node/src/execution/engine/interface.ts index 5f8527c094f2..db4901956ccd 100644 --- a/packages/beacon-node/src/execution/engine/interface.ts +++ b/packages/beacon-node/src/execution/engine/interface.ts @@ -187,6 +187,14 @@ export interface IExecutionEngine { getPayloadBodiesByRange(fork: ForkName, start: number, count: number): Promise<(ExecutionPayloadBody | null)[]>; -  getBlobs(fork: ForkPostFulu, versionedHashes: VersionedHashes): Promise; -  getBlobs(fork: ForkPreFulu, versionedHashes: VersionedHashes): Promise<(BlobAndProof | null)[]>; +  getBlobs( +    fork: ForkPostFulu, +    versionedHashes: VersionedHashes, +    buffers?: Uint8Array[] +  ): Promise; +  getBlobs( +    fork: ForkPreFulu, +    versionedHashes: VersionedHashes, +    buffers?: Uint8Array[] +  ): Promise<(BlobAndProof | null)[]>; } diff --git a/packages/beacon-node/src/execution/engine/types.ts b/packages/beacon-node/src/execution/engine/types.ts index 561a0d25e52b..c58b766d2e4d 100644 --- a/packages/beacon-node/src/execution/engine/types.ts +++ b/packages/beacon-node/src/execution/engine/types.ts @@ -1,6 +1,7 @@ import { BYTES_PER_FIELD_ELEMENT, BYTES_PER_LOGS_BLOOM, + CELLS_PER_EXT_BLOB, CONSOLIDATION_REQUEST_TYPE, DEPOSIT_REQUEST_TYPE, FIELD_ELEMENTS_PER_BLOB, @@ -27,6 +28,7 @@ import { DATA, QUANTITY, bytesToData, + dataIntoBytes, dataToBytes, numToQuantity, quantityToBigint, @@ -211,6 +213,11 @@ export type BlobAndProofV2Rpc = { proofs: DATA[]; }; +const BLOB_BYTES = BYTES_PER_FIELD_ELEMENT * FIELD_ELEMENTS_PER_BLOB; +const PROOF_BYTES = 48; + +export const BLOB_AND_PROOF_V2_RPC_BYTES = BLOB_BYTES + PROOF_BYTES * CELLS_PER_EXT_BLOB; + export type VersionedHashesRpc = DATA[]; export type PayloadAttributesRpc = { @@ -403,8 +410,8 @@ export function parseBlobsBundle(data: BlobsBundleRpc): BlobsBundle { return { // As of Nov 17th 2022 according to Dan's tests Geth returns null if no blobs in block commitments: (data.commitments ?? []).map((kzg) => dataToBytes(kzg, 48)), -    blobs: (data.blobs ?? []).map((blob) => dataToBytes(blob, BYTES_PER_FIELD_ELEMENT * FIELD_ELEMENTS_PER_BLOB)), -    proofs: (data.proofs ?? []).map((kzg) => dataToBytes(kzg, 48)), +    blobs: (data.blobs ?? []).map((blob) => dataToBytes(blob, BLOB_BYTES)), +    proofs: (data.proofs ?? []).map((kzg) => dataToBytes(kzg, PROOF_BYTES)), }; } @@ -579,16 +586,51 @@ export function serializeExecutionPayloadBody(data: ExecutionPayloadBody | null) export function deserializeBlobAndProofs(data: BlobAndProofRpc | null): BlobAndProof | null { return data ? { -        blob: dataToBytes(data.blob, BYTES_PER_FIELD_ELEMENT * FIELD_ELEMENTS_PER_BLOB), -        proof: dataToBytes(data.proof, 48), +        blob: dataToBytes(data.blob, BLOB_BYTES), +        proof: dataToBytes(data.proof, PROOF_BYTES), } : null; }
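+// Illustrative use of the preallocation path (`engine` here stands for an ExecutionEngineHttp
+// instance): BlobAndProofV2Rpc is fixed size, so a caller fetching blobs once per slot can reuse
+// one buffer per versioned hash, e.g.
+//
+//   const buffers = versionedHashes.map(() => new Uint8Array(BLOB_AND_PROOF_V2_RPC_BYTES));
+//   const blobsAndProofs = await engine.getBlobs(ForkName.fulu, versionedHashes, buffers);
+//
+// deserializeBlobAndProofsV2IntoBytes below then decodes each result directly into its buffer
+// rather than allocating a fresh blob and CELLS_PER_EXT_BLOB proofs per response.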
export function deserializeBlobAndProofsV2(data: BlobAndProofV2Rpc): BlobAndProofV2 { return { -    blob: dataToBytes(data.blob, BYTES_PER_FIELD_ELEMENT * FIELD_ELEMENTS_PER_BLOB), -    proofs: data.proofs.map((proof) => dataToBytes(proof, 48)), +    blob: dataToBytes(data.blob, BLOB_BYTES), +    proofs: data.proofs.map((proof) => dataToBytes(proof, PROOF_BYTES)), +  }; +} + +/** + * The same as deserializeBlobAndProofsV2 but writing into a preallocated buffer, since BlobAndProofV2Rpc is fixed size + */ +export function deserializeBlobAndProofsV2IntoBytes(data: BlobAndProofV2Rpc, buffer: Uint8Array): BlobAndProofV2 { +  if (buffer.length !== BLOB_AND_PROOF_V2_RPC_BYTES) { +    throw Error( +      `Invalid buffer length ${buffer.length}, expected ${BLOB_AND_PROOF_V2_RPC_BYTES} to hold BlobAndProofV2Rpc` +    ); +  } + +  // https://github.com/ethereum/execution-apis/blob/main/src/engine/osaka.md#blobandproofv2 +  // proofs MUST contain exactly CELLS_PER_EXT_BLOB cell proofs. +  if (data.proofs.length !== CELLS_PER_EXT_BLOB) { +    throw Error(`Invalid proofs length ${data.proofs.length}, expected ${CELLS_PER_EXT_BLOB}`); +  } + +  const blob = dataIntoBytes(data.blob, buffer.subarray(0, BLOB_BYTES)); +  const proofs: Uint8Array[] = []; +  for (let i = 0; i < CELLS_PER_EXT_BLOB; i++) { +    const proof = dataIntoBytes( +      data.proofs[i], +      buffer.subarray(BLOB_BYTES + i * PROOF_BYTES, BLOB_BYTES + (i + 1) * PROOF_BYTES) +    ); +    if (proof.length !== PROOF_BYTES) { +      throw Error(`Invalid proof length ${proof.length}, expected ${PROOF_BYTES}`); +    } +    proofs.push(proof); +  } + +  return { +    blob, +    proofs, }; } diff --git a/packages/beacon-node/src/metrics/metrics/beacon.ts b/packages/beacon-node/src/metrics/metrics/beacon.ts index 2dd9bf6bc4b4..b3d7b58ddeb0 100644 --- a/packages/beacon-node/src/metrics/metrics/beacon.ts +++ b/packages/beacon-node/src/metrics/metrics/beacon.ts @@ -287,11 +287,16 @@ export function createBeaconMetrics(register: RegistryMetricCreator) { }), }, +    // TODO(fulu): check if these and metrics in lodestar.ts for dataColumns should/can be combined or organized together peerDas: { dataColumnSidecarProcessingRequests: register.counter({ name: "beacon_data_column_sidecar_processing_requests_total", help: "Number of data column sidecars submitted for processing", }), +      dataColumnSidecarProcessingSkip: register.counter({ +        name: "beacon_data_column_sidecar_processing_skip_total", +        help: "Number of data column sidecars with processing skipped for gossip", +      }), dataColumnSidecarProcessingSuccesses: register.counter({ name: "beacon_data_column_sidecar_processing_successes_total", help: "Number of data column sidecars verified for gossip", diff --git a/packages/beacon-node/src/metrics/metrics/lodestar.ts b/packages/beacon-node/src/metrics/metrics/lodestar.ts index 6737a385b94d..818a9c899354 100644 --- a/packages/beacon-node/src/metrics/metrics/lodestar.ts +++ b/packages/beacon-node/src/metrics/metrics/lodestar.ts @@ -1,7 +1,6 @@ /** biome-ignore-all lint/suspicious/noTemplateCurlyInString: The metric templates requires to have `${}` in a normal string */ import {NotReorgedReason} from "@lodestar/fork-choice"; import {BlockInputSource} from "../../chain/blocks/blockInput/index.js"; -import {BlobsSource, BlockSource, DataColumnsSource} from "../../chain/blocks/types.js"; import {JobQueueItemType} from 
"../../chain/bls/index.js"; import {AttestationErrorCode, BlockErrorCode} from "../../chain/errors/index.js"; import { @@ -12,17 +11,16 @@ import {InsertOutcome} from "../../chain/opPools/types.js"; import {RegenCaller, RegenFnName} from "../../chain/regen/interface.js"; import {ReprocessStatus} from "../../chain/reprocess.js"; import {RejectReason} from "../../chain/seenCache/seenAttestationData.js"; -import {BlockInputAvailabilitySource} from "../../chain/seenCache/seenGossipBlockInput.js"; import {CacheItemType} from "../../chain/stateCache/types.js"; import {OpSource} from "../../chain/validatorMonitor.js"; import {ExecutionPayloadStatus} from "../../execution/index.js"; import {GossipType} from "../../network/index.js"; import {CannotAcceptWorkReason, ReprocessRejectReason} from "../../network/processor/index.js"; import {BackfillSyncMethod} from "../../sync/backfill/backfill.js"; -import {PendingBlockType} from "../../sync/index.js"; +import {PendingBlockType} from "../../sync/types.js"; import {PeerSyncType, RangeSyncType} from "../../sync/utils/remoteSyncType.js"; import {AllocSource} from "../../util/bufferPool.js"; -import {RecoverResult} from "../../util/dataColumns.js"; +import {DataColumnReconstructionCode} from "../../util/dataColumns.js"; import {LodestarMetadata} from "../options.js"; import {RegistryMetricCreator} from "../utils/registryMetricCreator.js"; @@ -498,9 +496,25 @@ export function createLodestarMetrics( help: "Count of finalized sync peers by group index", labelNames: ["columnIndex"], }), + downloadByRange: { + success: register.gauge({ + name: "lodestar_sync_range_download_by_range_success_total", + help: "Total number of successful downloadByRange calls", + }), + error: register.gauge<{code: string; client: string}>({ + name: "lodestar_sync_range_download_by_range_error_total", + help: "Total number of errored downloadByRange calls", + labelNames: ["code", "client"], + }), + warn: register.gauge<{code: string; client: string}>({ + name: "lodestar_sync_range_download_by_range_warn_total", + help: "Total number of downloadByRange call warnings", + labelNames: ["code", "client"], + }), + }, }, - syncUnknownBlock: { + blockInputSync: { switchNetworkSubscriptions: register.gauge<{action: string}>({ name: "lodestar_sync_unknown_block_network_subscriptions_count", help: "Switch network subscriptions on/off", @@ -511,6 +525,11 @@ export function createLodestarMetrics( help: "Total number of unknown block events or requests", labelNames: ["type"], }), + source: register.gauge<{source: BlockInputSource}>({ + name: "lodestar_block_input_sync_source_total", + help: "The origination source of one of the BlockInputSync triggers", + labelNames: ["source"], + }), pendingBlocks: register.gauge({ name: "lodestar_sync_unknown_block_pending_blocks_size", help: "Current size of UnknownBlockSync pending blocks cache", @@ -544,11 +563,27 @@ export function createLodestarMetrics( help: "Time elapsed between block slot time and the time block received via unknown block sync", buckets: [0.5, 1, 2, 4, 6, 12], }), - resolveAvailabilitySource: register.gauge<{source: BlockInputAvailabilitySource}>({ + resolveAvailabilitySource: register.gauge<{source: BlockInputSource}>({ name: "lodestar_sync_blockinput_availability_source", help: "Total number of blocks whose data availability was resolved", labelNames: ["source"], }), + downloadByRoot: { + success: register.gauge({ + name: "lodestar_sync_unknown_block_download_by_root_success_total", + help: "Total number of successful downloadByRoot 
calls", + }), + error: register.gauge<{code: string; client: string}>({ + name: "lodestar_sync_unknown_block_download_by_root_error_total", + help: "Total number of errored downloadByRoot calls", + labelNames: ["code", "client"], + }), + warn: register.gauge<{code: string; client: string}>({ + name: "lodestar_sync_unknown_block_download_by_root_warn_total", + help: "Total number of downloadByRoot call warnings", + labelNames: ["code", "client"], + }), + }, peerBalancer: { peersMetaCount: register.gauge({ name: "lodestar_sync_unknown_block_peer_balancer_peers_meta_count", @@ -744,28 +779,33 @@ export function createLodestarMetrics( }), }, recoverDataColumnSidecars: { - elapsedTimeTillReconstructed: register.histogram({ - name: "lodestar_data_column_sidecar_elapsed_time_till_reconstructed_seconds", - help: "Time elapsed between block slot time and the time data column sidecar reconstructed", - buckets: [2, 4, 6, 8, 10, 12], + recoverTime: register.histogram({ + name: "lodestar_recover_data_column_sidecar_recover_time_seconds", + help: "Time elapsed to recover data column sidecar", + // this data comes from 20 blobs in `fusaka-devnet-1`, need to reevaluate in the future + buckets: [0.4, 0.6, 0.8, 1.0, 1.2], }), custodyBeforeReconstruction: register.gauge({ name: "lodestar_data_columns_in_custody_before_reconstruction", help: "Number of data columns in custody before reconstruction", }), - reconstructionResult: register.gauge<{result: RecoverResult}>({ + numberOfColumnsRecovered: register.gauge({ + name: "lodestar_recover_data_column_sidecar_recovered_columns_total", + help: "Total number of columns that were recovered", + }), + reconstructionResult: register.gauge<{result: DataColumnReconstructionCode}>({ name: "lodestar_data_column_sidecars_reconstruction_result", help: "Data column sidecars reconstruction result", labelNames: ["result"], }), }, dataColumns: { - bySource: register.gauge<{source: DataColumnsSource}>({ + bySource: register.gauge<{source: BlockInputSource}>({ name: "lodestar_data_columns_by_source", help: "Number of received data columns by source", labelNames: ["source"], }), - elapsedTimeTillReceived: register.histogram<{source: DataColumnsSource}>({ + elapsedTimeTillReceived: register.histogram<{source: BlockInputSource}>({ name: "lodestar_data_column_elapsed_time_till_received_seconds", help: "Time elapsed between block slot time and the time data column received", labelNames: ["source"], @@ -800,16 +840,21 @@ export function createLodestarMetrics( name: "lodestar_import_block_set_head_after_first_interval_total", help: "Total times an imported block is set as head after the first slot interval", }), - bySource: register.gauge<{source: BlockSource}>({ + bySource: register.gauge<{source: BlockInputSource}>({ name: "lodestar_import_block_by_source_total", help: "Total number of imported blocks by source", labelNames: ["source"], }), - blobsBySource: register.gauge<{blobsSource: BlobsSource}>({ + blobsBySource: register.gauge<{blobsSource: BlockInputSource}>({ name: "lodestar_import_blobs_by_source_total", help: "Total number of imported blobs by source", labelNames: ["blobsSource"], }), + columnsBySource: register.gauge<{source: BlockInputSource}>({ + name: "lodestar_import_columns_by_source_total", + help: "Total number of imported columns (sampled columns) by source", + labelNames: ["source"], + }), notOverrideFcuReason: register.counter<{reason: NotReorgedReason}>({ name: "lodestar_import_block_not_override_fcu_reason_total", help: "Reason why the fcu call is not 
suppressed during block import", @@ -1333,6 +1378,11 @@ export function createLodestarMetrics( help: "Total number of duplicate blobs that pass validation and attempt to be cached but are known", labelNames: ["source"], }), + duplicateColumnCount: register.gauge<{source: BlockInputSource}>({ + name: "lodestar_seen_block_input_cache_duplicate_column_count", + help: "Total number of duplicate columns that pass validation and attempt to be cached but are known", + labelNames: ["source"], + }), createdByBlock: register.gauge({ name: "lodestar_seen_block_input_cache_items_created_by_block", help: "Number of BlockInputs created via a block being seen first", diff --git a/packages/beacon-node/src/network/events.ts b/packages/beacon-node/src/network/events.ts index feae977e20e4..8650483f2447 100644 --- a/packages/beacon-node/src/network/events.ts +++ b/packages/beacon-node/src/network/events.ts @@ -1,7 +1,6 @@ import {EventEmitter} from "node:events"; import {PeerId, TopicValidatorResult} from "@libp2p/interface"; -import {CustodyIndex, RootHex, Status} from "@lodestar/types"; -import {BlockInput, NullBlockInput} from "../chain/blocks/types.js"; +import {CustodyIndex, Status} from "@lodestar/types"; import {PeerIdStr} from "../util/peerId.js"; import {StrictEventEmitterSingleArg} from "../util/strictEvents.js"; import {EventDirection} from "../util/workerEvents.js"; @@ -14,10 +13,6 @@ export enum NetworkEvent { /** A peer has been disconnected */ peerDisconnected = "peer-manager.peer-disconnected", reqRespRequest = "req-resp.request", - // TODO remove this event, this is not a network-level concern, rather a chain / sync concern - unknownBlockParent = "unknownBlockParent", - unknownBlock = "unknownBlock", - unknownBlockInput = "unknownBlockInput", // Network processor events /** (Network -> App) A gossip message is ready for validation */ @@ -35,9 +30,6 @@ export type NetworkEventData = { }; [NetworkEvent.peerDisconnected]: {peer: PeerIdStr}; [NetworkEvent.reqRespRequest]: {request: RequestTypedContainer; peer: PeerId}; - [NetworkEvent.unknownBlockParent]: {blockInput: BlockInput; peer: PeerIdStr}; - [NetworkEvent.unknownBlock]: {rootHex: RootHex; peer?: PeerIdStr}; - [NetworkEvent.unknownBlockInput]: {blockInput: BlockInput | NullBlockInput; peer?: PeerIdStr}; [NetworkEvent.pendingGossipsubMessage]: PendingGossipsubMessage; [NetworkEvent.gossipMessageValidationResult]: { msgId: string; @@ -50,9 +42,6 @@ export const networkEventDirection: Record = { [NetworkEvent.peerConnected]: EventDirection.workerToMain, [NetworkEvent.peerDisconnected]: EventDirection.workerToMain, [NetworkEvent.reqRespRequest]: EventDirection.none, // Only used internally in NetworkCore - [NetworkEvent.unknownBlockParent]: EventDirection.workerToMain, - [NetworkEvent.unknownBlock]: EventDirection.workerToMain, - [NetworkEvent.unknownBlockInput]: EventDirection.workerToMain, [NetworkEvent.pendingGossipsubMessage]: EventDirection.workerToMain, [NetworkEvent.gossipMessageValidationResult]: EventDirection.mainToWorker, }; diff --git a/packages/beacon-node/src/network/interface.ts b/packages/beacon-node/src/network/interface.ts index 522b7bf07303..4b69deae4a01 100644 --- a/packages/beacon-node/src/network/interface.ts +++ b/packages/beacon-node/src/network/interface.ts @@ -34,6 +34,7 @@ import { } from "@lodestar/types"; import type {Datastore} from "interface-datastore"; import {Libp2p as ILibp2p} from "libp2p"; +import {BlockInputSource} from "../chain/blocks/blockInput/types.js"; import {CustodyConfig} from 
"../util/dataColumns.js"; import {PeerIdStr} from "../util/peerId.js"; import {BeaconBlocksByRootRequest, BlobSidecarsByRootRequest, DataColumnSidecarsByRootRequest} from "../util/types.js"; @@ -66,7 +67,7 @@ export interface INetwork extends INetworkCorePublic { reportPeer(peer: PeerIdStr, action: PeerAction, actionName: string): void; shouldAggregate(subnet: SubnetID, slot: Slot): boolean; reStatusPeers(peers: PeerIdStr[]): Promise; - searchUnknownSlotRoot(slotRoot: SlotRootHex, peer?: PeerIdStr): void; + searchUnknownSlotRoot(slotRoot: SlotRootHex, source: BlockInputSource, peer?: PeerIdStr): void; // ReqResp sendBeaconBlocksByRange( peerId: PeerIdStr, diff --git a/packages/beacon-node/src/network/network.ts b/packages/beacon-node/src/network/network.ts index 1005d563d7a9..c0251dde2b83 100644 --- a/packages/beacon-node/src/network/network.ts +++ b/packages/beacon-node/src/network/network.ts @@ -28,6 +28,7 @@ import { phase0, } from "@lodestar/types"; import {prettyPrintIndices, sleep} from "@lodestar/utils"; +import {BlockInputSource} from "../chain/blocks/blockInput/types.js"; import {ChainEvent, IBeaconChain} from "../chain/index.js"; import {computeSubnetForDataColumnSidecar} from "../chain/validation/dataColumnSidecar.js"; import {IBeaconDb} from "../db/interface.js"; @@ -271,8 +272,8 @@ export class Network implements INetwork { return this.core.reStatusPeers(peers); } - searchUnknownSlotRoot(slotRoot: SlotRootHex, peer?: PeerIdStr): void { - this.networkProcessor.searchUnknownSlotRoot(slotRoot, peer); + searchUnknownSlotRoot(slotRoot: SlotRootHex, source: BlockInputSource, peer?: PeerIdStr): void { + this.networkProcessor.searchUnknownSlotRoot(slotRoot, source, peer); } async reportPeer(peer: PeerIdStr, action: PeerAction, actionName: string): Promise { diff --git a/packages/beacon-node/src/network/peers/peerManager.ts b/packages/beacon-node/src/network/peers/peerManager.ts index bf0351035f30..c4a6349b95cb 100644 --- a/packages/beacon-node/src/network/peers/peerManager.ts +++ b/packages/beacon-node/src/network/peers/peerManager.ts @@ -357,6 +357,8 @@ export class PeerManager { (metadata as Partial).custodyGroupCount ?? // TODO: spec says that Clients MAY reject peers with a value less than CUSTODY_REQUIREMENT this.config.CUSTODY_REQUIREMENT, + // TODO(fulu): this should be columns not groups. need to change everywhere. 
we consume columns and should + // cache that instead so if groups->columns ever changes from 1-1 we only need to update that here custodyGroups, samplingGroups, }; diff --git a/packages/beacon-node/src/network/processor/gossipHandlers.ts b/packages/beacon-node/src/network/processor/gossipHandlers.ts index e07bfc2a9d31..05addcf776bb 100644 --- a/packages/beacon-node/src/network/processor/gossipHandlers.ts +++ b/packages/beacon-node/src/network/processor/gossipHandlers.ts @@ -1,14 +1,20 @@ import {routes} from "@lodestar/api"; -import {BeaconConfig, ChainForkConfig} from "@lodestar/config"; -import {ForkName, ForkPostElectra, ForkPreElectra, ForkSeq, isForkPostElectra} from "@lodestar/params"; -import {computeTimeAtSlot} from "@lodestar/state-transition"; +import {BeaconConfig} from "@lodestar/config"; +import { + ForkName, + ForkPostElectra, + ForkPreElectra, + ForkSeq, + INTERVALS_PER_SLOT, + isForkPostElectra, + NUMBER_OF_COLUMNS, +} from "@lodestar/params"; import { Root, SignedBeaconBlock, SingleAttestation, Slot, SubnetID, - UintNum64, deneb, fulu, ssz, @@ -16,14 +22,14 @@ import { } from "@lodestar/types"; import {LogLevel, Logger, prettyBytes, toHex, toRootHex} from "@lodestar/utils"; import { - BlobSidecarValidation, BlockInput, - BlockInputAvailableData, - BlockInputType, - DataColumnsSource, - GossipedInputType, - NullBlockInput, -} from "../../chain/blocks/types.js"; + BlockInputColumns, + BlockInputSource, + IBlockInput, + isBlockInputColumns, +} from "../../chain/blocks/blockInput/index.js"; +import {BlobSidecarValidation} from "../../chain/blocks/types.js"; +import {ChainEvent} from "../../chain/emitter.js"; import { AttestationError, AttestationErrorCode, @@ -60,7 +66,7 @@ import {OpSource} from "../../chain/validatorMonitor.js"; import {Metrics} from "../../metrics/index.js"; import {kzgCommitmentToVersionedHash} from "../../util/blobs.js"; import {INetworkCore} from "../core/index.js"; -import {NetworkEvent, NetworkEventBus} from "../events.js"; +import {NetworkEventBus} from "../events.js"; import { BatchGossipHandlers, GossipHandlerParamGeneric, @@ -72,6 +78,7 @@ import {sszDeserialize} from "../gossip/topic.js"; import {INetwork} from "../interface.js"; import {PeerAction} from "../peers/index.js"; import {AggregatorTracker} from "./aggregatorTracker.js"; +import {getCutoffTimeMs} from "../../util/clock.js"; /** * Gossip handler options as part of network options @@ -117,14 +124,14 @@ export function getGossipHandlers(modules: ValidatorFnsModules, options: GossipH * We only have a choice to do batch validation for beacon_attestation topic. 
*/ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHandlerOpts): SequentialGossipHandlers { - const {chain, config, metrics, events, logger, core} = modules; + const {chain, config, metrics, logger, core} = modules; async function validateBeaconBlock( signedBlock: SignedBeaconBlock, fork: ForkName, peerIdStr: string, seenTimestampSec: number - ): Promise { + ): Promise { const slot = signedBlock.message.slot; const forkTypes = config.getForkTypes(slot); const blockRootHex = toRootHex(forkTypes.BeaconBlock.hashTreeRoot(signedBlock.message)); @@ -135,38 +142,27 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand // always set block to seen cache for all forks so that we don't need to download it // TODO: validate block before adding to cache // tracked in https://github.com/ChainSafe/lodestar/issues/7957 - const blockInputRes = chain.seenGossipBlockInput.getGossipBlockInput( - config, - { - type: GossipedInputType.block, - signedBlock, - }, - metrics - ); - const blockInput = blockInputRes.blockInput; - // blockInput can't be returned null, improve by enforcing via return types - if (blockInput.block === null) { - throw Error( - `Invalid null blockInput returned by getGossipBlockInput for type=${GossipedInputType.block} blockHex=${blockShortHex} slot=${slot}` - ); - } - const blockInputMeta = - config.getForkSeq(signedBlock.message.slot) >= ForkSeq.deneb ? blockInputRes.blockInputMeta : {}; const logCtx = { - slot: slot, - root: blockShortHex, currentSlot: chain.clock.currentSlot, peerId: peerIdStr, delaySec, - ...blockInputMeta, recvToValLatency, }; logger.debug("Received gossip block", {...logCtx}); + let blockInput: IBlockInput | undefined; try { await validateGossipBlock(config, chain, signedBlock, fork); + blockInput = chain.seenBlockInputCache.getByBlock({ + block: signedBlock, + blockRootHex, + source: BlockInputSource.gossip, + seenTimestampSec, + peerIdStr, + }); + const blockInputMeta = blockInput.getLogMeta(); const recvToValidation = Date.now() / 1000 - seenTimestampSec; const validationTime = recvToValidation - recvToValLatency; @@ -174,17 +170,26 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand metrics?.gossipBlock.gossipValidation.recvToValidation.observe(recvToValidation); metrics?.gossipBlock.gossipValidation.validationTime.observe(validationTime); - logger.debug("Validated gossip block", {...logCtx, recvToValidation, validationTime}); + logger.debug("Validated gossip block", {...blockInputMeta, ...logCtx, recvToValidation, validationTime}); chain.emitter.emit(routes.events.EventType.blockGossip, {slot, block: blockRootHex}); return blockInput; } catch (e) { if (e instanceof BlockGossipError) { + // TODO(fulu): check that this is the only error that should trigger resolution of the block and all others + // cause the block to get thrown away // Don't trigger this yet if full block and blobs haven't arrived yet - if (e.type.code === BlockErrorCode.PARENT_UNKNOWN && blockInput !== null) { + if (e.type.code === BlockErrorCode.PARENT_UNKNOWN && blockInput) { logger.debug("Gossip block has error", {slot, root: blockShortHex, code: e.type.code}); - events.emit(NetworkEvent.unknownBlockParent, {blockInput, peer: peerIdStr}); + // TODO(fulu): should this be unknownParent event? 
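+          // Hand the cached-but-incomplete BlockInput to the sync layer so it can fetch the unknown
+          // parent from this peer; the BlockInput stays in seenBlockInputCache so sidecars arriving
+          // over gossip can still attach to it while the parent is being resolved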
+ chain.emitter.emit(ChainEvent.incompleteBlockInput, { + blockInput, + peer: peerIdStr, + source: BlockInputSource.gossip, + }); + // throw error (don't prune the blockInput) + throw e; } if (e.action === GossipAction.REJECT) { @@ -192,6 +197,7 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand } } + chain.seenBlockInputCache.prune(blockRootHex); throw e; } } @@ -201,7 +207,7 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand subnet: SubnetID, peerIdStr: string, seenTimestampSec: number - ): Promise { + ): Promise { const blobBlockHeader = blobSidecar.signedBlockHeader.message; const slot = blobBlockHeader.slot; const fork = config.getForkName(slot); @@ -213,14 +219,13 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand try { await validateGossipBlobSidecar(fork, chain, blobSidecar, subnet); - const {blockInput, blockInputMeta} = chain.seenGossipBlockInput.getGossipBlockInput( - config, - { - type: GossipedInputType.blob, - blobSidecar, - }, - metrics - ); + const blockInput = chain.seenBlockInputCache.getByBlob({ + blockRootHex, + blobSidecar, + source: BlockInputSource.gossip, + seenTimestampSec, + peerIdStr, + }); const recvToValidation = Date.now() / 1000 - seenTimestampSec; const validationTime = recvToValidation - recvToValLatency; @@ -238,13 +243,11 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand } logger.debug("Received gossip blob", { - slot: slot, - root: blockShortHex, + ...blockInput.getLogMeta(), currentSlot: chain.clock.currentSlot, peerId: peerIdStr, delaySec, subnet, - ...blockInputMeta, recvToValLatency, recvToValidation, validationTime, @@ -257,6 +260,11 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand if (e.type.code === BlobSidecarErrorCode.PARENT_UNKNOWN) { logger.debug("Gossip blob has error", {slot, root: blockShortHex, code: e.type.code}); // no need to trigger `unknownBlockParent` event here, as we already did it in `validateBeaconBlock()` + // + // TODO(fulu): is this note above correct? Could have random blob that we see that could trigger + // unknownBlockSync. And duplicate addition of a block will be deduplicated by the + // BlockInputSync event handler. Check this!! 
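+          // A possible shape for re-triggering recovery from a blob (sketch only; mirrors the block
+          // handler above and assumes blockInput is first resolved via chain.seenBlockInputCache):
+          //   chain.emitter.emit(ChainEvent.incompleteBlockInput, {blockInput, peer: peerIdStr, source: BlockInputSource.gossip});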
@@ -274,33 +282,43 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand async function validateBeaconDataColumn( dataColumnSidecar: fulu.DataColumnSidecar, - dataColumnBytes: Uint8Array, + _dataColumnBytes: Uint8Array, gossipSubnet: SubnetID, peerIdStr: string, seenTimestampSec: number - ): Promise<BlockInput | NullBlockInput> { + ): Promise<IBlockInput> { metrics?.peerDas.dataColumnSidecarProcessingRequests.inc(); - const verificationTimer = metrics?.peerDas.dataColumnSidecarGossipVerificationTime.startTimer(); - const dataColumnBlockHeader = dataColumnSidecar.signedBlockHeader.message; const slot = dataColumnBlockHeader.slot; const blockRootHex = toRootHex(ssz.phase0.BeaconBlockHeader.hashTreeRoot(dataColumnBlockHeader)); - const blockShortHex = prettyBytes(blockRootHex); + + // first check if we should even process this column (we may have already processed it via getBlobsV2) + { + const blockInput = chain.seenBlockInputCache.get(blockRootHex); + if (blockInput && isBlockInputColumns(blockInput) && blockInput.hasColumn(dataColumnSidecar.index)) { + metrics?.peerDas.dataColumnSidecarProcessingSkip.inc(); + logger.debug("Already have column sidecar, skipping processing", { + ...blockInput.getLogMeta(), + index: dataColumnSidecar.index, + }); + return blockInput; + } + } + + const verificationTimer = metrics?.peerDas.dataColumnSidecarGossipVerificationTime.startTimer(); const delaySec = chain.clock.secFromSlot(slot, seenTimestampSec); const recvToValLatency = Date.now() / 1000 - seenTimestampSec; try { await validateGossipDataColumnSidecar(chain, dataColumnSidecar, gossipSubnet, metrics); - const {blockInput, blockInputMeta} = chain.seenGossipBlockInput.getGossipBlockInput( - config, - { - type: GossipedInputType.dataColumn, - dataColumnSidecar, - dataColumnBytes, - }, - metrics - ); + const blockInput = chain.seenBlockInputCache.getByColumn({ + blockRootHex, + columnSidecar: dataColumnSidecar, + source: BlockInputSource.gossip, + seenTimestampSec, + peerIdStr, + }); const recvToValidation = Date.now() / 1000 - seenTimestampSec; const validationTime = recvToValidation - recvToValLatency; @@ -319,14 +337,12 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand } logger.debug("Received gossip dataColumn", { - slot: slot, - root: blockShortHex, + ...blockInput.getLogMeta(), currentSlot: chain.clock.currentSlot, peerId: peerIdStr, delaySec, gossipSubnet, columnIndex: dataColumnSidecar.index, - ...blockInputMeta, recvToValLatency, recvToValidation, validationTime, @@ -341,6 +357,11 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand `gossip_reject_slot_${slot}_index_${dataColumnSidecar.index}` ); // no need to trigger `unknownBlockParent` event here, as we already did it in `validateBeaconBlock()` + // + // TODO(fulu): is the note above correct? We could see a random column that should trigger + // unknownBlockSync, and duplicate addition of a block will be deduplicated by the + // BlockInputSync event handler. Check this!! + // events.emit(NetworkEvent.unknownBlockParent, {blockInput, peer: peerIdStr}); } throw e; @@ -349,21 +370,29 @@ } }
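// Sketch (not part of this changeset): the early-exit above relies on the
// cache being get-or-create with idempotent, per-index column storage: a
// column that arrives before its block still creates the entry, and a
// duplicate column (or one already recovered through the execution engine)
// skips gossip KZG verification entirely. A toy model of that guard, assuming
// a column-flavored variant of the IBlockInputSketch shape shown earlier:
interface BlockInputColumnsSketch extends IBlockInputSketch {
  hasColumn(index: number): boolean; // assumed to be a cheap map lookup, far cheaper than KZG verification
  readonly columnCount: number;
}
function shouldVerifyColumnSketch(existing: BlockInputColumnsSketch | undefined, columnIndex: number): boolean {
  // verify only the first copy of each (blockRoot, columnIndex) pair
  return existing === undefined || !existing.hasColumn(columnIndex);
}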
- function handleValidBeaconBlock(blockInput: BlockInput, peerIdStr: string, seenTimestampSec: number): void { - const signedBlock = blockInput.block; + function handleValidBeaconBlock(blockInput: IBlockInput, peerIdStr: string, seenTimestampSec: number): void { + const signedBlock = blockInput.getBlock(); + const slot = signedBlock.message.slot; // Handler - MUST NOT `await`, to allow validation result to be propagated - const delaySec = seenTimestampSec - (chain.genesisTime + signedBlock.message.slot * config.SECONDS_PER_SLOT); + const delaySec = seenTimestampSec - (chain.genesisTime + slot * config.SECONDS_PER_SLOT); metrics?.gossipBlock.elapsedTimeTillReceived.observe({source: OpSource.gossip}, delaySec); chain.validatorMonitor?.registerBeaconBlock(OpSource.gossip, delaySec, signedBlock.message); - // if blobs are not yet fully available start an aggressive blob pull - if (blockInput.type === BlockInputType.dataPromise) { - events.emit(NetworkEvent.unknownBlockInput, {blockInput, peer: peerIdStr}); - } else if (blockInput.type === BlockInputType.availableData) { + if (!blockInput.hasBlockAndAllData()) { + chain.logger.debug("Received gossip block, attempting fetch of unavailable data", blockInput.getLogMeta()); + // The data is not yet fully available; immediately trigger an aggressive pull via unknown block sync + chain.emitter.emit(ChainEvent.incompleteBlockInput, { + blockInput, + peer: peerIdStr, + source: BlockInputSource.gossip, + }); + // immediately attempt fetch of data columns from execution engine + chain.getBlobsTracker.triggerGetBlobs(blockInput); + } else { metrics?.blockInputFetchStats.totalDataAvailableBlockInputs.inc(); metrics?.blockInputFetchStats.totalDataAvailableBlockInputBlobs.inc( - (blockInput.block.message as deneb.BeaconBlock).body.blobKzgCommitments.length + (signedBlock.message as deneb.BeaconBlock).body.blobKzgCommitments.length ); } @@ -385,14 +414,15 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand // to track block process steps seenTimestampSec, // gossip block is validated, we want to process it asap - eagerPersistBlock: true, + // however, due to other optimizations, we don't eagerly persist the block + eagerPersistBlock: false, isGossipBlock: true, }) .then(() => { // Returns the delay between the start of `block.slot` and `current time` - const delaySec = chain.clock.secFromSlot(signedBlock.message.slot); + const delaySec = chain.clock.secFromSlot(slot); metrics?.gossipBlock.elapsedTimeTillProcessed.observe(delaySec); - chain.seenGossipBlockInput.prune(); + chain.seenBlockInputCache.prune(blockInput.blockRootHex); }) .catch((e) => { // Adjust verbosity based on error type @@ -401,12 +431,6 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand if (e instanceof BlockError) { switch (e.type.code) { case BlockErrorCode.DATA_UNAVAILABLE: { - const slot = signedBlock.message.slot; - const forkTypes = config.getForkTypes(slot); - const rootHex = toRootHex(forkTypes.BeaconBlock.hashTreeRoot(signedBlock.message)); - - events.emit(NetworkEvent.unknownBlock, {rootHex, peer: peerIdStr}); - // Error is quite frequent and not critical logLevel = LogLevel.debug; break; @@ -431,29 +455,10 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand logLevel = LogLevel.error; }
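// Sketch (not part of this changeset): the blob/column topic handlers below
// replace the deleted raceWithCutoff() helper with getCutoffTimeMs() plus
// blockInput.waitForAllData(). A plausible model of both pieces, reusing the
// cutoff arithmetic of the removed helper (names with a Sketch suffix are
// assumptions, not the actual exports):
function getCutoffTimeMsSketch(
  chain: {config: ChainForkConfig; genesisTime: number},
  blockSlot: number,
  cutoffMsFromSlotStart: number
): number {
  // ms left from now until (start of blockSlot + cutoff), clamped at 0
  return Math.max(computeTimeAtSlot(chain.config, blockSlot, chain.genesisTime) * 1000 + cutoffMsFromSlotStart - Date.now(), 0);
}
async function waitForAllDataSketch(allDataArrived: Promise<void>, timeoutMs: number): Promise<void> {
  // reject on timeout so the caller's .catch() can emit ChainEvent.incompleteBlockInput
  const cutoff = new Promise<never>((_resolve, reject) => setTimeout(() => reject(new Error("BLOCK_AVAILABILITY_CUTOFF")), timeoutMs));
  await Promise.race([allDataArrived, cutoff]);
}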
metrics?.gossipBlock.processBlockErrors.inc({error: e instanceof BlockError ? e.type.code : "NOT_BLOCK_ERROR"}); - logger[logLevel]("Error receiving block", {slot: signedBlock.message.slot, peer: peerIdStr}, e as Error); - chain.seenGossipBlockInput.prune(); - }); - - if (blockInput.type === BlockInputType.dataPromise) { - const blockSlot = blockInput.block.message.slot; - // if blobs are not yet fully available start an aggressive blob pull - chain.logger.debug("Block under processing is not available, racing with cutoff to add to unknownBlockInput", { - blockSlot, + logger[logLevel]("Error processing block", {slot, peer: peerIdStr}, e as Error); + // TODO(fulu): Revisit when we prune block inputs + chain.seenBlockInputCache.prune(blockInput.blockRootHex); }); - raceWithCutoff( - chain, - blockSlot, - blockInput.cachedData.availabilityPromise as Promise, - BLOCK_AVAILABILITY_CUTOFF_MS - ).catch((_e) => { - chain.logger.debug("Block under processing not yet available, racing with cutoff to add to unknownBlockInput", { - blockSlot, - }); - events.emit(NetworkEvent.unknownBlockInput, {blockInput, peer: peerIdStr}); - return null; - }); - } } return { @@ -486,70 +491,27 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand throw new GossipActionError(GossipAction.REJECT, {code: "PRE_DENEB_BLOCK"}); } const blockInput = await validateBeaconBlob(blobSidecar, topic.subnet, peerIdStr, seenTimestampSec); - if (blockInput.block !== null) { - if (blockInput.type === BlockInputType.dataPromise) { - chain.logger.debug("Block corresponding to blob is available but waiting for data availability", { - blobSlot, - index, - }); - await raceWithCutoff( - chain, - blobSlot, - blockInput.cachedData.availabilityPromise as Promise, - BLOCK_AVAILABILITY_CUTOFF_MS - ).catch((_e) => { - chain.logger.debug("Block under processing not yet fully available adding to unknownBlockInput", { - blobSlot, - }); - events.emit(NetworkEvent.unknownBlockInput, {blockInput, peer: peerIdStr}); - }); - } - } else { - // wait for the block to arrive till some cutoff else emit unknownBlockInput event - chain.logger.debug("Block not yet available, racing with cutoff", {blobSlot, index}); - const normalBlockInput = await raceWithCutoff( - chain, - blobSlot, - blockInput.blockInputPromise, - BLOCK_AVAILABILITY_CUTOFF_MS - ).catch((_e) => { - return null; + if (!blockInput.hasBlockAndAllData()) { + const cutoffTimeMs = getCutoffTimeMs(chain, blobSlot, BLOCK_AVAILABILITY_CUTOFF_MS); + chain.logger.debug("Received gossip blob, waiting for full data availability", { + msToWait: cutoffTimeMs, + blobIndex: index, + ...blockInput.getLogMeta(), }); - - if (normalBlockInput !== null) { - // we can directly send it for processing but block gossip handler will queue it up anyway - // if we see any issues later, we can send it to handleValidBeaconBlock - // - // handleValidBeaconBlock(normalBlockInput, peerIdStr, seenTimestampSec); - // - // however we can emit the event which will atleast add the peer to the list of peers to pull - // data from - if (normalBlockInput.type === BlockInputType.dataPromise) { - chain.logger.debug("Block corresponding to blob is now available but waiting for data availability", { - blobSlot, - index, - }); - await raceWithCutoff( - chain, - blobSlot, - normalBlockInput.cachedData.availabilityPromise as Promise, - BLOCK_AVAILABILITY_CUTOFF_MS - ).catch((_e) => { - chain.logger.debug("Block under processing not yet fully available adding to unknownBlockInput", { - blobSlot, - }); - 
events.emit(NetworkEvent.unknownBlockInput, {blockInput: normalBlockInput, peer: peerIdStr}); - }); - } else { - chain.logger.debug("Block corresponding to blob is now available for processing", {blobSlot, index}); - } - } else { + blockInput.waitForAllData(cutoffTimeMs).catch((_e) => { chain.logger.debug( - "Block corresponding to blob not available till BLOCK_AVAILABILITY_CUTOFF_MS adding to unknownBlockInput", - {blobSlot, index} + "Waited for data after receiving gossip blob. Cut-off reached so attempting to fetch remainder of BlockInput", + { + blobIndex: index, + ...blockInput.getLogMeta(), + } ); - events.emit(NetworkEvent.unknownBlockInput, {blockInput, peer: peerIdStr}); - } + chain.emitter.emit(ChainEvent.incompleteBlockInput, { + blockInput, + peer: peerIdStr, + source: BlockInputSource.gossip, + }); + }); } }, @@ -568,7 +530,7 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand throw new GossipActionError(GossipAction.REJECT, {code: "PRE_FULU_BLOCK"}); } const delaySec = chain.clock.secFromSlot(dataColumnSlot, seenTimestampSec); - metrics?.dataColumns.elapsedTimeTillReceived.observe({source: DataColumnsSource.gossip}, delaySec); + metrics?.dataColumns.elapsedTimeTillReceived.observe({source: BlockInputSource.gossip}, delaySec); const blockInput = await validateBeaconDataColumn( dataColumnSidecar, serializedData, @@ -576,68 +538,37 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand peerIdStr, seenTimestampSec ); - if (blockInput.block !== null) { - if (blockInput.type === BlockInputType.dataPromise) { - chain.logger.debug("Block corresponding to data column is available but waiting for data availability", { - dataColumnSlot, - index, - }); - await raceWithCutoff( - chain, - dataColumnSlot, - blockInput.cachedData.availabilityPromise as Promise, - BLOCK_AVAILABILITY_CUTOFF_MS - ).catch((_e) => { - chain.logger.debug("Block under processing not yet fully available adding to unknownBlockInput", { - dataColumnSlot, - }); - events.emit(NetworkEvent.unknownBlockInput, {blockInput, peer: peerIdStr}); - }); - } - } else { - // wait for the block to arrive till some cutoff else emit unknownBlockInput event - chain.logger.debug("Block not yet available, racing with cutoff", {dataColumnSlot, index}); - const normalBlockInput = await raceWithCutoff( - chain, - dataColumnSlot, - blockInput.blockInputPromise, - BLOCK_AVAILABILITY_CUTOFF_MS - ).catch((_e) => { - return null; + if (!blockInput.hasBlockAndAllData()) { + const cutoffTimeMs = getCutoffTimeMs(chain, dataColumnSlot, BLOCK_AVAILABILITY_CUTOFF_MS); + chain.logger.debug("Received gossip data column, waiting for full data availability", { + msToWait: cutoffTimeMs, + dataColumnIndex: index, + ...blockInput.getLogMeta(), }); - - if (normalBlockInput !== null) { - if (normalBlockInput.type === BlockInputType.dataPromise) { - chain.logger.debug( - "Block corresponding to data column is now available but waiting for data availability", - { - dataColumnSlot, - index, - } - ); - await raceWithCutoff( - chain, - dataColumnSlot, - normalBlockInput.cachedData.availabilityPromise as Promise, - BLOCK_AVAILABILITY_CUTOFF_MS - ).catch((_e) => { - chain.logger.debug("Block under processing not yet fully available adding to unknownBlockInput", { - dataColumnSlot, - }); - events.emit(NetworkEvent.unknownBlockInput, {blockInput: normalBlockInput, peer: peerIdStr}); - }); - } else { - chain.logger.debug("Block corresponding to data column is now available for processing", { - dataColumnSlot, 
- index, - }); - } - } else { - chain.logger.debug("Block not available till BLOCK_AVAILABILITY_CUTOFF_MS", { - dataColumnSlot, - index, + // do not await here to not delay gossip validation + blockInput.waitForAllData(cutoffTimeMs).catch((_e) => { + chain.logger.debug( + "Waited for data after receiving gossip column. Cut-off reached so attempting to fetch remainder of BlockInput", + { + dataColumnIndex: index, + ...blockInput.getLogMeta(), + } + ); + chain.emitter.emit(ChainEvent.incompleteBlockInput, { + blockInput, + peer: peerIdStr, + source: BlockInputSource.gossip, }); - events.emit(NetworkEvent.unknownBlockInput, {blockInput, peer: peerIdStr}); + }); + // immediately attempt fetch of data columns from execution engine + chain.getBlobsTracker.triggerGetBlobs(blockInput); + // if we've received at least half of the columns, trigger reconstruction of the rest + if (blockInput.columnCount >= NUMBER_OF_COLUMNS / 2) { + chain.columnReconstructionTracker.triggerColumnReconstruction( + // wait to reconstruct until after head vote + getCutoffTimeMs(chain, dataColumnSlot, (config.SECONDS_PER_SLOT / INTERVALS_PER_SLOT) * 1000), + blockInput + ); } } }, @@ -976,7 +907,7 @@ export async function validateGossipFnRetryUnknownRoot( if (unknownBlockRootRetries === 0) { // Trigger unknown block root search here const rootHex = toRootHex(blockRoot); - network.searchUnknownSlotRoot({slot, root: rootHex}); + network.searchUnknownSlotRoot({slot, root: rootHex}, BlockInputSource.gossip); } if (unknownBlockRootRetries++ < MAX_UNKNOWN_BLOCK_ROOT_RETRIES) { @@ -993,19 +924,3 @@ export async function validateGossipFnRetryUnknownRoot( } } } - -async function raceWithCutoff( - chain: {config: ChainForkConfig; genesisTime: UintNum64; logger: Logger}, - blockSlot: Slot, - availabilityPromise: Promise, - cutoffMsFromSlotStart: number -): Promise { - const cutoffTimeMs = Math.max( - computeTimeAtSlot(chain.config, blockSlot, chain.genesisTime) * 1000 + cutoffMsFromSlotStart - Date.now(), - 0 - ); - const cutoffTimeout = new Promise((_resolve, reject) => setTimeout(reject, cutoffTimeMs)); - await Promise.race([availabilityPromise, cutoffTimeout]); - // we can only be here if availabilityPromise has resolved else an error will be thrown - return availabilityPromise; -} diff --git a/packages/beacon-node/src/network/processor/index.ts b/packages/beacon-node/src/network/processor/index.ts index db0596b3fe57..333e595ba9de 100644 --- a/packages/beacon-node/src/network/processor/index.ts +++ b/packages/beacon-node/src/network/processor/index.ts @@ -4,6 +4,8 @@ import {computeStartSlotAtEpoch} from "@lodestar/state-transition"; import {RootHex, Slot, SlotRootHex} from "@lodestar/types"; import {Logger, MapDef, mapValues, sleep} from "@lodestar/utils"; import {pruneSetToMax} from "@lodestar/utils"; +import {BlockInputSource} from "../../chain/blocks/blockInput/types.js"; +import {ChainEvent} from "../../chain/emitter.js"; import {GossipErrorCode} from "../../chain/errors/gossipValidation.js"; import {IBeaconChain} from "../../chain/interface.js"; import {IBeaconDb} from "../../db/interface.js"; @@ -229,13 +231,13 @@ export class NetworkProcessor { return queue.getAll(); } - searchUnknownSlotRoot({slot, root}: SlotRootHex, peer?: PeerIdStr): void { + searchUnknownSlotRoot({slot, root}: SlotRootHex, source: BlockInputSource, peer?: PeerIdStr): void { if (this.chain.seenBlock(root) || this.unknownRootsBySlot.getOrDefault(slot).has(root)) { return; } // Search for the unknown block 
this.unknownRootsBySlot.getOrDefault(slot).add(root); - this.events.emit(NetworkEvent.unknownBlock, {rootHex: root, peer}); + this.chain.emitter.emit(ChainEvent.unknownBlockRoot, {rootHex: root, peer, source}); } private onPendingGossipsubMessage(message: PendingGossipsubMessage): void { @@ -268,7 +270,7 @@ export class NetworkProcessor { // check if we processed a block with this root // no need to check if root is a descendant of the current finalized block, it will be checked once we validate the message if needed if (root && !this.chain.forkChoice.hasBlockHexUnsafe(root)) { - this.searchUnknownSlotRoot({slot, root}, message.propagationSource.toString()); + this.searchUnknownSlotRoot({slot, root}, BlockInputSource.gossip, message.propagationSource.toString()); if (this.unknownBlockGossipsubMessagesCount > MAX_QUEUED_UNKNOWN_BLOCK_GOSSIP_OBJECTS) { // TODO: Should report the dropped job to gossip? It will be eventually pruned from the mcache diff --git a/packages/beacon-node/src/network/reqresp/beaconBlocksMaybeBlobsByRange.ts b/packages/beacon-node/src/network/reqresp/beaconBlocksMaybeBlobsByRange.ts deleted file mode 100644 index 439be84b1d7d..000000000000 --- a/packages/beacon-node/src/network/reqresp/beaconBlocksMaybeBlobsByRange.ts +++ /dev/null @@ -1,508 +0,0 @@ -import {ChainForkConfig} from "@lodestar/config"; -import {ForkName, ForkSeq} from "@lodestar/params"; -import {computeEpochAtSlot} from "@lodestar/state-transition"; -import { - ColumnIndex, - Epoch, - SignedBeaconBlock, - Slot, - WithOptionalBytes, - deneb, - fulu, - phase0, - ssz, -} from "@lodestar/types"; -import {Logger, prettyPrintIndices} from "@lodestar/utils"; -import { - BlobsSource, - BlockInput, - BlockInputBlobs, - BlockInputDataColumns, - BlockInputType, - BlockSource, - CachedData, - CachedDataColumns, - DataColumnsSource, - getBlockInput, - getBlockInputDataColumns, -} from "../../chain/blocks/types.js"; -import {getEmptyBlockInputCacheEntry} from "../../chain/seenCache/seenGossipBlockInput.js"; -import {Metrics} from "../../metrics/index.js"; -import {RangeSyncType} from "../../sync/utils/remoteSyncType.js"; -import {PeerIdStr} from "../../util/peerId.js"; -import {INetwork} from "../interface.js"; -import {PeerSyncMeta} from "../peers/peersData.js"; -import {PeerAction} from "../peers/score/interface.js"; - -export type PartialDownload = null | {blocks: BlockInput[]; pendingDataColumns: number[]}; -export const SyncSourceByRoot = "ByRoot" as const; -export type SyncSource = RangeSyncType | typeof SyncSourceByRoot; - -/** - * Download blocks and blobs (prefulu) or data columns (fulu) by range. - * returns: - * - array of blocks with blobs or data columns - * - pendingDataColumns: null if all data columns are present, or array of column indexes that are missing. 
Also null for prefulu - */ -export async function beaconBlocksMaybeBlobsByRange( - config: ChainForkConfig, - network: INetwork, - peer: PeerSyncMeta, - request: phase0.BeaconBlocksByRangeRequest, - currentEpoch: Epoch, - partialDownload: PartialDownload, - syncSource: SyncSource, - metrics: Metrics | null, - logger?: Logger -): Promise<{blocks: BlockInput[]; pendingDataColumns: null | number[]}> { - const {peerId, client: peerClient, custodyGroups: peerColumns, earliestAvailableSlot} = peer; - // Code below assumes the request is in the same epoch - // Range sync satisfies this condition, but double check here for sanity - const {startSlot, count} = request; - if (count < 1) { - throw Error(`Invalid count=${count} in BeaconBlocksByRangeRequest`); - } - const endSlot = startSlot + count - 1; - - const startEpoch = computeEpochAtSlot(startSlot); - const endEpoch = computeEpochAtSlot(endSlot); - if (startEpoch !== endEpoch) { - throw Error( - `BeaconBlocksByRangeRequest must be in the same epoch startEpoch=${startEpoch} != endEpoch=${endEpoch}` - ); - } - - const forkSeq = config.getForkSeq(startSlot); - - // Note: Assumes all blocks in the same epoch - if (forkSeq < ForkSeq.deneb) { - const beaconBlocks = await network.sendBeaconBlocksByRange(peerId, request); - if (beaconBlocks.length === 0) { - throw Error( - `peerId=${peerId} peerClient=${peerClient} returned no blocks for BeaconBlocksByRangeRequest ${JSON.stringify(request)}` - ); - } - - const blocks = beaconBlocks.map((block) => getBlockInput.preData(config, block.data, BlockSource.byRange)); - return {blocks, pendingDataColumns: null}; - } - - // From Deneb - // Only request blobs if they are recent enough - if (startEpoch >= currentEpoch - config.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS) { - if (forkSeq < ForkSeq.fulu) { - const [allBlocks, allBlobSidecars] = await Promise.all([ - network.sendBeaconBlocksByRange(peerId, request), - network.sendBlobSidecarsByRange(peerId, request), - ]); - - if (allBlocks.length === 0) { - throw Error( - `peerId=${peerId} peerClient=${peerClient} returns no blocks allBlobSidecars=${allBlobSidecars.length} for BeaconBlocksByRangeRequest ${JSON.stringify(request)}` - ); - } - - const blocks = matchBlockWithBlobs( - config, - allBlocks, - allBlobSidecars, - endSlot, - BlockSource.byRange, - BlobsSource.byRange, - syncSource - ); - return {blocks, pendingDataColumns: null}; - } - - // From fulu, get columns - const sampledColumns = network.custodyConfig.sampledColumns; - const neededColumns = partialDownload ? partialDownload.pendingDataColumns : sampledColumns; - - // This should never throw. Already checking for this in ChainPeerBalancer when selecting the peer - if ((earliestAvailableSlot ?? 0) > startSlot) { - throw new Error( - `earliestAvailableSlot=${earliestAvailableSlot} not respected for ByRange startSlot=${startSlot}` - ); - } - - // get match - const columns = peerColumns.reduce((acc, elem) => { - if (neededColumns.includes(elem)) { - acc.push(elem); - } - return acc; - }, [] as number[]); - - if (columns.length === 0 && partialDownload !== null) { - // this peer has nothing to offer and should not have been selected for batch download - // throw error? 
- return partialDownload; - } - - const pendingDataColumns = neededColumns.reduce((acc, elem) => { - if (!columns.includes(elem)) { - acc.push(elem); - } - return acc; - }, [] as number[]); - - const dataColumnRequest = {...request, columns}; - const [allBlocks, allDataColumnSidecars] = await Promise.all([ - // TODO-das: investigate why partialDownload blocks is empty here - partialDownload && partialDownload.blocks.length > 0 - ? partialDownload.blocks.map((blockInput) => ({data: blockInput.block})) - : network.sendBeaconBlocksByRange(peerId, request), - columns.length === 0 ? [] : network.sendDataColumnSidecarsByRange(peerId, dataColumnRequest), - ]); - logger?.debug("ByRange requests", { - beaconBlocksRequest: JSON.stringify(ssz.phase0.BeaconBlocksByRangeRequest.toJson(request)), - dataColumnRequest: JSON.stringify(ssz.fulu.DataColumnSidecarsByRangeRequest.toJson(dataColumnRequest)), - [`allBlocks(${allBlocks.length})`]: allBlocks.map((blk) => blk.data.message.slot).join(" "), - [`allDataColumnSidecars(${allDataColumnSidecars.length})`]: allDataColumnSidecars - .map((dCol) => `${dCol.signedBlockHeader.message.slot}:${dCol.index}`) - .join(" "), - peerColumns: prettyPrintIndices(peerColumns), - peerId, - peerClient, - prevPartialDownload: !!partialDownload, - }); - - if (allBlocks.length === 0) { - throw Error( - `peerId=${peerId} peerClient=${peerClient} returns no blocks dataColumnSidecars=${allDataColumnSidecars.length} for BeaconBlocksByRangeRequest ${JSON.stringify(request)}` - ); - } - - const blocks = matchBlockWithDataColumns( - network, - peerId, - config, - sampledColumns, - columns, - allBlocks, - allDataColumnSidecars, - endSlot, - BlockSource.byRange, - DataColumnsSource.byRange, - partialDownload, - peerClient, - syncSource, - metrics, - logger - ); - - return {blocks, pendingDataColumns: pendingDataColumns.length > 0 ? pendingDataColumns : null}; - } - - logger?.verbose( - `Download range is out of ${config.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS} epochs, skip Blobs and DataColumnSidecars download`, - { - startEpoch, - startSlot, - endSlot, - currentEpoch, - } - ); - - // Data is out of range, only request blocks - const blocks = await network.sendBeaconBlocksByRange(peerId, request); - if (blocks.length === 0) { - throw Error( - `peerId=${peerId} peerClient=${peerClient} returned no blocks for BeaconBlocksByRangeRequest ${JSON.stringify(request)}` - ); - } - return { - blocks: blocks.map((block) => getBlockInput.outOfRangeData(config, block.data, BlockSource.byRange)), - // null means all data columns are present - pendingDataColumns: null, - }; -} - -// Assumes that the blobs are in the same sequence as blocks, doesn't require block to be sorted -export function matchBlockWithBlobs( - config: ChainForkConfig, - allBlocks: WithOptionalBytes[], - allBlobSidecars: deneb.BlobSidecar[], - endSlot: Slot, - blockSource: BlockSource, - blobsSource: BlobsSource, - syncSource: SyncSource -): BlockInput[] { - const blockInputs: BlockInput[] = []; - let blobSideCarIndex = 0; - let lastMatchedSlot = -1; - - // Match blobSideCar with the block as some blocks would have no blobs and hence - // would be omitted from the response. If there are any inconsitencies in the - // response, the validations during import will reject the block and hence this - // entire segment. 
- // - // Assuming that the blocks and blobs will come in same sorted order - for (let i = 0; i < allBlocks.length; i++) { - const block = allBlocks[i]; - if (config.getForkSeq(block.data.message.slot) < ForkSeq.deneb) { - blockInputs.push(getBlockInput.preData(config, block.data, blockSource)); - } else { - const blobSidecars: deneb.BlobSidecar[] = []; - - const blockRoot = config.getForkTypes(block.data.message.slot).BeaconBlock.hashTreeRoot(block.data.message); - const matchBlob = (blobSidecar?: deneb.BlobSidecar): boolean => { - if (blobSidecar === undefined) { - return false; - } - - if (syncSource === RangeSyncType.Head || syncSource === SyncSourceByRoot) { - return ( - Buffer.compare( - ssz.phase0.BeaconBlockHeader.hashTreeRoot(blobSidecar.signedBlockHeader.message), - blockRoot - ) === 0 - ); - } - - // For finalized range sync, we can just match by slot - return blobSidecar.signedBlockHeader.message.slot === block.data.message.slot; - }; - - while (matchBlob(allBlobSidecars[blobSideCarIndex])) { - blobSidecars.push(allBlobSidecars[blobSideCarIndex]); - lastMatchedSlot = block.data.message.slot; - blobSideCarIndex++; - } - - // Quick inspect how many blobSidecars was expected - const blobKzgCommitmentsLen = (block.data.message.body as deneb.BeaconBlockBody).blobKzgCommitments.length; - if (blobKzgCommitmentsLen !== blobSidecars.length) { - throw Error( - `Missing blobSidecars for blockSlot=${block.data.message.slot} with blobKzgCommitmentsLen=${blobKzgCommitmentsLen} blobSidecars=${blobSidecars.length}` - ); - } - - const blockData = { - fork: config.getForkName(block.data.message.slot), - blobs: blobSidecars, - blobsSource, - } as BlockInputBlobs; - - blockInputs.push(getBlockInput.availableData(config, block.data, blockSource, blockData)); - } - } - - // If there are still unconsumed blobs this means that the response was inconsistent - // and matching was wrong and hence we should throw error - if ( - allBlobSidecars[blobSideCarIndex] !== undefined && - // If there are no blobs, the blobs request can give 1 block outside the requested range - allBlobSidecars[blobSideCarIndex].signedBlockHeader.message.slot <= endSlot - ) { - throw Error( - `Unmatched blobSidecars, blocks=${allBlocks.length}, blobs=${ - allBlobSidecars.length - } lastMatchedSlot=${lastMatchedSlot}, pending blobSidecars slots=${allBlobSidecars - .slice(blobSideCarIndex) - .map((blb) => blb.signedBlockHeader.message.slot) - .join(" ")}` - ); - } - return blockInputs; -} - -export function matchBlockWithDataColumns( - network: INetwork, - peerId: PeerIdStr, - config: ChainForkConfig, - sampledColumns: ColumnIndex[], - requestedColumns: number[], - allBlocks: WithOptionalBytes[], - allDataColumnSidecars: fulu.DataColumnSidecar[], - endSlot: Slot, - blockSource: BlockSource, - dataColumnsSource: DataColumnsSource, - prevPartialDownload: null | PartialDownload, - peerClient: string, - syncSource: SyncSource, - metrics: Metrics | null, - logger?: Logger -): BlockInput[] { - const blockInputs: BlockInput[] = []; - let dataColumnSideCarIndex = 0; - let lastMatchedSlot = -1; - const neededColumns = prevPartialDownload?.pendingDataColumns ?? sampledColumns; - const shouldHaveAllData = neededColumns.reduce((acc, elem) => acc && requestedColumns.includes(elem), true); - - // Match dataColumnSideCar with the block as some blocks would have no dataColumns and hence - // would be omitted from the response. 
If there are any inconsitencies in the - // response, the validations during import will reject the block and hence this - // entire segment. - // - // Assuming that the blocks and blobs will come in same sorted order - for (let i = 0; i < allBlocks.length; i++) { - const block = allBlocks[i]; - - const forkSeq = config.getForkSeq(block.data.message.slot); - if (forkSeq < ForkSeq.fulu) { - throw Error(`Invalid block forkSeq=${forkSeq} < ForSeq.fulu for matchBlockWithDataColumns`); - } - const dataColumnSidecars: fulu.DataColumnSidecar[] = []; - const blockRoot = config.getForkTypes(block.data.message.slot).BeaconBlock.hashTreeRoot(block.data.message); - const matchDataColumnSidecar = (dataColumnSidecar?: fulu.DataColumnSidecar): boolean => { - if (dataColumnSidecar === undefined) { - return false; - } - - if (syncSource === RangeSyncType.Head || syncSource === SyncSourceByRoot) { - return ( - Buffer.compare( - ssz.phase0.BeaconBlockHeader.hashTreeRoot(dataColumnSidecar.signedBlockHeader.message), - blockRoot - ) === 0 - ); - } - - // For finalized range sync, we can just match by slot - return dataColumnSidecar.signedBlockHeader.message.slot === block.data.message.slot; - }; - while (matchDataColumnSidecar(allDataColumnSidecars[dataColumnSideCarIndex])) { - dataColumnSidecars.push(allDataColumnSidecars[dataColumnSideCarIndex]); - lastMatchedSlot = block.data.message.slot; - dataColumnSideCarIndex++; - } - metrics?.dataColumns.bySource.inc({source: DataColumnsSource.byRange}, dataColumnSidecars.length); - - const blobKzgCommitmentsLen = (block.data.message.body as deneb.BeaconBlockBody).blobKzgCommitments.length; - logger?.debug("processing matchBlockWithDataColumns", { - blobKzgCommitmentsLen, - dataColumnSidecars: dataColumnSidecars.length, - shouldHaveAllData, - neededColumns: prettyPrintIndices(neededColumns), - requestedColumns: prettyPrintIndices(requestedColumns), - slot: block.data.message.slot, - dataColumnsSlots: prettyPrintIndices(dataColumnSidecars.map((dcm) => dcm.signedBlockHeader.message.slot)), - peerClient, - }); - if (blobKzgCommitmentsLen === 0) { - if (dataColumnSidecars.length > 0) { - // only penalize peer with Finalized range sync or "ByRoot" sync source - if (syncSource !== RangeSyncType.Head) { - network.reportPeer(peerId, PeerAction.LowToleranceError, "Missing or mismatching dataColumnSidecars"); - } - throw Error( - `Missing or mismatching dataColumnSidecars from peerId=${peerId} for blockSlot=${block.data.message.slot} with blobKzgCommitmentsLen=0 dataColumnSidecars=${dataColumnSidecars.length}>0` - ); - } - - const blockData = { - fork: config.getForkName(block.data.message.slot), - dataColumns: [], - dataColumnsBytes: [], - dataColumnsSource, - } as BlockInputDataColumns; - blockInputs.push(getBlockInput.availableData(config, block.data, blockSource, blockData)); - } else { - // Quick inspect how many blobSidecars was expected - const dataColumnIndexes = dataColumnSidecars.map((dataColumnSidecar) => dataColumnSidecar.index); - const requestedColumnsPresent = requestedColumns.reduce( - (acc, columnIndex) => acc && dataColumnIndexes.includes(columnIndex), - true - ); - - logger?.debug("matchBlockWithDataColumns2", { - dataColumnIndexes: prettyPrintIndices(dataColumnIndexes), - requestedColumnsPresent, - slot: block.data.message.slot, - peerClient, - }); - - if (dataColumnSidecars.length !== requestedColumns.length || !requestedColumnsPresent) { - logger?.debug( - `Missing or mismatching dataColumnSidecars from peerId=${peerId} for 
blockSlot=${block.data.message.slot} with numColumns=${sampledColumns.length} dataColumnSidecars=${dataColumnSidecars.length} requestedColumnsPresent=${requestedColumnsPresent} received dataColumnIndexes=${dataColumnIndexes.join(" ")} requested=${requestedColumns.join(" ")}`, - { - allBlocks: allBlocks.length, - allDataColumnSidecars: allDataColumnSidecars.length, - peerId, - blobKzgCommitmentsLen, - peerClient, - } - ); - // only penalize peer with Finalized range sync or "ByRoot" sync source - if (syncSource !== RangeSyncType.Head) { - network.reportPeer(peerId, PeerAction.LowToleranceError, "Missing or mismatching dataColumnSidecars"); - } - throw Error( - `Missing or mismatching dataColumnSidecars from peerId=${peerId} for blockSlot=${block.data.message.slot} blobKzgCommitmentsLen=${blobKzgCommitmentsLen} with numColumns=${sampledColumns.length} dataColumnSidecars=${dataColumnSidecars.length} requestedColumnsPresent=${requestedColumnsPresent} received dataColumnIndexes=${dataColumnIndexes.join(" ")} requested=${requestedColumns.join(" ")}` - ); - } - - let cachedData: CachedData; - // TODO-das: investigate why partialDownload blocks is empty here - if (prevPartialDownload !== null && prevPartialDownload.blocks.length > 0) { - const prevBlockInput = prevPartialDownload.blocks[i]; - if (prevBlockInput.type !== BlockInputType.dataPromise) { - throw Error(`prevBlockInput.type=${prevBlockInput.type} in prevPartialDownload`); - } - cachedData = prevBlockInput.cachedData; - } else { - // biome-ignore lint/style/noNonNullAssertion: checked below for validity - cachedData = getEmptyBlockInputCacheEntry(config.getForkName(block.data.message.slot), -1).cachedData!; - if (cachedData === undefined) { - throw Error("Invalid cachedData=undefined from getEmptyBlockInputCacheEntry"); - } - } - - if (cachedData.fork !== ForkName.fulu) { - throw Error("Invalid fork for cachedData on dataColumns"); - } - - for (const dataColumnSidecar of dataColumnSidecars) { - (cachedData as CachedDataColumns).dataColumnsCache.set(dataColumnSidecar.index, { - dataColumn: dataColumnSidecar, - dataColumnBytes: null, - }); - } - - if (shouldHaveAllData) { - const {dataColumns, dataColumnsBytes} = getBlockInputDataColumns( - (cachedData as CachedDataColumns).dataColumnsCache, - sampledColumns - ); - - const blockData = { - fork: config.getForkName(block.data.message.slot), - dataColumns, - dataColumnsBytes, - dataColumnsSource, - } as BlockInputDataColumns; - - // TODO DENEB: instead of null, pass payload in bytes - blockInputs.push(getBlockInput.availableData(config, block.data, blockSource, blockData)); - } else { - blockInputs.push(getBlockInput.dataPromise(config, block.data, blockSource, cachedData)); - } - } - } - - // for head sync, there could be unconsumed data column sidecars because the retried peers may have higher head - if ( - allDataColumnSidecars[dataColumnSideCarIndex] !== undefined && - // If there are no data columns, the data columns request can give 1 block outside the requested range - allDataColumnSidecars[dataColumnSideCarIndex].signedBlockHeader.message.slot <= endSlot && - // only penalize peer with Finalized range sync or "ByRoot" sync source - syncSource !== RangeSyncType.Head - ) { - network.reportPeer(peerId, PeerAction.LowToleranceError, "Unmatched dataColumnSidecars"); - throw Error( - `Unmatched dataColumnSidecars, blocks=${allBlocks.length}, blobs=${ - allDataColumnSidecars.length - } lastMatchedSlot=${lastMatchedSlot}, pending dataColumnSidecars slots=${allDataColumnSidecars - 
.slice(dataColumnSideCarIndex) - .map((blb) => blb.signedBlockHeader.message.slot) - .join(" ")} endSlot=${endSlot}, peerId=${peerId}, peerClient=${peerClient}` - ); - } - logger?.debug("matched BlockWithDataColumns", { - peerClient, - slots: prettyPrintIndices(blockInputs.map((b) => Number(b.block.message.slot))), - types: blockInputs.map((b) => b.type).join(" "), - }); - return blockInputs; -} diff --git a/packages/beacon-node/src/network/reqresp/beaconBlocksMaybeBlobsByRoot.ts b/packages/beacon-node/src/network/reqresp/beaconBlocksMaybeBlobsByRoot.ts deleted file mode 100644 index 62ca819e73f9..000000000000 --- a/packages/beacon-node/src/network/reqresp/beaconBlocksMaybeBlobsByRoot.ts +++ /dev/null @@ -1,681 +0,0 @@ -import {toHexString} from "@chainsafe/ssz"; -import {routes} from "@lodestar/api"; -import {ChainForkConfig} from "@lodestar/config"; -import {ForkName, ForkSeq} from "@lodestar/params"; -import {signedBlockToSignedHeader} from "@lodestar/state-transition"; -import {RootHex, SignedBeaconBlock, deneb, fulu} from "@lodestar/types"; -import {BlobAndProof} from "@lodestar/types/deneb"; -import {Logger, fromHex, toHex} from "@lodestar/utils"; -import { - BlobsSource, - BlockInput, - BlockInputBlobs, - BlockInputDataColumns, - BlockInputType, - BlockSource, - CachedBlobs, - CachedDataColumns, - DataColumnsSource, - NullBlockInput, - getBlockInput, - getBlockInputBlobs, - getBlockInputDataColumns, -} from "../../chain/blocks/types.js"; -import {ChainEventEmitter} from "../../chain/emitter.js"; -import {BlockInputAvailabilitySource} from "../../chain/seenCache/seenGossipBlockInput.js"; -import {IExecutionEngine} from "../../execution/index.js"; -import {Metrics} from "../../metrics/index.js"; -import {computeInclusionProof, kzgCommitmentToVersionedHash} from "../../util/blobs.js"; -import {getDataColumnsFromExecution} from "../../util/dataColumns.js"; -import {PeerIdStr} from "../../util/peerId.js"; -import {BeaconBlocksByRootRequest} from "../../util/types.js"; -import {INetwork} from "../interface.js"; -import { - PartialDownload, - SyncSourceByRoot, - matchBlockWithBlobs, - matchBlockWithDataColumns, -} from "./beaconBlocksMaybeBlobsByRange.js"; - -// keep 1 epoch of stuff, assmume 16 blobs -const MAX_ENGINE_GETBLOBS_CACHE = 32 * 16; -const MAX_UNAVAILABLE_RETRY_CACHE = 32; - -/** - * Request beacon blocks by root, and blobs or data columns if available. - * return BlockInput[] along with pendingDataColumns (null for prefulu forks for postfulu where data is available) - */ -export async function beaconBlocksMaybeBlobsByRoot( - config: ChainForkConfig, - network: INetwork, - peerId: PeerIdStr, - request: BeaconBlocksByRootRequest, - partialDownload: null | PartialDownload, - peerClient: string, - metrics: Metrics | null, - logger?: Logger -): Promise<{blocks: BlockInput[]; pendingDataColumns: null | number[]}> { - // console.log("beaconBlocksMaybeBlobsByRoot", request); - const allBlocks = partialDownload - ? partialDownload.blocks.map((blockInput) => ({data: blockInput.block})) - : await network.sendBeaconBlocksByRoot(peerId, request); - - logger?.debug("beaconBlocksMaybeBlobsByRoot response", {allBlocks: allBlocks.length, peerClient}); - - const preDataBlocks = []; - const blobsDataBlocks = []; - const dataColumnsDataBlocks = []; - - const sampledColumns = network.custodyConfig.sampledColumns; - const neededColumns = partialDownload ? 
partialDownload.pendingDataColumns : sampledColumns; - const {custodyGroups: peerColumns} = network.getConnectedPeerSyncMeta(peerId); - - // get match - const columns = peerColumns.reduce((acc, elem) => { - if (neededColumns.includes(elem)) { - acc.push(elem); - } - return acc; - }, [] as number[]); - let pendingDataColumns = null; - - const blobIdentifiers: deneb.BlobIdentifier[] = []; - const dataColumnsByRootIdentifiers: fulu.DataColumnsByRootIdentifier[] = []; - - let prevFork = null; - for (const block of allBlocks) { - const slot = block.data.message.slot; - const blockRoot = config.getForkTypes(slot).BeaconBlock.hashTreeRoot(block.data.message); - const fork = config.getForkName(slot); - if (fork !== (prevFork ?? fork)) { - throw Error("beaconBlocksMaybeBlobsByRoot only accepts requests of same fork"); - } - prevFork = fork; - - if (ForkSeq[fork] < ForkSeq.deneb) { - preDataBlocks.push(block); - } else if (fork === ForkName.deneb || fork === ForkName.electra) { - blobsDataBlocks.push(block); - const blobKzgCommitmentsLen = (block.data.message.body as deneb.BeaconBlockBody).blobKzgCommitments.length; - logger?.debug("beaconBlocksMaybeBlobsByRoot", {blobKzgCommitmentsLen, peerClient}); - for (let index = 0; index < blobKzgCommitmentsLen; index++) { - // try see if the blob is available locally - blobIdentifiers.push({blockRoot, index}); - } - } else if (fork === ForkName.fulu) { - dataColumnsDataBlocks.push(block); - const blobKzgCommitmentsLen = (block.data.message.body as deneb.BeaconBlockBody).blobKzgCommitments.length; - const custodyColumnIndexes = blobKzgCommitmentsLen > 0 ? columns : []; - if (custodyColumnIndexes.length > 0) { - dataColumnsByRootIdentifiers.push({ - blockRoot, - columns: custodyColumnIndexes, - }); - } - } else { - throw Error(`Invalid fork=${fork} in beaconBlocksMaybeBlobsByRoot`); - } - } - - let blockInputs = preDataBlocks.map((block) => getBlockInput.preData(config, block.data, BlockSource.byRoot)); - - if (blobsDataBlocks.length > 0) { - let allBlobSidecars: deneb.BlobSidecar[]; - if (blobIdentifiers.length > 0) { - allBlobSidecars = await network.sendBlobSidecarsByRoot(peerId, blobIdentifiers); - } else { - allBlobSidecars = []; - } - - // The last arg is to provide slot to which all blobs should be exausted in matching - // and here it should be infinity since all bobs should match - const blockInputWithBlobs = matchBlockWithBlobs( - config, - allBlocks, - allBlobSidecars, - Infinity, - BlockSource.byRoot, - BlobsSource.byRoot, - SyncSourceByRoot - ); - blockInputs = [...blockInputs, ...blockInputWithBlobs]; - } - - if (dataColumnsDataBlocks.length > 0) { - pendingDataColumns = neededColumns.reduce((acc, elem) => { - if (!columns.includes(elem)) { - acc.push(elem); - } - return acc; - }, [] as number[]); - - let allDataColumnsSidecars: fulu.DataColumnSidecar[]; - logger?.debug("allDataColumnsSidecars partialDownload", { - ...(partialDownload - ? 
{blocks: partialDownload.blocks.length, pendingDataColumns: partialDownload.pendingDataColumns.join(" ")} - : {blocks: null, pendingDataColumns: null}), - dataColumnIdentifiers: dataColumnsByRootIdentifiers - .map((id) => `${id.blockRoot}: ${id.columns.join(" ")}`) - .join(" "), - peerClient, - }); - if (dataColumnsByRootIdentifiers.length > 0) { - allDataColumnsSidecars = await network.sendDataColumnSidecarsByRoot(peerId, dataColumnsByRootIdentifiers); - } else { - if (partialDownload !== null) { - return partialDownload; - } - allDataColumnsSidecars = []; - } - - // The last arg is to provide slot to which all blobs should be exausted in matching - // and here it should be infinity since all bobs should match - // TODO: should not call matchBlockWithDataColumns() because it's supposed for range sync - // in that function, peers should return all requested data columns, this function runs at gossip time - // and it should not expect that - const blockInputWithBlobs = matchBlockWithDataColumns( - network, - peerId, - config, - sampledColumns, - columns, - allBlocks, - allDataColumnsSidecars, - Infinity, - BlockSource.byRoot, - DataColumnsSource.byRoot, - partialDownload, - peerClient, - SyncSourceByRoot, - metrics, - logger - ); - blockInputs = [...blockInputs, ...blockInputWithBlobs]; - } - - return { - blocks: blockInputs, - pendingDataColumns: pendingDataColumns && pendingDataColumns.length > 0 ? pendingDataColumns : null, - }; -} - -export async function unavailableBeaconBlobsByRoot( - config: ChainForkConfig, - network: INetwork, - peerId: PeerIdStr, - peerClient: string, - unavailableBlockInput: BlockInput | NullBlockInput, - opts: { - logger?: Logger; - metrics?: Metrics | null; - executionEngine: IExecutionEngine; - emitter: ChainEventEmitter; - engineGetBlobsCache?: Map; - blockInputsRetryTrackerCache?: Set; - } -): Promise { - if (unavailableBlockInput.block !== null && unavailableBlockInput.type !== BlockInputType.dataPromise) { - return unavailableBlockInput; - } - - // resolve the block if thats unavailable - let block: SignedBeaconBlock, cachedData: NullBlockInput["cachedData"]; - if (unavailableBlockInput.block === null) { - const allBlocks = await network.sendBeaconBlocksByRoot(peerId, [fromHex(unavailableBlockInput.blockRootHex)]); - block = allBlocks[0].data; - cachedData = unavailableBlockInput.cachedData; - unavailableBlockInput = getBlockInput.dataPromise(config, block, BlockSource.byRoot, cachedData); - } else { - ({block, cachedData} = unavailableBlockInput); - } - - const forkSeq = config.getForkSeq(block.message.slot); - - if (forkSeq < ForkSeq.fulu) { - return unavailableBeaconBlobsByRootPreFulu( - config, - network, - peerId, - unavailableBlockInput, - block, - cachedData as CachedBlobs, - opts - ); - } - - return unavailableBeaconBlobsByRootPostFulu( - config, - network, - peerId, - peerClient, - unavailableBlockInput, - block, - cachedData, - { - metrics: opts.metrics, - executionEngine: opts.executionEngine, - emitter: opts.emitter, - logger: opts.logger, - } - ); -} - -export async function unavailableBeaconBlobsByRootPreFulu( - config: ChainForkConfig, - network: INetwork, - peerId: PeerIdStr, - unavailableBlockInput: BlockInput | NullBlockInput, - block: SignedBeaconBlock, - cachedData: CachedBlobs, - opts: { - metrics?: Metrics | null; - emitter: ChainEventEmitter; - executionEngine: IExecutionEngine; - engineGetBlobsCache?: Map; - blockInputsRetryTrackerCache?: Set; - } -): Promise { - const {executionEngine, metrics, emitter, engineGetBlobsCache, 
blockInputsRetryTrackerCache} = opts; - if (unavailableBlockInput.block !== null && unavailableBlockInput.type !== BlockInputType.dataPromise) { - return unavailableBlockInput; - } - - // resolve missing blobs - const slot = block.message.slot; - const fork = config.getForkName(slot); - const blockRoot = config.getForkTypes(slot).BeaconBlock.hashTreeRoot(block.message); - const blockRootHex = toHexString(blockRoot); - - const blockTriedBefore = blockInputsRetryTrackerCache?.has(blockRootHex) === true; - if (blockTriedBefore) { - metrics?.blockInputFetchStats.totalDataPromiseBlockInputsReTriedBlobsPull.inc(); - } else { - metrics?.blockInputFetchStats.totalDataPromiseBlockInputsTriedBlobsPull.inc(); - blockInputsRetryTrackerCache?.add(blockRootHex); - } - - const blobKzgCommitmentsLen = (block.message.body as deneb.BeaconBlockBody).blobKzgCommitments.length; - const signedBlockHeader = signedBlockToSignedHeader(config, block); - - const engineReqIdentifiers: (deneb.BlobIdentifier & { - kzgCommitment: deneb.KZGCommitment; - versionedHash: Uint8Array; - })[] = []; - const networkReqIdentifiers: deneb.BlobIdentifier[] = []; - - let getBlobsUseful = false; - for (let index = 0; index < blobKzgCommitmentsLen; index++) { - if (cachedData.blobsCache.has(index) === false) { - const kzgCommitment = (block.message.body as deneb.BeaconBlockBody).blobKzgCommitments[index]; - const versionedHash = kzgCommitmentToVersionedHash(kzgCommitment); - - // check if the getblobs cache has the data if block not been queried before - if (engineGetBlobsCache?.has(toHexString(versionedHash)) === true && !blockTriedBefore) { - const catchedBlobAndProof = engineGetBlobsCache.get(toHexString(versionedHash)) ?? null; - if (catchedBlobAndProof === null) { - metrics?.blockInputFetchStats.dataPromiseBlobsFoundInGetBlobsCacheNull.inc(); - networkReqIdentifiers.push({blockRoot, index}); - } else { - metrics?.blockInputFetchStats.dataPromiseBlobsFoundInGetBlobsCacheNotNull.inc(); - // compute TODO: also add inclusion proof cache - const {blob, proof: kzgProof} = catchedBlobAndProof; - const kzgCommitmentInclusionProof = computeInclusionProof(fork, block.message.body, index); - const blobSidecar = {index, blob, kzgCommitment, kzgProof, signedBlockHeader, kzgCommitmentInclusionProof}; - cachedData.blobsCache.set(blobSidecar.index, blobSidecar); - } - } else if (blockTriedBefore) { - // only retry it from network - networkReqIdentifiers.push({blockRoot, index}); - } else { - // see if we can pull from EL - metrics?.blockInputFetchStats.dataPromiseBlobsNotAvailableInGetBlobsCache.inc(); - engineReqIdentifiers.push({blockRoot, index, versionedHash, kzgCommitment}); - } - } else { - metrics?.blockInputFetchStats.dataPromiseBlobsAlreadyAvailable.inc(); - } - } - - if (engineReqIdentifiers.length > 0) { - metrics?.blockInputFetchStats.totalDataPromiseBlockInputsTriedGetBlobs.inc(); - } - const versionedHashes = engineReqIdentifiers.map((bi) => bi.versionedHash); - metrics?.blockInputFetchStats.dataPromiseBlobsEngineGetBlobsApiRequests.inc(versionedHashes.length); - - const blobAndProofs = await executionEngine.getBlobs(ForkName.deneb, versionedHashes).catch((_e) => { - metrics?.blockInputFetchStats.dataPromiseBlobsEngineApiGetBlobsErroredNull.inc(versionedHashes.length); - return versionedHashes.map((_vh) => null); - }); - - for (let j = 0; j < versionedHashes.length; j++) { - const blobAndProof = blobAndProofs[j] ?? 
null; - const versionedHash = versionedHashes[j]; - // save to cache for future reference - engineGetBlobsCache?.set(toHexString(versionedHash), blobAndProof); - if (blobAndProof !== null) { - metrics?.blockInputFetchStats.dataPromiseBlobsEngineGetBlobsApiNotNull.inc(); - - // if we already got it by now, save the compute - if (cachedData.blobsCache.has(engineReqIdentifiers[j].index) === false) { - metrics?.blockInputFetchStats.dataPromiseBlobsEngineApiGetBlobsUseful.inc(); - getBlobsUseful = true; - const {blob, proof: kzgProof} = blobAndProof; - const {kzgCommitment, index} = engineReqIdentifiers[j]; - const kzgCommitmentInclusionProof = computeInclusionProof(fork, block.message.body, index); - const blobSidecar = {index, blob, kzgCommitment, kzgProof, signedBlockHeader, kzgCommitmentInclusionProof}; - // add them in cache so that its reflected in all the blockInputs that carry this - // for e.g. a blockInput that might be awaiting blobs promise fullfillment in - // verifyBlocksDataAvailability - cachedData.blobsCache.set(blobSidecar.index, blobSidecar); - - if (emitter.listenerCount(routes.events.EventType.blobSidecar)) { - emitter.emit(routes.events.EventType.blobSidecar, { - blockRoot: blockRootHex, - slot, - index, - kzgCommitment: toHex(kzgCommitment), - versionedHash: toHex(versionedHash), - }); - } - } else { - metrics?.blockInputFetchStats.dataPromiseBlobsDelayedGossipAvailable.inc(); - metrics?.blockInputFetchStats.dataPromiseBlobsDelayedGossipAvailableSavedGetBlobsCompute.inc(); - } - } - // may be blobsidecar arrived in the timespan of making the request - else { - metrics?.blockInputFetchStats.dataPromiseBlobsEngineGetBlobsApiNull.inc(); - if (cachedData.blobsCache.has(engineReqIdentifiers[j].index) === false) { - const {blockRoot, index} = engineReqIdentifiers[j]; - networkReqIdentifiers.push({blockRoot, index}); - } else { - metrics?.blockInputFetchStats.dataPromiseBlobsDelayedGossipAvailable.inc(); - } - } - } - - if (engineGetBlobsCache !== undefined) { - // prune out engineGetBlobsCache - let pruneLength = Math.max(0, engineGetBlobsCache?.size - MAX_ENGINE_GETBLOBS_CACHE); - for (const key of engineGetBlobsCache.keys()) { - if (pruneLength <= 0) break; - engineGetBlobsCache.delete(key); - pruneLength--; - metrics?.blockInputFetchStats.getBlobsCachePruned.inc(); - } - metrics?.blockInputFetchStats.getBlobsCacheSize.set(engineGetBlobsCache.size); - } - if (blockInputsRetryTrackerCache !== undefined) { - // prune out engineGetBlobsCache - let pruneLength = Math.max(0, blockInputsRetryTrackerCache?.size - MAX_UNAVAILABLE_RETRY_CACHE); - for (const key of blockInputsRetryTrackerCache.keys()) { - if (pruneLength <= 0) break; - blockInputsRetryTrackerCache.delete(key); - pruneLength--; - metrics?.blockInputFetchStats.dataPromiseBlockInputRetryTrackerCachePruned.inc(); - } - metrics?.blockInputFetchStats.dataPromiseBlockInputRetryTrackerCacheSize.set(blockInputsRetryTrackerCache.size); - } - - // if clients expect sorted identifiers - networkReqIdentifiers.sort((a, b) => a.index - b.index); - let networkResBlobSidecars: deneb.BlobSidecar[]; - metrics?.blockInputFetchStats.dataPromiseBlobsFinallyQueriedFromNetwork.inc(networkReqIdentifiers.length); - if (blockTriedBefore) { - metrics?.blockInputFetchStats.dataPromiseBlobsRetriedFromNetwork.inc(networkReqIdentifiers.length); - } - - if (networkReqIdentifiers.length > 0) { - networkResBlobSidecars = await network.sendBlobSidecarsByRoot(peerId, networkReqIdentifiers); - 
metrics?.blockInputFetchStats.dataPromiseBlobsFinallyAvailableFromNetwork.inc(networkResBlobSidecars.length); - if (blockTriedBefore) { - metrics?.blockInputFetchStats.dataPromiseBlobsRetriedAvailableFromNetwork.inc(networkResBlobSidecars.length); - } - } else { - networkResBlobSidecars = []; - } - - // add them in cache so that its reflected in all the blockInputs that carry this - // for e.g. a blockInput that might be awaiting blobs promise fullfillment in - // verifyBlocksDataAvailability - for (const blobSidecar of networkResBlobSidecars) { - cachedData.blobsCache.set(blobSidecar.index, blobSidecar); - - if (emitter.listenerCount(routes.events.EventType.blobSidecar)) { - emitter.emit(routes.events.EventType.blobSidecar, { - blockRoot: blockRootHex, - slot, - index: blobSidecar.index, - kzgCommitment: toHex(blobSidecar.kzgCommitment), - versionedHash: toHex(kzgCommitmentToVersionedHash(blobSidecar.kzgCommitment)), - }); - } - } - - // check and see if all blobs are now available and in that case resolve availability - // if not this will error and the leftover blobs will be tried from another peer - const allBlobs = getBlockInputBlobs(cachedData.blobsCache); - const {blobs} = allBlobs; - if (blobs.length !== blobKzgCommitmentsLen) { - throw Error(`Not all blobs fetched missingBlobs=${blobKzgCommitmentsLen - blobs.length}`); - } - const blockData = {fork: cachedData.fork, ...allBlobs, blobsSource: BlobsSource.byRoot} as BlockInputBlobs; - cachedData.resolveAvailability(blockData); - metrics?.syncUnknownBlock.resolveAvailabilitySource.inc({source: BlockInputAvailabilitySource.UNKNOWN_SYNC}); - - metrics?.blockInputFetchStats.totalDataPromiseBlockInputsResolvedAvailable.inc(); - if (getBlobsUseful) { - metrics?.blockInputFetchStats.totalDataPromiseBlockInputsAvailableUsingGetBlobs.inc(); - if (networkReqIdentifiers.length === 0) { - metrics?.blockInputFetchStats.totalDataPromiseBlockInputsAvailableFromGetBlobs.inc(); - } - } - if (networkResBlobSidecars.length > 0) { - metrics?.blockInputFetchStats.totalDataPromiseBlockInputsFinallyAvailableFromNetworkReqResp.inc(); - } - if (blockTriedBefore) { - metrics?.blockInputFetchStats.totalDataPromiseBlockInputsRetriedAvailableFromNetwork.inc(); - } - - return getBlockInput.availableData(config, block, BlockSource.byRoot, blockData); -} - -/** - * Download more columns for a BlockInput - * - unavailableBlockInput should have block, but not enough blobs (deneb) or data columns (fulu) - * - * This function may return data promise, and consumer should continue with fetching more blobs or columns from other peers - * see UnknownBlockSync.fetchUnavailableBlockInput() - */ -export async function unavailableBeaconBlobsByRootPostFulu( - config: ChainForkConfig, - network: INetwork, - peerId: PeerIdStr, - peerClient: string, - unavailableBlockInput: BlockInput, - block: SignedBeaconBlock, - cachedData: NullBlockInput["cachedData"], - opts: { - metrics?: Metrics | null; - executionEngine: IExecutionEngine; - emitter: ChainEventEmitter; - logger?: Logger; - } -): Promise { - if (unavailableBlockInput.block !== null && unavailableBlockInput.type !== BlockInputType.dataPromise) { - return unavailableBlockInput; - } - - if (cachedData.fork === ForkName.deneb || cachedData.fork === ForkName.electra) { - const {blobsCache, resolveAvailability} = cachedData; - - // resolve missing blobs - const blobIdentifiers: deneb.BlobIdentifier[] = []; - const slot = block.message.slot; - const blockRoot = config.getForkTypes(slot).BeaconBlock.hashTreeRoot(block.message); - - 
const blobKzgCommitmentsLen = (block.message.body as deneb.BeaconBlockBody).blobKzgCommitments.length; - for (let index = 0; index < blobKzgCommitmentsLen; index++) { - if (blobsCache.has(index) === false) blobIdentifiers.push({blockRoot, index}); - } - - let allBlobSidecars: deneb.BlobSidecar[]; - if (blobIdentifiers.length > 0) { - allBlobSidecars = await network.sendBlobSidecarsByRoot(peerId, blobIdentifiers); - } else { - allBlobSidecars = []; - } - - // add them in cache so that its reflected in all the blockInputs that carry this - // for e.g. a blockInput that might be awaiting blobs promise fullfillment in - // verifyBlocksDataAvailability - for (const blobSidecar of allBlobSidecars) { - blobsCache.set(blobSidecar.index, blobSidecar); - } - - // check and see if all blobs are now available and in that case resolve availability - // if not this will error and the leftover blobs will be tried from another peer - const allBlobs = getBlockInputBlobs(blobsCache); - const {blobs} = allBlobs; - if (blobs.length !== blobKzgCommitmentsLen) { - throw Error(`Not all blobs fetched missingBlobs=${blobKzgCommitmentsLen - blobs.length}`); - } - const blockData = {fork: cachedData.fork, ...allBlobs, blobsSource: BlobsSource.byRoot} as BlockInputBlobs; - resolveAvailability(blockData); - opts.metrics?.syncUnknownBlock.resolveAvailabilitySource.inc({source: BlockInputAvailabilitySource.UNKNOWN_SYNC}); - return getBlockInput.availableData(config, block, BlockSource.byRoot, blockData); - } - - // fulu fork - const {dataColumnsCache, resolveAvailability} = cachedData as CachedDataColumns; - - // resolve missing blobs - const slot = block.message.slot; - const blockRoot = config.getForkTypes(slot).BeaconBlock.hashTreeRoot(block.message); - - const blobKzgCommitments = (block.message.body as deneb.BeaconBlockBody).blobKzgCommitments; - if (blobKzgCommitments.length === 0) { - const blockData = { - fork: cachedData.fork, - dataColumns: [], - dataColumnsBytes: [], - dataColumnsSource: DataColumnsSource.gossip, - } as BlockInputDataColumns; - - resolveAvailability(blockData); - opts.metrics?.syncUnknownBlock.resolveAvailabilitySource.inc({source: BlockInputAvailabilitySource.UNKNOWN_SYNC}); - return getBlockInput.availableData(config, block, BlockSource.byRoot, blockData); - } - - const sampledColumns = network.custodyConfig.sampledColumns; - let neededColumns = sampledColumns.reduce((acc, elem) => { - if (dataColumnsCache.get(elem) === undefined) { - acc.push(elem); - } - return acc; - }, [] as number[]); - - let resolveBlockInput: ((block: BlockInput) => void) | null = null; - const blockInputPromise = new Promise((resolveCB) => { - resolveBlockInput = resolveCB; - }); - if (resolveBlockInput === null) { - throw Error("Promise Constructor was not executed immediately"); - } - - const gotColumnsFromExecution = await getDataColumnsFromExecution( - config, - network.custodyConfig, - opts.executionEngine, - opts.emitter, - { - fork: config.getForkName(block.message.slot), - block: block, - cachedData: cachedData, - blockInputPromise, - resolveBlockInput, - }, - opts.metrics ?? 
null - ); - - if (!gotColumnsFromExecution) { - const {custodyGroups: peerColumns} = network.getConnectedPeerSyncMeta(peerId); - - // get match - const columns = peerColumns.reduce((acc, elem) => { - if (neededColumns.includes(elem)) { - acc.push(elem); - } - return acc; - }, [] as number[]); - - // this peer can't help fetching columns for this block - if (unavailableBlockInput.block !== null && columns.length === 0 && neededColumns.length > 0) { - return unavailableBlockInput; - } - - let allDataColumnSidecars: fulu.DataColumnSidecar[]; - if (columns.length > 0) { - allDataColumnSidecars = await network.sendDataColumnSidecarsByRoot(peerId, [{blockRoot, columns}]); - opts.metrics?.dataColumns.bySource.inc({source: DataColumnsSource.byRoot}, allDataColumnSidecars.length); - } else { - allDataColumnSidecars = []; - } - - const logCtx = { - slot: block.message.slot, - requestedColumns: columns.join(","), - respondedColumns: allDataColumnSidecars.map((dcs) => dcs.index).join(","), - peerClient, - }; - - opts.logger?.verbose("unavailableBeaconBlobsByRootPostFulu: Requested data columns from peer", logCtx); - - // the same to matchBlockWithDataColumns() without expecting requested data columns = responded data columns - // because at gossip time peer may not have enough column to return - for (const dataColumnSidecar of allDataColumnSidecars) { - dataColumnsCache.set(dataColumnSidecar.index, { - dataColumn: dataColumnSidecar, - // TODO: req/resp should return bytes here - dataColumnBytes: null, - }); - } - } - - // reevaluate needeColumns and resolve availability if possible - neededColumns = sampledColumns.reduce((acc, elem) => { - if (dataColumnsCache.get(elem) === undefined) { - acc.push(elem); - } - return acc; - }, [] as number[]); - - const logCtx = { - slot: block.message.slot, - neededColumns: neededColumns.join(","), - sampledColumns: sampledColumns.join(","), - }; - - if (neededColumns.length === 0) { - const {dataColumns, dataColumnsBytes} = getBlockInputDataColumns( - (cachedData as CachedDataColumns).dataColumnsCache, - sampledColumns - ); - - // don't forget to resolve availability as the block may be stuck in availability wait - const blockData = { - fork: config.getForkName(block.message.slot), - dataColumns, - dataColumnsBytes, - dataColumnsSource: DataColumnsSource.byRoot, - } as BlockInputDataColumns; - resolveAvailability(blockData); - opts.logger?.verbose( - "unavailableBeaconBlobsByRootPostFulu: Resolved availability for block with all data columns", - logCtx - ); - return getBlockInput.availableData(config, block, BlockSource.byRoot, blockData); - } - opts.logger?.verbose("unavailableBeaconBlobsByRootPostFulu: Still missing data columns for block", logCtx); - return getBlockInput.dataPromise(config, block, BlockSource.byRoot, cachedData); -} diff --git a/packages/beacon-node/src/network/reqresp/index.ts b/packages/beacon-node/src/network/reqresp/index.ts index dfce5c426c8c..033834c4eadf 100644 --- a/packages/beacon-node/src/network/reqresp/index.ts +++ b/packages/beacon-node/src/network/reqresp/index.ts @@ -1,4 +1,2 @@ export * from "./ReqRespBeaconNode.js"; export * from "./interface.js"; -export * from "./beaconBlocksMaybeBlobsByRange.js"; -export * from "./beaconBlocksMaybeBlobsByRoot.js"; diff --git a/packages/beacon-node/src/sync/constants.ts b/packages/beacon-node/src/sync/constants.ts index 0df632917acd..7bef3598b181 100644 --- a/packages/beacon-node/src/sync/constants.ts +++ b/packages/beacon-node/src/sync/constants.ts @@ -12,7 +12,8 @@ export const 
MAX_BATCH_DOWNLOAD_ATTEMPTS = 20;
 
 /**
  * Consider batch faulty after downloading and processing this number of times
- * for example a peer may send us a non-canonical chain segment or not returning all blocks
+ * as described in https://github.com/ChainSafe/lodestar/issues/8147 we cannot proceed with the sync chain if there is an unknown
+ * parent from a prior batch. For example, a peer may send us a non-canonical chain segment or not return all blocks
  * in that case we should throw error and `RangeSync` should remove that error chain and add a new one.
  **/
 export const MAX_BATCH_PROCESSING_ATTEMPTS = 0;
diff --git a/packages/beacon-node/src/sync/interface.ts b/packages/beacon-node/src/sync/interface.ts
index 4aef2b74be8d..546d457eb618 100644
--- a/packages/beacon-node/src/sync/interface.ts
+++ b/packages/beacon-node/src/sync/interface.ts
@@ -1,8 +1,7 @@
 import {routes} from "@lodestar/api";
 import {BeaconConfig} from "@lodestar/config";
-import {RootHex, Slot, phase0} from "@lodestar/types";
+import {Slot, phase0} from "@lodestar/types";
 import {Logger} from "@lodestar/utils";
-import {BlockInput, BlockInputType, NullBlockInput} from "../chain/blocks/types.js";
 import {IBeaconChain} from "../chain/index.js";
 import {IBeaconDb} from "../db/index.js";
 import {Metrics} from "../metrics/index.js";
@@ -54,62 +53,3 @@ export interface SyncModules {
   chain: IBeaconChain;
   wsCheckpoint?: phase0.Checkpoint;
 }
-
-export type UnknownAndAncestorBlocks = {
-  unknowns: UnknownBlock[];
-  ancestors: DownloadedBlock[];
-};
-
-/**
- * onUnknownBlock: store 1 record with undefined parentBlockRootHex & blockInput, blockRootHex as key, status pending
- * onUnknownBlockParent:
- *   - store 1 record with known parentBlockRootHex & blockInput, blockRootHex as key, status downloaded
- *   - store 1 record with undefined parentBlockRootHex & blockInput, parentBlockRootHex as key, status pending
- */
-export type PendingBlock = UnknownBlock | DownloadedBlock;
-
-type PendingBlockCommon = {
-  blockRootHex: RootHex;
-  peerIdStrs: Set;
-  downloadAttempts: number;
-};
-
-export type UnknownBlock = PendingBlockCommon & {
-  status: PendingBlockStatus.pending | PendingBlockStatus.fetching;
-  parentBlockRootHex: null;
-} & (
-    | {unknownBlockType: PendingBlockType.UNKNOWN_BLOCK; blockInput: null}
-    | {unknownBlockType: PendingBlockType.UNKNOWN_DATA; blockInput: BlockInput & {type: BlockInputType.dataPromise}}
-    | {unknownBlockType: PendingBlockType.UNKNOWN_BLOCKINPUT; blockInput: NullBlockInput}
-  );
-
-/**
- * either the blobs are unknown or in future some blobs and even the block is unknown
- */
-
-export type DownloadedBlock = PendingBlockCommon & {
-  status: PendingBlockStatus.downloaded | PendingBlockStatus.processing;
-  parentBlockRootHex: RootHex;
-  blockInput: BlockInput;
-};
-
-export enum PendingBlockStatus {
-  pending = "pending",
-  fetching = "fetching",
-  downloaded = "downloaded",
-  processing = "processing",
-}
-
-export enum PendingBlockType {
-  /**
-   * We got a block root (from a gossip attestation, for exxample) but we don't have the block in forkchoice.
-   */
-  UNKNOWN_BLOCK = "unknown_block",
-  /**
-   * During gossip time, we may get a block but the parent root is unknown (not in forkchoice).
- */ - UNKNOWN_PARENT = "unknown_parent", - - UNKNOWN_BLOCKINPUT = "unknown_blockinput", - UNKNOWN_DATA = "unknown_data", -} diff --git a/packages/beacon-node/src/sync/options.ts b/packages/beacon-node/src/sync/options.ts index 7afd624b9d2f..a7248f42e338 100644 --- a/packages/beacon-node/src/sync/options.ts +++ b/packages/beacon-node/src/sync/options.ts @@ -15,7 +15,7 @@ export type SyncOptions = { /** USE FOR TESTING ONLY. Disable range sync completely */ disableRangeSync?: boolean; /** USE FOR TESTING ONLY. Disable unknown block sync completely */ - disableUnknownBlockSync?: boolean; + disableBlockInputSync?: boolean; /** * The batch size of slots for backfill sync can attempt to sync/process before yielding * to sync loop. This number can be increased or decreased to make a suitable resource diff --git a/packages/beacon-node/src/sync/range/batch.ts b/packages/beacon-node/src/sync/range/batch.ts index 6e8e8d6d6560..724778cfaa8b 100644 --- a/packages/beacon-node/src/sync/range/batch.ts +++ b/packages/beacon-node/src/sync/range/batch.ts @@ -1,13 +1,18 @@ import {ChainForkConfig} from "@lodestar/config"; -import {ForkSeq} from "@lodestar/params"; -import {Epoch, RootHex, phase0} from "@lodestar/types"; +import {ForkName, isForkPostDeneb, isForkPostFulu} from "@lodestar/params"; +import {Epoch, RootHex, Slot, phase0} from "@lodestar/types"; import {LodestarError} from "@lodestar/utils"; -import {BlockInput} from "../../chain/blocks/types.js"; +import {isBlockInputColumns} from "../../chain/blocks/blockInput/blockInput.js"; +import {IBlockInput} from "../../chain/blocks/blockInput/types.js"; import {BlockError, BlockErrorCode} from "../../chain/errors/index.js"; -import {PartialDownload} from "../../network/reqresp/beaconBlocksMaybeBlobsByRange.js"; +import {CustodyConfig} from "../../util/dataColumns.js"; import {PeerIdStr} from "../../util/peerId.js"; import {MAX_BATCH_DOWNLOAD_ATTEMPTS, MAX_BATCH_PROCESSING_ATTEMPTS} from "../constants.js"; +import {DownloadByRangeRequests} from "../utils/downloadByRange.js"; import {getBatchSlotRange, hashBlocks} from "./utils/index.js"; +import {PeerSyncMeta} from "../../network/peers/peersData.js"; +import {IClock} from "../../util/clock.js"; +import {isDaOutOfRange} from "../../chain/blocks/blockInput/utils.js"; /** * Current state of a batch @@ -33,33 +38,33 @@ export enum BatchStatus { export type Attempt = { /** The peer that made the attempt */ - peer: PeerIdStr; + peers: PeerIdStr[]; /** The hash of the blocks of the attempt */ hash: RootHex; }; +export type AwaitingDownloadState = { + status: BatchStatus.AwaitingDownload; + blocks: IBlockInput[]; +}; + +export type DownloadSuccessState = { + status: BatchStatus.AwaitingProcessing; + blocks: IBlockInput[]; +}; + export type BatchState = - | {status: BatchStatus.AwaitingDownload; partialDownload: PartialDownload} - | {status: BatchStatus.Downloading; peer: PeerIdStr; partialDownload: PartialDownload} - | {status: BatchStatus.AwaitingProcessing; peer: PeerIdStr; blocks: BlockInput[]} - | {status: BatchStatus.Processing; attempt: Attempt} - | {status: BatchStatus.AwaitingValidation; attempt: Attempt}; + | AwaitingDownloadState + | {status: BatchStatus.Downloading; peer: PeerIdStr; blocks: IBlockInput[]} + | DownloadSuccessState + | {status: BatchStatus.Processing; blocks: IBlockInput[]; attempt: Attempt} + | {status: BatchStatus.AwaitingValidation; blocks: IBlockInput[]; attempt: Attempt}; export type BatchMetadata = { startEpoch: Epoch; status: BatchStatus; }; -export type DownloadSuccessOutput = - 
  | {
-      status: BatchStatus.AwaitingProcessing;
-      blocks: BlockInput[];
-    }
-  | {
-      status: BatchStatus.AwaitingDownload;
-      pendingDataColumns: number[];
-    };
-
 /**
  * Batches are downloaded at the first block of the epoch.
  *
@@ -72,11 +77,17 @@ export type DownloadSuccessOutput =
  * Jul2022: Offset changed from 1 to 0, see rationale in {@link BATCH_SLOT_OFFSET}
  */
 export class Batch {
+  readonly forkName: ForkName;
   readonly startEpoch: Epoch;
+  readonly startSlot: Slot;
+  readonly count: number;
+
+  /** Block, blob and column requests, used both to select the best peer and as the requests passed to downloadByRange */
+  requests: DownloadByRangeRequests;
   /** State of the batch. */
-  state: BatchState = {status: BatchStatus.AwaitingDownload, partialDownload: null};
-  /** BeaconBlocksByRangeRequest */
-  readonly request: phase0.BeaconBlocksByRangeRequest;
+  state: BatchState = {status: BatchStatus.AwaitingDownload, blocks: []};
+  /** Peers that provided good data */
+  goodPeers: PeerIdStr[] = [];
   /** The `Attempts` that have been made and failed to send us this batch. */
   readonly failedProcessingAttempts: Attempt[] = [];
   /** The `Attempts` that have been made and failed because of execution malfunction. */
@@ -84,16 +95,156 @@ export class Batch {
   /** The number of download retries this batch has undergone due to a failed request. */
   private readonly failedDownloadAttempts: PeerIdStr[] = [];
   private readonly config: ChainForkConfig;
+  private readonly clock: IClock;
+  private readonly custodyConfig: CustodyConfig;
 
-  constructor(startEpoch: Epoch, config: ChainForkConfig) {
-    const {startSlot, count} = getBatchSlotRange(startEpoch);
-
+  constructor(startEpoch: Epoch, config: ChainForkConfig, clock: IClock, custodyConfig: CustodyConfig) {
     this.config = config;
+    this.clock = clock;
+    this.custodyConfig = custodyConfig;
+
+    const {startSlot, count} = getBatchSlotRange(startEpoch);
+    this.forkName = this.config.getForkName(startSlot);
     this.startEpoch = startEpoch;
-    this.request = {
-      startSlot,
-      count,
-      step: 1,
+    this.startSlot = startSlot;
+    this.count = count;
+    this.requests = this.getRequests([]);
+  }
+
+  /**
+   * Builds ByRange requests for blocks, blobs and columns
+   */
+  private getRequests(blocks: IBlockInput[]): DownloadByRangeRequests {
+    const withinValidRequestWindow = !isDaOutOfRange(
+      this.config,
+      this.forkName,
+      this.startSlot,
+      this.clock.currentEpoch
+    );
+
+    // fresh request where no blocks have started to be pulled yet
+    if (!blocks.length) {
+      const blocksRequest: phase0.BeaconBlocksByRangeRequest = {
+        startSlot: this.startSlot,
+        count: this.count,
+        step: 1,
+      };
+      if (isForkPostFulu(this.forkName) && withinValidRequestWindow) {
+        return {
+          blocksRequest,
+          columnsRequest: {
+            startSlot: this.startSlot,
+            count: this.count,
+            columns: this.custodyConfig.sampledColumns,
+          },
+        };
+      }
+      if (isForkPostDeneb(this.forkName) && withinValidRequestWindow) {
+        return {
+          blocksRequest,
+          blobsRequest: {
+            startSlot: this.startSlot,
+            count: this.count,
+          },
+        };
+      }
+      return {
+        blocksRequest,
+      };
+    }
+
+    // subsequent request where part of the epoch has already been downloaded. Need to determine the beginning of
+    // the range where the download should resume
+    let blockStartSlot = this.startSlot;
+    let dataStartSlot = this.startSlot;
+    const neededColumns = new Set<number>();
+
+    // blocks are expected to already be in slot-wise order (they are sorted in downloadingSuccess)
+    for (const blockInput of blocks) {
+      const blockSlot = blockInput.slot;
+      // check if the block/data is present (hasBlock/hasAllData). If present, and startSlot is the same as
+      // blockSlot, we do not need to pull that slot, so increment startSlot by 1. The check fails if there is
+      // a gap before the blocks/data are present again; to simplify the request, just re-pull the remainder
+      // of the range.
+      //
+      // ie startSlot = 32 and count = 32. so for slots = [32, 33, 34, 35, 36, _, 38, 39, _, _, ... _endSlot=63_]
+      // this will return an updated startSlot of 37 and pull range 37-63 on the next request.
+      //
+      // if all slots have already been pulled then the startSlot will eventually get incremented to the slot after
+      // the desired end slot
+      if (blockInput.hasBlock() && blockStartSlot === blockSlot) {
+        blockStartSlot = blockSlot + 1;
+      }
+      if (!blockInput.hasAllData()) {
+        if (isBlockInputColumns(blockInput)) {
+          for (const index of blockInput.getMissingSampledColumnMeta().missing) {
+            neededColumns.add(index);
+          }
+        }
+      } else if (dataStartSlot === blockSlot) {
+        dataStartSlot = blockSlot + 1;
+      }
+    }
+
+    // if the blockStartSlot or dataStartSlot is after the desired endSlot then no request will be made for the batch
+    // because it is complete
+    const endSlot = this.startSlot + this.count - 1;
+    const requests: DownloadByRangeRequests = {};
+    if (blockStartSlot <= endSlot) {
+      requests.blocksRequest = {
+        startSlot: blockStartSlot,
+        // for a range of 40 - 63, startSlot is inclusive but the subtraction is exclusive, so we need to + 1
+        count: endSlot - blockStartSlot + 1,
+        step: 1,
+      };
+    }
+    if (dataStartSlot <= endSlot) {
+      // for a range of 40 - 63, startSlot is inclusive but the subtraction is exclusive, so we need to + 1
+      const count = endSlot - dataStartSlot + 1;
+      if (isForkPostFulu(this.forkName) && withinValidRequestWindow) {
+        requests.columnsRequest = {
+          count,
+          startSlot: dataStartSlot,
+          columns: Array.from(neededColumns),
+        };
+      } else if (isForkPostDeneb(this.forkName) && withinValidRequestWindow) {
+        requests.blobsRequest = {
+          count,
+          startSlot: dataStartSlot,
+        };
+      }
+      // dataStartSlot may still be <= endSlot here, but we do not create a data request for pre-Deneb forks
+    }
+
+    return requests;
+  }
+
+  /**
+   * Post-fulu we should only request columns that the peer has advertised
+   */
+  getRequestsForPeer(peer: PeerSyncMeta): DownloadByRangeRequests {
+    if (!isForkPostFulu(this.forkName)) {
+      return this.requests;
+    }
+
+    // post-fulu we need to ensure that we only request columns that the peer has advertised
+    const {columnsRequest} = this.requests;
+    if (columnsRequest == null) {
+      return this.requests;
+    }
+
+    const peerColumns = new Set(peer.custodyGroups ?? []);
+    const requestedColumns = columnsRequest.columns.filter((c) => peerColumns.has(c));
+    if (requestedColumns.length === columnsRequest.columns.length) {
+      return this.requests;
+    }
+
+    return {
+      ...this.requests,
+      columnsRequest: {
+        ...columnsRequest,
+        columns: requestedColumns,
+      },
+    };
+  }
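
To make the resume computation above concrete, here is a minimal standalone sketch (hypothetical code, not part of this changeset; SlotState stands in for the subset of IBlockInput behavior the scan relies on, and blocks are assumed sorted by slot):

type SlotState = {slot: number; hasBlock: boolean; hasAllData: boolean};

function computeResumeStarts(startSlot: number, blocks: SlotState[]): {blockStartSlot: number; dataStartSlot: number} {
  let blockStartSlot = startSlot;
  let dataStartSlot = startSlot;
  for (const b of blocks) {
    // only advance past a slot while the completed prefix is contiguous; any gap re-pulls the remainder of the range
    if (b.hasBlock && blockStartSlot === b.slot) blockStartSlot = b.slot + 1;
    if (b.hasAllData && dataStartSlot === b.slot) dataStartSlot = b.slot + 1;
  }
  return {blockStartSlot, dataStartSlot};
}

// slots 32-36 complete, 37 missing, 38-39 complete: both start slots resume at 37,
// so the next request re-pulls 37-63 even though 38 and 39 were already received
const downloaded = [32, 33, 34, 35, 36, 38, 39].map((slot) => ({slot, hasBlock: true, hasAllData: true}));
console.log(computeResumeStarts(32, downloaded)); // {blockStartSlot: 37, dataStartSlot: 37}
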
@@ -101,94 +252,100 @@ export class Batch {
   /**
    * Gives a list of peers from which this batch has had a failed download or processing attempt.
    */
   getFailedPeers(): PeerIdStr[] {
-    return [...this.failedDownloadAttempts, ...this.failedProcessingAttempts.map((a) => a.peer)];
+    return [...this.failedDownloadAttempts, ...this.failedProcessingAttempts.flatMap((a) => a.peers)];
   }
 
   getMetadata(): BatchMetadata {
     return {startEpoch: this.startEpoch, status: this.state.status};
   }
 
+  getBlocks(): IBlockInput[] {
+    return this.state.blocks;
+  }
+
   /**
    * AwaitingDownload -> Downloading
    */
-  startDownloading(peer: PeerIdStr): PartialDownload {
+  startDownloading(peer: PeerIdStr): void {
     if (this.state.status !== BatchStatus.AwaitingDownload) {
       throw new BatchError(this.wrongStatusErrorType(BatchStatus.AwaitingDownload));
     }
 
-    const {partialDownload} = this.state;
-    this.state = {status: BatchStatus.Downloading, peer, partialDownload};
-    return partialDownload;
+    this.state = {status: BatchStatus.Downloading, peer, blocks: this.state.blocks};
   }
 
   /**
    * Downloading -> AwaitingProcessing
-   * pendingDataColumns is null when a complete download is done, otherwise it contains the columns that are still pending
   */
-  downloadingSuccess(downloadResult: {
-    blocks: BlockInput[];
-    pendingDataColumns: null | number[];
-  }): DownloadSuccessOutput {
+  downloadingSuccess(peer: PeerIdStr, blocks: IBlockInput[]): DownloadSuccessState {
    if (this.state.status !== BatchStatus.Downloading) {
       throw new BatchError(this.wrongStatusErrorType(BatchStatus.Downloading));
     }
-    let updatedPendingDataColumns = this.state.partialDownload?.pendingDataColumns ?? null;
-    const {blocks, pendingDataColumns} = downloadResult;
-    if (updatedPendingDataColumns == null) {
-      // state pendingDataColumns is null as initial value, just update it to pendingDataColumns in this case
-      updatedPendingDataColumns = pendingDataColumns;
-    } else {
-      updatedPendingDataColumns =
-        // pendingDataColumns = null means a complete download
-        pendingDataColumns == null
-          ? null
-          : // if not state pendingDataColumns should be reduced over time, see see https://github.com/ChainSafe/lodestar/issues/8036
-            updatedPendingDataColumns.filter((column) => pendingDataColumns.includes(column));
+    // ensure that blocks are always sorted before being stored in batch.state or used by getRequests
+    blocks.sort((a, b) => a.slot - b.slot);
+
+    this.goodPeers.push(peer);
+
+    let allComplete = true;
+    const slots = new Set<Slot>();
+    for (const block of blocks) {
+      slots.add(block.slot);
+      if (!block.hasBlockAndAllData()) {
+        allComplete = false;
+      }
     }
 
-    if (updatedPendingDataColumns === null) {
-      // complete download
-      this.state = {status: BatchStatus.AwaitingProcessing, peer: this.state.peer, blocks};
-      return {status: BatchStatus.AwaitingProcessing, blocks};
+    if (slots.size > this.count) {
+      throw new BatchError({
+        code: BatchErrorCode.INVALID_COUNT,
+        startEpoch: this.startEpoch,
+        count: slots.size,
+        expected: this.count,
+        status: this.state.status,
+      });
+    }
+    if (allComplete) {
+      this.state = {status: BatchStatus.AwaitingProcessing, blocks};
+    } else {
+      this.requests = this.getRequests(blocks);
+      this.state = {status: BatchStatus.AwaitingDownload, blocks};
     }
 
-    // partial download, track updatedPendingDataColumns in state
-    this.state = {
-      status: BatchStatus.AwaitingDownload,
-      partialDownload: blocks.length === 0 ? null : {blocks, pendingDataColumns: updatedPendingDataColumns},
-    };
-    return {status: BatchStatus.AwaitingDownload, pendingDataColumns: updatedPendingDataColumns};
+    return this.state as DownloadSuccessState;
   }
 
   /**
    * Downloading -> AwaitingDownload
   */
-  downloadingError(): void {
+  downloadingError(peer: PeerIdStr): void {
     if (this.state.status !== BatchStatus.Downloading) {
       throw new BatchError(this.wrongStatusErrorType(BatchStatus.Downloading));
     }
 
-    this.failedDownloadAttempts.push(this.state.peer);
+    this.failedDownloadAttempts.push(peer);
     if (this.failedDownloadAttempts.length > MAX_BATCH_DOWNLOAD_ATTEMPTS) {
       throw new BatchError(this.errorType({code: BatchErrorCode.MAX_DOWNLOAD_ATTEMPTS}));
     }
 
-    const {partialDownload} = this.state;
-    this.state = {status: BatchStatus.AwaitingDownload, partialDownload};
+    this.state = {status: BatchStatus.AwaitingDownload, blocks: this.state.blocks};
   }
 
   /**
    * AwaitingProcessing -> Processing
   */
-  startProcessing(): BlockInput[] {
+  startProcessing(): IBlockInput[] {
     if (this.state.status !== BatchStatus.AwaitingProcessing) {
       throw new BatchError(this.wrongStatusErrorType(BatchStatus.AwaitingProcessing));
     }
 
     const blocks = this.state.blocks;
     const hash = hashBlocks(blocks, this.config); // tracks blocks to report peer on processing error
-    this.state = {status: BatchStatus.Processing, attempt: {peer: this.state.peer, hash}};
+    // Reset goodPeers in case another download attempt needs to be made. Whether or not the Attempt succeeds, the
+    // peers that the data came from will be handled via the Attempt that goes on to processing
+    const peers = this.goodPeers;
+    this.goodPeers = [];
+    this.state = {status: BatchStatus.Processing, blocks, attempt: {peers, hash}};
     return blocks;
   }
 
@@ -200,7 +357,7 @@
       throw new BatchError(this.wrongStatusErrorType(BatchStatus.Processing));
     }
 
-    this.state = {status: BatchStatus.AwaitingValidation, attempt: this.state.attempt};
+    this.state = {status: BatchStatus.AwaitingValidation, blocks: this.state.blocks, attempt: this.state.attempt};
   }
 
   /**
@@ -243,17 +400,15 @@
     return this.state.attempt;
   }
 
-  isPostFulu(): boolean {
-    return this.config.getForkSeq(this.request.startSlot) >= ForkSeq.fulu;
-  }
-
   private onExecutionEngineError(attempt: Attempt): void {
     this.executionErrorAttempts.push(attempt);
     if (this.executionErrorAttempts.length > MAX_BATCH_PROCESSING_ATTEMPTS) {
       throw new BatchError(this.errorType({code: BatchErrorCode.MAX_EXECUTION_ENGINE_ERROR_ATTEMPTS}));
     }
 
-    this.state = {status: BatchStatus.AwaitingDownload, partialDownload: null};
+    // remove any downloaded blocks and re-attempt
+    // TODO(fulu): need to remove the bad blocks from the SeenBlockInputCache
+    this.state = {status: BatchStatus.AwaitingDownload, blocks: []};
   }
 
   private onProcessingError(attempt: Attempt): void {
@@ -262,7 +417,9 @@
       throw new BatchError(this.errorType({code: BatchErrorCode.MAX_PROCESSING_ATTEMPTS}));
     }
 
-    this.state = {status: BatchStatus.AwaitingDownload, partialDownload: null};
+    // remove any downloaded blocks and re-attempt
+    // TODO(fulu): need to remove the bad blocks from the SeenBlockInputCache
+    this.state = {status: BatchStatus.AwaitingDownload, blocks: []};
   }
 
   /** Helper to construct typed BatchError.
Stack traces are correct as the error is thrown above */ @@ -277,6 +434,7 @@ export class Batch { export enum BatchErrorCode { WRONG_STATUS = "BATCH_ERROR_WRONG_STATUS", + INVALID_COUNT = "BATCH_ERROR_INVALID_COUNT", MAX_DOWNLOAD_ATTEMPTS = "BATCH_ERROR_MAX_DOWNLOAD_ATTEMPTS", MAX_PROCESSING_ATTEMPTS = "BATCH_ERROR_MAX_PROCESSING_ATTEMPTS", MAX_EXECUTION_ENGINE_ERROR_ATTEMPTS = "MAX_EXECUTION_ENGINE_ERROR_ATTEMPTS", @@ -284,6 +442,7 @@ export enum BatchErrorCode { type BatchErrorType = | {code: BatchErrorCode.WRONG_STATUS; expectedStatus: BatchStatus} + | {code: BatchErrorCode.INVALID_COUNT; count: number; expected: number} | {code: BatchErrorCode.MAX_DOWNLOAD_ATTEMPTS} | {code: BatchErrorCode.MAX_PROCESSING_ATTEMPTS} | {code: BatchErrorCode.MAX_EXECUTION_ENGINE_ERROR_ATTEMPTS}; diff --git a/packages/beacon-node/src/sync/range/chain.ts b/packages/beacon-node/src/sync/range/chain.ts index bf239ce4b72f..48fdee091bec 100644 --- a/packages/beacon-node/src/sync/range/chain.ts +++ b/packages/beacon-node/src/sync/range/chain.ts @@ -1,17 +1,20 @@ import {ChainForkConfig} from "@lodestar/config"; -import {ForkName, isForkPostFulu} from "@lodestar/params"; -import {Epoch, Root, Slot, phase0} from "@lodestar/types"; -import {ErrorAborted, Logger, toRootHex} from "@lodestar/utils"; -import {BlockInput, BlockInputDataColumns, BlockInputType} from "../../chain/blocks/types.js"; +import {Epoch, Root, Slot} from "@lodestar/types"; +import {ErrorAborted, LodestarError, Logger, toRootHex} from "@lodestar/utils"; +import {isBlockInputBlobs, isBlockInputColumns} from "../../chain/blocks/blockInput/blockInput.js"; +import {BlockInputErrorCode} from "../../chain/blocks/blockInput/errors.js"; +import {IBlockInput} from "../../chain/blocks/blockInput/types.js"; +import {BlobSidecarErrorCode} from "../../chain/errors/blobSidecarError.js"; +import {DataColumnSidecarErrorCode} from "../../chain/errors/dataColumnSidecarError.js"; import {Metrics} from "../../metrics/metrics.js"; import {PeerAction, prettyPrintPeerIdStr} from "../../network/index.js"; import {PeerSyncMeta} from "../../network/peers/peersData.js"; -import {PartialDownload} from "../../network/reqresp/beaconBlocksMaybeBlobsByRange.js"; import {CustodyConfig} from "../../util/dataColumns.js"; import {ItTrigger} from "../../util/itTrigger.js"; import {PeerIdStr} from "../../util/peerId.js"; -import {wrapError} from "../../util/wrapError.js"; +import {WarnResult, wrapError} from "../../util/wrapError.js"; import {BATCH_BUFFER_SIZE, EPOCHS_PER_BATCH, MAX_LOOK_AHEAD_EPOCHS} from "../constants.js"; +import {DownloadByRangeError, DownloadByRangeErrorCode} from "../utils/downloadByRange.js"; import {RangeSyncType} from "../utils/remoteSyncType.js"; import {Batch, BatchError, BatchErrorCode, BatchMetadata, BatchStatus} from "./batch.js"; import { @@ -26,9 +29,11 @@ import { toBeDownloadedStartEpoch, validateBatchesStatus, } from "./utils/index.js"; +import {IClock} from "../../util/clock.js"; export type SyncChainModules = { config: ChainForkConfig; + clock: IClock; custodyConfig: CustodyConfig; logger: Logger; metrics: Metrics | null; @@ -39,20 +44,21 @@ export type SyncChainFns = { * Must return if ALL blocks are processed successfully * If SOME blocks are processed must throw BlockProcessorError() */ - processChainSegment: (blocks: BlockInput[], syncType: RangeSyncType) => Promise; + processChainSegment: (blocks: IBlockInput[], syncType: RangeSyncType) => Promise; /** Must download blocks, and validate their range */ - downloadBeaconBlocksByRange: ( + 
downloadByRange: ( peer: PeerSyncMeta, - request: phase0.BeaconBlocksByRangeRequest, - partialDownload: PartialDownload, + batch: Batch, syncType: RangeSyncType - ) => Promise<{blocks: BlockInput[]; pendingDataColumns: null | number[]}>; + ) => Promise>; /** Report peer for negative actions. Decouples from the full network instance */ reportPeer: (peer: PeerIdStr, action: PeerAction, actionName: string) => void; /** Gets current peer custodyColumns and earliestAvailableSlot */ getConnectedPeerSyncMeta: (peerId: string) => PeerSyncMeta; /** Hook called when Chain state completes */ onEnd: (err: Error | null, target: ChainTarget | null) => void; + /** Deletes an array of BlockInputs from the BlockInputCache */ + pruneBlockInputs: (blockInputs: IBlockInput[]) => void; }; /** @@ -117,9 +123,11 @@ export class SyncChain { private status = SyncChainStatus.Stopped; private readonly processChainSegment: SyncChainFns["processChainSegment"]; - private readonly downloadBeaconBlocksByRange: SyncChainFns["downloadBeaconBlocksByRange"]; + private readonly downloadByRange: SyncChainFns["downloadByRange"]; private readonly reportPeer: SyncChainFns["reportPeer"]; private readonly getConnectedPeerSyncMeta: SyncChainFns["getConnectedPeerSyncMeta"]; + private readonly pruneBlockInputs: SyncChainFns["pruneBlockInputs"]; + /** AsyncIterable that guarantees processChainSegment is run only at once at anytime */ private readonly batchProcessor = new ItTrigger(); /** Sorted map of batches undergoing some kind of processing. */ @@ -128,6 +136,8 @@ export class SyncChain { private readonly logger: Logger; private readonly config: ChainForkConfig; + private readonly clock: IClock; + private readonly metrics: Metrics | null; private readonly custodyConfig: CustodyConfig; constructor( @@ -137,16 +147,19 @@ export class SyncChain { fns: SyncChainFns, modules: SyncChainModules ) { - const {config, custodyConfig, logger, metrics} = modules; + const {config, clock, custodyConfig, logger, metrics} = modules; this.firstBatchEpoch = initialBatchEpoch; this.lastEpochWithProcessBlocks = initialBatchEpoch; this.target = initialTarget; this.syncType = syncType; this.processChainSegment = fns.processChainSegment; - this.downloadBeaconBlocksByRange = fns.downloadBeaconBlocksByRange; + this.downloadByRange = fns.downloadByRange; this.reportPeer = fns.reportPeer; + this.pruneBlockInputs = fns.pruneBlockInputs; this.getConnectedPeerSyncMeta = fns.getConnectedPeerSyncMeta; this.config = config; + this.clock = clock; + this.metrics = metrics; this.custodyConfig = custodyConfig; this.logger = logger; this.logId = `${syncType}-${nextChainId++}`; @@ -310,6 +323,10 @@ export class SyncChain { return; // Ignore } + for (const batch of this.batches.values()) { + this.pruneBlockInputs(batch.getBlocks()); + } + this.status = SyncChainStatus.Error; this.logger.verbose("SyncChain Error", {id: this.logId}, e as Error); @@ -432,7 +449,7 @@ export class SyncChain { return null; } - const batch = new Batch(startEpoch, this.config); + const batch = new Batch(startEpoch, this.config, this.clock, this.custodyConfig); this.batches.set(startEpoch, batch); return batch; } @@ -447,70 +464,107 @@ export class SyncChain { peer: prettyPrintPeerIdStr(peer.peerId), }); try { - const partialDownload = batch.startDownloading(peer.peerId); + batch.startDownloading(peer.peerId); // wrapError ensures to never call both batch success() and batch error() - const res = await wrapError( - this.downloadBeaconBlocksByRange(peer, batch.request, partialDownload, 
this.syncType)
-      );
-
-      if (!res.err) {
-        const downloadSuccessOutput = batch.downloadingSuccess(res.result);
-        if (downloadSuccessOutput.status === BatchStatus.AwaitingProcessing) {
-          const blocks = downloadSuccessOutput.blocks;
-          let hasPostDenebBlocks = false;
-          const blobs = blocks.reduce((acc, blockInput) => {
-            hasPostDenebBlocks ||= blockInput.type === BlockInputType.availableData;
-            return hasPostDenebBlocks
-              ? acc +
-                  (blockInput.type === BlockInputType.availableData &&
-                  (blockInput.blockData.fork === ForkName.deneb || blockInput.blockData.fork === ForkName.electra)
-                    ? blockInput.blockData.blobs.length
-                    : 0)
-              : 0;
-          }, 0);
-          const dataColumns = blocks.reduce((acc, blockInput) => {
-            hasPostDenebBlocks ||= blockInput.type === BlockInputType.availableData;
-            return hasPostDenebBlocks
-              ? acc +
-                  (blockInput.type === BlockInputType.availableData && isForkPostFulu(blockInput.blockData.fork)
-                    ? (blockInput.blockData as BlockInputDataColumns).dataColumns.length
-                    : 0)
-              : 0;
-          }, 0);
-
-          const downloadInfo = {blocks: blocks.length};
-          if (hasPostDenebBlocks) {
-            Object.assign(downloadInfo, {blobs, dataColumns});
+      const res = await wrapError(this.downloadByRange(peer, batch, this.syncType));
+
+      if (res.err) {
+        // There are several known error cases where we want to take action on the peer
+        const errCode = (res.err as LodestarError<{code: string}>).type?.code;
+        this.metrics?.syncRange.downloadByRange.error.inc({client: peer.client, code: errCode ?? "UNKNOWN"});
+        if (this.syncType === RangeSyncType.Finalized) {
+          // For finalized sync, we are stricter with peers as there is no ambiguity about which chain we're syncing.
+          // The below cases indicate the peer may be on a different chain, so they are not penalized during head sync.
+          switch (errCode) {
+            case BlockInputErrorCode.MISMATCHED_ROOT_HEX:
+            case DownloadByRangeErrorCode.MISSING_BLOBS:
+            case DownloadByRangeErrorCode.EXTRA_BLOBS:
+            case DownloadByRangeErrorCode.MISSING_COLUMNS:
+            case DownloadByRangeErrorCode.EXTRA_COLUMNS:
+            case BlobSidecarErrorCode.INCORRECT_SIDECAR_COUNT:
+            case BlobSidecarErrorCode.INCORRECT_BLOCK:
+            case DataColumnSidecarErrorCode.INCORRECT_SIDECAR_COUNT:
+            case DataColumnSidecarErrorCode.INCORRECT_BLOCK:
+              this.reportPeer(peer.peerId, PeerAction.LowToleranceError, res.err.message);
          }
-          this.logger.debug("Downloaded batch", {
-            id: this.logId,
-            ...batch.getMetadata(),
-            ...downloadInfo,
-            peer: prettyPrintPeerIdStr(peer.peerId),
-          });
-          this.triggerBatchProcessor();
-        } else {
-          const pendingDataColumns = downloadSuccessOutput.pendingDataColumns.join(",");
-          this.logger.debug("Partially downloaded batch", {
-            id: this.logId,
-            ...batch.getMetadata(),
-            pendingDataColumns,
-            peer: peer.peerId,
-          });
-          // the flow will continue to call triggerBatchDownloader() below
        }
-      } else {
+        switch (errCode) {
+          case DownloadByRangeErrorCode.EXTRA_BLOCKS:
+          case DownloadByRangeErrorCode.OUT_OF_ORDER_BLOCKS:
+          case DownloadByRangeErrorCode.OUT_OF_RANGE_BLOCKS:
+          case DownloadByRangeErrorCode.PARENT_ROOT_MISMATCH:
+          case BlobSidecarErrorCode.INCLUSION_PROOF_INVALID:
+          case BlobSidecarErrorCode.INVALID_KZG_PROOF_BATCH:
+          case DataColumnSidecarErrorCode.INCORRECT_KZG_COMMITMENTS_COUNT:
+          case DataColumnSidecarErrorCode.INCORRECT_KZG_PROOF_COUNT:
+          case DataColumnSidecarErrorCode.INVALID_KZG_PROOF_BATCH:
+          case DataColumnSidecarErrorCode.INCLUSION_PROOF_INVALID:
+            this.reportPeer(peer.peerId, PeerAction.LowToleranceError, res.err.message);
        }
         this.logger.verbose(
           "Batch download error",
           {id: this.logId,
...batch.getMetadata(), peer: prettyPrintPeerIdStr(peer.peerId)},
           res.err
         );
-        batch.downloadingError(); // Throws after MAX_DOWNLOAD_ATTEMPTS
+        batch.downloadingError(peer.peerId); // Throws after MAX_DOWNLOAD_ATTEMPTS
+      } else {
+        this.logger.verbose("Batch download success", {
+          id: this.logId,
+          ...batch.getMetadata(),
+          peer: prettyPrintPeerIdStr(peer.peerId),
+        });
+        this.metrics?.syncRange.downloadByRange.success.inc();
+        const {warnings, result} = res.result;
+        const downloadSuccessOutput = batch.downloadingSuccess(peer.peerId, result);
+        const logMeta: Record<string, number> = {
+          blockCount: downloadSuccessOutput.blocks.length,
+        };
+
+        if (warnings && warnings.length > 0) {
+          for (const warning of warnings) {
+            this.metrics?.syncRange.downloadByRange.warn.inc({client: peer.client, code: warning.type.code});
+            this.logger.debug(
+              "Batch downloaded with warning",
+              {id: this.logId, epoch: batch.startEpoch, ...logMeta, peer: prettyPrintPeerIdStr(peer.peerId)},
+              warning
+            );
+          }
+        }
+
+        for (const block of downloadSuccessOutput.blocks) {
+          if (isBlockInputBlobs(block)) {
+            const blockLogMeta = block.getLogMeta();
+            const expectedBlobs = typeof blockLogMeta.expectedBlobs === "number" ? blockLogMeta.expectedBlobs : 0;
+            logMeta.expectedBlobCount = (logMeta.expectedBlobCount ?? 0) + expectedBlobs;
+            logMeta.receivedBlobCount = (logMeta.receivedBlobCount ?? 0) + blockLogMeta.receivedBlobs;
+          } else if (isBlockInputColumns(block)) {
+            logMeta.columnCount = (logMeta.columnCount ?? 0) + block.getLogMeta().receivedColumns;
+          }
+        }
+
+        let logMessage: string;
+        if (downloadSuccessOutput.status === BatchStatus.AwaitingProcessing) {
+          logMessage = "Finished downloading batch by range";
+          this.triggerBatchProcessor();
+        } else {
+          logMessage = "Partially downloaded batch by range. Attempting another round of downloads";
+          // the flow will continue to call triggerBatchDownloader() below
+        }
+
+        this.logger.debug(logMessage, {
+          id: this.logId,
+          epoch: batch.startEpoch,
+          ...logMeta,
+          peer: prettyPrintPeerIdStr(peer.peerId),
+        });
       }
 
       // Preemptively request more blocks from peers whilst we process current blocks
-      this.triggerBatchDownloader();
+      //
+      // TODO(fulu): why is this second call here? It should fall through to the one below the catch block. Commenting
+      // it out for now; will resolve during the PR process
+      // this.triggerBatchDownloader();
     } catch (e) {
       // bubble the error up to the main async iterable loop
       this.batchProcessor.end(e as Error);
@@ -531,6 +585,7 @@
 
     if (!res.err) {
       batch.processingSuccess();
+      this.pruneBlockInputs(batch.getBlocks());
 
       // If the processed batch is not empty, validate previous AwaitingValidation blocks.
      if (blocks.length > 0) {
@@ -583,12 +638,14 @@
     const attemptOk = batch.validationSuccess();
     for (const attempt of batch.failedProcessingAttempts) {
       if (attempt.hash !== attemptOk.hash) {
-        if (attemptOk.peer === attempt.peer.toString()) {
-          // The same peer corrected its previous attempt
-          this.reportPeer(attempt.peer, PeerAction.MidToleranceError, "SyncChainInvalidBatchSelf");
-        } else {
-          // A different peer sent an bad batch
-          this.reportPeer(attempt.peer, PeerAction.LowToleranceError, "SyncChainInvalidBatchOther");
+        for (const badAttemptPeer of attempt.peers) {
+          if (attemptOk.peers.find((goodPeer) => goodPeer === badAttemptPeer)) {
+            // The same peer corrected its previous attempt
+            this.reportPeer(badAttemptPeer, PeerAction.MidToleranceError, "SyncChainInvalidBatchSelf");
+          } else {
+            // A different peer sent a bad batch
+            this.reportPeer(badAttemptPeer, PeerAction.LowToleranceError, "SyncChainInvalidBatchOther");
+          }
         }
       }
     }
@@ -649,8 +706,9 @@
       return {action: PeerAction.LowToleranceError, reason: "SyncChainMaxProcessingAttempts"};
 
     // TODO: Should peers be reported for MAX_DOWNLOAD_ATTEMPTS?
-    case BatchErrorCode.WRONG_STATUS:
     case BatchErrorCode.MAX_DOWNLOAD_ATTEMPTS:
+    case BatchErrorCode.INVALID_COUNT:
+    case BatchErrorCode.WRONG_STATUS:
     case BatchErrorCode.MAX_EXECUTION_ENGINE_ERROR_ATTEMPTS:
       return null;
   }
diff --git a/packages/beacon-node/src/sync/range/range.ts b/packages/beacon-node/src/sync/range/range.ts
index 45e90f68c5c1..f6bbb28e09a9 100644
--- a/packages/beacon-node/src/sync/range/range.ts
+++ b/packages/beacon-node/src/sync/range/range.ts
@@ -8,11 +8,12 @@ import {AttestationImportOpt, ImportBlockOpts} from "../../chain/blocks/index.js";
 import {IBeaconChain} from "../../chain/index.js";
 import {Metrics} from "../../metrics/index.js";
 import {INetwork} from "../../network/index.js";
-import {beaconBlocksMaybeBlobsByRange} from "../../network/reqresp/beaconBlocksMaybeBlobsByRange.js";
 import {PeerIdStr} from "../../util/peerId.js";
+import {cacheByRangeResponses, downloadByRange} from "../utils/downloadByRange.js";
 import {RangeSyncType, getRangeSyncTarget, rangeSyncTypes} from "../utils/remoteSyncType.js";
 import {ChainTarget, SyncChain, SyncChainDebugState, SyncChainFns} from "./chain.js";
 import {updateChains} from "./utils/index.js";
+import {IBlockInput} from "../../chain/blocks/blockInput/types.js";
 
 export enum RangeSyncEvent {
   completedChain = "RangeSync-completedChain",
@@ -199,24 +200,29 @@ export class RangeSync extends (EventEmitter as {new (): RangeSyncEmitter}) {
     }
   };
 
-  /** Convenience method for `SyncChain` */
-  private downloadBeaconBlocksByRange: SyncChainFns["downloadBeaconBlocksByRange"] = async (
-    peer,
-    request,
-    partialDownload,
-    syncType: RangeSyncType
-  ) => {
-    return beaconBlocksMaybeBlobsByRange(
-      this.config,
-      this.network,
-      peer,
-      request,
-      this.chain.clock.currentEpoch,
-      partialDownload,
-      syncType,
-      this.metrics,
-      this.logger
-    );
+  private downloadByRange: SyncChainFns["downloadByRange"] = async (peer, batch) => {
+    const batchBlocks = batch.getBlocks();
+    const {result, warnings} = await downloadByRange({
+      config: this.config,
+      network: this.network,
+      logger: this.logger,
+      peerIdStr: peer.peerId,
+      batchBlocks,
+      ...batch.getRequestsForPeer(peer),
+    });
+    const cached = cacheByRangeResponses({
+      cache: this.chain.seenBlockInputCache,
+      peerIdStr: peer.peerId,
+      responses: result,
+      batchBlocks,
+    });
+    return {result: cached, warnings};
+  };
+
+  private
pruneBlockInputs: SyncChainFns["pruneBlockInputs"] = (blocks: IBlockInput[]) => { + for (const block of blocks) { + this.chain.seenBlockInputCache.prune(block.blockRootHex); + } }; /** Convenience method for `SyncChain` */ @@ -247,12 +253,19 @@ export class RangeSync extends (EventEmitter as {new (): RangeSyncEmitter}) { syncType, { processChainSegment: this.processChainSegment, - downloadBeaconBlocksByRange: this.downloadBeaconBlocksByRange, + downloadByRange: this.downloadByRange, reportPeer: this.reportPeer, getConnectedPeerSyncMeta: this.getConnectedPeerSyncMeta, + pruneBlockInputs: this.pruneBlockInputs, onEnd: this.onSyncChainEnd, }, - {config: this.config, logger: this.logger, custodyConfig: this.chain.custodyConfig, metrics: this.metrics} + { + config: this.config, + clock: this.chain.clock, + logger: this.logger, + custodyConfig: this.chain.custodyConfig, + metrics: this.metrics, + } ); this.chains.set(syncType, syncChain); diff --git a/packages/beacon-node/src/sync/range/utils/hashBlocks.ts b/packages/beacon-node/src/sync/range/utils/hashBlocks.ts index 050700217c8a..ee27224d1054 100644 --- a/packages/beacon-node/src/sync/range/utils/hashBlocks.ts +++ b/packages/beacon-node/src/sync/range/utils/hashBlocks.ts @@ -1,23 +1,24 @@ import {ChainForkConfig} from "@lodestar/config"; -import {RootHex} from "@lodestar/types"; +import {RootHex, SignedBeaconBlock} from "@lodestar/types"; import {toRootHex} from "@lodestar/utils"; -import {BlockInput} from "../../../chain/blocks/types.js"; +import {IBlockInput} from "../../../chain/blocks/blockInput/types.js"; /** * String to uniquely identify block segments. Used for peer scoring and to compare if batches are equivalent. */ -export function hashBlocks(blocks: BlockInput[], config: ChainForkConfig): RootHex { +export function hashBlocks(blocks: IBlockInput[], config: ChainForkConfig): RootHex { switch (blocks.length) { case 0: return "0x"; case 1: { - const block0 = blocks[0].block; + const block0 = blocks[0].getBlock(); return toRootHex(config.getForkTypes(block0.message.slot).SignedBeaconBlock.hashTreeRoot(block0)); } default: { - const block0 = blocks[0].block; - const blockN = blocks.at(-1)?.block as BlockInput["block"]; + const block0 = blocks[0].getBlock(); + const blockN = blocks.at(-1)?.getBlock() as SignedBeaconBlock; return ( + // TODO(fulu): should we be doing checks for presence to make sure these do not blow up? toRootHex(config.getForkTypes(block0.message.slot).SignedBeaconBlock.hashTreeRoot(block0)) + toRootHex(config.getForkTypes(blockN.message.slot).SignedBeaconBlock.hashTreeRoot(blockN)) ); diff --git a/packages/beacon-node/src/sync/range/utils/peerBalancer.ts b/packages/beacon-node/src/sync/range/utils/peerBalancer.ts index 2b80ac3caea5..b1f321d94005 100644 --- a/packages/beacon-node/src/sync/range/utils/peerBalancer.ts +++ b/packages/beacon-node/src/sync/range/utils/peerBalancer.ts @@ -1,3 +1,4 @@ +import {isForkPostFulu} from "@lodestar/params"; import {PeerSyncMeta} from "../../../network/peers/peersData.js"; import {CustodyConfig} from "../../../util/dataColumns.js"; import {PeerIdStr} from "../../../util/peerId.js"; @@ -57,8 +58,9 @@ export class ChainPeersBalancer { if (batch.state.status !== BatchStatus.AwaitingDownload) { return; } - const {partialDownload} = batch.state; - const pendingDataColumns = partialDownload?.pendingDataColumns ?? 
this.custodyConfig.sampledColumns; + const {columnsRequest} = batch.requests; + // TODO(fulu): This is fulu specific and hinders our peer selection PreFulu + const pendingDataColumns = columnsRequest?.columns ?? this.custodyConfig.sampledColumns; const eligiblePeers = this.filterPeers(batch, pendingDataColumns, false); const failedPeers = new Set(batch.getFailedPeers()); @@ -116,7 +118,7 @@ export class ChainPeersBalancer { } for (const peer of this.peers) { - const {earliestAvailableSlot, custodyGroups, target, peerId} = peer; + const {earliestAvailableSlot, target, peerId} = peer; const activeRequest = this.activeRequestsByPeer.get(peerId) ?? 0; if (noActiveRequest && activeRequest > 0) { @@ -129,23 +131,23 @@ export class ChainPeersBalancer { continue; } - if (target.slot < batch.request.startSlot) { + if (target.slot < batch.startSlot) { continue; } - if (batch.isPostFulu() && this.syncType === RangeSyncType.Head) { + if (isForkPostFulu(batch.forkName) && this.syncType === RangeSyncType.Head) { // for head sync, target slot is head slot and each peer may have a different head slot // we don't want to retry a batch with a peer that's not as up-to-date as the previous peer // see https://github.com/ChainSafe/lodestar/issues/8193 - const blocks = batch.state.partialDownload?.blocks; - const lastBlock = blocks?.at(-1)?.block; - const lastBlockSlot = lastBlock?.message?.slot; + const blocks = batch.state?.blocks; + const lastBlock = blocks?.at(-1); + const lastBlockSlot = lastBlock?.slot; if (lastBlockSlot && lastBlockSlot > target.slot) { continue; } } - if (!batch.isPostFulu()) { + if (!isForkPostFulu(batch.forkName)) { // pre-fulu logic, we don't care columns and earliestAvailableSlot eligiblePeers.push({syncInfo: peer, columns: 0, hasEarliestAvailableSlots: false}); continue; @@ -157,12 +159,11 @@ export class ChainPeersBalancer { continue; } - if (earliestAvailableSlot > batch.request.startSlot) { + if (earliestAvailableSlot > batch.startSlot) { continue; } - const peerColumns = custodyGroups; - const columns = peerColumns.reduce((acc, elem) => { + const columns = peer.custodyGroups.reduce((acc, elem) => { if (requestColumns.includes(elem)) { acc.push(elem); } diff --git a/packages/beacon-node/src/sync/sync.ts b/packages/beacon-node/src/sync/sync.ts index 1fa49f1f405b..3763c9bee78b 100644 --- a/packages/beacon-node/src/sync/sync.ts +++ b/packages/beacon-node/src/sync/sync.ts @@ -13,7 +13,7 @@ import {IBeaconSync, SyncModules, SyncingStatus} from "./interface.js"; import {SyncChainDebugState, SyncState, syncStateMetric} from "./interface.js"; import {SyncOptions} from "./options.js"; import {RangeSync, RangeSyncEvent, RangeSyncStatus} from "./range/range.js"; -import {UnknownBlockSync} from "./unknownBlock.js"; +import {BlockInputSync} from "./unknownBlock.js"; import {PeerSyncType, getPeerSyncType, peerSyncTypes} from "./utils/remoteSyncType.js"; export class BeaconSync implements IBeaconSync { @@ -24,7 +24,7 @@ export class BeaconSync implements IBeaconSync { private readonly opts: SyncOptions; private readonly rangeSync: RangeSync; - private readonly unknownBlockSync: UnknownBlockSync; + private readonly unknownBlockSync: BlockInputSync; /** For metrics only */ private readonly peerSyncType = new Map(); @@ -38,7 +38,7 @@ export class BeaconSync implements IBeaconSync { this.metrics = metrics; this.logger = logger; this.rangeSync = new RangeSync(modules, opts); - this.unknownBlockSync = new UnknownBlockSync(config, network, chain, logger, metrics, opts); + this.unknownBlockSync = new 
BlockInputSync(config, network, chain, logger, metrics, opts);
     this.slotImportTolerance = opts.slotImportTolerance ?? SLOTS_PER_EPOCH;
 
     // Subscribe to RangeSync completing a SyncChain and recompute sync state
@@ -232,7 +232,7 @@ export class BeaconSync implements IBeaconSync {
       // also start searching for unknown blocks
       if (!this.unknownBlockSync.isSubscribedToNetwork()) {
         this.unknownBlockSync.subscribeToNetwork();
-        this.metrics?.syncUnknownBlock.switchNetworkSubscriptions.inc({action: "subscribed"});
+        this.metrics?.blockInputSync.switchNetworkSubscriptions.inc({action: "subscribed"});
       }
     }
 
@@ -256,7 +256,7 @@ export class BeaconSync implements IBeaconSync {
       // also stop searching for unknown blocks
       if (this.unknownBlockSync.isSubscribedToNetwork()) {
         this.unknownBlockSync.unsubscribeFromNetwork();
-        this.metrics?.syncUnknownBlock.switchNetworkSubscriptions.inc({action: "unsubscribed"});
+        this.metrics?.blockInputSync.switchNetworkSubscriptions.inc({action: "unsubscribed"});
       }
     }
   }
 }
diff --git a/packages/beacon-node/src/sync/types.ts b/packages/beacon-node/src/sync/types.ts
new file mode 100644
index 000000000000..d37dda1b5460
--- /dev/null
+++ b/packages/beacon-node/src/sync/types.ts
@@ -0,0 +1,57 @@
+import {IBlockInput} from "../chain/blocks/blockInput/index.js";
+import {RootHex, Slot} from "@lodestar/types";
+
+export enum PendingBlockType {
+  /**
+   * We got a block root (from a gossip attestation, for example) but we don't have the block in forkchoice.
+   */
+  UNKNOWN_BLOCK_ROOT = "UnknownBlockRoot",
+  /**
+   * During gossip time, we may get a block but the parent root is unknown (not in forkchoice).
+   */
+  UNKNOWN_PARENT = "unknown_parent",
+  /**
+   * During gossip we wait for a set amount of time to receive the complete block input but if it does not
+   * arrive in time we turn to req/resp to pull the remainder so that it can be processed
+   */
+  INCOMPLETE_BLOCK_INPUT = "IncompleteBlockInput",
+
+  UNKNOWN_DATA = "unknown_data",
+}
+
+export enum PendingBlockInputStatus {
+  pending = "pending",
+  fetching = "fetching",
+  downloaded = "downloaded",
+  processing = "processing",
+}
+
+export type PendingBlockInput = {
+  status: PendingBlockInputStatus;
+  blockInput: IBlockInput;
+  timeAddedSec: number;
+  timeSyncedSec?: number;
+  peerIdStrings: Set<string>;
+};
+
+export type PendingRootHex = {
+  status: PendingBlockInputStatus.pending | PendingBlockInputStatus.fetching;
+  rootHex: RootHex;
+  timeAddedSec: number;
+  timeSyncedSec?: number;
+  peerIdStrings: Set<string>;
+};
+
+export type BlockInputSyncCacheItem = PendingBlockInput | PendingRootHex;
+
+export function isPendingBlockInput(pending: BlockInputSyncCacheItem): pending is PendingBlockInput {
+  return "blockInput" in pending;
+}
+
+export function getBlockInputSyncCacheItemRootHex(block: BlockInputSyncCacheItem): RootHex {
+  return isPendingBlockInput(block) ? block.blockInput.blockRootHex : block.rootHex;
+}
+
+export function getBlockInputSyncCacheItemSlot(block: BlockInputSyncCacheItem): Slot | string {
+  return isPendingBlockInput(block) ? block.blockInput.slot : "unknown";
+}
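
As a quick illustration of how the helpers above compose (hypothetical consumer code, not part of this changeset), the isPendingBlockInput guard narrows a BlockInputSyncCacheItem before its blockInput is accessed:

function describeCacheItem(item: BlockInputSyncCacheItem): string {
  const rootHex = getBlockInputSyncCacheItemRootHex(item);
  const slot = getBlockInputSyncCacheItemSlot(item);
  if (isPendingBlockInput(item)) {
    // narrowed to PendingBlockInput: the blockInput (and its slot) is available
    return `blockInput root=${rootHex} slot=${slot} status=${item.status}`;
  }
  // narrowed to PendingRootHex: only the root is known so far, slot prints as "unknown"
  return `rootHex root=${rootHex} slot=${slot} peers=${item.peerIdStrings.size}`;
}
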
diff --git a/packages/beacon-node/src/sync/unknownBlock.ts b/packages/beacon-node/src/sync/unknownBlock.ts
index d0d4f0982c8c..c0d462398f10 100644
--- a/packages/beacon-node/src/sync/unknownBlock.ts
+++ b/packages/beacon-node/src/sync/unknownBlock.ts
@@ -1,47 +1,78 @@
 import {ChainForkConfig} from "@lodestar/config";
-import {ForkName, ForkSeq, INTERVALS_PER_SLOT} from "@lodestar/params";
-import {ColumnIndex, Root, RootHex, deneb} from "@lodestar/types";
-import {BlobAndProof} from "@lodestar/types/deneb";
-import {Logger, fromHex, pruneSetToMax, toRootHex} from "@lodestar/utils";
-import {sleep} from "@lodestar/utils";
-import {BlockInput, BlockInputType, CachedData, CachedDataColumns, NullBlockInput} from "../chain/blocks/types.js";
+import {ForkSeq, INTERVALS_PER_SLOT} from "@lodestar/params";
+import {RootHex} from "@lodestar/types";
+import {sleep, Logger, prettyBytes, prettyPrintIndices, pruneSetToMax} from "@lodestar/utils";
+import {isBlockInputBlobs, isBlockInputColumns} from "../chain/blocks/blockInput/blockInput.js";
+import {BlockInputSource, IBlockInput} from "../chain/blocks/blockInput/types.js";
 import {BlockError, BlockErrorCode} from "../chain/errors/index.js";
-import {IBeaconChain} from "../chain/index.js";
+import {ChainEvent, ChainEventData, IBeaconChain} from "../chain/index.js";
 import {Metrics} from "../metrics/index.js";
-import {INetwork, NetworkEvent, NetworkEventData} from "../network/index.js";
+import {INetwork, NetworkEvent, NetworkEventData, prettyPrintPeerIdStr} from "../network/index.js";
 import {PeerSyncMeta} from "../network/peers/peersData.js";
-import {PartialDownload} from "../network/reqresp/beaconBlocksMaybeBlobsByRange.js";
-import {
-  beaconBlocksMaybeBlobsByRoot,
-  unavailableBeaconBlobsByRoot,
-} from "../network/reqresp/beaconBlocksMaybeBlobsByRoot.js";
-import {byteArrayEquals} from "../util/bytes.js";
-import {CustodyConfig} from "../util/dataColumns.js";
 import {PeerIdStr} from "../util/peerId.js";
 import {shuffle} from "../util/shuffle.js";
 import {sortBy} from "../util/sortBy.js";
-import {Result, wrapError} from "../util/wrapError.js";
+import {wrapError} from "../util/wrapError.js";
 import {MAX_CONCURRENT_REQUESTS} from "./constants.js";
-import {PendingBlock, PendingBlockStatus, PendingBlockType} from "./interface.js";
 import {SyncOptions} from "./options.js";
+import {
+  BlockInputSyncCacheItem,
+  PendingBlockInput,
+  PendingBlockInputStatus,
+  PendingBlockType,
+  getBlockInputSyncCacheItemRootHex,
+  getBlockInputSyncCacheItemSlot,
+  isPendingBlockInput,
+} from "./types.js";
+import {DownloadByRootError, downloadByRoot} from "./utils/downloadByRoot.js";
 import {getAllDescendantBlocks, getDescendantBlocks, getUnknownAndAncestorBlocks} from "./utils/pendingBlocksTree.js";
+import {RequestError} from "@lodestar/reqresp";
 
 const MAX_ATTEMPTS_PER_BLOCK = 5;
 const MAX_KNOWN_BAD_BLOCKS = 500;
 const MAX_PENDING_BLOCKS = 100;
 
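For orientation, a minimal sketch of how the class below is driven (hypothetical usage; the constructor arguments mirror the BeaconSync change in sync.ts above):

const blockInputSync = new BlockInputSync(config, network, chain, logger, metrics, opts);

// while following the head, react to unknown roots, unknown parents and incomplete block inputs
blockInputSync.subscribeToNetwork();

// when range sync takes over, or on shutdown, stop searching and detach listeners
blockInputSync.unsubscribeFromNetwork();
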
MAX_PENDING_BLOCKS; this.proposerBoostSecWindow = this.config.SECONDS_PER_SLOT / INTERVALS_PER_SLOT; - this.peerBalancer = new UnknownBlockPeerBalancer(this.network.custodyConfig); + this.peerBalancer = new UnknownBlockPeerBalancer(); if (metrics) { - metrics.syncUnknownBlock.pendingBlocks.addCollect(() => { - metrics.syncUnknownBlock.pendingBlocks.set(this.pendingBlocks.size); - metrics.syncUnknownBlock.knownBadBlocks.set(this.knownBadBlocks.size); - metrics.syncUnknownBlock.peerBalancer.peersMetaCount.set(this.peerBalancer.peersMeta.size); - metrics.syncUnknownBlock.peerBalancer.peersActiveRequestCount.set(this.peerBalancer.activeRequests.size); - metrics.syncUnknownBlock.peerBalancer.totalActiveRequests.set(this.peerBalancer.getTotalActiveRequests()); - }); + metrics.blockInputSync.pendingBlocks.addCollect(() => + metrics.blockInputSync.pendingBlocks.set(this.pendingBlocks.size) + ); + metrics.blockInputSync.knownBadBlocks.addCollect(() => + metrics.blockInputSync.knownBadBlocks.set(this.knownBadBlocks.size) + ); } } subscribeToNetwork(): void { - if (!this.opts?.disableUnknownBlockSync) { - // cannot chain to the above if or the log will be incorrect - if (!this.subscribedToNetworkEvents) { - this.logger.verbose("UnknownBlockSync enabled."); - this.network.events.on(NetworkEvent.unknownBlock, this.onUnknownBlock); - this.network.events.on(NetworkEvent.unknownBlockInput, this.onUnknownBlockInput); - this.network.events.on(NetworkEvent.unknownBlockParent, this.onUnknownParent); - this.network.events.on(NetworkEvent.peerConnected, this.onPeerConnected); - this.network.events.on(NetworkEvent.peerDisconnected, this.onPeerDisconnected); - this.subscribedToNetworkEvents = true; - } - } else { - this.logger.verbose("UnknownBlockSync disabled by disableUnknownBlockSync option."); + if (this.opts?.disableBlockInputSync) { + this.logger.verbose("BlockInputSync disabled by disableBlockInputSync option."); + return; + } + + // cannot chain to the above if or the log will be incorrect + if (!this.subscribedToNetworkEvents) { + this.logger.verbose("BlockInputSync enabled."); + this.chain.emitter.on(ChainEvent.unknownBlockRoot, this.onUnknownBlockRoot); + this.chain.emitter.on(ChainEvent.incompleteBlockInput, this.onIncompleteBlockInput); + this.chain.emitter.on(ChainEvent.unknownParent, this.onUnknownParent); + this.network.events.on(NetworkEvent.peerConnected, this.onPeerConnected); + this.network.events.on(NetworkEvent.peerDisconnected, this.onPeerDisconnected); + this.subscribedToNetworkEvents = true; } } unsubscribeFromNetwork(): void { - this.logger.verbose("UnknownBlockSync disabled."); - this.network.events.off(NetworkEvent.unknownBlock, this.onUnknownBlock); - this.network.events.off(NetworkEvent.unknownBlockInput, this.onUnknownBlockInput); - this.network.events.off(NetworkEvent.unknownBlockParent, this.onUnknownParent); + this.logger.verbose("BlockInputSync disabled."); + this.chain.emitter.off(ChainEvent.unknownBlockRoot, this.onUnknownBlockRoot); + this.chain.emitter.off(ChainEvent.incompleteBlockInput, this.onIncompleteBlockInput); + this.chain.emitter.off(ChainEvent.unknownParent, this.onUnknownParent); this.network.events.off(NetworkEvent.peerConnected, this.onPeerConnected); this.network.events.off(NetworkEvent.peerDisconnected, this.onPeerDisconnected); this.subscribedToNetworkEvents = false; @@ -96,7 +127,6 @@ export class UnknownBlockSync { close(): void { this.unsubscribeFromNetwork(); - // add more in the future if needed } isSubscribedToNetwork(): boolean { @@ -106,147 +136,104 
@@ export class UnknownBlockSync { /** * Process an unknownBlock event and register the block in `pendingBlocks` Map. */ - private onUnknownBlock = (data: NetworkEventData[NetworkEvent.unknownBlock]): void => { + private onUnknownBlockRoot = (data: ChainEventData[ChainEvent.unknownBlockRoot]): void => { try { - const unknownBlockType = this.addUnknownBlock(data.rootHex, data.peer); + this.addByRootHex(data.rootHex, data.peer); this.triggerUnknownBlockSearch(); - this.metrics?.syncUnknownBlock.requests.inc({type: unknownBlockType}); + this.metrics?.blockInputSync.requests.inc({type: PendingBlockType.UNKNOWN_BLOCK_ROOT}); + this.metrics?.blockInputSync.source.inc({source: data.source}); } catch (e) { - this.logger.debug("Error handling unknownBlock event", {}, e as Error); + this.logger.debug("Error handling unknownBlockRoot event", {}, e as Error); } }; /** * Process an unknownBlockInput event and register the block in `pendingBlocks` Map. */ - private onUnknownBlockInput = (data: NetworkEventData[NetworkEvent.unknownBlockInput]): void => { + private onIncompleteBlockInput = (data: ChainEventData[ChainEvent.incompleteBlockInput]): void => { try { - const unknownBlockType = this.addUnknownBlock(data.blockInput, data.peer); + this.addByBlockInput(data.blockInput, data.peer); this.triggerUnknownBlockSearch(); - this.metrics?.syncUnknownBlock.requests.inc({type: unknownBlockType}); + this.metrics?.blockInputSync.requests.inc({type: PendingBlockType.INCOMPLETE_BLOCK_INPUT}); + this.metrics?.blockInputSync.source.inc({source: data.source}); } catch (e) { - this.logger.debug("Error handling unknownBlockInput event", {}, e as Error); + this.logger.debug("Error handling incompleteBlockInput event", {}, e as Error); } }; /** * Process an unknownBlockParent event and register the block in `pendingBlocks` Map. */ - private onUnknownParent = (data: NetworkEventData[NetworkEvent.unknownBlockParent]): void => { + private onUnknownParent = (data: ChainEventData[ChainEvent.unknownParent]): void => { try { - this.addUnknownParent(data.blockInput, data.peer); + this.addByRootHex(data.blockInput.parentRootHex, data.peer); + this.addByBlockInput(data.blockInput, data.peer); this.triggerUnknownBlockSearch(); - this.metrics?.syncUnknownBlock.requests.inc({type: PendingBlockType.UNKNOWN_PARENT}); + this.metrics?.blockInputSync.requests.inc({type: PendingBlockType.UNKNOWN_PARENT}); + this.metrics?.blockInputSync.source.inc({source: data.source}); } catch (e) { - this.logger.debug("Error handling unknownBlockParent event", {}, e as Error); + this.logger.debug("Error handling unknownParent event", {}, e as Error); } }; - /** - * When a blockInput comes with an unknown parent: - * - add the block to pendingBlocks with status downloaded or pending blockRootHex as key. This is similar to - * an `onUnknownBlock` event, but the blocks is downloaded. - * - add the parent root to pendingBlocks with status pending, parentBlockRootHex as key. This is - * the same to an `onUnknownBlock` event with parentBlockRootHex as root. 
- */ - private addUnknownParent(blockInput: BlockInput, peerIdStr: string): void { - const block = blockInput.block.message; - const blockRoot = this.config.getForkTypes(block.slot).BeaconBlock.hashTreeRoot(block); - const blockRootHex = toRootHex(blockRoot); - const parentBlockRootHex = toRootHex(block.parentRoot); - - // add 1 pending block with status downloaded - let pendingBlock = this.pendingBlocks.get(blockRootHex); + private addByRootHex = (rootHex: RootHex, peerIdStr?: PeerIdStr): void => { + let pendingBlock = this.pendingBlocks.get(rootHex); if (!pendingBlock) { - pendingBlock = - blockInput.type === BlockInputType.dataPromise - ? { - unknownBlockType: PendingBlockType.UNKNOWN_DATA, - blockRootHex, - // this will be set after we download block - parentBlockRootHex: null, - blockInput, - peerIdStrs: new Set(), - status: PendingBlockStatus.pending, - downloadAttempts: 0, - } - : { - blockRootHex, - parentBlockRootHex, - blockInput, - peerIdStrs: new Set(), - status: PendingBlockStatus.downloaded, - downloadAttempts: 0, - }; - this.pendingBlocks.set(blockRootHex, pendingBlock); - this.logger.verbose("Added unknown block parent to pendingBlocks", { - root: blockRootHex, - parent: parentBlockRootHex, + pendingBlock = { + status: PendingBlockInputStatus.pending, + rootHex: rootHex, + peerIdStrings: new Set(), + timeAddedSec: Date.now() / 1000, + }; + this.pendingBlocks.set(rootHex, pendingBlock); + + this.logger.verbose("Added new rootHex to BlockInputSync.pendingBlocks", { + rootHex: prettyBytes(pendingBlock.rootHex), + peerIdStr: peerIdStr ?? "unknown peer", }); } - pendingBlock.peerIdStrs.add(peerIdStr); - // add 1 pending block with status pending - this.addUnknownBlock(parentBlockRootHex, peerIdStr); - } + if (peerIdStr) { + pendingBlock.peerIdStrings.add(peerIdStr); + } - private addUnknownBlock( - blockInputOrRootHex: RootHex | BlockInput | NullBlockInput, - peerIdStr?: string - ): Exclude { - let blockRootHex: RootHex; - let blockInput: BlockInput | NullBlockInput | null; - let unknownBlockType: Exclude; - - if (typeof blockInputOrRootHex === "string") { - blockRootHex = blockInputOrRootHex; - blockInput = null; - unknownBlockType = PendingBlockType.UNKNOWN_BLOCK; - } else { - if (blockInputOrRootHex.block !== null) { - const {block} = blockInputOrRootHex; - blockRootHex = toRootHex(this.config.getForkTypes(block.message.slot).BeaconBlock.hashTreeRoot(block.message)); - unknownBlockType = PendingBlockType.UNKNOWN_DATA; - } else { - unknownBlockType = PendingBlockType.UNKNOWN_BLOCKINPUT; - blockRootHex = blockInputOrRootHex.blockRootHex; - } - blockInput = blockInputOrRootHex; + // TODO: check this prune methodology + // Limit pending blocks to prevent DOS attacks that cause OOM + const prunedItemCount = pruneSetToMax(this.pendingBlocks, this.maxPendingBlocks); + if (prunedItemCount > 0) { + this.logger.verbose(`Pruned ${prunedItemCount} items from BlockInputSync.pendingBlocks`); } + }; - let pendingBlock = this.pendingBlocks.get(blockRootHex); - if (!pendingBlock) { + private addByBlockInput = (blockInput: IBlockInput, peerIdStr?: string): void => { + let pendingBlock = this.pendingBlocks.get(blockInput.blockRootHex); + // if entry is missing or was added via rootHex and now we have more complete information overwrite + // the existing information with the more complete cache entry + if (!pendingBlock || !isPendingBlockInput(pendingBlock)) { pendingBlock = { - unknownBlockType, - blockRootHex, - // this will be set after we download block - parentBlockRootHex: null, + // can 
be added via unknown parent and we may already have full block input. need to check and set correctly + // so we pull the data if its missing or handle the block correctly in getIncompleteAndAncestorBlocks + status: blockInput.hasBlockAndAllData() ? PendingBlockInputStatus.downloaded : PendingBlockInputStatus.pending, blockInput, - peerIdStrs: new Set(), - status: PendingBlockStatus.pending, - downloadAttempts: 0, - } as PendingBlock; - this.pendingBlocks.set(blockRootHex, pendingBlock); - - this.logger.verbose("Added unknown block to pendingBlocks", { - unknownBlockType, - root: blockRootHex, - slot: blockInput?.block?.message.slot ?? "unknown", - }); + peerIdStrings: new Set(), + timeAddedSec: Date.now() / 1000, + }; + this.pendingBlocks.set(blockInput.blockRootHex, pendingBlock); + + this.logger.verbose("Added blockInput to BlockInputSync.pendingBlocks", pendingBlock.blockInput.getLogMeta()); } if (peerIdStr) { - pendingBlock.peerIdStrs.add(peerIdStr); + pendingBlock.peerIdStrings.add(peerIdStr); } + // TODO: check this prune methodology // Limit pending blocks to prevent DOS attacks that cause OOM const prunedItemCount = pruneSetToMax(this.pendingBlocks, this.maxPendingBlocks); if (prunedItemCount > 0) { - this.logger.warn(`Pruned ${prunedItemCount} pending blocks from UnknownBlockSync`); + this.logger.verbose(`Pruned ${prunedItemCount} items from BlockInputSync.pendingBlocks`); } - - return unknownBlockType; - } + }; private onPeerConnected = (data: NetworkEventData[NetworkEvent.peerConnected]): void => { try { @@ -288,7 +275,7 @@ export class UnknownBlockSync { for (const block of ancestors) { // when this happens, it's likely the block and parent block are processed by head sync - if (this.chain.forkChoice.hasBlockHex(block.parentBlockRootHex)) { + if (this.chain.forkChoice.hasBlockHex(block.blockInput.parentRootHex)) { processedBlocks++; this.processBlock(block).catch((e) => { this.logger.debug("Unexpected error - process old downloaded block", {}, e); @@ -307,72 +294,50 @@ export class UnknownBlockSync { // most of the time there is exactly 1 unknown block for (const block of unknowns) { this.downloadBlock(block).catch((e) => { - this.logger.debug("Unexpected error - downloadBlock", {root: block.blockRootHex}, e); + this.logger.debug("Unexpected error - downloadBlock", {root: getBlockInputSyncCacheItemRootHex(block)}, e); }); } }; - private async downloadBlock(block: PendingBlock): Promise { - if (block.status !== PendingBlockStatus.pending) { + private async downloadBlock(block: BlockInputSyncCacheItem): Promise { + if (block.status !== PendingBlockInputStatus.pending) { return; } - const unknownBlockType = block.unknownBlockType; + const rootHex = getBlockInputSyncCacheItemRootHex(block); const logCtx = { - root: block.blockRootHex, + slot: getBlockInputSyncCacheItemSlot(block), + blockRoot: rootHex, pendingBlocks: this.pendingBlocks.size, - slot: block.blockInput?.block?.message.slot ?? 
"unknown", - unknownBlockType, }; - this.logger.verbose("Downloading unknown block", logCtx); - - block.status = PendingBlockStatus.fetching; + this.logger.verbose("BlockInputSync.downloadBlock()", logCtx); - let res: Result<{blockInput: BlockInput; peerIdStr: string}>; - if (block.blockInput === null) { - // we only have block root, and nothing else - res = await wrapError(this.fetchUnknownBlockRoot(fromHex(block.blockRootHex))); - } else { - res = await wrapError(this.fetchUnavailableBlockInput(block.blockInput)); - } + block.status = PendingBlockInputStatus.fetching; - if (res.err) this.metrics?.syncUnknownBlock.downloadedBlocksError.inc(); - else this.metrics?.syncUnknownBlock.downloadedBlocksSuccess.inc(); + const res = await wrapError(this.fetchBlockInput(block)); if (!res.err) { - const {blockInput, peerIdStr} = res.result; - // fetchUnknownBlockRoot and fetchUnavailableBlockInput should return available data BlockInput, throw error if not - if (blockInput.type === BlockInputType.dataPromise) { - // if there were any peers who would have had the missing datacolumns, it would have resulted in err - throw Error(`Expected BlockInput to be available, got dataPromise for ${block.blockRootHex}`); - } - - block = { - ...block, - status: PendingBlockStatus.downloaded, - blockInput, - parentBlockRootHex: toRootHex(blockInput.block.message.parentRoot), - }; - this.pendingBlocks.set(block.blockRootHex, block); - const blockSlot = blockInput.block.message.slot; + this.metrics?.blockInputSync.downloadedBlocksSuccess.inc(); + const pending = res.result; + this.pendingBlocks.set(pending.blockInput.blockRootHex, pending); + const blockSlot = pending.blockInput.slot; const finalizedSlot = this.chain.forkChoice.getFinalizedBlock().slot; const delaySec = Date.now() / 1000 - (this.chain.genesisTime + blockSlot * this.config.SECONDS_PER_SLOT); - this.metrics?.syncUnknownBlock.elapsedTimeTillReceived.observe(delaySec); + this.metrics?.blockInputSync.elapsedTimeTillReceived.observe(delaySec); - const parentInForkchoice = this.chain.forkChoice.hasBlock(blockInput.block.message.parentRoot); - this.logger.verbose("Downloaded unknown block", { - root: block.blockRootHex, - pendingBlocks: this.pendingBlocks.size, - parentInForkchoice, - blockInputType: blockInput.type, - unknownBlockType, - }); + const parentInForkChoice = this.chain.forkChoice.hasBlockHex(pending.blockInput.parentRootHex); + const logCtx2 = { + ...logCtx, + slot: blockSlot, + parentInForkChoice, + }; + this.logger.verbose("Downloaded unknown block", logCtx2); - if (parentInForkchoice) { + if (parentInForkChoice) { // Bingo! Process block. Add to pending blocks anyway for recycle the cache that prevents duplicate processing - this.processBlock(block).catch((e) => { - this.logger.debug("Unexpected error - process newly downloaded block", {}, e); + this.processBlock(pending).catch((e) => { + this.logger.debug("Unexpected error - process newly downloaded block", logCtx2, e); }); } else if (blockSlot <= finalizedSlot) { // the common ancestor of the downloading chain and canonical chain should be at least the finalized slot and @@ -380,31 +345,18 @@ export class UnknownBlockSync { // 0 - 1 - ... - n - finalizedSlot // \ // parent 1 - parent 2 - ... 
- unknownParent block - const blockRoot = this.config.getForkTypes(blockSlot).BeaconBlock.hashTreeRoot(blockInput.block.message); this.logger.debug("Downloaded block is before finalized slot", { + ...logCtx2, finalizedSlot, - blockSlot, - parentRoot: toRootHex(blockRoot), - unknownBlockType, }); - this.removeAndDownscoreAllDescendants(block); + this.removeAndDownScoreAllDescendants(block); } else { - this.onUnknownParent({blockInput, peer: peerIdStr}); + this.onUnknownBlockRoot({rootHex: pending.blockInput.parentRootHex, source: BlockInputSource.byRoot}); } } else { - // block download has error, this allows to retry the download of the block - block.status = PendingBlockStatus.pending; - // parentSlot > finalizedSlot, continue downloading parent of parent - block.downloadAttempts++; - const errorData = {root: block.blockRootHex, attempts: block.downloadAttempts, unknownBlockType}; - if (block.downloadAttempts > MAX_ATTEMPTS_PER_BLOCK) { - // Give up on this block and assume it does not exist, penalizing all peers as if it was a bad block - this.logger.debug("Ignoring unknown block root after many failed downloads", errorData, res.err); - this.removeAndDownscoreAllDescendants(block); - } else { - // Try again when a new peer connects, its status changes, or a new unknownBlockParent event happens - this.logger.debug("Error downloading unknown block root", errorData, res.err); - } + this.metrics?.blockInputSync.downloadedBlocksError.inc(); + this.logger.debug("Ignoring unknown block root after many failed downloads", logCtx, res.err); + this.removeAndDownScoreAllDescendants(block); } } @@ -413,14 +365,16 @@ export class UnknownBlockSync { * On error, remove and downscore all descendants. * This function could run recursively for all descendant blocks */ - private async processBlock(pendingBlock: PendingBlock): Promise { + private async processBlock(pendingBlock: PendingBlockInput): Promise { // pending block status is `downloaded` right after `downloadBlock` // but could be `pending` if added by `onUnknownBlockParent` event and this function is called recursively - if (pendingBlock.status !== PendingBlockStatus.downloaded) { - if (pendingBlock.status === PendingBlockStatus.pending) { + if (pendingBlock.status !== PendingBlockInputStatus.downloaded) { + if (pendingBlock.status === PendingBlockInputStatus.pending) { const connectedPeers = this.network.getConnectedPeers(); if (connectedPeers.length === 0) { - this.logger.debug("No connected peers, skipping download block", {blockRoot: pendingBlock.blockRootHex}); + this.logger.debug("No connected peers, skipping download block", { + blockRoot: pendingBlock.blockInput.blockRootHex, + }); return; } // if the download is a success we'll call `processBlock()` for this block @@ -429,22 +383,19 @@ export class UnknownBlockSync { return; } - pendingBlock.status = PendingBlockStatus.processing; + pendingBlock.status = PendingBlockInputStatus.processing; // this prevents unbundling attack // see https://lighthouse-blog.sigmaprime.io/mev-unbundling-rpc.html - const {slot: blockSlot, proposerIndex} = pendingBlock.blockInput.block.message; + const {slot: blockSlot, proposerIndex} = pendingBlock.blockInput.getBlock().message; if ( this.chain.clock.secFromSlot(blockSlot) < this.proposerBoostSecWindow && this.chain.seenBlockProposers.isKnown(blockSlot, proposerIndex) ) { // proposer is known by a gossip block already, wait a bit to make sure this block is not // eligible for proposer boost to prevent unbundling attack - const blockRoot = this.config - 
.getForkTypes(blockSlot) - .BeaconBlock.hashTreeRoot(pendingBlock.blockInput.block.message); this.logger.verbose("Avoid proposer boost for this block of known proposer", { blockSlot, - blockRoot: toRootHex(blockRoot), + blockRoot: prettyBytes(pendingBlock.blockInput.blockRootHex), proposerIndex, }); await sleep(this.proposerBoostSecWindow * 1000); @@ -463,25 +414,29 @@ export class UnknownBlockSync { ignoreIfFinalized: true, blsVerifyOnMainThread: true, // block is validated with correct root, we want to process it as soon as possible - eagerPersistBlock: true, + // however, due to other optimizations, we don't eagerly persist the block + eagerPersistBlock: false, }) ); - if (res.err) this.metrics?.syncUnknownBlock.processedBlocksError.inc(); - else this.metrics?.syncUnknownBlock.processedBlocksSuccess.inc(); + if (res.err) this.metrics?.blockInputSync.processedBlocksError.inc(); + else this.metrics?.blockInputSync.processedBlocksSuccess.inc(); if (!res.err) { // no need to update status to "processed", delete anyway - this.pendingBlocks.delete(pendingBlock.blockRootHex); + this.pendingBlocks.delete(pendingBlock.blockInput.blockRootHex); + this.chain.seenBlockInputCache.prune(pendingBlock.blockInput.blockRootHex); // Send child blocks to the processor - for (const descendantBlock of getDescendantBlocks(pendingBlock.blockRootHex, this.pendingBlocks)) { - this.processBlock(descendantBlock).catch((e) => { - this.logger.debug("Unexpected error - process descendant block", {}, e); - }); + for (const descendantBlock of getDescendantBlocks(pendingBlock.blockInput.blockRootHex, this.pendingBlocks)) { + if (isPendingBlockInput(descendantBlock)) { + this.processBlock(descendantBlock).catch((e) => { + this.logger.debug("Unexpected error - process descendant block", {}, e); + }); + } } } else { - const errorData = {root: pendingBlock.blockRootHex, slot: pendingBlock.blockInput.block.message.slot}; + const errorData = {root: pendingBlock.blockInput.blockRootHex, slot: pendingBlock.blockInput.slot}; if (res.err instanceof BlockError) { switch (res.err.type.code) { // This cases are already handled with `{ignoreIfKnown: true}` @@ -492,7 +447,7 @@ export class UnknownBlockSync { case BlockErrorCode.PRESTATE_MISSING: // Should not happen, mark as downloaded to try again latter this.logger.debug("Attempted to process block but its parent was still unknown", errorData, res.err); - pendingBlock.status = PendingBlockStatus.downloaded; + pendingBlock.status = PendingBlockInputStatus.downloaded; break; case BlockErrorCode.EXECUTION_ENGINE_ERROR: @@ -504,14 +459,14 @@ export class UnknownBlockSync { default: // Block is not correct with respect to our chain. Log error loudly this.logger.debug("Error processing block from unknown parent sync", errorData, res.err); - this.removeAndDownscoreAllDescendants(pendingBlock); + this.removeAndDownScoreAllDescendants(pendingBlock); } } // Probably a queue error or something unwanted happened, mark as pending to try again latter else { this.logger.debug("Unknown error processing block from unknown block sync", errorData, res.err); - pendingBlock.status = PendingBlockStatus.downloaded; + pendingBlock.status = PendingBlockInputStatus.downloaded; } } } @@ -525,188 +480,104 @@ export class UnknownBlockSync { * prefulu, will attempt a max of `MAX_ATTEMPTS_PER_BLOCK` on different peers, postfulu we may attempt more as defined in `getMaxDownloadAttempts()` function * Also verifies the received block root + returns the peer that provided the block for future downscoring. 
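+ *
+ * Loosely, each attempt of the retry loop below has this shape (a condensed restatement of the body that follows,
+ * not additional behavior):
+ *
+ *   const peerMeta = this.peerBalancer.bestPeerForPendingColumns(pendingColumns, excludedPeers); // null => throw
+ *   cacheItem = (await downloadByRoot({config, network, seenCache, peerMeta, cacheItem})).result;
+ *   if (cacheItem.status === PendingBlockInputStatus.downloaded) return cacheItem;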
*/ - private async fetchUnknownBlockRoot(blockRoot: Root): Promise<{blockInput: BlockInput; peerIdStr: string}> { - const blockRootHex = toRootHex(blockRoot); - + private async fetchBlockInput(cacheItem: BlockInputSyncCacheItem): Promise { + const rootHex = getBlockInputSyncCacheItemRootHex(cacheItem); const excludedPeers = new Set(); - let partialDownload: PartialDownload | null = null; const defaultPendingColumns = this.config.getForkSeq(this.chain.clock.currentSlot) >= ForkSeq.fulu - ? new Set(this.network.custodyConfig.sampleGroups) + ? new Set(this.network.custodyConfig.sampledColumns) : null; - let lastError: Error | null = null; + let i = 0; while (i++ < this.getMaxDownloadAttempts()) { - // pendingDataColumns is null prefulu - const peer = this.peerBalancer.bestPeerForPendingColumns( - partialDownload ? new Set(partialDownload.pendingDataColumns) : defaultPendingColumns, - excludedPeers - ); - if (peer === null) { + const pendingColumns = + isPendingBlockInput(cacheItem) && isBlockInputColumns(cacheItem.blockInput) + ? new Set(cacheItem.blockInput.getMissingSampledColumnMeta().missing) + : defaultPendingColumns; + // pendingDataColumns is null pre-fulu + const peerMeta = this.peerBalancer.bestPeerForPendingColumns(pendingColumns, excludedPeers); + if (peerMeta === null) { // no more peer with needed columns to try, throw error - throw Error( - `Error fetching UnknownBlockRoot after ${i}: cannot find peer with needed columns ${partialDownload?.pendingDataColumns.join(", ")}` - ); + let message = `Error fetching UnknownBlockRoot after ${i}: cannot find peer`; + if (pendingColumns) { + message += ` with needed columns=${prettyPrintIndices(Array.from(pendingColumns))}`; + } + throw Error(message); } - const {peerId, client: peerClient} = peer; + const {peerId, client: peerClient} = peerMeta; excludedPeers.add(peerId); - try { - const { - blocks: [blockInput], - pendingDataColumns, - } = await beaconBlocksMaybeBlobsByRoot( - this.config, - this.network, - peerId, - [blockRoot], - partialDownload, - peerClient, - this.metrics, - this.logger - ); - - // Peer does not have the block, try with next peer - if (blockInput === undefined) { - continue; - } - - if (pendingDataColumns !== null) { - partialDownload = {blocks: [blockInput], pendingDataColumns}; - continue; - } + cacheItem.peerIdStrings.add(peerId); - // data is available, verify block root is correct - const block = blockInput.block.message; - const receivedBlockRoot = this.config.getForkTypes(block.slot).BeaconBlock.hashTreeRoot(block); - if (!byteArrayEquals(receivedBlockRoot, blockRoot)) { - throw Error(`Wrong block received by peer, got ${toRootHex(receivedBlockRoot)} expected ${blockRootHex}`); + try { + const downloadResult = await downloadByRoot({ + config: this.config, + network: this.network, + seenCache: this.chain.seenBlockInputCache, + peerMeta, + cacheItem, + }); + cacheItem = downloadResult.result; + const logCtx = {slot: cacheItem.blockInput.slot, rootHex, peerId, peerClient}; + this.logger.verbose("BlockInputSync.fetchBlockInput: successful download", logCtx); + this.metrics?.blockInputSync.downloadByRoot.success.inc(); + const warnings = downloadResult.warnings; + if (warnings) { + for (const warning of warnings) { + this.logger.debug("BlockInputSync.fetchBlockInput: downloaded with warning", logCtx, warning); + this.metrics?.blockInputSync.downloadByRoot.warn.inc({code: warning.type.code, client: peerClient}); + } + // TODO: penalize peer? 
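+          // A hypothetical resolution of the TODO above, mirroring the (currently commented-out) reportPeer call
+          // in removeAndDownScoreAllDescendants, could be:
+          //   this.network.reportPeer(peerId, PeerAction.LowToleranceError, "DownloadByRootWarning");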
} - - return {blockInput, peerIdStr: peerId}; } catch (e) { - this.logger.debug("Error fetching UnknownBlockRoot", {attempt: i, blockRootHex, peer: peerId}, e as Error); - lastError = e as Error; + this.logger.debug( + "Error downloading in BlockInputSync.fetchBlockInput", + {attempt: i, rootHex, peer: peerId, peerClient}, + e as Error + ); + const downloadByRootMetrics = this.metrics?.blockInputSync.downloadByRoot; + // TODO: penalize peer? + if (e instanceof DownloadByRootError) { + const errorCode = e.type.code; + downloadByRootMetrics?.error.inc({code: errorCode, client: peerClient}); + } else if (e instanceof RequestError) { + // should look into req_resp metrics in this case + downloadByRootMetrics?.error.inc({code: "req_resp", client: peerClient}); + } else { + // investigate if this happens + downloadByRootMetrics?.error.inc({code: "unknown", client: peerClient}); + } } finally { this.peerBalancer.onRequestCompleted(peerId); } - } - if (lastError) { - lastError.message = `Error fetching UnknownBlockRoot after ${i} attempts: ${lastError.message}`; - throw lastError; - } + this.pendingBlocks.set(getBlockInputSyncCacheItemRootHex(cacheItem), cacheItem); - throw Error( - `Error fetching UnknownBlockRoot after ${i}: cannot download all blobs or data columns for block ${blockRootHex}` - ); - } - - /** - * We have partial block input: - * - we have block but not have all blobs (deneb) or needed columns (fulu) - * - we don't have block and have some blobs (deneb) or some columns (fulu) - * Fetches missing block/data columns/block for the blockinput. This function returns either preData or availableData BlockInput. - */ - private async fetchUnavailableBlockInput( - unavailableBlockInput: BlockInput | NullBlockInput - ): Promise<{blockInput: BlockInput; peerIdStr: string}> { - if (unavailableBlockInput.block !== null && unavailableBlockInput.type !== BlockInputType.dataPromise) { - return {blockInput: unavailableBlockInput, peerIdStr: ""}; - } - - let blockRootHex: RootHex; - let blobKzgCommitmentsLen: number | undefined; - let blockRoot: Uint8Array; - const dataMeta: Record = {}; - let sampledColumns: ColumnIndex[] = []; - - if (unavailableBlockInput.block === null) { - blockRootHex = unavailableBlockInput.blockRootHex; - blockRoot = fromHex(blockRootHex); - } else { - const {cachedData, block: unavailableBlock} = unavailableBlockInput; - blockRoot = this.config - .getForkTypes(unavailableBlock.message.slot) - .BeaconBlock.hashTreeRoot(unavailableBlock.message); - blockRootHex = toRootHex(blockRoot); - blobKzgCommitmentsLen = (unavailableBlock.message.body as deneb.BeaconBlockBody).blobKzgCommitments.length; - - if (cachedData.fork === ForkName.deneb || cachedData.fork === ForkName.electra) { - const pendingBlobs = blobKzgCommitmentsLen - cachedData.blobsCache.size; - Object.assign(dataMeta, {pendingBlobs}); - } else if (cachedData.fork === ForkName.fulu) { - sampledColumns = this.network.custodyConfig.sampledColumns; - const pendingColumns = sampledColumns.length - (cachedData as CachedDataColumns).dataColumnsCache.size; - Object.assign(dataMeta, {pendingColumns}); + if (cacheItem.status === PendingBlockInputStatus.downloaded) { + return cacheItem; } } - let lastError: Error | null = null; - let i = 0; - const excludedPeers = new Set(); - while (i++ < this.getMaxDownloadAttempts()) { - const bestPeer = this.peerBalancer.bestPeerForBlockInput(unavailableBlockInput, excludedPeers); - if (bestPeer === null) { - // no more peer to try, throw error - throw Error( - `Error fetching 
UnavailableBlockInput after ${i}: cannot find peer with needed columns ${sampledColumns.join(", ")}` - ); - } - const {peerId, client: peerClient} = bestPeer; - excludedPeers.add(peerId); - - try { - const blockInput = await unavailableBeaconBlobsByRoot( - this.config, - this.network, - peerId, - peerClient, - unavailableBlockInput, - { - metrics: this.metrics, - logger: this.logger, - executionEngine: this.chain.executionEngine, - emitter: this.chain.emitter, - blockInputsRetryTrackerCache: this.blockInputsRetryTrackerCache, - engineGetBlobsCache: this.engineGetBlobsCache, - } - ); - - if (unavailableBlockInput.block !== null && blockInput.type === BlockInputType.dataPromise) { - // all datacolumns were not downloaded we can continue with other peers - // as unavailableBlockInput.block's dataColumnsCache would be updated - continue; - } - - // data is available, verify block root is correct - const block = blockInput.block.message; - const receivedBlockRoot = this.config.getForkTypes(block.slot).BeaconBlock.hashTreeRoot(block); - - if (!byteArrayEquals(receivedBlockRoot, blockRoot)) { - throw Error(`Wrong block received by peer, got ${toRootHex(receivedBlockRoot)} expected ${blockRootHex}`); + let message = `Error fetching BlockInput with blockRoot=${prettyBytes(rootHex)} after ${i} attempts.`; + if (!isPendingBlockInput(cacheItem)) { + message += " No block and no data was found"; + } else { + if (!cacheItem.blockInput.hasBlock()) { + message += " Block was not found."; + } else if (isBlockInputBlobs(cacheItem.blockInput)) { + const missing = cacheItem.blockInput.getMissingBlobMeta().map((b) => b.index); + if (missing.length) { + message += ` Missing blob indices=${prettyPrintIndices(missing)}`; } - if (unavailableBlockInput.block === null) { - this.logger.debug("Fetched NullBlockInput", {attempts: i, blockRootHex}); - } else { - this.logger.debug("Fetched UnavailableBlockInput", {attempts: i, ...dataMeta, blobKzgCommitmentsLen}); + } else if (isBlockInputColumns(cacheItem.blockInput)) { + const missing = cacheItem.blockInput.getMissingSampledColumnMeta().missing; + if (missing.length) { + message += ` Missing column indices=${prettyPrintIndices(missing)}`; } - - return {blockInput, peerIdStr: peerId}; - } catch (e) { - this.logger.debug("Error fetching UnavailableBlockInput", {attempt: i, blockRootHex, peer: peerId}, e as Error); - lastError = e as Error; - } finally { - this.peerBalancer.onRequestCompleted(peerId); } } - if (lastError) { - lastError.message = `Error fetching UnavailableBlockInput after ${i} attempts: ${lastError.message}`; - throw lastError; - } - - throw Error(`Error fetching UnavailableBlockInput after ${i}: unknown error`); + throw Error(message); } /** @@ -715,21 +586,26 @@ export class UnknownBlockSync { * Downscore all peers that have referenced any of this bad blocks. May report peers multiple times if they have * referenced more than one bad block. */ - private removeAndDownscoreAllDescendants(block: PendingBlock): void { + private removeAndDownScoreAllDescendants(block: BlockInputSyncCacheItem): void { // Get all blocks that are a descendant of this one const badPendingBlocks = this.removeAllDescendants(block); // just console log and do not penalize on pending/bad blocks for debugging // console.log("removeAndDownscoreAllDescendants", {block}); for (const block of badPendingBlocks) { + // + // TODO(fulu): why is this commented out here? 
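+    // (Likely because of the pre-existing note above: penalties on pending/bad blocks are intentionally reduced
+    // to logging while this path is being debugged.)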
+ // // this.knownBadBlocks.add(block.blockRootHex); // for (const peerIdStr of block.peerIdStrs) { // // TODO: Refactor peerRpcScores to work with peerIdStr only // this.network.reportPeer(peerIdStr, PeerAction.LowToleranceError, "BadBlockByRoot"); // } this.logger.debug("ignored Banning unknown block", { - root: block.blockRootHex, - peerIdStrs: Array.from(block.peerIdStrs).join(","), + root: getBlockInputSyncCacheItemRootHex(block), + peerIdStrings: Array.from(block.peerIdStrings) + .map((id) => prettyPrintPeerIdStr(id)) + .join(","), }); } @@ -737,16 +613,19 @@ export class UnknownBlockSync { pruneSetToMax(this.knownBadBlocks, MAX_KNOWN_BAD_BLOCKS); } - private removeAllDescendants(block: PendingBlock): PendingBlock[] { + private removeAllDescendants(block: BlockInputSyncCacheItem): BlockInputSyncCacheItem[] { + const rootHex = getBlockInputSyncCacheItemRootHex(block); // Get all blocks that are a descendant of this one - const badPendingBlocks = [block, ...getAllDescendantBlocks(block.blockRootHex, this.pendingBlocks)]; + const badPendingBlocks = [block, ...getAllDescendantBlocks(rootHex, this.pendingBlocks)]; - this.metrics?.syncUnknownBlock.removedBlocks.inc(badPendingBlocks.length); + this.metrics?.blockInputSync.removedBlocks.inc(badPendingBlocks.length); for (const block of badPendingBlocks) { - this.pendingBlocks.delete(block.blockRootHex); - this.logger.debug("Removing unknown parent block", { - root: block.blockRootHex, + const rootHex = getBlockInputSyncCacheItemRootHex(block); + this.pendingBlocks.delete(rootHex); + this.chain.seenBlockInputCache.prune(rootHex); + this.logger.debug("Removing bad/unknown/incomplete BlockInputSyncCacheItem", { + blockRoot: rootHex, }); } @@ -773,12 +652,10 @@ export class UnknownBlockSync { export class UnknownBlockPeerBalancer { readonly peersMeta: Map; readonly activeRequests: Map; - private readonly custodyConfig: CustodyConfig; - constructor(custodyConfig: CustodyConfig) { + constructor() { this.peersMeta = new Map(); this.activeRequests = new Map(); - this.custodyConfig = custodyConfig; } /** Trigger on each peer re-status */ @@ -821,41 +698,16 @@ export class UnknownBlockPeerBalancer { * called from fetchUnavailableBlockInput() where we have either BlockInput or NullBlockInput * excludedPeers are the peers that we requested already so we don't want to try again */ - bestPeerForBlockInput( - unavailableBlockInput: BlockInput | NullBlockInput, - excludedPeers: Set - ): PeerSyncMeta | null { - let cachedData: CachedData | undefined = undefined; - if (unavailableBlockInput.block === null) { - // NullBlockInput - cachedData = unavailableBlockInput.cachedData; - } else { - // BlockInput - if (unavailableBlockInput.type !== BlockInputType.dataPromise) { - throw Error( - `bestPeerForBlockInput called with BlockInput type ${unavailableBlockInput.type}, expected dataPromise` - ); - } - cachedData = unavailableBlockInput.cachedData; - } - + bestPeerForBlockInput(blockInput: IBlockInput, excludedPeers: Set): PeerSyncMeta | null { const eligiblePeers: PeerIdStr[] = []; - if (cachedData.fork === ForkName.fulu) { - // cached data is CachedDataColumns - const {dataColumnsCache} = cachedData; - const pendingDataColumns: Set = new Set(); - for (const column of this.custodyConfig.sampledColumns) { - if (!dataColumnsCache.has(column)) { - pendingDataColumns.add(column); - } - } - // there could be no pending column in case of NullBlockInput + if (isBlockInputColumns(blockInput)) { + const pendingDataColumns: Set = new 
Set(blockInput.getMissingSampledColumnMeta().missing); + // there could be no pending column in case when block is still missing eligiblePeers.push(...this.filterPeers(pendingDataColumns, excludedPeers)); } else { // prefulu - const pendingDataColumns = null; - eligiblePeers.push(...this.filterPeers(pendingDataColumns, excludedPeers)); + eligiblePeers.push(...this.filterPeers(null, excludedPeers)); } if (eligiblePeers.length === 0) { diff --git a/packages/beacon-node/src/sync/utils/downloadByRange.ts b/packages/beacon-node/src/sync/utils/downloadByRange.ts new file mode 100644 index 000000000000..e0fad8f63664 --- /dev/null +++ b/packages/beacon-node/src/sync/utils/downloadByRange.ts @@ -0,0 +1,808 @@ +import {ChainForkConfig} from "@lodestar/config"; +import {ForkPostDeneb, ForkPostFulu} from "@lodestar/params"; +import {SignedBeaconBlock, Slot, deneb, fulu, phase0} from "@lodestar/types"; +import {LodestarError, Logger, fromHex, prettyBytes, prettyPrintIndices, toRootHex} from "@lodestar/utils"; +import { + BlockInputSource, + DAType, + IBlockInput, + isBlockInputBlobs, + isBlockInputColumns, +} from "../../chain/blocks/blockInput/index.js"; +import {SeenBlockInput} from "../../chain/seenCache/seenGossipBlockInput.js"; +import {validateBlockBlobSidecars} from "../../chain/validation/blobSidecar.js"; +import {validateBlockDataColumnSidecars} from "../../chain/validation/dataColumnSidecar.js"; +import {INetwork} from "../../network/index.js"; +import {PeerIdStr} from "../../util/peerId.js"; +import {DownloadByRootErrorCode} from "./downloadByRoot.js"; +import {WarnResult} from "../../util/wrapError.js"; + +export type DownloadByRangeRequests = { + blocksRequest?: phase0.BeaconBlocksByRangeRequest; + blobsRequest?: deneb.BlobSidecarsByRangeRequest; + columnsRequest?: fulu.DataColumnSidecarsByRangeRequest; +}; + +export type DownloadByRangeResponses = { + blocks?: SignedBeaconBlock[]; + blobSidecars?: deneb.BlobSidecars; + columnSidecars?: fulu.DataColumnSidecars; +}; + +export type DownloadAndCacheByRangeProps = DownloadByRangeRequests & { + config: ChainForkConfig; + cache: SeenBlockInput; + network: INetwork; + logger: Logger; + peerIdStr: string; + batchBlocks?: IBlockInput[]; +}; + +export type CacheByRangeResponsesProps = { + cache: SeenBlockInput; + peerIdStr: string; + responses: ValidatedResponses; + batchBlocks: IBlockInput[]; +}; + +export type ValidatedBlock = { + blockRoot: Uint8Array; + block: SignedBeaconBlock; +}; + +export type ValidatedBlobSidecars = { + blockRoot: Uint8Array; + blobSidecars: deneb.BlobSidecars; +}; + +export type ValidatedColumnSidecars = { + blockRoot: Uint8Array; + columnSidecars: fulu.DataColumnSidecars; +}; + +export type ValidatedResponses = { + validatedBlocks?: ValidatedBlock[]; + validatedBlobSidecars?: ValidatedBlobSidecars[]; + validatedColumnSidecars?: ValidatedColumnSidecars[]; +}; + +/** + * Given existing cached batch block inputs and newly validated responses, update the cache with the new data + */ +export function cacheByRangeResponses({ + cache, + peerIdStr, + responses, + batchBlocks, +}: CacheByRangeResponsesProps): IBlockInput[] { + const source = BlockInputSource.byRange; + const seenTimestampSec = Date.now() / 1000; + const updatedBatchBlocks = new Map(batchBlocks.map((block) => [block.slot, block])); + + const blocks = responses.validatedBlocks ?? 
[];
+  for (let i = 0; i < blocks.length; i++) {
+    const {block, blockRoot} = blocks[i];
+    const blockRootHex = toRootHex(blockRoot);
+
+    const existing = updatedBatchBlocks.get(block.message.slot);
+    if (existing) {
+      // In practice this code block shouldn't be reached because we shouldn't be refetching a block we already have,
+      // see Batch#getRequests. Will throw if root hex does not match (meaning we are following the wrong chain)
+      existing.addBlock(
+        {
+          block,
+          blockRootHex,
+          source,
+          peerIdStr,
+          seenTimestampSec,
+        },
+        {throwOnDuplicateAdd: false}
+      );
+    } else {
+      const blockInput = cache.getByBlock({
+        block,
+        blockRootHex,
+        source,
+        peerIdStr,
+        seenTimestampSec,
+      });
+      updatedBatchBlocks.set(blockInput.slot, blockInput);
+    }
+  }
+
+  for (const {blockRoot, blobSidecars} of responses.validatedBlobSidecars ?? []) {
+    const existing = updatedBatchBlocks.get(blobSidecars[0].signedBlockHeader.message.slot);
+    const blockRootHex = toRootHex(blockRoot);
+
+    if (!existing) {
+      throw new Error("Coding error: blockInput must exist when adding blobs");
+    }
+
+    if (!isBlockInputBlobs(existing)) {
+      throw new DownloadByRangeError({
+        code: DownloadByRangeErrorCode.MISMATCH_BLOCK_INPUT_TYPE,
+        slot: existing.slot,
+        blockRoot: prettyBytes(existing.blockRootHex),
+        expected: DAType.Blobs,
+        actual: existing.type,
+      });
+    }
+    for (const blobSidecar of blobSidecars) {
+      // will throw if root hex does not match (meaning we are following the wrong chain)
+      existing.addBlob(
+        {
+          blobSidecar,
+          blockRootHex,
+          seenTimestampSec,
+          peerIdStr,
+          source,
+        },
+        {throwOnDuplicateAdd: false}
+      );
+    }
+  }
+
+  for (const {blockRoot, columnSidecars} of responses.validatedColumnSidecars ?? []) {
+    const existing = updatedBatchBlocks.get(columnSidecars[0].signedBlockHeader.message.slot);
+    const blockRootHex = toRootHex(blockRoot);
+
+    if (!existing) {
+      throw new Error("Coding error: blockInput must exist when adding columns");
+    }
+
+    if (!isBlockInputColumns(existing)) {
+      throw new DownloadByRangeError({
+        code: DownloadByRangeErrorCode.MISMATCH_BLOCK_INPUT_TYPE,
+        slot: existing.slot,
+        blockRoot: prettyBytes(existing.blockRootHex),
+        expected: DAType.Columns,
+        actual: existing.type,
+      });
+    }
+    for (const columnSidecar of columnSidecars) {
+      // will throw if root hex does not match (meaning we are following the wrong chain)
+      existing.addColumn(
+        {
+          columnSidecar,
+          blockRootHex,
+          seenTimestampSec,
+          peerIdStr,
+          source,
+        },
+        {throwOnDuplicateAdd: false}
+      );
+    }
+  }
+
+  return Array.from(updatedBatchBlocks.values());
+}
+
+export async function downloadByRange({
+  config,
+  network,
+  peerIdStr,
+  batchBlocks,
+  blocksRequest,
+  blobsRequest,
+  columnsRequest,
+}: Omit): Promise> {
+  let response: DownloadByRangeResponses;
+  try {
+    response = await requestByRange({
+      network,
+      peerIdStr,
+      blocksRequest,
+      blobsRequest,
+      columnsRequest,
+    });
+  } catch (err) {
+    throw new DownloadByRangeError({
+      code: DownloadByRangeErrorCode.REQ_RESP_ERROR,
+      reason: (err as Error).message,
+      ...requestsLogMeta({blocksRequest, blobsRequest, columnsRequest}),
+    });
+  }
+
+  const validated = await validateResponses({
+    config,
+    batchBlocks,
+    blocksRequest,
+    blobsRequest,
+    columnsRequest,
+    ...response,
+  });
+
+  return validated;
+}
+
+/**
+ * Should not be called directly.
Only exported for unit testing purposes + */ +export async function requestByRange({ + network, + peerIdStr, + blocksRequest, + blobsRequest, + columnsRequest, +}: DownloadByRangeRequests & { + network: INetwork; + peerIdStr: PeerIdStr; +}): Promise { + let blocks: undefined | SignedBeaconBlock[]; + let blobSidecars: undefined | deneb.BlobSidecars; + let columnSidecars: undefined | fulu.DataColumnSidecars; + + const requests: Promise[] = []; + + if (blocksRequest) { + requests.push( + network.sendBeaconBlocksByRange(peerIdStr, blocksRequest).then((blockResponse) => { + blocks = blockResponse.map(({data}) => data); + }) + ); + } + + if (blobsRequest) { + requests.push( + network.sendBlobSidecarsByRange(peerIdStr, blobsRequest).then((blobResponse) => { + blobSidecars = blobResponse; + }) + ); + } + + if (columnsRequest) { + requests.push( + network.sendDataColumnSidecarsByRange(peerIdStr, columnsRequest).then((columnResponse) => { + columnSidecars = columnResponse; + }) + ); + } + + await Promise.all(requests); + + return { + blocks, + blobSidecars, + columnSidecars, + }; +} + +/** + * Should not be called directly. Only exported for unit testing purposes + */ +export async function validateResponses({ + config, + batchBlocks, + blocksRequest, + blobsRequest, + columnsRequest, + blocks, + blobSidecars, + columnSidecars, +}: DownloadByRangeRequests & + DownloadByRangeResponses & { + config: ChainForkConfig; + batchBlocks?: IBlockInput[]; + }): Promise> { + // Blocks are always required for blob/column validation + // If a blocksRequest is provided, blocks have just been downloaded + // If no blocksRequest is provided, batchBlocks must have been provided from cache + if ((blobsRequest || columnsRequest) && !(blocks || batchBlocks)) { + throw new DownloadByRangeError( + { + code: DownloadByRangeErrorCode.MISSING_BLOCKS, + ...requestsLogMeta({blobsRequest, columnsRequest}), + }, + "No blocks to validate data requests against" + ); + } + + const validatedResponses: ValidatedResponses = {}; + let warnings: DownloadByRangeError[] | null = null; + + if (blocksRequest) { + validatedResponses.validatedBlocks = validateBlockByRangeResponse(config, blocksRequest, blocks ?? []); + } + + const dataRequest = blobsRequest ?? columnsRequest; + if (!dataRequest) { + return {result: validatedResponses, warnings: null}; + } + + const dataRequestBlocks = getBlocksForDataValidation( + dataRequest, + batchBlocks, + blocksRequest ? 
validatedResponses.validatedBlocks : undefined + ); + + if (!dataRequestBlocks.length) { + throw new DownloadByRangeError( + { + code: DownloadByRangeErrorCode.MISSING_BLOCKS, + ...requestsLogMeta({blobsRequest, columnsRequest}), + }, + "No blocks in data request slot range to validate data response against" + ); + } + + if (blobsRequest) { + if (!blobSidecars) { + throw new DownloadByRangeError( + { + code: DownloadByRangeErrorCode.MISSING_BLOBS_RESPONSE, + ...requestsLogMeta({blobsRequest, columnsRequest}), + }, + "No blobSidecars to validate against blobsRequest" + ); + } + + validatedResponses.validatedBlobSidecars = await validateBlobsByRangeResponse(dataRequestBlocks, blobSidecars); + } + + if (columnsRequest) { + if (!columnSidecars) { + throw new DownloadByRangeError( + { + code: DownloadByRangeErrorCode.MISSING_COLUMNS_RESPONSE, + ...requestsLogMeta({blobsRequest, columnsRequest}), + }, + "No columnSidecars to check columnRequest against" + ); + } + + const validatedColumnSidecarsResult = await validateColumnsByRangeResponse( + columnsRequest, + dataRequestBlocks, + columnSidecars + ); + validatedResponses.validatedColumnSidecars = validatedColumnSidecarsResult.result; + warnings = validatedColumnSidecarsResult.warnings; + } + + return {result: validatedResponses, warnings}; +} + +/** + * Should not be called directly. Only exported for unit testing purposes + * + * - check all slots are within range of startSlot (inclusive) through startSlot + count (exclusive) + * - don't have more than count number of blocks + * - slots are in ascending order + * - must allow for skip slots + * - check is a chain of blocks where via parentRoot matches hashTreeRoot of block before + */ +export function validateBlockByRangeResponse( + config: ChainForkConfig, + blocksRequest: phase0.BeaconBlocksByRangeRequest, + blocks: SignedBeaconBlock[] +): ValidatedBlock[] { + const {startSlot, count} = blocksRequest; + + // TODO(fulu): This was added by @twoeths in #8150 but it breaks for epochs with 0 blocks during chain + // liveness issues. See comment https://github.com/ChainSafe/lodestar/issues/8147#issuecomment-3246434697 + // if (!blocks.length) { + // throw new DownloadByRangeError( + // { + // code: DownloadByRangeErrorCode.MISSING_BLOCKS_RESPONSE, + // expectedCount: blocksRequest.count, + // }, + // "Zero blocks in response" + // ); + // } + + if (blocks.length > count) { + throw new DownloadByRangeError( + { + code: DownloadByRangeErrorCode.EXTRA_BLOCKS, + expected: count, + actual: blocks.length - count, + }, + "Extra blocks received in BeaconBlocksByRange response" + ); + } + + const lastValidSlot = startSlot + count - 1; + for (let i = 0; i < blocks.length; i++) { + const slot = blocks[i].message.slot; + + if (slot < startSlot || slot > lastValidSlot) { + throw new DownloadByRangeError( + { + code: DownloadByRangeErrorCode.OUT_OF_RANGE_BLOCKS, + slot, + }, + "Blocks in response outside of requested slot range" + ); + } + + // do not check for out of order on first block, and for subsequent blocks make sure that + // the current block in a later slot than the one prior + if (i !== 0 && slot <= blocks[i - 1].message.slot) { + throw new DownloadByRangeError( + { + code: DownloadByRangeErrorCode.OUT_OF_ORDER_BLOCKS, + }, + "Blocks out of order in BeaconBlocksByRange response" + ); + } + } + + // assumes all blocks are from the same fork. 
Batch only generated epoch-wise requests starting at slot + // 0 of the epoch + const type = config.getForkTypes(blocks[0].message.slot).BeaconBlock; + const response: {block: SignedBeaconBlock; blockRoot: Uint8Array}[] = []; + + for (let i = 0; i < blocks.length; i++) { + const block = blocks[i]; + const blockRoot = type.hashTreeRoot(block.message); + response.push({block, blockRoot}); + + if (i < blocks.length - 1) { + // compare the block root against the next block's parent root + const parentRoot = blocks[i + 1].message.parentRoot; + if (Buffer.compare(blockRoot, parentRoot) !== 0) { + throw new DownloadByRangeError( + { + code: DownloadByRangeErrorCode.PARENT_ROOT_MISMATCH, + slot: blocks[i].message.slot, + expected: prettyBytes(blockRoot), + actual: prettyBytes(parentRoot), + }, + `Block parent root does not match the previous block's root in BeaconBlocksByRange response` + ); + } + } + } + + return response; +} + +/** + * Should not be called directly. Only exported for unit testing purposes + */ +export async function validateBlobsByRangeResponse( + dataRequestBlocks: ValidatedBlock[], + blobSidecars: deneb.BlobSidecars +): Promise { + const expectedBlobCount = dataRequestBlocks.reduce( + (acc, {block}) => (block as SignedBeaconBlock).message.body.blobKzgCommitments.length + acc, + 0 + ); + if (blobSidecars.length > expectedBlobCount) { + throw new DownloadByRangeError( + { + code: DownloadByRangeErrorCode.EXTRA_BLOBS, + expected: expectedBlobCount, + actual: blobSidecars.length, + }, + "Extra blobs received in BlobSidecarsByRange response" + ); + } + if (blobSidecars.length < expectedBlobCount) { + throw new DownloadByRangeError( + { + code: DownloadByRangeErrorCode.MISSING_BLOBS, + expected: expectedBlobCount, + actual: blobSidecars.length, + }, + "Missing blobs in BlobSidecarsByRange response" + ); + } + + const validateSidecarsPromises: Promise[] = []; + for (let blockIndex = 0, blobSidecarIndex = 0; blockIndex < dataRequestBlocks.length; blockIndex++) { + const {block, blockRoot} = dataRequestBlocks[blockIndex]; + const blockKzgCommitments = (block as SignedBeaconBlock).message.body.blobKzgCommitments; + if (blockKzgCommitments.length === 0) { + continue; + } + + const blockBlobSidecars = blobSidecars.slice(blobSidecarIndex, blobSidecarIndex + blockKzgCommitments.length); + blobSidecarIndex += blockKzgCommitments.length; + + for (let i = 0; i < blockBlobSidecars.length; i++) { + if (blockBlobSidecars[i].index !== i) { + throw new DownloadByRangeError( + { + code: DownloadByRangeErrorCode.OUT_OF_ORDER_BLOBS, + slot: block.message.slot, + }, + "Blob sidecars not in order or do not match expected indexes in BlobSidecarsByRange response" + ); + } + } + + validateSidecarsPromises.push( + validateBlockBlobSidecars(block.message.slot, blockRoot, blockKzgCommitments.length, blockBlobSidecars).then( + () => ({blockRoot, blobSidecars: blockBlobSidecars}) + ) + ); + } + + // Await all sidecar validations in parallel + return Promise.all(validateSidecarsPromises); +} + +/** + * Should not be called directly. Only exported for unit testing purposes + */ +export async function validateColumnsByRangeResponse( + request: fulu.DataColumnSidecarsByRangeRequest, + dataRequestBlocks: ValidatedBlock[], + columnSidecars: fulu.DataColumnSidecars +): Promise> { + // Expected column count considering currently-validated batch blocks + const expectedColumnCount = dataRequestBlocks.reduce((acc, {block}) => { + return (block as SignedBeaconBlock).message.body.blobKzgCommitments.length > 0 + ? 
request.columns.length + acc
+      : acc;
+  }, 0);
+  const nextSlot = dataRequestBlocks.length
+    ? (dataRequestBlocks.at(-1) as ValidatedBlock).block.message.slot + 1
+    : request.startSlot;
+  const possiblyMissingBlocks = request.startSlot + request.count - nextSlot;
+
+  // Allow for extra columns if some blocks are missing from the end of a batch
+  // E.g.: if we requested 10 blocks but only 8 were returned, allow for up to 2 * columns.length extra columns
+  const maxColumnCount = expectedColumnCount + possiblyMissingBlocks * request.columns.length;
+
+  if (columnSidecars.length > maxColumnCount) {
+    // this never happens on devnet, so throw an error for now
+    throw new DownloadByRangeError(
+      {
+        code: DownloadByRangeErrorCode.OVER_COLUMNS,
+        max: maxColumnCount,
+        actual: columnSidecars.length,
+      },
+      "Extra data columns received in DataColumnSidecarsByRange response"
+    );
+  }
+
+  const warnings: DownloadByRangeError[] = [];
+  // no need to check columnSidecars.length vs expectedColumnCount here, it will be checked per-block below
+  const requestedColumns = new Set(request.columns);
+  const validateSidecarsPromises: Promise[] = [];
+  for (let blockIndex = 0, columnSidecarIndex = 0; blockIndex < dataRequestBlocks.length; blockIndex++) {
+    const {block, blockRoot} = dataRequestBlocks[blockIndex];
+    const slot = block.message.slot;
+    const blockRootHex = toRootHex(blockRoot);
+    const blockKzgCommitments = (block as SignedBeaconBlock).message.body.blobKzgCommitments;
+    const expectedColumns = blockKzgCommitments.length ? request.columns.length : 0;
+
+    if (expectedColumns === 0) {
+      continue;
+    }
+    const blockColumnSidecars: fulu.DataColumnSidecar[] = [];
+    while (columnSidecarIndex < columnSidecars.length) {
+      const columnSidecar = columnSidecars[columnSidecarIndex];
+      if (columnSidecar.signedBlockHeader.message.slot !== block.message.slot) {
+        // We've reached columns for the next block
+        break;
+      }
+      blockColumnSidecars.push(columnSidecar);
+      columnSidecarIndex++;
+    }
+
+    const returnedColumns = new Set(blockColumnSidecars.map((c) => c.index));
+    const missingIndices = request.columns.filter((i) => !returnedColumns.has(i));
+    if (missingIndices.length > 0) {
+      warnings.push(
+        new DownloadByRangeError(
+          {
+            code: DownloadByRangeErrorCode.MISSING_COLUMNS,
+            slot,
+            blockRoot: blockRootHex,
+            missingIndices: prettyPrintIndices(missingIndices),
+          },
+          "Missing data columns in DataColumnSidecarsByRange response"
+        )
+      );
+    }
+
+    const extraIndices = [...returnedColumns].filter((i) => !requestedColumns.has(i));
+    if (extraIndices.length > 0) {
+      warnings.push(
+        new DownloadByRangeError(
+          {
+            code: DownloadByRangeErrorCode.EXTRA_COLUMNS,
+            slot,
+            blockRoot: blockRootHex,
+            invalidIndices: prettyPrintIndices(extraIndices),
+          },
+          "Data column not in requested columns in DataColumnSidecarsByRange response"
+        )
+      );
+    }
+
+    validateSidecarsPromises.push(
+      validateBlockDataColumnSidecars(slot, blockRoot, blockKzgCommitments.length, blockColumnSidecars).then(() => ({
+        blockRoot,
+        columnSidecars: blockColumnSidecars,
+      }))
+    );
+  }
+
+  // Await all sidecar validations in parallel
+  const result = await Promise.all(validateSidecarsPromises);
+  return {result, warnings: warnings.length ? warnings : null};
+}
+
+/**
+ * Given a data request, return only the blocks and roots that correspond to the data request (sorted).
Assumes that + * cached have slots that are all before the current batch of downloaded blocks + */ +export function getBlocksForDataValidation( + dataRequest: {startSlot: Slot; count: number}, + cached: IBlockInput[] | undefined, + current: ValidatedBlock[] | undefined +): ValidatedBlock[] { + const startSlot = dataRequest.startSlot; + const endSlot = startSlot + dataRequest.count; + + // Organize cached blocks and current blocks, only including those in the requested slot range + const dataRequestBlocks: ValidatedBlock[] = []; + let lastSlot = startSlot - 1; + + if (cached) { + for (let i = 0; i < cached.length; i++) { + const blockInput = cached[i]; + if (blockInput.slot >= startSlot && blockInput.slot < endSlot && blockInput.slot > lastSlot) { + dataRequestBlocks.push({block: blockInput.getBlock(), blockRoot: fromHex(blockInput.blockRootHex)}); + lastSlot = blockInput.slot; + } + } + } + + if (current) { + for (let i = 0; i < current.length; i++) { + const block = current[i].block; + if (block.message.slot >= startSlot && block.message.slot < endSlot && block.message.slot > lastSlot) { + dataRequestBlocks.push(current[i]); + lastSlot = block.message.slot; + } + } + } + + return dataRequestBlocks; +} + +function requestsLogMeta({blocksRequest, blobsRequest, columnsRequest}: DownloadByRangeRequests) { + const logMeta: { + blockStartSlot?: number; + blockCount?: number; + blobStartSlot?: number; + blobCount?: number; + columnStartSlot?: number; + columnCount?: number; + } = {}; + if (blocksRequest) { + logMeta.blockStartSlot = blocksRequest.startSlot; + logMeta.blockCount = blocksRequest.count; + } + if (blobsRequest) { + logMeta.blobStartSlot = blobsRequest.startSlot; + logMeta.blobCount = blobsRequest.count; + } + if (columnsRequest) { + logMeta.columnStartSlot = columnsRequest.startSlot; + logMeta.columnCount = columnsRequest.count; + } + return logMeta; +} + +export enum DownloadByRangeErrorCode { + MISSING_BLOCKS = "DOWNLOAD_BY_RANGE_ERROR_MISSING_BLOCKS", + MISSING_BLOBS_RESPONSE = "DOWNLOAD_BY_RANGE_ERROR_MISSING_BLOBS_RESPONSE", + MISSING_COLUMNS_RESPONSE = "DOWNLOAD_BY_RANGE_ERROR_MISSING_COLUMNS_RESPONSE", + + /** Error at the reqresp layer */ + REQ_RESP_ERROR = "DOWNLOAD_BY_RANGE_ERROR_REQ_RESP_ERROR", + + // Errors validating a chain of blocks (not considering associated data) + + PARENT_ROOT_MISMATCH = "DOWNLOAD_BY_RANGE_ERROR_PARENT_ROOT_MISMATCH", + EXTRA_BLOCKS = "DOWNLOAD_BY_RANGE_ERROR_EXTRA_BLOCKS", + OUT_OF_RANGE_BLOCKS = "DOWNLOAD_BY_RANGE_OUT_OF_RANGE_BLOCKS", + OUT_OF_ORDER_BLOCKS = "DOWNLOAD_BY_RANGE_OUT_OF_ORDER_BLOCKS", + + MISSING_BLOBS = "DOWNLOAD_BY_RANGE_ERROR_MISSING_BLOBS", + OUT_OF_ORDER_BLOBS = "DOWNLOAD_BY_RANGE_ERROR_OUT_OF_ORDER_BLOBS", + EXTRA_BLOBS = "DOWNLOAD_BY_RANGE_ERROR_EXTRA_BLOBS", + + MISSING_COLUMNS = "DOWNLOAD_BY_RANGE_ERROR_MISSING_COLUMNS", + OVER_COLUMNS = "DOWNLOAD_BY_RANGE_ERROR_OVER_COLUMNS", + EXTRA_COLUMNS = "DOWNLOAD_BY_RANGE_ERROR_EXTRA_COLUMNS", + + /** Cached block input type mismatches new data */ + MISMATCH_BLOCK_INPUT_TYPE = "DOWNLOAD_BY_RANGE_ERROR_MISMATCH_BLOCK_INPUT_TYPE", +} + +export type DownloadByRangeErrorType = + | { + code: DownloadByRootErrorCode.MISSING_BLOCK_RESPONSE; + expectedCount: number; + } + | { + code: + | DownloadByRangeErrorCode.MISSING_BLOCKS + | DownloadByRangeErrorCode.MISSING_BLOBS_RESPONSE + | DownloadByRangeErrorCode.MISSING_COLUMNS_RESPONSE; + blockStartSlot?: number; + blockCount?: number; + blobStartSlot?: number; + blobCount?: number; + columnStartSlot?: number; + columnCount?: number; + } + | 
+      code: DownloadByRangeErrorCode.OUT_OF_RANGE_BLOCKS;
+      slot: number;
+    }
+  | {
+      code: DownloadByRangeErrorCode.OUT_OF_ORDER_BLOCKS;
+    }
+  | {
+      code: DownloadByRangeErrorCode.REQ_RESP_ERROR;
+      blockStartSlot?: number;
+      blockCount?: number;
+      blobStartSlot?: number;
+      blobCount?: number;
+      columnStartSlot?: number;
+      columnCount?: number;
+      reason: string;
+    }
+  | {
+      code: DownloadByRangeErrorCode.PARENT_ROOT_MISMATCH;
+      slot: number;
+      expected: string;
+      actual: string;
+    }
+  | {
+      code: DownloadByRangeErrorCode.EXTRA_BLOCKS;
+      expected: number;
+      actual: number;
+    }
+  | {
+      code: DownloadByRangeErrorCode.MISSING_BLOBS;
+      expected: number;
+      actual: number;
+    }
+  | {
+      code: DownloadByRangeErrorCode.OUT_OF_ORDER_BLOBS;
+      slot: number;
+    }
+  | {
+      code: DownloadByRangeErrorCode.EXTRA_BLOBS;
+      expected: number;
+      actual: number;
+    }
+  | {
+      code: DownloadByRangeErrorCode.OVER_COLUMNS;
+      max: number;
+      actual: number;
+    }
+  | {
+      code: DownloadByRangeErrorCode.MISSING_COLUMNS;
+      slot: Slot;
+      blockRoot: string;
+      missingIndices: string;
+    }
+  | {
+      code: DownloadByRangeErrorCode.EXTRA_COLUMNS;
+      slot: Slot;
+      blockRoot: string;
+      invalidIndices: string;
+    }
+  | {
+      code: DownloadByRangeErrorCode.MISMATCH_BLOCK_INPUT_TYPE;
+      slot: number;
+      blockRoot: string;
+      expected: DAType;
+      actual: DAType;
+    };
+
+export class DownloadByRangeError extends LodestarError<DownloadByRangeErrorType> {}
diff --git a/packages/beacon-node/src/sync/utils/downloadByRoot.ts b/packages/beacon-node/src/sync/utils/downloadByRoot.ts
new file mode 100644
index 000000000000..8c9260f077c2
--- /dev/null
+++ b/packages/beacon-node/src/sync/utils/downloadByRoot.ts
@@ -0,0 +1,533 @@
+import {ChainForkConfig} from "@lodestar/config";
+import {ForkPostDeneb, ForkPostFulu, ForkPreFulu, isForkPostDeneb, isForkPostFulu} from "@lodestar/params";
+import {SignedBeaconBlock, Slot, deneb, fulu} from "@lodestar/types";
+import {LodestarError, fromHex, prettyBytes, prettyPrintIndices, toRootHex} from "@lodestar/utils";
+import {isBlockInputBlobs, isBlockInputColumns} from "../../chain/blocks/blockInput/blockInput.js";
+import {BlobMeta, BlockInputSource, IBlockInput, MissingColumnMeta} from "../../chain/blocks/blockInput/types.js";
+import {SeenBlockInput} from "../../chain/seenCache/seenGossipBlockInput.js";
+import {validateBlockBlobSidecars} from "../../chain/validation/blobSidecar.js";
+import {validateBlockDataColumnSidecars} from "../../chain/validation/dataColumnSidecar.js";
+import {INetwork} from "../../network/interface.js";
+import {prettyPrintPeerIdStr} from "../../network/util.js";
+import {kzgCommitmentToVersionedHash} from "../../util/blobs.js";
+import {byteArrayEquals} from "../../util/bytes.js";
+import {
+  BlockInputSyncCacheItem,
+  PendingBlockInput,
+  PendingBlockInputStatus,
+  getBlockInputSyncCacheItemRootHex,
+  isPendingBlockInput,
+} from "../types.js";
+import {PeerSyncMeta} from "../../network/peers/peersData.js";
+import {PeerIdStr} from "../../util/peerId.js";
+import {WarnResult} from "../../util/wrapError.js";
+
+export type FetchByRootCoreProps = {
+  config: ChainForkConfig;
+  network: INetwork;
+  peerMeta: PeerSyncMeta;
+};
+export type FetchByRootProps = FetchByRootCoreProps & {
+  cacheItem: BlockInputSyncCacheItem;
+  blockRoot: Uint8Array;
+};
+export type FetchByRootAndValidateBlockProps = Omit<FetchByRootCoreProps, "peerMeta"> & {
+  peerIdStr: PeerIdStr;
+  blockRoot: Uint8Array;
+};
+export type FetchByRootAndValidateBlobsProps = FetchByRootAndValidateBlockProps & {
+  forkName: ForkPreFulu;
+  block: SignedBeaconBlock<ForkPostDeneb>;
+  blobMeta: BlobMeta[];
+};
+export type FetchByRootAndValidateColumnsProps = FetchByRootCoreProps & {
+  blockRoot: Uint8Array;
+  forkName: ForkPostFulu;
+  block: SignedBeaconBlock<ForkPostFulu>;
+  columnMeta: MissingColumnMeta;
+};
+export type FetchByRootResponses = {
+  block: SignedBeaconBlock;
+  blobSidecars?: deneb.BlobSidecars;
+  columnSidecars?: fulu.DataColumnSidecars;
+};
+
+export type DownloadByRootProps = FetchByRootCoreProps & {
+  cacheItem: BlockInputSyncCacheItem;
+  seenCache: SeenBlockInput;
+};
+export async function downloadByRoot({
+  config,
+  seenCache,
+  network,
+  peerMeta,
+  cacheItem,
+}: DownloadByRootProps): Promise<WarnResult<PendingBlockInput, DownloadByRootError>> {
+  const rootHex = getBlockInputSyncCacheItemRootHex(cacheItem);
+  const blockRoot = fromHex(rootHex);
+  const {peerId: peerIdStr} = peerMeta;
+
+  const {
+    result: {block, blobSidecars, columnSidecars},
+    warnings,
+  } = await fetchByRoot({
+    config,
+    network,
+    cacheItem,
+    blockRoot,
+    peerMeta,
+  });
+
+  let blockInput: IBlockInput;
+  if (isPendingBlockInput(cacheItem)) {
+    blockInput = cacheItem.blockInput;
+    if (!blockInput.hasBlock()) {
+      blockInput.addBlock({
+        block,
+        blockRootHex: rootHex,
+        source: BlockInputSource.byRoot,
+        seenTimestampSec: Date.now() / 1000,
+        peerIdStr,
+      });
+    }
+  } else {
+    blockInput = seenCache.getByBlock({
+      block,
+      peerIdStr,
+      blockRootHex: rootHex,
+      seenTimestampSec: Date.now() / 1000,
+      source: BlockInputSource.byRoot,
+    });
+  }
+
+  const hasAllDataPreDownload = blockInput.hasBlockAndAllData();
+
+  if (isBlockInputBlobs(blockInput) && !hasAllDataPreDownload) {
+    // blobSidecars could be undefined if gossip resulted in full block+blobs so we don't download any
+    if (!blobSidecars) {
+      throw new DownloadByRootError({
+        code: DownloadByRootErrorCode.MISSING_BLOB_RESPONSE,
+        blockRoot: prettyBytes(rootHex),
+        peer: peerIdStr,
+      });
+    }
+    for (const blobSidecar of blobSidecars) {
+      blockInput.addBlob({
+        blobSidecar,
+        blockRootHex: rootHex,
+        seenTimestampSec: Date.now() / 1000,
+        source: BlockInputSource.byRoot,
+        peerIdStr,
+      });
+    }
+  }
+
+  if (isBlockInputColumns(blockInput) && !hasAllDataPreDownload) {
+    // columnSidecars could be undefined if gossip resulted in full block+columns so we don't download any
+    if (!columnSidecars) {
+      throw new DownloadByRootError({
+        code: DownloadByRootErrorCode.MISSING_COLUMN_RESPONSE,
+        blockRoot: prettyBytes(rootHex),
+        peer: peerIdStr,
+      });
+    }
+    for (const columnSidecar of columnSidecars) {
+      blockInput.addColumn(
+        {
+          columnSidecar,
+          blockRootHex: rootHex,
+          seenTimestampSec: Date.now() / 1000,
+          source: BlockInputSource.byRoot,
+          peerIdStr,
+        },
+        // the same DataColumnSidecar may be added by gossip while waiting for fetchByRoot
+        {throwOnDuplicateAdd: false}
+      );
+    }
+  }
+
+  let status: PendingBlockInputStatus;
+  let timeSyncedSec: number | undefined;
+  if (blockInput.hasBlockAndAllData()) {
+    status = PendingBlockInputStatus.downloaded;
+    timeSyncedSec = Date.now() / 1000;
+  } else {
+    status = PendingBlockInputStatus.pending;
+  }
+
+  return {
+    result: {
+      status,
+      blockInput,
+      timeSyncedSec,
+      timeAddedSec: cacheItem.timeAddedSec,
+      peerIdStrings: cacheItem.peerIdStrings,
+    },
+    warnings,
+  };
+}
+
+export async function fetchByRoot({
+  config,
+  network,
+  peerMeta,
+  blockRoot,
+  cacheItem,
+}: FetchByRootProps): Promise<WarnResult<FetchByRootResponses, DownloadByRootError>> {
+  let block: SignedBeaconBlock;
+  let blobSidecars: deneb.BlobSidecars | undefined;
+  let columnSidecarResult: WarnResult<fulu.DataColumnSidecars, DownloadByRootError> | undefined;
+  const {peerId: peerIdStr} = peerMeta;
+
+  if (isPendingBlockInput(cacheItem)) {
if (cacheItem.blockInput.hasBlock()) { + block = cacheItem.blockInput.getBlock(); + } else { + block = await fetchAndValidateBlock({ + config, + network, + peerIdStr, + blockRoot, + }); + } + + const forkName = config.getForkName(block.message.slot); + if (!cacheItem.blockInput.hasAllData()) { + if (isBlockInputBlobs(cacheItem.blockInput)) { + blobSidecars = await fetchAndValidateBlobs({ + config, + network, + peerIdStr, + forkName: forkName as ForkPreFulu, + block: block as SignedBeaconBlock, + blockRoot, + blobMeta: cacheItem.blockInput.getMissingBlobMeta(), + }); + } + if (isBlockInputColumns(cacheItem.blockInput)) { + columnSidecarResult = await fetchAndValidateColumns({ + config, + network, + peerMeta, + forkName: forkName as ForkPostFulu, + block: block as SignedBeaconBlock, + blockRoot, + columnMeta: cacheItem.blockInput.getMissingSampledColumnMeta(), + }); + } + } + } else { + block = await fetchAndValidateBlock({ + config, + network, + peerIdStr, + blockRoot, + }); + const forkName = config.getForkName(block.message.slot); + if (isForkPostFulu(forkName)) { + columnSidecarResult = await fetchAndValidateColumns({ + config, + network, + peerMeta, + forkName, + blockRoot, + block: block as SignedBeaconBlock, + columnMeta: { + missing: network.custodyConfig.sampledColumns, + versionedHashes: (block as SignedBeaconBlock).message.body.blobKzgCommitments.map((c) => + kzgCommitmentToVersionedHash(c) + ), + }, + }); + } else if (isForkPostDeneb(forkName)) { + const commitments = (block as SignedBeaconBlock).message.body.blobKzgCommitments; + const blobCount = commitments.length; + blobSidecars = await fetchAndValidateBlobs({ + config, + network, + peerIdStr, + forkName: forkName as ForkPreFulu, + blockRoot, + block: block as SignedBeaconBlock, + blobMeta: Array.from({length: blobCount}, (_, i) => ({ + index: i, + blockRoot, + versionedHash: kzgCommitmentToVersionedHash(commitments[i]), + })), + }); + } + } + + return { + result: { + block, + blobSidecars, + columnSidecars: columnSidecarResult?.result, + }, + warnings: columnSidecarResult?.warnings ?? 
null,
+  };
+}
+
+export async function fetchAndValidateBlock({
+  config,
+  network,
+  peerIdStr,
+  blockRoot,
+}: FetchByRootAndValidateBlockProps): Promise<SignedBeaconBlock> {
+  const response = await network.sendBeaconBlocksByRoot(peerIdStr, [blockRoot]);
+  const block = response.at(0)?.data;
+  if (!block) {
+    throw new DownloadByRootError({
+      code: DownloadByRootErrorCode.MISSING_BLOCK_RESPONSE,
+      peer: prettyPrintPeerIdStr(peerIdStr),
+      blockRoot: prettyBytes(blockRoot),
+    });
+  }
+  const receivedRoot = config.getForkTypes(block.message.slot).BeaconBlock.hashTreeRoot(block.message);
+  if (!byteArrayEquals(receivedRoot, blockRoot)) {
+    throw new DownloadByRootError(
+      {
+        code: DownloadByRootErrorCode.MISMATCH_BLOCK_ROOT,
+        peer: prettyPrintPeerIdStr(peerIdStr),
+        requestedBlockRoot: prettyBytes(blockRoot),
+        receivedBlockRoot: prettyBytes(toRootHex(receivedRoot)),
+      },
+      "block does not match requested root"
+    );
+  }
+  return block;
+}
+
+export async function fetchAndValidateBlobs({
+  network,
+  peerIdStr,
+  blockRoot,
+  block,
+  blobMeta,
+}: FetchByRootAndValidateBlobsProps): Promise<deneb.BlobSidecars> {
+  const blobSidecars: deneb.BlobSidecars = await fetchBlobsByRoot({
+    network,
+    peerIdStr,
+    blobMeta,
+  });
+
+  await validateBlockBlobSidecars(block.message.slot, blockRoot, blobMeta.length, blobSidecars);
+
+  return blobSidecars;
+}
+
+export async function fetchBlobsByRoot({
+  network,
+  peerIdStr,
+  blobMeta,
+  indicesInPossession = [],
+}: Pick<FetchByRootAndValidateBlobsProps, "network" | "peerIdStr" | "blobMeta"> & {
+  indicesInPossession?: number[];
+}): Promise<deneb.BlobSidecars> {
+  const blobsRequest = blobMeta
+    .filter(({index}) => !indicesInPossession.includes(index))
+    .map(({blockRoot, index}) => ({blockRoot, index}));
+  if (!blobsRequest.length) {
+    return [];
+  }
+  return await network.sendBlobSidecarsByRoot(peerIdStr, blobsRequest);
+}
+
+export async function fetchAndValidateColumns({
+  network,
+  peerMeta,
+  block,
+  blockRoot,
+  columnMeta,
+}: FetchByRootAndValidateColumnsProps): Promise<WarnResult<fulu.DataColumnSidecars, DownloadByRootError>> {
+  const {peerId: peerIdStr} = peerMeta;
+  const slot = block.message.slot;
+  const blobCount = block.message.body.blobKzgCommitments.length;
+  if (blobCount === 0) {
+    return {result: [], warnings: null};
+  }
+
+  const blockRootHex = toRootHex(blockRoot);
+  const peerColumns = new Set(peerMeta.custodyGroups ?? 
[]); + const requestedColumns = columnMeta.missing.filter((c) => peerColumns.has(c)); + const columnSidecars = await network.sendDataColumnSidecarsByRoot(peerIdStr, [ + {blockRoot, columns: requestedColumns}, + ]); + + const warnings: DownloadByRootError[] = []; + + // it's not acceptable if no sidecar is returned with >0 blobCount + if (columnSidecars.length === 0) { + throw new DownloadByRootError({ + code: DownloadByRootErrorCode.NO_SIDECAR_RECEIVED, + peer: prettyPrintPeerIdStr(peerIdStr), + slot, + blockRoot: blockRootHex, + }); + } + + // it's ok if only some sidecars are returned, we will try to get the rest from other peers + const requestedColumnsSet = new Set(requestedColumns); + const returnedColumns = columnSidecars.map((c) => c.index); + const returnedColumnsSet = new Set(returnedColumns); + const missingIndices = requestedColumns.filter((c) => !returnedColumnsSet.has(c)); + if (missingIndices.length > 0) { + warnings.push( + new DownloadByRootError( + { + code: DownloadByRootErrorCode.NOT_ENOUGH_SIDECARS_RECEIVED, + peer: prettyPrintPeerIdStr(peerIdStr), + slot, + blockRoot: blockRootHex, + missingIndices: prettyPrintIndices(missingIndices), + }, + "Did not receive all of the requested columnSidecars" + ) + ); + } + + // check extra returned columnSidecar + const extraIndices = returnedColumns.filter((c) => !requestedColumnsSet.has(c)); + if (extraIndices.length > 0) { + warnings.push( + new DownloadByRootError( + { + code: DownloadByRootErrorCode.EXTRA_SIDECAR_RECEIVED, + peer: prettyPrintPeerIdStr(peerIdStr), + slot, + blockRoot: blockRootHex, + invalidIndices: prettyPrintIndices(extraIndices), + }, + "Received columnSidecars that were not requested" + ) + ); + } + + await validateBlockDataColumnSidecars(slot, blockRoot, blobCount, columnSidecars); + + return {result: columnSidecars, warnings: warnings.length > 0 ? warnings : null}; +} + +// TODO(fulu) not in use, remove? +export async function fetchColumnsByRoot({ + network, + peerMeta, + blockRoot, + columnMeta, +}: Pick< + FetchByRootAndValidateColumnsProps, + "network" | "peerMeta" | "blockRoot" | "columnMeta" +>): Promise { + return await network.sendDataColumnSidecarsByRoot(peerMeta.peerId, [{blockRoot, columns: columnMeta.missing}]); +} + +// TODO(fulu) not in use, remove? +export type ValidateColumnSidecarsProps = Pick< + FetchByRootAndValidateColumnsProps, + "config" | "peerMeta" | "blockRoot" | "columnMeta" +> & { + slot: number; + blobCount: number; + needed?: fulu.DataColumnSidecars; + needToPublish?: fulu.DataColumnSidecars; +}; + +// TODO(fulu) not in use, remove? 
+export async function validateColumnSidecars({
+  peerMeta,
+  slot,
+  blockRoot,
+  blobCount,
+  columnMeta,
+  needed = [],
+  needToPublish = [],
+}: ValidateColumnSidecarsProps): Promise<void> {
+  const requestedIndices = columnMeta.missing;
+  const extraIndices: number[] = [];
+  for (const columnSidecar of needed) {
+    if (!requestedIndices.includes(columnSidecar.index)) {
+      extraIndices.push(columnSidecar.index);
+    }
+  }
+  if (extraIndices.length > 0) {
+    throw new DownloadByRootError(
+      {
+        code: DownloadByRootErrorCode.EXTRA_SIDECAR_RECEIVED,
+        peer: prettyPrintPeerIdStr(peerMeta.peerId),
+        slot,
+        blockRoot: prettyBytes(blockRoot),
+        invalidIndices: prettyPrintIndices(extraIndices),
+      },
+      "Received a columnSidecar that was not requested"
+    );
+  }
+  await validateBlockDataColumnSidecars(slot, blockRoot, blobCount, [...needed, ...needToPublish]);
+}
+
+export enum DownloadByRootErrorCode {
+  MISMATCH_BLOCK_ROOT = "DOWNLOAD_BY_ROOT_ERROR_MISMATCH_BLOCK_ROOT",
+  EXTRA_SIDECAR_RECEIVED = "DOWNLOAD_BY_ROOT_ERROR_EXTRA_SIDECAR_RECEIVED",
+  NO_SIDECAR_RECEIVED = "DOWNLOAD_BY_ROOT_ERROR_NO_SIDECAR_RECEIVED",
+  NOT_ENOUGH_SIDECARS_RECEIVED = "DOWNLOAD_BY_ROOT_ERROR_NOT_ENOUGH_SIDECARS_RECEIVED",
+  INVALID_INCLUSION_PROOF = "DOWNLOAD_BY_ROOT_ERROR_INVALID_INCLUSION_PROOF",
+  INVALID_KZG_PROOF = "DOWNLOAD_BY_ROOT_ERROR_INVALID_KZG_PROOF",
+  MISSING_BLOCK_RESPONSE = "DOWNLOAD_BY_ROOT_ERROR_MISSING_BLOCK_RESPONSE",
+  MISSING_BLOB_RESPONSE = "DOWNLOAD_BY_ROOT_ERROR_MISSING_BLOB_RESPONSE",
+  MISSING_COLUMN_RESPONSE = "DOWNLOAD_BY_ROOT_ERROR_MISSING_COLUMN_RESPONSE",
+}
+export type DownloadByRootErrorType =
+  | {
+      code: DownloadByRootErrorCode.MISMATCH_BLOCK_ROOT;
+      peer: string;
+      requestedBlockRoot: string;
+      receivedBlockRoot: string;
+    }
+  | {
+      code: DownloadByRootErrorCode.EXTRA_SIDECAR_RECEIVED;
+      peer: string;
+      slot: Slot;
+      blockRoot: string;
+      invalidIndices: string;
+    }
+  | {
+      code: DownloadByRootErrorCode.NO_SIDECAR_RECEIVED;
+      peer: string;
+      slot: Slot;
+      blockRoot: string;
+    }
+  | {
+      code: DownloadByRootErrorCode.NOT_ENOUGH_SIDECARS_RECEIVED;
+      peer: string;
+      slot: Slot;
+      blockRoot: string;
+      missingIndices: string;
+    }
+  | {
+      code: DownloadByRootErrorCode.INVALID_INCLUSION_PROOF;
+      peer: string;
+      blockRoot: string;
+      sidecarIndex: number;
+    }
+  | {
+      code: DownloadByRootErrorCode.INVALID_KZG_PROOF;
+      peer: string;
+      blockRoot: string;
+    }
+  | {
+      code: DownloadByRootErrorCode.MISSING_BLOCK_RESPONSE;
+      peer: string;
+      blockRoot: string;
+    }
+  | {
+      code: DownloadByRootErrorCode.MISSING_BLOB_RESPONSE;
+      peer: string;
+      blockRoot: string;
+    }
+  | {
+      code: DownloadByRootErrorCode.MISSING_COLUMN_RESPONSE;
+      peer: string;
+      blockRoot: string;
+    };
+
+export class DownloadByRootError extends LodestarError<DownloadByRootErrorType> {}
diff --git a/packages/beacon-node/src/sync/utils/pendingBlocksTree.ts b/packages/beacon-node/src/sync/utils/pendingBlocksTree.ts
index deefba91f366..04c4d1346a3c 100644
--- a/packages/beacon-node/src/sync/utils/pendingBlocksTree.ts
+++ b/packages/beacon-node/src/sync/utils/pendingBlocksTree.ts
@@ -1,20 +1,22 @@
 import {RootHex} from "@lodestar/types";
 import {MapDef} from "@lodestar/utils";
-import {BlockInputType} from "../../chain/blocks/types.js";
 import {
-  DownloadedBlock,
-  PendingBlock,
-  PendingBlockStatus,
-  UnknownAndAncestorBlocks,
-  UnknownBlock,
-} from "../interface.js";
+  BlockInputSyncCacheItem,
+  PendingBlockInput,
+  PendingBlockInputStatus,
+  getBlockInputSyncCacheItemRootHex,
+  isPendingBlockInput,
+} from "../types.js";
 
-export 
function getAllDescendantBlocks(blockRootHex: RootHex, blocks: Map): PendingBlock[] { +export function getAllDescendantBlocks( + blockRootHex: RootHex, + blocks: Map +): BlockInputSyncCacheItem[] { // Do one pass over all blocks to index by parent - const byParent = new MapDef(() => []); + const byParent = new MapDef(() => []); for (const block of blocks.values()) { - if (block.parentBlockRootHex != null) { - byParent.getOrDefault(block.parentBlockRootHex).push(block); + if (isPendingBlockInput(block)) { + byParent.getOrDefault(block.blockInput.parentRootHex).push(block); } } @@ -25,24 +27,27 @@ export function getAllDescendantBlocks(blockRootHex: RootHex, blocks: Map, - descendantBlocks: PendingBlock[] = [] -): PendingBlock[] { + byParent: Map, + descendantBlocks: BlockInputSyncCacheItem[] = [] +): BlockInputSyncCacheItem[] { const firstDescendantBlocks = byParent.get(childBlockRootHex); if (firstDescendantBlocks) { for (const firstDescendantBlock of firstDescendantBlocks) { descendantBlocks.push(firstDescendantBlock); - addToDescendantBlocks(firstDescendantBlock.blockRootHex, byParent, descendantBlocks); + addToDescendantBlocks(getBlockInputSyncCacheItemRootHex(firstDescendantBlock), byParent, descendantBlocks); } } return descendantBlocks; } -export function getDescendantBlocks(blockRootHex: RootHex, blocks: Map): PendingBlock[] { - const descendantBlocks: PendingBlock[] = []; +export function getDescendantBlocks( + blockRootHex: RootHex, + blocks: Map +): BlockInputSyncCacheItem[] { + const descendantBlocks: BlockInputSyncCacheItem[] = []; for (const block of blocks.values()) { - if (block.parentBlockRootHex === blockRootHex) { + if ((isPendingBlockInput(block) ? block.blockInput.parentRootHex : undefined) === blockRootHex) { descendantBlocks.push(block); } } @@ -50,31 +55,43 @@ export function getDescendantBlocks(blockRootHex: RootHex, blocks: Map downloaded block n + 1 => downloaded block n + 2 - * return `{unknowns: [n], ancestors: []}` + * return `{incomplete: [n], ancestors: []}` * * Given this chain segment: downloaded block n => downloaded block n + 1 => downloaded block n + 2 - * return {unknowns: [], ancestors: [n]} + * return {incomplete: [], ancestors: [n]} */ -export function getUnknownAndAncestorBlocks(blocks: Map): UnknownAndAncestorBlocks { - const unknowns: UnknownBlock[] = []; - const ancestors: DownloadedBlock[] = []; +export function getUnknownAndAncestorBlocks(blocks: Map): UnknownAndAncestorBlocks { + const unknowns = new Map(); + const ancestors = new Map(); for (const block of blocks.values()) { - const parentHex = block.parentBlockRootHex; if ( - block.status === PendingBlockStatus.pending && - (block.blockInput?.block == null || block.blockInput?.type === BlockInputType.dataPromise) && - parentHex == null + block.status === PendingBlockInputStatus.pending && + (isPendingBlockInput(block) ? 
!block.blockInput.hasBlockAndAllData() : true) ) { - unknowns.push(block); - } - - if (block.status === PendingBlockStatus.downloaded && parentHex && !blocks.has(parentHex)) { - ancestors.push(block); + unknowns.set(getBlockInputSyncCacheItemRootHex(block), block); + } else if ( + isPendingBlockInput(block) && + block.status === PendingBlockInputStatus.downloaded && + !blocks.has(block.blockInput.parentRootHex) + ) { + ancestors.set(block.blockInput.blockRootHex, block); } } - return {unknowns, ancestors}; + return { + unknowns: Array.from(unknowns.values()), + ancestors: Array.from(ancestors.values()), + }; } diff --git a/packages/beacon-node/src/util/blobs.ts b/packages/beacon-node/src/util/blobs.ts index 245771780135..d21e251eb624 100644 --- a/packages/beacon-node/src/util/blobs.ts +++ b/packages/beacon-node/src/util/blobs.ts @@ -25,7 +25,7 @@ export function kzgCommitmentToVersionedHash(kzgCommitment: deneb.KZGCommitment) return hash; } -export function computeInclusionProof( +export function computePreFuluKzgCommitmentsInclusionProof( fork: ForkName, body: BeaconBlockBody, index: number @@ -56,7 +56,11 @@ export function getBlobSidecars( return blobKzgCommitments.map((kzgCommitment, index) => { const blob = blobs[index]; const kzgProof = proofs[index]; - const kzgCommitmentInclusionProof = computeInclusionProof(fork, signedBlock.message.body, index); + const kzgCommitmentInclusionProof = computePreFuluKzgCommitmentsInclusionProof( + fork, + signedBlock.message.body, + index + ); return {index, blob, kzgCommitment, kzgProof, signedBlockHeader, kzgCommitmentInclusionProof}; }); @@ -66,7 +70,7 @@ export function getBlobSidecars( * If the node obtains 50%+ of all the columns, it SHOULD reconstruct the full data matrix via the recover_matrix helper * See https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.4/specs/fulu/das-core.md#recover_matrix */ -export async function recoverDataColumnSidecars( +export async function dataColumnMatrixRecovery( partialSidecars: Map ): Promise { const columnCount = partialSidecars.size; @@ -156,7 +160,7 @@ export async function reconstructBlobs(sidecars: fulu.DataColumnSidecars): Promi fullSidecars = sidecars; } else { const sidecarsByIndex = new Map(sidecars.map((sc) => [sc.index, sc])); - const recoveredSidecars = await recoverDataColumnSidecars(sidecarsByIndex); + const recoveredSidecars = await dataColumnMatrixRecovery(sidecarsByIndex); if (recoveredSidecars === null) { // Should not happen because we check the column count above throw Error("Failed to reconstruct the full data matrix"); diff --git a/packages/beacon-node/src/util/clock.ts b/packages/beacon-node/src/util/clock.ts index 36eb6f6f7f2c..8304db8b36e6 100644 --- a/packages/beacon-node/src/util/clock.ts +++ b/packages/beacon-node/src/util/clock.ts @@ -1,5 +1,5 @@ import EventEmitter from "node:events"; -import {ChainForkConfig} from "@lodestar/config"; +import {ChainConfig, ChainForkConfig} from "@lodestar/config"; import {computeEpochAtSlot, computeTimeAtSlot, getCurrentSlot} from "@lodestar/state-transition"; import type {Epoch, Slot} from "@lodestar/types"; import {ErrorAborted} from "@lodestar/utils"; @@ -202,3 +202,14 @@ export class Clock extends EventEmitter implements IClock { return milliSecondsPerSlot - (diffInMilliSeconds % milliSecondsPerSlot); } } + +export function getCutoffTimeMs( + chain: {config: ChainConfig; genesisTime: number}, + blockSlot: Slot, + cutoffMsFromSlotStart: number +): number { + return Math.max( + computeTimeAtSlot(chain.config, blockSlot, 
chain.genesisTime) * 1000 + cutoffMsFromSlotStart - Date.now(), + 0 + ); +} diff --git a/packages/beacon-node/src/util/dataColumns.ts b/packages/beacon-node/src/util/dataColumns.ts index 9e6b9ac24bc5..f35dd7f4d444 100644 --- a/packages/beacon-node/src/util/dataColumns.ts +++ b/packages/beacon-node/src/util/dataColumns.ts @@ -14,23 +14,14 @@ import { fulu, } from "@lodestar/types"; import {ssz} from "@lodestar/types"; -import {bytesToBigInt} from "@lodestar/utils"; -import { - BlockInputDataColumns, - BlockSource, - DataColumnsCacheMap, - DataColumnsSource, - getBlockInput, - getBlockInputDataColumns, -} from "../chain/blocks/types.js"; -import {ChainEvent, ChainEventEmitter} from "../chain/emitter.js"; -import {BlockInputCacheType} from "../chain/seenCache/seenGossipBlockInput.js"; -import {IExecutionEngine} from "../execution/engine/interface.js"; -import {Metrics} from "../metrics/metrics.js"; +import {bytesToBigInt, LodestarError} from "@lodestar/utils"; import {NodeId} from "../network/subnets/index.js"; -import {kzgCommitmentToVersionedHash, recoverDataColumnSidecars as recover} from "./blobs.js"; -import {IClock} from "./clock.js"; import {kzg} from "./kzg.js"; +import {dataColumnMatrixRecovery} from "./blobs.js"; +import {BlockInputColumns} from "../chain/blocks/blockInput/blockInput.js"; +import {Metrics} from "../metrics/metrics.js"; +import {BlockInputSource} from "../chain/blocks/blockInput/types.js"; +import {ChainEvent, ChainEventEmitter} from "../chain/emitter.js"; export enum RecoverResult { // the recover is not attempted because we have less than `NUMBER_OF_COLUMNS / 2` columns @@ -231,7 +222,7 @@ export function getCustodyGroups(config: ChainForkConfig, nodeId: NodeId, custod return custodyGroups; } -export function computeKzgCommitmentsInclusionProof( +export function computePostFuluKzgCommitmentsInclusionProof( fork: ForkName, body: BeaconBlockBody ): fulu.KzgCommitmentsInclusionProof { @@ -252,11 +243,15 @@ export function getDataColumns(config: ChainForkConfig, nodeId: NodeId, custodyG * SPEC FUNCTION (note: spec currently computes proofs, but we already have them) * https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.4/specs/fulu/das-core.md#compute_matrix */ -export function getCellsAndProofs(blobBundles: fulu.BlobAndProofV2[]): {cells: Uint8Array[]; proofs: Uint8Array[]}[] { - return blobBundles.map(({blob, proofs}) => { - const cells = kzg.computeCells(blob); - return {cells, proofs}; - }); +export async function getCellsAndProofs( + blobBundles: fulu.BlobAndProofV2[] +): Promise<{cells: Uint8Array[]; proofs: Uint8Array[]}[]> { + const blobsAndProofs: {cells: Uint8Array[]; proofs: Uint8Array[]}[] = []; + for (const {blob, proofs} of blobBundles) { + const cells = await kzg.asyncComputeCells(blob); + blobsAndProofs.push({cells, proofs}); + } + return blobsAndProofs; } /** @@ -318,7 +313,7 @@ export function getDataColumnSidecarsFromBlock( const fork = config.getForkName(signedBlock.message.slot); const signedBlockHeader = signedBlockToSignedHeader(config, signedBlock); - const kzgCommitmentsInclusionProof = computeKzgCommitmentsInclusionProof(fork, signedBlock.message.body); + const kzgCommitmentsInclusionProof = computePostFuluKzgCommitmentsInclusionProof(fork, signedBlock.message.body); return getDataColumnSidecars(signedBlockHeader, blobKzgCommitments, kzgCommitmentsInclusionProof, cellsAndKzgProofs); } @@ -346,187 +341,95 @@ export function getDataColumnSidecarsFromColumnSidecar( * If we receive more than half of NUMBER_OF_COLUMNS (64) we should 
recover all remaining columns
+ */
 export async function recoverDataColumnSidecars(
-  dataColumnCache: DataColumnsCacheMap,
-  clock: IClock,
+  blockInput: BlockInputColumns,
+  emitter: ChainEventEmitter,
   metrics: Metrics | null
-): Promise<RecoverResult> {
-  const columnCount = dataColumnCache.size;
+): Promise<void> {
+  const existingColumns = blockInput.getAllColumns();
+  const columnCount = existingColumns.length;
   if (columnCount >= NUMBER_OF_COLUMNS) {
     // We have all columns
-    return RecoverResult.NotAttemptedFull;
+    metrics?.recoverDataColumnSidecars.reconstructionResult.inc({
+      result: DataColumnReconstructionCode.NotAttemptedAlreadyFull,
+    });
+    return;
   }
 
   if (columnCount < NUMBER_OF_COLUMNS / 2) {
     // We don't have enough columns to recover
-    return RecoverResult.NotAttemptedLessThanHalf;
+    metrics?.recoverDataColumnSidecars.reconstructionResult.inc({
+      result: DataColumnReconstructionCode.NotAttemptedHaveLessThanHalf,
+    });
+    return;
   }
 
-  const partialColumns = dataColumnCache.size;
-  metrics?.recoverDataColumnSidecars.custodyBeforeReconstruction.set(partialColumns);
+  metrics?.recoverDataColumnSidecars.custodyBeforeReconstruction.set(columnCount);
 
   const partialSidecars = new Map<number, fulu.DataColumnSidecar>();
-  for (const [columnIndex, {dataColumn}] of dataColumnCache.entries()) {
+  for (const columnSidecar of existingColumns) {
     // the more columns we put, the slower the recover
     if (partialSidecars.size >= NUMBER_OF_COLUMNS / 2) {
       break;
     }
-    partialSidecars.set(columnIndex, dataColumn);
+    partialSidecars.set(columnSidecar.index, columnSidecar);
   }
 
-  const timer = metrics?.peerDas.dataColumnsReconstructionTime.startTimer();
+  const timer = metrics?.recoverDataColumnSidecars.recoverTime.startTimer();
-  // if this function throws, we catch at the consumer side
-  const fullSidecars = await recover(partialSidecars);
+  // recovery errors resolve to null and are counted as ReconstructionFailed below
+  const fullSidecars = await dataColumnMatrixRecovery(partialSidecars).catch(() => null);
   timer?.();
   if (fullSidecars == null) {
-    return RecoverResult.Failed;
-  }
-
-  const firstDataColumn = dataColumnCache.values().next().value?.dataColumn;
-  if (firstDataColumn == null) {
-    // should not happen because we checked the size of the cache before this
-    throw new Error("No data column found in cache to recover from");
+    metrics?.recoverDataColumnSidecars.reconstructionResult.inc({
+      result: DataColumnReconstructionCode.ReconstructionFailed,
+    });
+    return;
   }
 
-  const slot = firstDataColumn.signedBlockHeader.message.slot;
-  const secFromSlot = clock.secFromSlot(slot);
-  metrics?.recoverDataColumnSidecars.elapsedTimeTillReconstructed.observe(secFromSlot);
-
-  if (dataColumnCache.size === NUMBER_OF_COLUMNS) {
+  if (blockInput.getAllColumns().length === NUMBER_OF_COLUMNS) {
     // either gossip or getBlobsV2 resolved availability while we were recovering
-    return RecoverResult.SuccessLate;
+    metrics?.recoverDataColumnSidecars.reconstructionResult.inc({
+      result: DataColumnReconstructionCode.ReceivedAllDuringReconstruction,
+    });
+    return;
   }
 
-  // We successfully recovered the data columns, update the cache
-  for (let columnIndex = 0; columnIndex < NUMBER_OF_COLUMNS; columnIndex++) {
-    if (dataColumnCache.has(columnIndex)) {
-      // We already have this column
-      continue;
-    }
-
-    const sidecar = fullSidecars[columnIndex];
-    if (sidecar === undefined) {
-      throw new Error(`full sidecars is undefined at index ${columnIndex}`);
+  // Once the node obtains a column through reconstruction,
+  // the node MUST expose the new column as if it had received it over the network.
+ // If the node is subscribed to the subnet corresponding to the column, + // it MUST send the reconstructed DataColumnSidecar to its topic mesh neighbors. + // If instead the node is not subscribed to the corresponding subnet, + // it SHOULD still expose the availability of the DataColumnSidecar as part of the gossip emission process. + // After exposing the reconstructed DataColumnSidecar to the network, + // the node MAY delete the DataColumnSidecar if it is not part of the node's custody requirement. + const sidecarsToPublish = []; + for (const columnSidecar of fullSidecars) { + if (!blockInput.hasColumn(columnSidecar.index)) { + blockInput.addColumn({ + blockRootHex: blockInput.blockRootHex, + columnSidecar, + seenTimestampSec: Date.now(), + source: BlockInputSource.recovery, + }); + sidecarsToPublish.push(columnSidecar); } - dataColumnCache.set(columnIndex, {dataColumn: sidecar, dataColumnBytes: null}); - metrics?.peerDas.reconstructedColumns.inc(NUMBER_OF_COLUMNS - partialColumns); } + emitter.emit(ChainEvent.publishDataColumns, sidecarsToPublish); - return RecoverResult.SuccessResolved; + metrics?.recoverDataColumnSidecars.reconstructionResult.inc({result: DataColumnReconstructionCode.Success}); } -export function hasSampledDataColumns(custodyConfig: CustodyConfig, dataColumnCache: DataColumnsCacheMap): boolean { - return ( - dataColumnCache.size >= custodyConfig.sampledColumns.length && - custodyConfig.sampledColumns.reduce((acc, columnIndex) => acc && dataColumnCache.has(columnIndex), true) - ); +export enum DataColumnReconstructionCode { + NotAttemptedAlreadyFull = "DATA_COLUMN_RECONSTRUCTION_NOT_ATTEMPTED_ALREADY_FULL", + NotAttemptedHaveLessThanHalf = "DATA_COLUMN_RECONSTRUCTION_NOT_ATTEMPTED_HAVE_LESS_THAN_HALF", + ReconstructionFailed = "DATA_COLUMN_RECONSTRUCTION_RECONSTRUCTION_FAILED", + ReceivedAllDuringReconstruction = "DATA_COLUMN_RECONSTRUCTION_RECEIVED_ALL_DURING_RECONSTRUCTION", + Success = "DATA_COLUMN_RECONSTRUCTION_SUCCESS", } -export async function getDataColumnsFromExecution( - config: ChainForkConfig, - custodyConfig: CustodyConfig, - executionEngine: IExecutionEngine, - emitter: ChainEventEmitter, - blockCache: BlockInputCacheType, - metrics: Metrics | null -): Promise { - if (blockCache.fork !== ForkName.fulu) { - return false; - } - - if (!blockCache.cachedData) { - // this condition should never get hit... 
just a sanity check - throw new Error("invalid blockCache"); - } - - if (blockCache.cachedData.fork !== ForkName.fulu) { - return false; - } - - // If already have all columns, exit - if (hasSampledDataColumns(custodyConfig, blockCache.cachedData.dataColumnsCache)) { - return true; - } - - let commitments: undefined | Uint8Array[]; - if (blockCache.block) { - const block = blockCache.block as fulu.SignedBeaconBlock; - commitments = block.message.body.blobKzgCommitments; - } else { - const firstSidecar = blockCache.cachedData.dataColumnsCache.values().next().value; - commitments = firstSidecar?.dataColumn.kzgCommitments; - } - - if (!commitments) { - throw new Error("blockInputCache missing both block and cachedData"); - } - - // Return if block has no blobs - if (commitments.length === 0) { - return true; - } - - // Process KZG commitments into versioned hashes - const versionedHashes: Uint8Array[] = commitments.map(kzgCommitmentToVersionedHash); - - // Get blobs from execution engine - metrics?.peerDas.getBlobsV2Requests.inc(); - const timer = metrics?.peerDas.getBlobsV2RequestDuration.startTimer(); - const blobs = await executionEngine.getBlobs(blockCache.fork, versionedHashes); - timer?.(); - - // Execution engine was unable to find one or more blobs - if (blobs === null) { - return false; - } - metrics?.peerDas.getBlobsV2Responses.inc(); - - // Return if we received all data columns while waiting for getBlobs - if (hasSampledDataColumns(custodyConfig, blockCache.cachedData.dataColumnsCache)) { - return true; - } - - let dataColumnSidecars: fulu.DataColumnSidecars; - const cellsAndProofs = getCellsAndProofs(blobs); - if (blockCache.block) { - dataColumnSidecars = getDataColumnSidecarsFromBlock( - config, - blockCache.block as fulu.SignedBeaconBlock, - cellsAndProofs - ); - } else { - const firstSidecar = blockCache.cachedData.dataColumnsCache.values().next().value; - if (!firstSidecar) { - throw new Error("blockInputCache missing both block and data column sidecar"); - } - dataColumnSidecars = getDataColumnSidecarsFromColumnSidecar(firstSidecar.dataColumn, cellsAndProofs); - } - - // Publish columns if and only if subscribed to them - const sampledColumns = custodyConfig.sampledColumns.map((columnIndex) => dataColumnSidecars[columnIndex]); - - // for columns that we already seen, it will be ignored through `ignoreDuplicatePublishError` gossip option - emitter.emit(ChainEvent.publishDataColumns, sampledColumns); - - for (const column of sampledColumns) { - blockCache.cachedData.dataColumnsCache.set(column.index, {dataColumn: column, dataColumnBytes: null}); - } - - const allDataColumns = getBlockInputDataColumns(blockCache.cachedData.dataColumnsCache, custodyConfig.sampledColumns); - // TODO: Add metrics - // metrics?.syncUnknownBlock.resolveAvailabilitySource.inc({source: BlockInputAvailabilitySource.GOSSIP}); - const blockData: BlockInputDataColumns = { - fork: blockCache.cachedData.fork, - ...allDataColumns, - dataColumnsSource: DataColumnsSource.engine, - }; - const partialColumns = blockCache.cachedData.dataColumnsCache.size; - blockCache.cachedData.resolveAvailability(blockData); - metrics?.dataColumns.bySource.inc({source: DataColumnsSource.engine}, NUMBER_OF_COLUMNS - partialColumns); - - if (blockCache.block !== undefined) { - const blockInput = getBlockInput.availableData(config, blockCache.block, BlockSource.gossip, blockData); - - blockCache.resolveBlockInput(blockInput); - } +type DataColumnReconstructionErrorType = { + code: + | 
DataColumnReconstructionCode.NotAttemptedHaveLessThanHalf
+    | DataColumnReconstructionCode.ReceivedAllDuringReconstruction
+    | DataColumnReconstructionCode.ReconstructionFailed;
+};
 
-  return true;
-}
+export class DataColumnReconstructionError extends LodestarError<DataColumnReconstructionErrorType> {}
diff --git a/packages/beacon-node/src/util/execution.ts b/packages/beacon-node/src/util/execution.ts
new file mode 100644
index 000000000000..ba3e484fab11
--- /dev/null
+++ b/packages/beacon-node/src/util/execution.ts
@@ -0,0 +1,102 @@
+import {ChainForkConfig} from "@lodestar/config";
+import {
+  getCellsAndProofs,
+  getDataColumnSidecarsFromBlock,
+  getDataColumnSidecarsFromColumnSidecar,
+} from "./dataColumns.js";
+import {IExecutionEngine} from "../execution/index.js";
+import {ChainEvent, ChainEventEmitter} from "../chain/emitter.js";
+import {BlockInputSource, IBlockInput} from "../chain/blocks/blockInput/types.js";
+import {Metrics} from "../metrics/index.js";
+import {fulu} from "@lodestar/types";
+import {isBlockInputColumns} from "../chain/blocks/blockInput/blockInput.js";
+import {ForkPostFulu} from "@lodestar/params";
+import {BLOB_AND_PROOF_V2_RPC_BYTES} from "../execution/engine/types.js";
+
+/**
+ * Post fulu, call getBlobsV2 from execution engine once per slot whenever we see either beacon_block or data_column_sidecar gossip message
+ */
+export async function getDataColumnSidecarsFromExecution(
+  config: ChainForkConfig,
+  executionEngine: IExecutionEngine,
+  emitter: ChainEventEmitter,
+  blockInput: IBlockInput,
+  metrics: Metrics | null,
+  blobAndProofBuffers?: Uint8Array[]
+): Promise<void> {
+  // If it's not a column block input, exit
+  if (!isBlockInputColumns(blockInput)) {
+    return;
+  }
+
+  // If we already have all columns, exit
+  if (blockInput.hasAllData()) {
+    return;
+  }
+
+  const versionedHashes = blockInput.getVersionedHashes();
+
+  // If there are no blobs in this block, exit
+  if (versionedHashes.length === 0) {
+    return;
+  }
+
+  // Get blobs from execution engine
+  metrics?.peerDas.getBlobsV2Requests.inc();
+  const timer = metrics?.peerDas.getBlobsV2RequestDuration.startTimer();
+  if (blobAndProofBuffers) {
+    for (let i = 0; i < versionedHashes.length; i++) {
+      if (blobAndProofBuffers[i] === undefined) {
+        blobAndProofBuffers[i] = new Uint8Array(BLOB_AND_PROOF_V2_RPC_BYTES);
+      }
+    }
+  }
+  const blobs = await executionEngine.getBlobs(
+    blockInput.forkName as ForkPostFulu,
+    versionedHashes,
+    blobAndProofBuffers
+  );
+  timer?.();
+
+  // Execution engine was unable to find one or more blobs
+  if (blobs === null) {
+    return;
+  }
+  metrics?.peerDas.getBlobsV2Responses.inc();
+
+  // Return if we received all data columns while waiting for getBlobs
+  if (blockInput.hasAllData()) {
+    return;
+  }
+
+  let dataColumnSidecars: fulu.DataColumnSidecars;
+  const cellsAndProofs = await getCellsAndProofs(blobs);
+  if (blockInput.hasBlock()) {
+    dataColumnSidecars = getDataColumnSidecarsFromBlock(
+      config,
+      blockInput.getBlock() as fulu.SignedBeaconBlock,
+      cellsAndProofs
+    );
+  } else {
+    const firstSidecar = blockInput.getAllColumns()[0];
+    dataColumnSidecars = getDataColumnSidecarsFromColumnSidecar(firstSidecar, cellsAndProofs);
+  }
+
+  // Publish columns if and only if subscribed to them
+  const previouslyMissingColumns = blockInput.getMissingSampledColumnMeta().missing;
+  const sampledColumns = previouslyMissingColumns.map((columnIndex) => dataColumnSidecars[columnIndex]);
+
+  // columns that we have already seen are ignored through the `ignoreDuplicatePublishError` gossip option
+  emitter.emit(ChainEvent.publishDataColumns, sampledColumns);
+
+  // add the previously missing sampled columns to the block input
+  const seenTimestampSec = Date.now() / 1000;
+  for (const columnSidecar of sampledColumns) {
+    blockInput.addColumn(
+      {columnSidecar, blockRootHex: blockInput.blockRootHex, source: BlockInputSource.engine, seenTimestampSec},
+      {throwOnDuplicateAdd: false} // columns may have been added while waiting
+    );
+  }
+
+  metrics?.dataColumns.bySource.inc({source: BlockInputSource.engine}, previouslyMissingColumns.length);
+}
diff --git a/packages/beacon-node/src/util/sszBytes.ts b/packages/beacon-node/src/util/sszBytes.ts
index b8c38e179973..833e161afb48 100644
--- a/packages/beacon-node/src/util/sszBytes.ts
+++ b/packages/beacon-node/src/util/sszBytes.ts
@@ -46,7 +46,7 @@ export type CommitteeBitsBase64 = string;
 const VARIABLE_FIELD_OFFSET = 4;
 const ATTESTATION_BEACON_BLOCK_ROOT_OFFSET = VARIABLE_FIELD_OFFSET + 8 + 8;
-const ROOT_SIZE = 32;
+export const ROOT_SIZE = 32;
 const SLOT_SIZE = 8;
 const COMMITTEE_INDEX_SIZE = 8;
 const ATTESTATION_DATA_SIZE = 128;
diff --git a/packages/beacon-node/src/util/wrapError.ts b/packages/beacon-node/src/util/wrapError.ts
index 3b25da203c47..61abb9ed0dad 100644
--- a/packages/beacon-node/src/util/wrapError.ts
+++ b/packages/beacon-node/src/util/wrapError.ts
@@ -20,3 +20,8 @@ export async function wrapError<T>(promise: Promise<T>): Promise<Result<T>> {
     return {err: err as Error};
   }
 }
+
+/**
+ * Some functions may want to return a result and some warnings typed as Error
+ */
+export type WarnResult<T, E extends Error = Error> = {result: T; warnings: E[] | null};
diff --git a/packages/beacon-node/test/e2e/api/lodestar/lodestar.test.ts b/packages/beacon-node/test/e2e/api/lodestar/lodestar.test.ts
index 7501460779f3..310143202dfb 100644
--- a/packages/beacon-node/test/e2e/api/lodestar/lodestar.test.ts
+++ b/packages/beacon-node/test/e2e/api/lodestar/lodestar.test.ts
@@ -15,23 +15,35 @@ describe("api / impl / validator", () => {
   describe("getLiveness endpoint", () => {
     let bn: BeaconNode | undefined;
-    const SECONDS_PER_SLOT = 2;
-    const ALTAIR_FORK_EPOCH = 0;
-    const validatorCount = 8;
     const restPort = 9596;
-    const testParams: Pick<ChainConfig, "SECONDS_PER_SLOT" | "ALTAIR_FORK_EPOCH"> = {
-      SECONDS_PER_SLOT: SECONDS_PER_SLOT,
-      ALTAIR_FORK_EPOCH: ALTAIR_FORK_EPOCH,
+    const validatorCount = 8;
+    const ELECTRA_FORK_EPOCH = 0;
+    const FULU_FORK_EPOCH = 1;
+    const SECONDS_PER_SLOT = 2;
+    const testParams: Partial<ChainConfig> = {
+      SECONDS_PER_SLOT,
+      ALTAIR_FORK_EPOCH: ELECTRA_FORK_EPOCH,
+      BELLATRIX_FORK_EPOCH: ELECTRA_FORK_EPOCH,
+      CAPELLA_FORK_EPOCH: ELECTRA_FORK_EPOCH,
+      DENEB_FORK_EPOCH: ELECTRA_FORK_EPOCH,
+      ELECTRA_FORK_EPOCH: ELECTRA_FORK_EPOCH,
+      FULU_FORK_EPOCH: FULU_FORK_EPOCH,
+      BLOB_SCHEDULE: [
+        {
+          EPOCH: 1,
+          MAX_BLOBS_PER_BLOCK: 3,
+        },
+      ],
     };
     const genesisSlotsDelay = 5;
-    const timeout = (SLOTS_PER_EPOCH + genesisSlotsDelay) * testParams.SECONDS_PER_SLOT * 1000;
+    const timeout = (SLOTS_PER_EPOCH + genesisSlotsDelay) * SECONDS_PER_SLOT * 1000;
 
     afterEach(async () => {
       if (bn) await bn.close();
     });
 
     it("Should return validator indices that are live", async () => {
-      const chainConfig: ChainConfig = {...chainConfigDef, SECONDS_PER_SLOT, ALTAIR_FORK_EPOCH};
+      const chainConfig: ChainConfig = {...chainConfigDef, ...testParams};
       const genesisValidatorsRoot = Buffer.alloc(32, 0xaa);
       const config = createBeaconConfig(chainConfig, genesisValidatorsRoot);
@@ -72,7 +84,7 @@ describe("api / impl / validator", () => {
     });
 
     it("Should return only for previous, current and next epoch", async () => {
-      const chainConfig: ChainConfig = 
{...chainConfigDef, SECONDS_PER_SLOT, ALTAIR_FORK_EPOCH}; + const chainConfig: ChainConfig = {...chainConfigDef, ...testParams}; const genesisValidatorsRoot = Buffer.alloc(32, 0xaa); const config = createBeaconConfig(chainConfig, genesisValidatorsRoot); diff --git a/packages/beacon-node/test/e2e/chain/proposerBoostReorg.test.ts b/packages/beacon-node/test/e2e/chain/proposerBoostReorg.test.ts index 70f2d87e2456..313c9bc718ee 100644 --- a/packages/beacon-node/test/e2e/chain/proposerBoostReorg.test.ts +++ b/packages/beacon-node/test/e2e/chain/proposerBoostReorg.test.ts @@ -16,12 +16,27 @@ describe("proposer boost reorg", () => { vi.setConfig({testTimeout: 60000}); const validatorCount = 8; - const testParams: Pick = { - SECONDS_PER_SLOT: 2, + const ELECTRA_FORK_EPOCH = 0; + const FULU_FORK_EPOCH = 1; + const SECONDS_PER_SLOT = 2; + const testParams: Partial = { + SECONDS_PER_SLOT, + ALTAIR_FORK_EPOCH: ELECTRA_FORK_EPOCH, + BELLATRIX_FORK_EPOCH: ELECTRA_FORK_EPOCH, + CAPELLA_FORK_EPOCH: ELECTRA_FORK_EPOCH, + DENEB_FORK_EPOCH: ELECTRA_FORK_EPOCH, + ELECTRA_FORK_EPOCH: ELECTRA_FORK_EPOCH, + FULU_FORK_EPOCH: FULU_FORK_EPOCH, // need this to make block `reorgSlot - 1` strong enough REORG_PARENT_WEIGHT_THRESHOLD: 80, // need this to make block `reorgSlot + 1` to become the head PROPOSER_SCORE_BOOST: 120, + BLOB_SCHEDULE: [ + { + EPOCH: 1, + MAX_BLOBS_PER_BLOCK: 3, + }, + ], }; const afterEachCallbacks: (() => Promise | void)[] = []; @@ -46,14 +61,14 @@ describe("proposer boost reorg", () => { it(`should reorg a late block at slot ${reorgSlot}`, async () => { // the node needs time to transpile/initialize bls worker threads const genesisSlotsDelay = 7; - const genesisTime = Math.floor(Date.now() / 1000) + genesisSlotsDelay * testParams.SECONDS_PER_SLOT; + const genesisTime = Math.floor(Date.now() / 1000) + genesisSlotsDelay * SECONDS_PER_SLOT; const testLoggerOpts: TestLoggerOpts = { level: LogLevel.debug, timestampFormat: { format: TimestampFormatCode.EpochSlot, genesisTime, slotsPerEpoch: SLOTS_PER_EPOCH, - secondsPerSlot: testParams.SECONDS_PER_SLOT, + secondsPerSlot: SECONDS_PER_SLOT, }, }; const logger = testLogger("BeaconNode", testLoggerOpts); diff --git a/packages/beacon-node/test/e2e/chain/stateCache/nHistoricalStates.test.ts b/packages/beacon-node/test/e2e/chain/stateCache/nHistoricalStates.test.ts index 875e3fe13b03..c8fb59ad68b5 100644 --- a/packages/beacon-node/test/e2e/chain/stateCache/nHistoricalStates.test.ts +++ b/packages/beacon-node/test/e2e/chain/stateCache/nHistoricalStates.test.ts @@ -23,8 +23,23 @@ describe("regen/reload states with n-historical states configuration", () => { vi.setConfig({testTimeout: 96_000}); const validatorCount = 8; - const testParams: Pick = { - SECONDS_PER_SLOT: 2, + const ELECTRA_FORK_EPOCH = 0; + const FULU_FORK_EPOCH = 1; + const SECONDS_PER_SLOT = 2; + const testParams: Partial = { + SECONDS_PER_SLOT, + ALTAIR_FORK_EPOCH: ELECTRA_FORK_EPOCH, + BELLATRIX_FORK_EPOCH: ELECTRA_FORK_EPOCH, + CAPELLA_FORK_EPOCH: ELECTRA_FORK_EPOCH, + DENEB_FORK_EPOCH: ELECTRA_FORK_EPOCH, + ELECTRA_FORK_EPOCH: ELECTRA_FORK_EPOCH, + FULU_FORK_EPOCH: FULU_FORK_EPOCH, + BLOB_SCHEDULE: [ + { + EPOCH: 1, + MAX_BLOBS_PER_BLOCK: 3, + }, + ], }; const afterEachCallbacks: (() => Promise | void)[] = []; @@ -269,14 +284,14 @@ describe("regen/reload states with n-historical states configuration", () => { wrappedIt(`${name} reorgedSlot=${reorgedSlot} reorgDistance=${reorgDistance}`, async () => { // the node needs time to transpile/initialize bls worker threads const genesisSlotsDelay 
= 7; - const genesisTime = Math.floor(Date.now() / 1000) + genesisSlotsDelay * testParams.SECONDS_PER_SLOT; + const genesisTime = Math.floor(Date.now() / 1000) + genesisSlotsDelay * SECONDS_PER_SLOT; const testLoggerOpts: TestLoggerOpts = { level: LogLevel.debug, timestampFormat: { format: TimestampFormatCode.EpochSlot, genesisTime, slotsPerEpoch: SLOTS_PER_EPOCH, - secondsPerSlot: testParams.SECONDS_PER_SLOT, + secondsPerSlot: SECONDS_PER_SLOT, }, }; @@ -354,7 +369,7 @@ describe("regen/reload states with n-historical states configuration", () => { waitForEvent( bn.chain.emitter, ChainEvent.checkpoint, - (cpSlot + genesisSlotsDelay + 1) * testParams.SECONDS_PER_SLOT * 1000, + (cpSlot + genesisSlotsDelay + 1) * SECONDS_PER_SLOT * 1000, (cp) => cp.epoch === cpEpoch ) ) @@ -377,7 +392,7 @@ describe("regen/reload states with n-historical states configuration", () => { bn.chain.emitter, routes.events.EventType.chainReorg, // reorged event happens at reorgedSlot + 1 - (reorgedSlot + 1 - cpSlot + 1) * testParams.SECONDS_PER_SLOT * 1000, + (reorgedSlot + 1 - cpSlot + 1) * SECONDS_PER_SLOT * 1000, (reorgData) => reorgData.slot === reorgedSlot + 1 ) ) diff --git a/packages/beacon-node/test/e2e/network/onWorker/dataSerialization.test.ts b/packages/beacon-node/test/e2e/network/onWorker/dataSerialization.test.ts index c247aeb1da7f..ae903cfc550f 100644 --- a/packages/beacon-node/test/e2e/network/onWorker/dataSerialization.test.ts +++ b/packages/beacon-node/test/e2e/network/onWorker/dataSerialization.test.ts @@ -5,7 +5,6 @@ import {config} from "@lodestar/config/default"; import {ForkName} from "@lodestar/params"; import {ssz} from "@lodestar/types"; import {afterAll, beforeAll, describe, expect, it} from "vitest"; -import {BlockInput, BlockInputType, BlockSource, DataColumnsSource} from "../../../../src/chain/blocks/types.js"; import {ZERO_HASH, ZERO_HASH_HEX} from "../../../../src/constants/constants.js"; import {ReqRespBridgeEventData} from "../../../../src/network/core/events.js"; import {ReqRespBridgeEvent} from "../../../../src/network/core/events.js"; @@ -79,22 +78,6 @@ describe("data serialization through worker boundary", () => { request: {method: ReqRespMethod.Status, body: statusZero}, peer: getValidPeerId(), }, - [NetworkEvent.unknownBlockParent]: { - blockInput: { - type: BlockInputType.preData, - block: ssz.capella.SignedBeaconBlock.defaultValue(), - source: BlockSource.gossip, - }, - peer, - }, - [NetworkEvent.unknownBlock]: { - rootHex: ZERO_HASH_HEX, - peer, - }, - [NetworkEvent.unknownBlockInput]: { - blockInput: getEmptyBlockInput(), - peer, - }, [NetworkEvent.pendingGossipsubMessage]: { topic: {type: GossipType.beacon_block, boundary: {fork: ForkName.altair, epoch: config.ALTAIR_FORK_EPOCH}}, msg: { @@ -253,18 +236,3 @@ describe("data serialization through worker boundary", () => { }); type Resolves> = T extends Promise ? (U extends void ? 
null : U) : never; - -function getEmptyBlockInput(): BlockInput { - // cannot return BlockInputType.dataPromise because it cannot be cloned through worker boundary - return { - block: ssz.fulu.SignedBeaconBlock.defaultValue(), - source: BlockSource.gossip, - type: BlockInputType.availableData, - blockData: { - fork: ForkName.fulu, - dataColumns: ssz.fulu.DataColumnSidecars.defaultValue(), - dataColumnsBytes: [], - dataColumnsSource: DataColumnsSource.gossip, - }, - }; -} diff --git a/packages/beacon-node/test/e2e/sync/unknownBlockSync.test.ts b/packages/beacon-node/test/e2e/sync/unknownBlockSync.test.ts index 4dff2c581878..5f812f9d5d07 100644 --- a/packages/beacon-node/test/e2e/sync/unknownBlockSync.test.ts +++ b/packages/beacon-node/test/e2e/sync/unknownBlockSync.test.ts @@ -2,13 +2,13 @@ import {fromHexString} from "@chainsafe/ssz"; import {routes} from "@lodestar/api"; import {EventData, EventType} from "@lodestar/api/lib/beacon/routes/events.js"; import {ChainConfig} from "@lodestar/config"; -import {config} from "@lodestar/config/default"; import {TimestampFormatCode} from "@lodestar/logger"; import {SLOTS_PER_EPOCH} from "@lodestar/params"; import {afterEach, describe, it, vi} from "vitest"; -import {BlockSource, getBlockInput} from "../../../src/chain/blocks/types.js"; +import {BlockInputPreData} from "../../../src/chain/blocks/blockInput/blockInput.js"; +import {BlockInputSource} from "../../../src/chain/blocks/blockInput/types.js"; +import {ChainEvent} from "../../../src/chain/emitter.js"; import {BlockError, BlockErrorCode} from "../../../src/chain/errors/index.js"; -import {NetworkEvent} from "../../../src/network/index.js"; import {INTEROP_BLOCK_HASH} from "../../../src/node/utils/interop/state.js"; import {waitForEvent} from "../../utils/events/resolver.js"; import {LogLevel, TestLoggerOpts, testLogger} from "../../utils/logger.js"; @@ -47,14 +47,14 @@ describe("sync / unknown block sync for fulu", () => { } }); - const testCases: {id: string; event: NetworkEvent}[] = [ + const testCases: {id: string; event: ChainEvent}[] = [ { id: "should do an unknown block parent sync from another BN", - event: NetworkEvent.unknownBlockParent, + event: ChainEvent.unknownParent, }, { id: "should do an unknown block sync from another BN", - event: NetworkEvent.unknownBlock, + event: ChainEvent.unknownBlockRoot, }, // TODO: new event postfulu for unknownBlockInput ]; @@ -144,27 +144,36 @@ describe("sync / unknown block sync for fulu", () => { await connected; loggerNodeA.info("Node A connected to Node B"); - const headInput = getBlockInput.preData(config, head, BlockSource.gossip); + const headInput = BlockInputPreData.createFromBlock({ + block: head, + blockRootHex: headSummary.blockRoot, + source: BlockInputSource.gossip, + seenTimestampSec: Math.floor(Date.now() / 1000), + forkName: bn.chain.config.getForkName(head.message.slot), + daOutOfRange: false, + }); switch (event) { - case NetworkEvent.unknownBlockParent: + case ChainEvent.unknownParent: await bn2.chain.processBlock(headInput).catch((e) => { - loggerNodeB.info("Error processing block", {slot: headInput.block.message.slot, code: e.type.code}); + loggerNodeB.info("Error processing block", {slot: headInput.slot, code: e.type.code}); if (e instanceof BlockError && e.type.code === BlockErrorCode.PARENT_UNKNOWN) { // Expected - bn2.network.events.emit(NetworkEvent.unknownBlockParent, { + bn2.chain.emitter.emit(ChainEvent.unknownParent, { blockInput: headInput, peer: bn2.network.peerId.toString(), + source: BlockInputSource.gossip, }); } 
else { throw e; } }); break; - case NetworkEvent.unknownBlock: - bn2.network.events.emit(NetworkEvent.unknownBlock, { + case ChainEvent.unknownBlockRoot: + bn2.chain.emitter.emit(ChainEvent.unknownBlockRoot, { rootHex: headSummary.blockRoot, peer: bn2.network.peerId.toString(), + source: BlockInputSource.gossip, }); break; default: diff --git a/packages/beacon-node/test/perf/chain/verifyImportBlocks.test.ts b/packages/beacon-node/test/perf/chain/verifyImportBlocks.test.ts index 0a37f6c6bf8d..ec62707ee936 100644 --- a/packages/beacon-node/test/perf/chain/verifyImportBlocks.test.ts +++ b/packages/beacon-node/test/perf/chain/verifyImportBlocks.test.ts @@ -3,12 +3,14 @@ import {generateKeyPair} from "@libp2p/crypto/keys"; import {config} from "@lodestar/config/default"; import {LevelDbController} from "@lodestar/db"; import {SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY, SLOTS_PER_EPOCH} from "@lodestar/params"; -import {sleep} from "@lodestar/utils"; +import {sleep, toHex} from "@lodestar/utils"; import {defaultOptions as defaultValidatorOptions} from "@lodestar/validator"; import {rangeSyncTest} from "../../../../state-transition/test/perf/params.js"; import {beforeValue} from "../../../../state-transition/test/utils/beforeValueBenchmark.js"; import {getNetworkCachedBlock, getNetworkCachedState} from "../../../../state-transition/test/utils/testFileCache.js"; -import {AttestationImportOpt, BlockSource, getBlockInput} from "../../../src/chain/blocks/types.js"; +import {BlockInputPreData} from "../../../src/chain/blocks/blockInput/blockInput.js"; +import {BlockInputSource} from "../../../src/chain/blocks/blockInput/types.js"; +import {AttestationImportOpt} from "../../../src/chain/blocks/types.js"; import {BeaconChain} from "../../../src/chain/index.js"; import {Eth1ForBlockProductionDisabled} from "../../../src/eth1/index.js"; import {ExecutionEngineDisabled} from "../../../src/execution/engine/index.js"; @@ -110,7 +112,20 @@ describe.skip("verify+import blocks - range sync perf test", () => { return chain; }, fn: async (chain) => { - const blocksImport = blocks.value.map((block) => getBlockInput.preData(chain.config, block, BlockSource.byRange)); + const blocksImport = blocks.value.map((block) => { + const blockRootHex = toHex( + chain.config.getForkTypes(block.message.slot).BeaconBlock.hashTreeRoot(block.message) + ); + const forkName = chain.config.getForkName(block.message.slot); + return BlockInputPreData.createFromBlock({ + block, + blockRootHex, + forkName, + daOutOfRange: true, + source: BlockInputSource.byRange, + seenTimestampSec: Math.floor(Date.now() / 1000), + }); + }); await chain.processChainSegment(blocksImport, { // Only skip importing attestations for finalized sync. For head sync attestation are valuable. 
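The test updates above all follow the same three-step flow introduced by this refactor: construct a `BlockInput` wrapper from the block, attach data sidecars as they arrive, then hand the input to block processing once complete. A minimal sketch of that flow, assuming `config`, `chain`, `signedBlock`, and `blobSidecars` are already in scope (they are placeholders here, not values provided by this diff, and the import paths are illustrative):

```ts
import {toRootHex} from "@lodestar/utils";
import {BlockInputBlobs, BlockInputSource} from "../../src/chain/blocks/blockInput/index.js";

const slot = signedBlock.message.slot;
const blockRootHex = toRootHex(config.getForkTypes(slot).BeaconBlock.hashTreeRoot(signedBlock.message));

// 1. Wrap the block; metadata records where and when it was first seen
const blockInput = BlockInputBlobs.createFromBlock({
  block: signedBlock,
  blockRootHex,
  forkName: config.getForkName(slot),
  source: BlockInputSource.gossip,
  seenTimestampSec: Math.floor(Date.now() / 1000),
  daOutOfRange: false,
});

// 2. Attach each blob sidecar as it arrives (gossip, byRoot, byRange, engine, ...)
for (const blobSidecar of blobSidecars) {
  blockInput.addBlob({
    blockRootHex,
    blobSidecar,
    source: BlockInputSource.gossip,
    seenTimestampSec: Math.floor(Date.now() / 1000),
  });
}

// 3. Import once both the block and all of its data are present
if (blockInput.hasBlockAndAllData()) {
  await chain.processBlock(blockInput);
}
```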
diff --git a/packages/beacon-node/test/spec/presets/fork_choice.test.ts b/packages/beacon-node/test/spec/presets/fork_choice.test.ts index 3c4b5249d64f..a80fc26f2c7f 100644 --- a/packages/beacon-node/test/spec/presets/fork_choice.test.ts +++ b/packages/beacon-node/test/spec/presets/fork_choice.test.ts @@ -3,7 +3,7 @@ import {toHexString} from "@chainsafe/ssz"; import {generateKeyPair} from "@libp2p/crypto/keys"; import {createBeaconConfig} from "@lodestar/config"; import {CheckpointWithHex, ForkChoice} from "@lodestar/fork-choice"; -import {ACTIVE_PRESET, ForkName, ForkSeq} from "@lodestar/params"; +import {ACTIVE_PRESET, ForkPostDeneb, ForkPostFulu, ForkSeq} from "@lodestar/params"; import {InputType} from "@lodestar/spec-test-util"; import {BeaconStateAllForks, isExecutionStateType, signedBlockToSignedHeader} from "@lodestar/state-transition"; import { @@ -18,31 +18,25 @@ import { ssz, sszTypesFor, } from "@lodestar/types"; -import {bnToNum, fromHex} from "@lodestar/utils"; +import {bnToNum, fromHex, toHex} from "@lodestar/utils"; import {expect} from "vitest"; import { - AttestationImportOpt, - BlobSidecarValidation, - BlobsSource, - BlockInputDataColumns, - BlockSource, - DataColumnsSource, - getBlockInput, -} from "../../../src/chain/blocks/types.js"; + BlockInputBlobs, + BlockInputColumns, + BlockInputPreData, + BlockInputSource, +} from "../../../src/chain/blocks/blockInput/index.js"; +import {AttestationImportOpt, BlobSidecarValidation} from "../../../src/chain/blocks/types.js"; import {BeaconChain, ChainEvent} from "../../../src/chain/index.js"; import {defaultChainOptions} from "../../../src/chain/options.js"; -import { - verifyDataColumnSidecar, - verifyDataColumnSidecarInclusionProof, - verifyDataColumnSidecarKzgProofs, -} from "../../../src/chain/validation/dataColumnSidecar.js"; +import {validateBlockDataColumnSidecars} from "../../../src/chain/validation/dataColumnSidecar.js"; import {ZERO_HASH_HEX} from "../../../src/constants/constants.js"; import {Eth1ForBlockProductionDisabled} from "../../../src/eth1/index.js"; import {PowMergeBlock} from "../../../src/eth1/interface.js"; import {ExecutionPayloadStatus} from "../../../src/execution/engine/interface.js"; import {ExecutionEngineMockBackend} from "../../../src/execution/engine/mock.js"; import {getExecutionEngineFromBackend} from "../../../src/execution/index.js"; -import {computeInclusionProof} from "../../../src/util/blobs.js"; +import {computePreFuluKzgCommitmentsInclusionProof} from "../../../src/util/blobs.js"; import {ClockEvent} from "../../../src/util/clock.js"; import {ClockStopped} from "../../mocks/clock.js"; import {getMockedBeaconDb} from "../../mocks/mockedBeaconDb.js"; @@ -69,7 +63,7 @@ const forkChoiceTest = (opts: {onlyPredefinedResponses: boolean}): TestRunnerFn => (fork) => { return { - testFunction: async (testcase) => { + testFunction: async (testcase, _directoryName, testCaseName) => { const {steps, anchorState} = testcase; const currentSlot = anchorState.slot; const config = getConfig(fork); @@ -205,6 +199,7 @@ const forkChoiceTest = const blockRoot = config .getForkTypes(signedBlock.message.slot) .BeaconBlock.hashTreeRoot(signedBlock.message); + const blockRootHex = toHex(blockRoot); logger.debug(`Step ${i}/${stepsLen} block`, { slot, id: step.block, @@ -222,25 +217,40 @@ const forkChoiceTest = columns = []; } + await validateBlockDataColumnSidecars( + slot, + blockRoot, + (signedBlock as SignedBeaconBlock).message.body.blobKzgCommitments.length, + columns + ); + + blockImport = 
BlockInputColumns.createFromBlock({ + forkName: fork, + block: signedBlock as SignedBeaconBlock, + blockRootHex, + custodyColumns: + // In most test cases we do not assign any custody because no columns are provided with the + // test case. For on_block_peerdas__not_available, the situation under test is exactly that data + // is not available, so block processing should fail; for that one test case, assign some default + // custody so that the await in verifyBlocksDataAvailability.ts fails. + testCaseName !== "on_block_peerdas__not_available" ? columns.map((c) => c.index) : [2, 4, 6, 8], + sampledColumns: + testCaseName !== "on_block_peerdas__not_available" + ? columns.map((c) => c.index) + : [2, 4, 6, 8, 10, 12, 14, 16], + source: BlockInputSource.gossip, + seenTimestampSec: 0, + daOutOfRange: false, + }); for (const column of columns) { - verifyDataColumnSidecar(column); - verifyDataColumnSidecarInclusionProof(column); - await verifyDataColumnSidecarKzgProofs( - column.kzgCommitments, - Array.from({length: column.column.length}, () => column.index), - column.column, - column.kzgProofs - ); + blockImport.addColumn({ + blockRootHex, + columnSidecar: column, + source: BlockInputSource.gossip, + seenTimestampSec: 0, + }); } - - const blockData = { - fork, - dataColumns: columns, - dataColumnsBytes: columns.map(() => null), - dataColumnsSource: DataColumnsSource.gossip, - } as BlockInputDataColumns; - - blockImport = getBlockInput.availableData(config, signedBlock, BlockSource.gossip, blockData); } else if (forkSeq >= ForkSeq.deneb && forkSeq < ForkSeq.fulu) { if (blobs === undefined) { // seems like some deneb tests don't have this and we are supposed to assume empty @@ -266,17 +276,39 @@ const forkChoiceTest = kzgCommitment: commitments[index], kzgProof: (proofs ??
[])[index], signedBlockHeader: signedBlockToSignedHeader(config, signedBlock), - kzgCommitmentInclusionProof: computeInclusionProof(fork, signedBlock.message.body, index), + kzgCommitmentInclusionProof: computePreFuluKzgCommitmentsInclusionProof( + fork, + signedBlock.message.body, + index + ), }; }); - blockImport = getBlockInput.availableData(config, signedBlock, BlockSource.gossip, { - fork: ForkName.deneb, - blobs: blobSidecars, - blobsSource: BlobsSource.gossip, + blockImport = BlockInputBlobs.createFromBlock({ + forkName: fork, + block: signedBlock as SignedBeaconBlock, + blockRootHex, + source: BlockInputSource.gossip, + seenTimestampSec: 0, + daOutOfRange: false, }); + for (const blob of blobSidecars) { + blockImport.addBlob({ + blockRootHex, + blobSidecar: blob, + source: BlockInputSource.gossip, + seenTimestampSec: 0, + }); + } } else { - blockImport = getBlockInput.preData(config, signedBlock, BlockSource.gossip); + blockImport = BlockInputPreData.createFromBlock({ + forkName: fork, + block: signedBlock as SignedBeaconBlock, + blockRootHex, + source: BlockInputSource.gossip, + seenTimestampSec: 0, + daOutOfRange: false, + }); } await chain.processBlock(blockImport, { @@ -465,7 +497,8 @@ const forkChoiceTest = attesterSlashings, }; }, - timeout: 10000, + // timeout needs to be set longer than BLOB_AVAILABILITY_TIMEOUT so that on_block_peerdas__not_available fails + timeout: 15000, expectFunc: () => {}, // Do not manually skip tests here, do it in packages/beacon-node/test/spec/presets/index.test.ts // EXCEPTION : this test is skipped here because prefix match can't be done for this particular test diff --git a/packages/beacon-node/test/spec/utils/types.ts b/packages/beacon-node/test/spec/utils/types.ts index 00bebffd7d7e..f148674c7bca 100644 --- a/packages/beacon-node/test/spec/utils/types.ts +++ b/packages/beacon-node/test/spec/utils/types.ts @@ -11,7 +11,7 @@ export type TestRunnerFn = ( testHandler: string, testSuite: string ) => { - testFunction: (testCase: TestCase, directoryName: string) => Result | Promise<Result>; + testFunction: (testCase: TestCase, directoryName: string, testCaseName: string) => Result | Promise<Result>; options: Partial>; }; diff --git a/packages/beacon-node/test/unit/chain/blocks/blockInput.test.ts b/packages/beacon-node/test/unit/chain/blocks/blockInput.test.ts index d4b44ee9d127..f3dc93538dba 100644 --- a/packages/beacon-node/test/unit/chain/blocks/blockInput.test.ts +++ b/packages/beacon-node/test/unit/chain/blocks/blockInput.test.ts @@ -135,10 +135,8 @@ describe("BlockInput", () => { blockRootHex: rootHex, daOutOfRange: false, forkName: ForkName.deneb, - source: { - source: BlockInputSource.gossip, - seenTimestampSec: Date.now(), - }, + source: BlockInputSource.gossip, + seenTimestampSec: Date.now(), } as AddBlock & CreateBlockInputMeta); for (const blobSidecar of blobSidecars) { testArray.push({ diff --git a/packages/beacon-node/test/unit/chain/blocks/verifyBlocksSanityChecks.test.ts b/packages/beacon-node/test/unit/chain/blocks/verifyBlocksSanityChecks.test.ts index abd81e7b3e29..02a519ce1298 100644 --- a/packages/beacon-node/test/unit/chain/blocks/verifyBlocksSanityChecks.test.ts +++ b/packages/beacon-node/test/unit/chain/blocks/verifyBlocksSanityChecks.test.ts @@ -5,7 +5,8 @@ import {SignedBeaconBlock, Slot, ssz} from "@lodestar/types"; import {toHex} from "@lodestar/utils"; import {toRootHex} from "@lodestar/utils"; import {beforeEach, describe, expect, it} from "vitest"; -import {BlockSource, getBlockInput} from "../../../../src/chain/blocks/types.js";
+import {BlockInputPreData} from "../../../../src/chain/blocks/blockInput/blockInput.js"; +import {BlockInputSource} from "../../../../src/chain/blocks/blockInput/index.js"; import {verifyBlocksSanityChecks as verifyBlocksImportSanityChecks} from "../../../../src/chain/blocks/verifyBlocksSanityChecks.js"; import {BlockErrorCode} from "../../../../src/chain/errors/index.js"; import {IChainOptions} from "../../../../src/chain/options.js"; @@ -131,11 +132,24 @@ function verifyBlocksSanityChecks( ): {relevantBlocks: SignedBeaconBlock[]; parentSlots: Slot[]; parentBlock: ProtoBlock | null} { const {relevantBlocks, parentSlots, parentBlock} = verifyBlocksImportSanityChecks( modules, - blocks.map((block) => getBlockInput.preData(config, block, BlockSource.byRange)), + blocks.map((block) => { + const blockRootHex = toHex( + modules.config.getForkTypes(block.message.slot).BeaconBlock.hashTreeRoot(block.message) + ); + const forkName = modules.config.getForkName(block.message.slot); + return BlockInputPreData.createFromBlock({ + block, + blockRootHex, + forkName, + daOutOfRange: true, + source: BlockInputSource.byRange, + seenTimestampSec: Math.floor(Date.now() / 1000), + }); + }), opts ); return { - relevantBlocks: relevantBlocks.map(({block}) => block), + relevantBlocks: relevantBlocks.map((blockInput) => blockInput.getBlock()), parentSlots, parentBlock, }; diff --git a/packages/beacon-node/test/unit/chain/seenCache/seenBlockInput.test.ts b/packages/beacon-node/test/unit/chain/seenCache/seenBlockInput.test.ts index 6c11d9133a7c..68118332e2a4 100644 --- a/packages/beacon-node/test/unit/chain/seenCache/seenBlockInput.test.ts +++ b/packages/beacon-node/test/unit/chain/seenCache/seenBlockInput.test.ts @@ -1,118 +1,47 @@ -import {createChainForkConfig, defaultChainConfig} from "@lodestar/config"; -import {ForkName, ForkPostCapella, ForkPostDeneb} from "@lodestar/params"; -import {computeStartSlotAtEpoch, signedBlockToSignedHeader} from "@lodestar/state-transition"; -import {SignedBeaconBlock, deneb, ssz} from "@lodestar/types"; +import {generateKeyPair} from "@libp2p/crypto/keys"; +import {ForkName, ForkPostFulu} from "@lodestar/params"; +import {signedBlockToSignedHeader} from "@lodestar/state-transition"; +import {SignedBeaconBlock} from "@lodestar/types"; import {toRootHex} from "@lodestar/utils"; import {beforeEach, describe, expect, it} from "vitest"; import { BlockInputSource, IBlockInput, isBlockInputBlobs, + isBlockInputColumns, isBlockInputPreDeneb, } from "../../../../src/chain/blocks/blockInput/index.js"; import {ChainEvent, ChainEventEmitter} from "../../../../src/chain/emitter.js"; -import {SeenBlockInputCache} from "../../../../src/chain/seenCache/seenBlockInput.js"; +import {SeenBlockInput} from "../../../../src/chain/seenCache/seenGossipBlockInput.js"; +import {computeNodeIdFromPrivateKey} from "../../../../src/network/subnets/index.js"; import {Clock} from "../../../../src/util/clock.js"; +import {CustodyConfig} from "../../../../src/util/dataColumns.js"; +import { + config, + generateBlock, + generateBlockWithBlobSidecars, + generateChainOfBlocks, +} from "../../../utils/blocksAndData.js"; import {testLogger} from "../../../utils/logger.js"; -describe("SeenBlockInputCache", () => { - let cache: SeenBlockInputCache; +describe("SeenBlockInputCache", async () => { + let cache: SeenBlockInput; let abortController: AbortController; let chainEvents: ChainEventEmitter; - const CAPELLA_FORK_EPOCH = 0; - const DENEB_FORK_EPOCH = 1; - const ELECTRA_FORK_EPOCH = 2; - const FULU_FORK_EPOCH = 3; - 
const GLOAS_FORK_EPOCH = 4; - const config = createChainForkConfig({ - ...defaultChainConfig, - CAPELLA_FORK_EPOCH, - DENEB_FORK_EPOCH, - ELECTRA_FORK_EPOCH, - FULU_FORK_EPOCH, - GLOAS_FORK_EPOCH, - }); - - const slots: Record = { - capella: computeStartSlotAtEpoch(CAPELLA_FORK_EPOCH), - deneb: computeStartSlotAtEpoch(DENEB_FORK_EPOCH), - electra: computeStartSlotAtEpoch(ELECTRA_FORK_EPOCH), - fulu: computeStartSlotAtEpoch(FULU_FORK_EPOCH), - gloas: computeStartSlotAtEpoch(GLOAS_FORK_EPOCH), - }; - - type BlockTestSet = { - block: SignedBeaconBlock; - blockRoot: Uint8Array; - rootHex: string; - }; - - function buildBlockTestSet(forkName: F): BlockTestSet { - const block = ssz[forkName].SignedBeaconBlock.defaultValue(); - block.message.slot = slots[forkName]; - const blockRoot = ssz[forkName].BeaconBlock.hashTreeRoot(block.message as any); - const rootHex = toRootHex(blockRoot); - return { - block, - blockRoot, - rootHex, - }; - } - - type ParentAndChildBlockTestSet = { - parentBlock: SignedBeaconBlock; - parentBlockRoot: Uint8Array; - parentRootHex: string; - childBlock: SignedBeaconBlock; - childBlockRoot: Uint8Array; - childRootHex: string; - }; - function buildParentAndChildBlockTestSet( - forkName: F - ): ParentAndChildBlockTestSet { - const {block: parentBlock, blockRoot: parentBlockRoot, rootHex: parentRootHex} = buildBlockTestSet(forkName); - const {block: childBlock, blockRoot: childBlockRoot, rootHex: childRootHex} = buildBlockTestSet(forkName); - childBlock.message.slot = parentBlock.message.slot + 1; - childBlock.message.parentRoot = parentBlockRoot; - return { - parentBlock, - parentBlockRoot, - parentRootHex, - childBlock, - childBlockRoot, - childRootHex, - }; - } - - type BlockAndBlobTestSet = BlockTestSet & { - blobSidecar: deneb.BlobSidecar; - }; - function buildBlockAndBlobTestSet(forkName: ForkPostDeneb): BlockAndBlobTestSet { - const {block, blockRoot, rootHex} = buildBlockTestSet(forkName); - const commitment = Buffer.alloc(48, 0x77); - block.message.body.blobKzgCommitments = [commitment]; - const signedBlockHeader = signedBlockToSignedHeader(config, block); - const blobSidecar = ssz[forkName].BlobSidecar.defaultValue(); - blobSidecar.signedBlockHeader = signedBlockHeader; - blobSidecar.kzgCommitment = commitment; - - return { - block, - blockRoot, - rootHex, - blobSidecar, - }; - } - + const privateKey = await generateKeyPair("secp256k1"); + const nodeId = computeNodeIdFromPrivateKey(privateKey); + const custodyConfig = new CustodyConfig({config, nodeId}); const logger = testLogger(); + beforeEach(() => { chainEvents = new ChainEventEmitter(); abortController = new AbortController(); const signal = abortController.signal; const genesisTime = Math.floor(Date.now() / 1000); - cache = new SeenBlockInputCache({ + cache = new SeenBlockInput({ config, + custodyConfig, clock: new Clock({config, genesisTime, signal}), chainEvents, signal, @@ -120,20 +49,24 @@ describe("SeenBlockInputCache", () => { metrics: null, }); }); + describe("has()", () => { it("should return true if in cache", () => { - const {block, rootHex} = buildBlockTestSet(ForkName.capella); + const {block, rootHex} = generateBlock({forkName: ForkName.capella}); cache.getByBlock({ block, + blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); expect(cache.has(rootHex)).toBeTruthy(); }); + it("should return false if not in cache", () => { - const {block, blockRoot, rootHex} = buildBlockTestSet(ForkName.capella); + const {block, blockRoot, rootHex} = generateBlock({forkName: 
ForkName.capella}); cache.getByBlock({ block, + blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); @@ -144,20 +77,24 @@ describe("SeenBlockInputCache", () => { expect(cache.has(toRootHex(blockRoot))).toBeFalsy(); }); }); + describe("get()", () => { it("should return BlockInput if in cache", () => { - const {block, rootHex} = buildBlockTestSet(ForkName.capella); + const {block, rootHex} = generateBlock({forkName: ForkName.capella}); const blockInput = cache.getByBlock({ block, + blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); expect(cache.get(rootHex)).toBe(blockInput); }); + it("should return undefined if not in cache", () => { - const {block, blockRoot, rootHex} = buildBlockTestSet(ForkName.capella); + const {block, blockRoot, rootHex} = generateBlock({forkName: ForkName.capella}); const blockInput = cache.getByBlock({ block, + blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); @@ -168,11 +105,13 @@ describe("SeenBlockInputCache", () => { expect(cache.get(toRootHex(blockRoot))).toBeUndefined(); }); }); + describe("remove()", () => { it("should remove a BlockInput", () => { - const {block, rootHex} = buildBlockTestSet(ForkName.capella); + const {block, rootHex} = generateBlock({forkName: ForkName.capella}); const blockInput = cache.getByBlock({ block, + blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); @@ -180,10 +119,12 @@ describe("SeenBlockInputCache", () => { cache.remove(rootHex); expect(cache.get(rootHex)).toBeUndefined(); }); + it("should not throw an error if BlockInput not in cache", () => { - const {block, blockRoot, rootHex} = buildBlockTestSet(ForkName.capella); + const {block, blockRoot, rootHex} = generateBlock({forkName: ForkName.capella}); const blockInput = cache.getByBlock({ block, + blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); @@ -195,11 +136,13 @@ describe("SeenBlockInputCache", () => { expect(cache.has(rootHex)).toBeTruthy(); }); }); + describe("prune()", () => { it("should remove a BlockInput", () => { - const {block, rootHex} = buildBlockTestSet(ForkName.capella); + const {block, rootHex} = generateBlock({forkName: ForkName.capella}); const blockInput = cache.getByBlock({ block, + blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); @@ -207,11 +150,17 @@ describe("SeenBlockInputCache", () => { cache.prune(rootHex); expect(cache.get(rootHex)).toBeUndefined(); }); + it("should remove all ancestors of a BlockInput", () => { - const {parentBlock, parentRootHex, childBlock, childRootHex} = buildParentAndChildBlockTestSet(ForkName.capella); + const blocks = generateChainOfBlocks({forkName: ForkName.capella, count: 2}); + const parentBlock = blocks[0].block; + const parentRootHex = blocks[0].rootHex; + const childBlock = blocks[1].block; + const childRootHex = blocks[1].rootHex; const parentBlockInput = cache.getByBlock({ block: parentBlock, + blockRootHex: parentRootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); @@ -219,6 +168,7 @@ describe("SeenBlockInputCache", () => { const childBlockInput = cache.getByBlock({ block: childBlock, + blockRootHex: childRootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); @@ -229,6 +179,7 @@ describe("SeenBlockInputCache", () => { expect(cache.get(parentRootHex)).toBeUndefined(); }); }); + describe("onFinalized()", () => { let childRootHex: string; let 
childBlockInput: IBlockInput; @@ -237,17 +188,17 @@ describe("SeenBlockInputCache", () => { const root = Buffer.alloc(32, 0xff); const rootHex = toRootHex(root); beforeEach(() => { - const { - parentBlock, - parentRootHex: parentRoot, - childBlock, - childRootHex: childRoot, - } = buildParentAndChildBlockTestSet(ForkName.capella); + const blocks = generateChainOfBlocks({forkName: ForkName.capella, count: 2}); + const parentBlock = blocks[0].block; + const parentRoot = blocks[0].rootHex; + const childBlock = blocks[1].block; + const childRoot = blocks[1].rootHex; parentRootHex = parentRoot; childRootHex = childRoot; parentBlockInput = cache.getByBlock({ block: parentBlock, + blockRootHex: parentRootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); @@ -255,23 +206,26 @@ describe("SeenBlockInputCache", () => { childBlockInput = cache.getByBlock({ block: childBlock, + blockRootHex: childRootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); expect(cache.get(childRootHex)).toBe(childBlockInput); }); + it("should remove all BlockInputs in slots before the checkpoint", () => { chainEvents.emit(ChainEvent.forkChoiceFinalized, { - epoch: DENEB_FORK_EPOCH, + epoch: config.DENEB_FORK_EPOCH, root, rootHex, }); expect(cache.get(childRootHex)).toBeUndefined(); expect(cache.get(parentRootHex)).toBeUndefined(); }); + it("should not remove BlockInputs in slots after the checkpoint", () => { chainEvents.emit(ChainEvent.forkChoiceFinalized, { - epoch: CAPELLA_FORK_EPOCH, + epoch: config.CAPELLA_FORK_EPOCH, root, rootHex, }); @@ -279,173 +233,222 @@ describe("SeenBlockInputCache", () => { expect(cache.get(parentRootHex)).toBe(parentBlockInput); }); }); + describe("getByBlock()", () => { it("should return a new BlockInput for a new block root", () => { - const {block, rootHex} = buildBlockTestSet(ForkName.capella); + const {block, rootHex} = generateBlock({forkName: ForkName.capella}); expect(cache.get(rootHex)).toBeUndefined(); const blockInput = cache.getByBlock({ block, + blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); expect(cache.get(rootHex)).toBe(blockInput); }); + describe("should return the correct type of BlockInput for a given block root", () => { it("should return a BlockInputPreDeneb", () => { - const {block} = buildBlockTestSet(ForkName.capella); + const {block, rootHex} = generateBlock({forkName: ForkName.capella}); const blockInput = cache.getByBlock({ block, + blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); expect(isBlockInputPreDeneb(blockInput)).toBeTruthy(); }); + it("should return a BlockInputBlobs", () => { - const {block} = buildBlockTestSet(ForkName.deneb); + const {block, rootHex} = generateBlock({forkName: ForkName.deneb}); const blockInput = cache.getByBlock({ block, + blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); expect(isBlockInputBlobs(blockInput)).toBeTruthy(); }); - // TODO(fulu): need to turn this on once we have custodyConfig available with peerDAS branch - // it("should return a BlockInputColumns", () => { - // const {block} = buildBlockTestSet(ForkName.fulu); - // const blockInput = cache.getByBlock({ - // block, - // source: BlockInputSource.gossip, - // seenTimestampSec: Date.now(), - // }); - // expect(isBlockInputColumns(blockInput)).toBeTruthy(); - // }); + + it("should return a BlockInputColumns", () => { + const {block, rootHex} = generateBlock({forkName: ForkName.fulu}); + const blockInput = 
cache.getByBlock({ + block, + blockRootHex: rootHex, + source: BlockInputSource.gossip, + seenTimestampSec: Date.now(), + }); + expect(isBlockInputColumns(blockInput)).toBeTruthy(); + }); }); + it("should return the same BlockInput for an existing block root", () => { - const {block, rootHex} = buildBlockTestSet(ForkName.capella); + const {block, rootHex} = generateBlock({forkName: ForkName.capella}); const blockInput1 = cache.getByBlock({ block, + blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); expect(cache.get(rootHex)).toBe(blockInput1); const blockInput2 = cache.getByBlock({ block, + blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); expect(blockInput1).toBe(blockInput2); }); + it("should not throw for a BlockInput with an existing block", () => { - const {block, rootHex} = buildBlockTestSet(ForkName.capella); + const {block, rootHex} = generateBlock({forkName: ForkName.capella}); const blockInput = cache.getByBlock({ block, + blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); expect(() => blockInput.addBlock({ - block, + block: block as SignedBeaconBlock, blockRootHex: rootHex, - source: {source: BlockInputSource.gossip, seenTimestampSec: Date.now()}, + source: BlockInputSource.gossip, + seenTimestampSec: Date.now(), }) ).toThrow(); expect(() => cache.getByBlock({ block, + blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }) ).not.toThrow(); }); + it("should return the correct BlockInput for a BlockInput created by blob", () => { - const {block, blobSidecar} = buildBlockAndBlobTestSet(ForkName.deneb); + const {block, blobSidecars, rootHex} = generateBlockWithBlobSidecars({forkName: ForkName.deneb, count: 1}); const blockInput1 = cache.getByBlob({ - blobSidecar, + blobSidecar: blobSidecars[0], + blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); const blockInput2 = cache.getByBlock({ block, + blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); expect(blockInput1).toBe(blockInput2); }); + + it("should return the correct BlockInput for a BlockInput created by column", () => { + // const {block, columnSidecar} = buildBlockAndBlobTestSet(ForkName.fulu); + // const blockInput1 = cache.getByColumn({ + // columnSidecar, + // source: BlockInputSource.gossip, + // seenTimestampSec: Date.now(), + // }); + // const blockInput2 = cache.getByBlock({ + // block, + // source: BlockInputSource.gossip, + // seenTimestampSec: Date.now(), + // }); + // expect(blockInput1).toBe(blockInput2); + }); }); + describe("getByBlob()", () => { it("should return a new BlockInput for a new block root", () => { - const {rootHex, blobSidecar} = buildBlockAndBlobTestSet(ForkName.electra); + const {rootHex, blobSidecars} = generateBlockWithBlobSidecars({forkName: ForkName.deneb, count: 1}); expect(cache.get(rootHex)).toBeUndefined(); const blockInput = cache.getByBlob({ - blobSidecar, + blobSidecar: blobSidecars[0], + blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); expect(cache.get(rootHex)).toBe(blockInput); }); + it("should return the same BlockInput for an existing block root", () => { - const {rootHex, blobSidecar} = buildBlockAndBlobTestSet(ForkName.electra); + const {rootHex, blobSidecars} = generateBlockWithBlobSidecars({forkName: ForkName.deneb, count: 1}); const blockInput1 = cache.getByBlob({ - blobSidecar, + blobSidecar: blobSidecars[0], + 
blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); expect(cache.get(rootHex)).toBe(blockInput1); const blockInput2 = cache.getByBlob({ - blobSidecar, + blobSidecar: blobSidecars[0], + blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); expect(blockInput1).toBe(blockInput2); }); + it("should throw if attempting to add a blob to wrong type of BlockInput", () => { - const {block} = buildBlockTestSet(ForkName.capella); + const {block, rootHex} = generateBlock({forkName: ForkName.capella}); const blockInput = cache.getByBlock({ block, + blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); expect(isBlockInputPreDeneb(blockInput)).toBeTruthy(); - const {blobSidecar} = buildBlockAndBlobTestSet(ForkName.electra); - blobSidecar.signedBlockHeader = signedBlockToSignedHeader(config, block); + const {blobSidecars} = generateBlockWithBlobSidecars({forkName: ForkName.deneb, count: 1}); + blobSidecars[0].signedBlockHeader = signedBlockToSignedHeader(config, block); expect(() => - cache.getByBlob({blobSidecar, source: BlockInputSource.gossip, seenTimestampSec: Date.now()}) + cache.getByBlob({ + blobSidecar: blobSidecars[0], + blockRootHex: rootHex, + source: BlockInputSource.gossip, + seenTimestampSec: Date.now(), + }) ).toThrow(); }); + it("should add blob to an existing BlockInput", () => { - const {block, blobSidecar} = buildBlockAndBlobTestSet(ForkName.electra); + const {block, blobSidecars, rootHex} = generateBlockWithBlobSidecars({forkName: ForkName.deneb, count: 1}); const blockInput1 = cache.getByBlock({ block, + blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); const blockInput2 = cache.getByBlob({ - blobSidecar, + blobSidecar: blobSidecars[0], + blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); expect(blockInput1).toBe(blockInput2); - expect(blockInput2.getBlobs()[0]).toBe(blobSidecar); + expect(blockInput2.getBlobs()[0]).toBe(blobSidecars[0]); }); + it("should not throw for a BlockInput with an existing blob", () => { - const {rootHex, blobSidecar} = buildBlockAndBlobTestSet(ForkName.electra); + const {rootHex, blobSidecars} = generateBlockWithBlobSidecars({forkName: ForkName.deneb, count: 1}); expect(cache.get(rootHex)).toBeUndefined(); const blockInput = cache.getByBlob({ - blobSidecar, + blobSidecar: blobSidecars[0], + blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }); expect(cache.get(rootHex)).toBe(blockInput); expect(() => blockInput.addBlob({ - blobSidecar, + blobSidecar: blobSidecars[0], source: BlockInputSource.gossip, seenTimestampSec: Date.now(), blockRootHex: rootHex, @@ -453,19 +456,22 @@ describe("SeenBlockInputCache", () => { ).toThrow(); expect(() => cache.getByBlob({ - blobSidecar, + blobSidecar: blobSidecars[0], + blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }) ).not.toThrow(); }); + it("should throw for an existing blob with opts.throwGossipErrorIfAlreadyKnown", () => { - const {rootHex, blobSidecar} = buildBlockAndBlobTestSet(ForkName.electra); + const {rootHex, blobSidecars} = generateBlockWithBlobSidecars({forkName: ForkName.deneb, count: 1}); expect(cache.get(rootHex)).toBeUndefined(); const blockInput = cache.getByBlob( { - blobSidecar, + blobSidecar: blobSidecars[0], + blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }, @@ -475,7 +481,8 @@ 
describe("SeenBlockInputCache", () => { expect(() => cache.getByBlob( { - blobSidecar, + blobSidecar: blobSidecars[0], + blockRootHex: rootHex, source: BlockInputSource.gossip, seenTimestampSec: Date.now(), }, @@ -484,4 +491,115 @@ describe("SeenBlockInputCache", () => { ).toThrow(); }); }); + + // describe("getByColumn()", () => { + // it("should return a new BlockInput for a new block root", () => { + // const {rootHex, blobSidecar} = buildBlockAndBlobTestSet(ForkName.electra); + // expect(cache.get(rootHex)).toBeUndefined(); + // const blockInput = cache.getByBlob({ + // blobSidecar, + // source: BlockInputSource.gossip, + // seenTimestampSec: Date.now(), + // }); + // expect(cache.get(rootHex)).toBe(blockInput); + // }); + // it("should return the same BlockInput for an existing block root", () => { + // const {rootHex, blobSidecar} = buildBlockAndBlobTestSet(ForkName.electra); + + // const blockInput1 = cache.getByBlob({ + // blobSidecar, + // source: BlockInputSource.gossip, + // seenTimestampSec: Date.now(), + // }); + // expect(cache.get(rootHex)).toBe(blockInput1); + // const blockInput2 = cache.getByBlob({ + // blobSidecar, + // source: BlockInputSource.gossip, + // seenTimestampSec: Date.now(), + // }); + // expect(blockInput1).toBe(blockInput2); + // }); + // it("should throw if attempting to add a blob to wrong type of BlockInput", () => { + // const {block} = buildBlockTestSet(ForkName.capella); + // const blockInput = cache.getByBlock({ + // block, + // source: BlockInputSource.gossip, + // seenTimestampSec: Date.now(), + // }); + // expect(isBlockInputPreDeneb(blockInput)).toBeTruthy(); + + // const {blobSidecar} = buildBlockAndBlobTestSet(ForkName.electra); + // blobSidecar.signedBlockHeader = signedBlockToSignedHeader(config, block); + // expect(() => + // cache.getByBlob({blobSidecar, source: BlockInputSource.gossip, seenTimestampSec: Date.now()}) + // ).toThrow(); + // }); + // it("should add blob to an existing BlockInput", () => { + // const {block, blobSidecar} = buildBlockAndBlobTestSet(ForkName.electra); + + // const blockInput1 = cache.getByBlock({ + // block, + // source: BlockInputSource.gossip, + // seenTimestampSec: Date.now(), + // }); + // const blockInput2 = cache.getByBlob({ + // blobSidecar, + // source: BlockInputSource.gossip, + // seenTimestampSec: Date.now(), + // }); + + // expect(blockInput1).toBe(blockInput2); + // expect(blockInput2.getBlobs()[0]).toBe(blobSidecar); + // }); + // it("should not throw for a BlockInput with an existing blob", () => { + // const {rootHex, blobSidecar} = buildBlockAndBlobTestSet(ForkName.electra); + + // expect(cache.get(rootHex)).toBeUndefined(); + // const blockInput = cache.getByBlob({ + // blobSidecar, + // source: BlockInputSource.gossip, + // seenTimestampSec: Date.now(), + // }); + // expect(cache.get(rootHex)).toBe(blockInput); + // expect(() => + // blockInput.addBlob({ + // blobSidecar, + // source: BlockInputSource.gossip, + // seenTimestampSec: Date.now(), + // blockRootHex: rootHex, + // }) + // ).toThrow(); + // expect(() => + // cache.getByBlob({ + // blobSidecar, + // source: BlockInputSource.gossip, + // seenTimestampSec: Date.now(), + // }) + // ).not.toThrow(); + // }); + // it("should throw for an existing blob with opts.throwGossipErrorIfAlreadyKnown", () => { + // const {rootHex, blobSidecar} = buildBlockAndBlobTestSet(ForkName.electra); + + // expect(cache.get(rootHex)).toBeUndefined(); + // const blockInput = cache.getByBlob( + // { + // blobSidecar, + // source: BlockInputSource.gossip, + // 
seenTimestampSec: Date.now(), + // }, + // {throwErrorIfAlreadyKnown: true} + // ); + // expect(cache.get(rootHex)).toBe(blockInput); + // expect(() => + // cache.getByBlob( + // { + // blobSidecar, + // source: BlockInputSource.gossip, + // seenTimestampSec: Date.now(), + // }, + // {throwErrorIfAlreadyKnown: true} + // ) + // ).toThrow(); + // }); + // }); }); diff --git a/packages/beacon-node/test/unit/chain/seenCache/seenGossipBlockInput.test.ts b/packages/beacon-node/test/unit/chain/seenCache/seenGossipBlockInput.test.ts deleted file mode 100644 index 36ca1bdc9f92..000000000000 --- a/packages/beacon-node/test/unit/chain/seenCache/seenGossipBlockInput.test.ts +++ /dev/null @@ -1,207 +0,0 @@ -import {createBeaconConfig, createChainForkConfig, defaultChainConfig} from "@lodestar/config"; -import {ssz} from "@lodestar/types"; -import {describe, expect, it} from "vitest"; - -import {ZERO_HASH_HEX} from "@lodestar/params"; -import {BlockInput, BlockInputType, GossipedInputType} from "../../../../src/chain/blocks/types.js"; -import {ChainEventEmitter} from "../../../../src/chain/emitter.js"; -import { - BlockInputMetaPendingBlockWithBlobs, - SeenGossipBlockInput, -} from "../../../../src/chain/seenCache/seenGossipBlockInput.js"; -import {getExecutionEngineFromBackend} from "../../../../src/execution/engine/index.js"; -import {ExecutionEngineMockBackend} from "../../../../src/execution/engine/mock.js"; -import {computeNodeId} from "../../../../src/network/subnets/index.js"; -import {IClock} from "../../../../src/util/clock.js"; -import {CustodyConfig} from "../../../../src/util/dataColumns.js"; -import {testLogger} from "../../../utils/logger.js"; -import {getValidPeerId} from "../../../utils/peer.js"; - -describe("SeenGossipBlockInput", () => { - const chainConfig = createChainForkConfig({ - ...defaultChainConfig, - ALTAIR_FORK_EPOCH: 0, - BELLATRIX_FORK_EPOCH: 0, - DENEB_FORK_EPOCH: 0, - }); - const genesisValidatorsRoot = Buffer.alloc(32, 0xaa); - const config = createBeaconConfig(chainConfig, genesisValidatorsRoot); - const nodeId = computeNodeId(getValidPeerId()); - - // Execution engine - const executionEngineBackend = new ExecutionEngineMockBackend({ - onlyPredefinedResponses: false, - genesisBlockHash: ZERO_HASH_HEX, - }); - const controller = new AbortController(); - const executionEngine = getExecutionEngineFromBackend(executionEngineBackend, { - signal: controller.signal, - logger: testLogger("executionEngine"), - }); - - const emitter = new ChainEventEmitter(); - // Not used in this test, but required by the constructor - const unusedClock = {} as unknown as IClock; - - const seenGossipBlockInput = new SeenGossipBlockInput( - new CustodyConfig({nodeId, config}), - executionEngine, - emitter, - unusedClock, - testLogger("seenGossipBlockInput") - ); - - // array of numBlobs, events where events are array of - // [block|blob11|blob2, pd | bp | null | error string reflecting the expected result] - const testCases: [string, number, [string, string | null][]][] = [ - ["no blobs", 0, [["block", "pd"]]], - [ - "1 blob, block first", - 1, - [ - ["block", "bp"], - ["blob0", "pd"], - ], - ], - [ - "1 blob, blob first", - 1, - [ - ["blob0", null], - ["block", "pd"], - ], - ], - [ - "6 blobs, block first", - 6, - [ - ["block", "bp"], - ["blob1", "bp"], - ["blob0", "bp"], - ["blob5", "bp"], - ["blob4", "bp"], - ["blob2", "bp"], - ["blob3", "pd"], - ], - ], - [ - "4 blobs, block in mid", - 4, - [ - ["blob1", null], - ["blob3", null], - ["block", "bp"], - ["blob0", "bp"], - ["blob2", "pd"], - ], 
- ], - [ - "3 blobs, block in end", - 3, - [ - ["blob1", null], - ["blob0", null], - ["blob2", null], - ["block", "pd"], - ], - ], - ]; - - // lets start from a random slot to build cases - let slot = 7456; - for (const testCase of testCases) { - const [testName, numBlobs, events] = testCase; - - it(`${testName}`, () => { - const signedBlock = ssz.deneb.SignedBeaconBlock.defaultValue(); - // assign slot and increment for the next block so as to keep these block testcases distinguished - // in the cache - signedBlock.message.slot = slot++; - signedBlock.message.body.blobKzgCommitments = Array.from({length: numBlobs}, () => - ssz.deneb.KZGCommitment.defaultValue() - ); - - // create a dummy signed block header with matching body root - const bodyRoot = ssz.deneb.BeaconBlockBody.hashTreeRoot(signedBlock.message.body); - const signedBlockHeader = ssz.phase0.SignedBeaconBlockHeader.defaultValue(); - signedBlockHeader.message.slot = signedBlock.message.slot; - signedBlockHeader.message.bodyRoot = bodyRoot; - - const blobSidecars = Array.from({length: numBlobs}, (_val, index) => { - const message = {...ssz.deneb.BlobSidecar.defaultValue(), signedBlockHeader, index}; - return message; - }); - - for (const testEvent of events) { - const [inputEvent, expectedRes] = testEvent; - const eventType = inputEvent.includes("block") ? GossipedInputType.block : GossipedInputType.blob; - const expectedResponseType = parseResponseType(expectedRes); - - try { - if (eventType === GossipedInputType.block) { - const blockInputRes = seenGossipBlockInput.getGossipBlockInput( - config, - { - type: GossipedInputType.block, - signedBlock, - }, - null - ); - - if (expectedResponseType instanceof Error) { - expect.fail(`expected to fail with error: ${expectedResponseType.message}`); - } else if (expectedResponseType === null) { - expect(blockInputRes).toBeNull(); - } else { - expect((blockInputRes.blockInput as BlockInput)?.type).toEqual(expectedResponseType); - } - } else { - const index = parseInt(inputEvent.split("blob")[1] ?? 
"0"); - const blobSidecar = blobSidecars[index]; - expect(blobSidecar).not.toBeUndefined(); - - const blobInputRes = seenGossipBlockInput.getGossipBlockInput( - config, - { - type: GossipedInputType.blob, - blobSidecar, - }, - null - ); - - if (expectedResponseType instanceof Error) { - expect.fail(`expected to fail with error: ${expectedResponseType.message}`); - } else if (expectedResponseType === null) { - expect(blobInputRes.blockInput.block).toBeNull(); - expect((blobInputRes.blockInputMeta as BlockInputMetaPendingBlockWithBlobs).expectedBlobs).toBeNull(); - } else { - expect((blobInputRes.blockInput as BlockInput)?.type).toEqual(expectedResponseType); - } - } - } catch (e) { - if ( - !(e as Error).message.includes("expected to fail with error") && - !(expectedResponseType instanceof Error) - ) { - expect.fail( - `expected not to fail with response=${expectedResponseType} but errored: ${(e as Error).message}` - ); - } - } - } - }); - } -}); - -function parseResponseType(expectedRes: string | null): BlockInputType | null | Error { - switch (expectedRes) { - case null: - return null; - case "pd": - return BlockInputType.availableData; - case "bp": - return BlockInputType.dataPromise; - default: - return Error(expectedRes); - } -} diff --git a/packages/beacon-node/test/unit/chain/validation/blobSidecar.test.ts b/packages/beacon-node/test/unit/chain/validation/blobSidecar.test.ts new file mode 100644 index 000000000000..9e092cb3d5b7 --- /dev/null +++ b/packages/beacon-node/test/unit/chain/validation/blobSidecar.test.ts @@ -0,0 +1,82 @@ +import {ForkName} from "@lodestar/params"; +import {ssz} from "@lodestar/types"; +import {describe, expect, it} from "vitest"; +import {BlobSidecarValidationError} from "../../../../src/chain/errors/blobSidecarError.js"; +import {validateBlockBlobSidecars} from "../../../../src/chain/validation/blobSidecar.js"; +import {generateBlockWithBlobSidecars} from "../../../utils/blocksAndData.js"; + +describe("validateBlockBlobSidecars", () => { + const {block, blockRoot, blobSidecars} = generateBlockWithBlobSidecars({forkName: ForkName.deneb}); + + it("should validate correct blob sidecars", async () => { + await expect( + validateBlockBlobSidecars( + block.message.slot, + blockRoot, + block.message.body.blobKzgCommitments.length, + blobSidecars + ) + ).resolves.toBeUndefined(); + }); + + it("should error on no blobs in block", async () => { + await expect(validateBlockBlobSidecars(block.message.slot, blockRoot, 0, blobSidecars)).rejects.toThrow( + BlobSidecarValidationError + ); + }); + + it("should error if sidecar block header doesn't match block", async () => { + const invalidSidecar = ssz.deneb.BlobSidecar.clone(blobSidecars[0]); + invalidSidecar.signedBlockHeader.message.slot += 1; // invalid slot (will change the root) + + await expect( + validateBlockBlobSidecars(block.message.slot, blockRoot, block.message.body.blobKzgCommitments.length, [ + invalidSidecar, + ]) + ).rejects.toThrow(BlobSidecarValidationError); + }); + + it("should error on invalid index", async () => { + const invalidSidecar = ssz.deneb.BlobSidecar.clone(blobSidecars[0]); + invalidSidecar.index = block.message.body.blobKzgCommitments.length; // invalid index + + await expect( + validateBlockBlobSidecars(block.message.slot, blockRoot, block.message.body.blobKzgCommitments.length, [ + invalidSidecar, + ]) + ).rejects.toThrow(BlobSidecarValidationError); + }); + + it("should error on invalid kzg commitment", async () => { + const invalidSidecar = 
ssz.deneb.BlobSidecar.clone(blobSidecars[0]); + invalidSidecar.kzgCommitment = invalidSidecar.kzgCommitment.map((b) => b ^ 1); // invalid commitment + + await expect( + validateBlockBlobSidecars(block.message.slot, blockRoot, block.message.body.blobKzgCommitments.length, [ + invalidSidecar, + ]) + ).rejects.toThrow(BlobSidecarValidationError); + }); + + it("should error on invalid kzg commitment inclusion proof", async () => { + const invalidSidecar = ssz.deneb.BlobSidecar.clone(blobSidecars[0]); + invalidSidecar.kzgCommitmentInclusionProof[0][0] ^= 1; // invalid inclusion proof + + await expect( + validateBlockBlobSidecars(block.message.slot, blockRoot, block.message.body.blobKzgCommitments.length, [ + invalidSidecar, + ]) + ).rejects.toThrow(BlobSidecarValidationError); + }); + + it("should error on invalid kzg proof", async () => { + const invalidSidecar = ssz.deneb.BlobSidecar.clone(blobSidecars[0]); + invalidSidecar.kzgProof = invalidSidecar.kzgProof.map((b) => b ^ 1); // invalid proof + + await expect( + validateBlockBlobSidecars(block.message.slot, blockRoot, block.message.body.blobKzgCommitments.length, [ + invalidSidecar, + ]) + ).rejects.toThrow(BlobSidecarValidationError); + }); +}); diff --git a/packages/beacon-node/test/unit/chain/validation/dataColumnSidecar.test.ts b/packages/beacon-node/test/unit/chain/validation/dataColumnSidecar.test.ts new file mode 100644 index 000000000000..e01d719ffca3 --- /dev/null +++ b/packages/beacon-node/test/unit/chain/validation/dataColumnSidecar.test.ts @@ -0,0 +1,88 @@ +import {ForkName, NUMBER_OF_COLUMNS} from "@lodestar/params"; +import {ssz} from "@lodestar/types"; +import {describe, expect, it} from "vitest"; +import {DataColumnSidecarValidationError} from "../../../../src/chain/errors/dataColumnSidecarError.js"; +import {validateBlockDataColumnSidecars} from "../../../../src/chain/validation/dataColumnSidecar.js"; +import {generateBlockWithColumnSidecars} from "../../../utils/blocksAndData.js"; + +describe("validateBlockDataColumnSidecars", () => { + const {block, blockRoot, columnSidecars} = generateBlockWithColumnSidecars({forkName: ForkName.fulu}); + + it("should validate correct column sidecars", async () => { + await expect( + validateBlockDataColumnSidecars( + block.message.slot, + blockRoot, + block.message.body.blobKzgCommitments.length, + columnSidecars + ) + ).resolves.toBeUndefined(); + }); + + it("should validate empty sidecars array", async () => { + await expect( + validateBlockDataColumnSidecars(block.message.slot, blockRoot, block.message.body.blobKzgCommitments.length, []) + ).resolves.toBeUndefined(); + }); + + it("should error on no blobs in block", async () => { + await expect(validateBlockDataColumnSidecars(block.message.slot, blockRoot, 0, columnSidecars)).rejects.toThrow( + DataColumnSidecarValidationError + ); + }); + + it("should error if sidecar block header doesn't match block", async () => { + const invalidSidecar = ssz.fulu.DataColumnSidecar.clone(columnSidecars[0]); + invalidSidecar.signedBlockHeader.message.slot += 1; // invalid slot (will change the root) + + await expect( + validateBlockDataColumnSidecars(block.message.slot, blockRoot, block.message.body.blobKzgCommitments.length, [ + invalidSidecar, + ]) + ).rejects.toThrow(DataColumnSidecarValidationError); + }); + + it("should error on invalid column index", async () => { + const invalidSidecar = ssz.fulu.DataColumnSidecar.clone(columnSidecars[0]); + invalidSidecar.index = NUMBER_OF_COLUMNS; // invalid index + + await expect( + 
validateBlockDataColumnSidecars(block.message.slot, blockRoot, block.message.body.blobKzgCommitments.length, [ + invalidSidecar, + ]) + ).rejects.toThrow(DataColumnSidecarValidationError); + }); + + it("should error on invalid kzg commitments", async () => { + const invalidSidecar = ssz.fulu.DataColumnSidecar.clone(columnSidecars[0]); + invalidSidecar.kzgCommitments = columnSidecars[0].kzgCommitments.map((commitment) => commitment.map((b) => b ^ 1)); // invalid commitments + + await expect( + validateBlockDataColumnSidecars(block.message.slot, blockRoot, block.message.body.blobKzgCommitments.length, [ + invalidSidecar, + ]) + ).rejects.toThrow(DataColumnSidecarValidationError); + }); + + it("should error on invalid kzg commitments inclusion proofs", async () => { + const invalidSidecar = ssz.fulu.DataColumnSidecar.clone(columnSidecars[0]); + invalidSidecar.kzgCommitmentsInclusionProof[0][0] ^= 1; // invalid inclusion proof + + await expect( + validateBlockDataColumnSidecars(block.message.slot, blockRoot, block.message.body.blobKzgCommitments.length, [ + invalidSidecar, + ]) + ).rejects.toThrow(DataColumnSidecarValidationError); + }); + + it("should error on invalid kzg proof", async () => { + const invalidSidecar = ssz.fulu.DataColumnSidecar.clone(columnSidecars[0]); + invalidSidecar.kzgProofs = columnSidecars[0].kzgProofs.map((proof) => proof.map((b) => b ^ 1)); // invalid proofs + + await expect( + validateBlockDataColumnSidecars(block.message.slot, blockRoot, block.message.body.blobKzgCommitments.length, [ + invalidSidecar, + ]) + ).rejects.toThrow(DataColumnSidecarValidationError); + }); +}); diff --git a/packages/beacon-node/test/unit/network/beaconBlocksMaybeBlobsByRange.test.ts b/packages/beacon-node/test/unit/network/beaconBlocksMaybeBlobsByRange.test.ts deleted file mode 100644 index ba3993b7b23c..000000000000 --- a/packages/beacon-node/test/unit/network/beaconBlocksMaybeBlobsByRange.test.ts +++ /dev/null @@ -1,136 +0,0 @@ -import {createBeaconConfig, createChainForkConfig, defaultChainConfig} from "@lodestar/config"; -import {ForkName} from "@lodestar/params"; -import {deneb, ssz} from "@lodestar/types"; -import {describe, expect, it} from "vitest"; - -import {BlobsSource, BlockSource, getBlockInput} from "../../../src/chain/blocks/types.js"; -import {ZERO_HASH} from "../../../src/constants/constants.js"; -import {INetwork} from "../../../src/network/interface.js"; -import {beaconBlocksMaybeBlobsByRange} from "../../../src/network/reqresp/index.js"; -import {RangeSyncType} from "../../../src/sync/utils/remoteSyncType.js"; -import {CustodyConfig} from "../../../src/util/dataColumns.js"; - -describe.skip("beaconBlocksMaybeBlobsByRange", () => { - const peerId = "Qma9T5YraSnpRDZqRR4krcSJabThc8nwZuJV3LercPHufi"; - - const chainConfig = createChainForkConfig({ - ...defaultChainConfig, - ALTAIR_FORK_EPOCH: 0, - BELLATRIX_FORK_EPOCH: 0, - CAPELLA_FORK_EPOCH: 0, - DENEB_FORK_EPOCH: 0, - ELECTRA_FORK_EPOCH: 0, - FULU_FORK_EPOCH: 0, - }); - const genesisValidatorsRoot = Buffer.alloc(32, 0xaa); - const config = createBeaconConfig(chainConfig, genesisValidatorsRoot); - const rangeRequest = ssz.phase0.BeaconBlocksByRangeRequest.defaultValue(); - rangeRequest.count = 1; - - const block1 = ssz.deneb.SignedBeaconBlock.defaultValue(); - const blockheader1 = ssz.phase0.SignedBeaconBlockHeader.defaultValue(); - blockheader1.message.slot = 1; - block1.message.slot = 1; - block1.message.body.blobKzgCommitments.push(ssz.deneb.KZGCommitment.defaultValue()); - const blobSidecar1 = 
ssz.deneb.BlobSidecar.defaultValue(); - blobSidecar1.signedBlockHeader = blockheader1; - - const block2 = ssz.deneb.SignedBeaconBlock.defaultValue(); - block2.message.slot = 2; - const blockheader2 = ssz.phase0.SignedBeaconBlockHeader.defaultValue(); - blockheader2.message.slot = 2; - - block2.message.body.blobKzgCommitments.push(ssz.deneb.KZGCommitment.defaultValue()); - const blobSidecar2 = ssz.deneb.BlobSidecar.defaultValue(); - blobSidecar2.signedBlockHeader = blockheader2; - - const block3 = ssz.deneb.SignedBeaconBlock.defaultValue(); - block3.message.slot = 3; - // no blobsidecar for block3 - - const block4 = ssz.deneb.SignedBeaconBlock.defaultValue(); - block4.message.slot = 4; - const blockheader4 = ssz.phase0.SignedBeaconBlockHeader.defaultValue(); - blockheader4.message.slot = 4; - - // two blobsidecars - block4.message.body.blobKzgCommitments.push(ssz.deneb.KZGCommitment.defaultValue()); - block4.message.body.blobKzgCommitments.push(ssz.deneb.KZGCommitment.defaultValue()); - const blobSidecar41 = ssz.deneb.BlobSidecar.defaultValue(); - - blobSidecar41.signedBlockHeader = blockheader4; - - const blobSidecar42 = ssz.deneb.BlobSidecar.defaultValue(); - blobSidecar42.signedBlockHeader = blockheader4; - blobSidecar42.index = 1; - - // Array of testcases which are array of matched blocks with/without (if empty) sidecars - const testCases: [string, [deneb.SignedBeaconBlock, deneb.BlobSidecar[] | undefined][]][] = [ - ["one block with sidecar", [[block1, [blobSidecar1]]]], - [ - "two blocks with sidecar", - [ - [block1, [blobSidecar1]], - [block2, [blobSidecar2]], - ], - ], - ["block with skipped sidecar", [[block3, undefined]]], - ["multiple blob sidecars per block", [[block4, [blobSidecar41, blobSidecar42]]]], - [ - "all blocks together", - [ - [block1, [blobSidecar1]], - [block2, [blobSidecar2]], - [block3, undefined], - [block4, [blobSidecar41, blobSidecar42]], - ], - ], - ]; - testCases.map(([testName, blocksWithBlobs]) => { - it(testName, async () => { - const blocks = blocksWithBlobs.map(([block, _blobs]) => block); - - const blobSidecars = blocksWithBlobs - .map(([_block, blobs]) => blobs as deneb.BlobSidecars) - .filter((blobs) => blobs !== undefined) - .reduce((acc, elem) => acc.concat(elem), []); - - const expectedResponse = blocksWithBlobs.map(([block, blobSidecars]) => { - const blobs = blobSidecars !== undefined ? 
blobSidecars : []; - return getBlockInput.availableData(config, block, BlockSource.byRange, { - fork: ForkName.electra, - blobs, - blobsSource: BlobsSource.byRange, - }); - }); - - const custodyConfig = new CustodyConfig({ - nodeId: new Uint8Array(32), - config, - }); - custodyConfig.sampledColumns = [2, 4, 6, 8]; - - const network = { - sendBeaconBlocksByRange: async () => - blocks.map((data) => ({ - data, - bytes: ZERO_HASH, - })), - sendBlobSidecarsByRange: async () => blobSidecars, - custodyConfig, - } as Partial as INetwork; - - const response = await beaconBlocksMaybeBlobsByRange( - config, - network, - {peerId, client: "PEER_CLIENT", custodyGroups: []}, - rangeRequest, - 0, - null, - RangeSyncType.Finalized, - null - ); - expect(response).toEqual(expectedResponse); - }); - }); -}); diff --git a/packages/beacon-node/test/unit/network/unavailableBeaconBlobsByRoot.test.ts b/packages/beacon-node/test/unit/network/unavailableBeaconBlobsByRoot.test.ts deleted file mode 100644 index b54c3ed63613..000000000000 --- a/packages/beacon-node/test/unit/network/unavailableBeaconBlobsByRoot.test.ts +++ /dev/null @@ -1,315 +0,0 @@ -import {toHexString} from "@chainsafe/ssz"; -import {createBeaconConfig, createChainForkConfig, defaultChainConfig} from "@lodestar/config"; -import {BYTES_PER_FIELD_ELEMENT, FIELD_ELEMENTS_PER_BLOB, ForkName} from "@lodestar/params"; -import {signedBlockToSignedHeader} from "@lodestar/state-transition"; -import {deneb, fulu, ssz} from "@lodestar/types"; -import {describe, expect, it, vi} from "vitest"; -import { - BlobsSource, - BlockInput, - BlockInputAvailableData, - BlockInputType, - BlockSource, - CachedData, - getBlockInput, -} from "../../../src/chain/blocks/types.js"; -import {ChainEventEmitter} from "../../../src/chain/emitter.js"; -import {getEmptyBlockInputCacheEntry} from "../../../src/chain/seenCache/seenGossipBlockInput.js"; -import {IExecutionEngine} from "../../../src/execution/index.js"; -import {INetwork} from "../../../src/network/interface.js"; -import {unavailableBeaconBlobsByRoot} from "../../../src/network/reqresp/index.js"; -import {computeNodeId} from "../../../src/network/subnets/index.js"; -import {computeInclusionProof, kzgCommitmentToVersionedHash} from "../../../src/util/blobs.js"; -import {CustodyConfig, getDataColumnSidecarsFromBlock} from "../../../src/util/dataColumns.js"; -import {kzg} from "../../../src/util/kzg.js"; -import {getValidPeerId} from "../../utils/peer.js"; - -describe("unavailableBeaconBlobsByRoot", () => { - describe("blobs", () => { - const chainConfig = createChainForkConfig({ - ...defaultChainConfig, - ALTAIR_FORK_EPOCH: 0, - BELLATRIX_FORK_EPOCH: 0, - CAPELLA_FORK_EPOCH: 0, - DENEB_FORK_EPOCH: 0, - }); - const genesisValidatorsRoot = Buffer.alloc(32, 0xaa); - const config = createBeaconConfig(chainConfig, genesisValidatorsRoot); - - const executionEngine = { - getBlobs: vi.fn(), - }; - - const network = { - sendBeaconBlocksByRoot: vi.fn(), - sendBlobSidecarsByRoot: vi.fn(), - }; - - const peerId = "mockPeerId"; - const engineGetBlobsCache = new Map(); - - it("should successfully resolve all blobs from engine and network", async () => { - // Simulate a block 1 with 5 blobs - const signedBlock = ssz.deneb.SignedBeaconBlock.defaultValue(); - signedBlock.message.slot = 1; - const blobscommitmentsandproofs = generateBlobs(5); - signedBlock.message.body.blobKzgCommitments.push(...blobscommitmentsandproofs.kzgCommitments); - const blockheader = signedBlockToSignedHeader(config, signedBlock); - - const unavailableBlockInput = 
{ - block: signedBlock, - source: BlockSource.gossip, - blockBytes: null, - type: BlockInputType.dataPromise, - cachedData: getEmptyBlockInputCacheEntry(ForkName.deneb, 1).cachedData, - } as BlockInput; - - // total of 5 blobs - // blob 0. not in cache & to resolved by getBlobs - // blob 1. not in cache & to resolved by getBlobs - // blob 2. to be found in engineGetBlobsCache - // blob 3. null cached earlier so should directly go to network query and skip engine query - // blob 4. to hit getBlobs first with null response and then go to the network query - // - // engineGetBlobsCache caches 2 fully, and null for 3 - // getBlobs should see 0,1,4 and return first two non null and last null - // network should see 3,4 - - engineGetBlobsCache.set(toHexString(blobscommitmentsandproofs.blobVersionedHashes[2]), { - blob: blobscommitmentsandproofs.blobs[2], - proof: blobscommitmentsandproofs.kzgProofs[2], - }); - engineGetBlobsCache.set(toHexString(blobscommitmentsandproofs.blobVersionedHashes[3]), null); - - // Mock execution engine to return 2 blobs - executionEngine.getBlobs.mockResolvedValueOnce([ - { - blob: blobscommitmentsandproofs.blobs[0], - proof: blobscommitmentsandproofs.kzgProofs[0], - }, - { - blob: blobscommitmentsandproofs.blobs[1], - proof: blobscommitmentsandproofs.kzgProofs[1], - }, - null, - ]); - - // Mock network to return 2 blobs - network.sendBlobSidecarsByRoot.mockResolvedValueOnce([ - { - index: 3, - blob: blobscommitmentsandproofs.blobs[3], - kzgCommitment: blobscommitmentsandproofs.kzgCommitments[3], - kzgProof: blobscommitmentsandproofs.kzgProofs[3], - signedBlockHeader: blockheader, - kzgCommitmentInclusionProof: computeInclusionProof(ForkName.deneb, signedBlock.message.body, 3), - }, - { - index: 4, - blob: blobscommitmentsandproofs.blobs[4], - kzgCommitment: blobscommitmentsandproofs.kzgCommitments[4], - kzgProof: blobscommitmentsandproofs.kzgProofs[4], - signedBlockHeader: blockheader, - kzgCommitmentInclusionProof: computeInclusionProof(ForkName.deneb, signedBlock.message.body, 4), - }, - ]); - - const result = await unavailableBeaconBlobsByRoot( - config, - network as unknown as INetwork, - peerId, - "peerClient", - unavailableBlockInput, - { - executionEngine: executionEngine as unknown as IExecutionEngine, - emitter: new ChainEventEmitter(), - engineGetBlobsCache, - } - ); - - // Check if all blobs are aggregated - const allBlobs = [ - { - index: 0, - blob: blobscommitmentsandproofs.blobs[0], - kzgCommitment: blobscommitmentsandproofs.kzgCommitments[0], - kzgProof: blobscommitmentsandproofs.kzgProofs[0], - signedBlockHeader: blockheader, - kzgCommitmentInclusionProof: computeInclusionProof(ForkName.deneb, signedBlock.message.body, 0), - }, - { - index: 1, - blob: blobscommitmentsandproofs.blobs[1], - kzgCommitment: blobscommitmentsandproofs.kzgCommitments[1], - kzgProof: blobscommitmentsandproofs.kzgProofs[1], - signedBlockHeader: blockheader, - kzgCommitmentInclusionProof: computeInclusionProof(ForkName.deneb, signedBlock.message.body, 1), - }, - { - index: 2, - blob: blobscommitmentsandproofs.blobs[2], - kzgCommitment: blobscommitmentsandproofs.kzgCommitments[2], - kzgProof: blobscommitmentsandproofs.kzgProofs[2], - signedBlockHeader: blockheader, - kzgCommitmentInclusionProof: computeInclusionProof(ForkName.deneb, signedBlock.message.body, 2), - }, - { - index: 3, - blob: blobscommitmentsandproofs.blobs[3], - kzgCommitment: blobscommitmentsandproofs.kzgCommitments[3], - kzgProof: blobscommitmentsandproofs.kzgProofs[3], - signedBlockHeader: blockheader, - 
kzgCommitmentInclusionProof: computeInclusionProof(ForkName.deneb, signedBlock.message.body, 3), - }, - { - index: 4, - blob: blobscommitmentsandproofs.blobs[4], - kzgCommitment: blobscommitmentsandproofs.kzgCommitments[4], - kzgProof: blobscommitmentsandproofs.kzgProofs[4], - signedBlockHeader: blockheader, - kzgCommitmentInclusionProof: computeInclusionProof(ForkName.deneb, signedBlock.message.body, 4), - }, - ]; - - const blockData: BlockInputAvailableData = { - fork: ForkName.deneb, - blobs: allBlobs, - blobsSource: BlobsSource.byRoot, - }; - const resolvedBlobs = getBlockInput.availableData(config, signedBlock, BlockSource.byRoot, blockData); - - const engineReqIdentifiers = [...blobscommitmentsandproofs.blobVersionedHashes]; - // versionedHashes: 1,2,4 - engineReqIdentifiers.splice(2, 2); - expect(result).toBeDefined(); - expect(executionEngine.getBlobs).toHaveBeenCalledWith("deneb", engineReqIdentifiers); - expect(result).toEqual(resolvedBlobs); - }); - }); - - describe("data columns", () => { - const chainConfig = createChainForkConfig({ - ...defaultChainConfig, - ALTAIR_FORK_EPOCH: 0, - BELLATRIX_FORK_EPOCH: 0, - CAPELLA_FORK_EPOCH: 0, - DENEB_FORK_EPOCH: 0, - ELECTRA_FORK_EPOCH: 0, - FULU_FORK_EPOCH: 0, - }); - const genesisValidatorsRoot = Buffer.alloc(32, 0xaa); - const config = createBeaconConfig(chainConfig, genesisValidatorsRoot); - - const executionEngine = { - getBlobs: vi.fn(), - }; - - const network = { - sendBeaconBlocksByRoot: vi.fn(), - sendBlobSidecarsByRoot: vi.fn(), - custodyConfig: new CustodyConfig({ - nodeId: computeNodeId(getValidPeerId()), - config, - }), - }; - - const peerId = "mockPeerId"; - const engineGetBlobsCache = new Map(); - - it("should successfully resolve all data columns from engine", async () => { - // Simulate a block 1 with 3 blobs - const signedBlock = ssz.fulu.SignedBeaconBlock.defaultValue(); - signedBlock.message.slot = 1; - const blobscommitmentsandproofs = generateBlobsWithCellProofs(3); - signedBlock.message.body.blobKzgCommitments.push(...blobscommitmentsandproofs.map((b) => b.kzgCommitment)); - - const unavailableBlockInput: BlockInput = { - block: signedBlock, - source: BlockSource.gossip, - type: BlockInputType.dataPromise, - cachedData: getEmptyBlockInputCacheEntry(ForkName.fulu, 1).cachedData as CachedData, - }; - - const blobAndProof: fulu.BlobAndProofV2[] = blobscommitmentsandproofs.map((b) => ({ - blob: b.blob, - proofs: b.cellsAndProofs.proofs, - })); - - // Mock execution engine to return all blobs - executionEngine.getBlobs.mockImplementationOnce( - (): Promise => Promise.resolve(blobAndProof) - ); - - const result = await unavailableBeaconBlobsByRoot( - config, - network as unknown as INetwork, - peerId, - "peerClient", - unavailableBlockInput, - { - executionEngine: executionEngine as unknown as IExecutionEngine, - emitter: new ChainEventEmitter(), - engineGetBlobsCache, - } - ); - - const sampledSidecars = getDataColumnSidecarsFromBlock( - config, - signedBlock, - blobscommitmentsandproofs.map((b) => b.cellsAndProofs) - ).filter((s) => network.custodyConfig.sampledColumns.includes(s.index)); - - expect(executionEngine.getBlobs).toHaveBeenCalledWith( - ForkName.fulu, - blobscommitmentsandproofs.map((b) => kzgCommitmentToVersionedHash(b.kzgCommitment)) - ); - expect(result.type).toEqual(BlockInputType.availableData); - if (result.type !== BlockInputType.availableData) throw new Error("Should not get here"); - expect(result.blockData.fork).toEqual(ForkName.fulu); - if (result.blockData.fork !== ForkName.fulu) throw new 
Error("Should not get here"); - expect(result.blockData.dataColumns).toEqual(sampledSidecars); - }); - }); -}); - -function generateBlobs(count: number): { - blobs: Uint8Array[]; - kzgCommitments: Uint8Array[]; - blobVersionedHashes: Uint8Array[]; - kzgProofs: Uint8Array[]; -} { - const blobs = Array.from({length: count}, (_, index) => generateRandomBlob(index)); - const kzgCommitments = blobs.map((blob) => kzg.blobToKzgCommitment(blob)); - const versionedHash = kzgCommitments.map((kzgCommitment) => kzgCommitmentToVersionedHash(kzgCommitment)); - const kzgProofs = blobs.map((blob, index) => kzg.computeBlobKzgProof(blob, kzgCommitments[index])); - - return { - blobs, - kzgCommitments, - blobVersionedHashes: versionedHash.map((hash) => hash), - kzgProofs, - }; -} - -function generateBlobsWithCellProofs( - count: number -): {blob: Uint8Array; cellsAndProofs: {cells: Uint8Array[]; proofs: Uint8Array[]}; kzgCommitment: Uint8Array}[] { - const blobs = Array.from({length: count}, (_, index) => generateRandomBlob(index)); - - return blobs.map((blob) => ({ - blob, - cellsAndProofs: kzg.computeCellsAndKzgProofs(blob), - kzgCommitment: kzg.blobToKzgCommitment(blob), - })); -} - -function generateRandomBlob(index: number): deneb.Blob { - const blob = new Uint8Array(FIELD_ELEMENTS_PER_BLOB * BYTES_PER_FIELD_ELEMENT); - const dv = new DataView(blob.buffer, blob.byteOffset, blob.byteLength); - - for (let i = 0; i < FIELD_ELEMENTS_PER_BLOB; i++) { - // Generate a unique value based on the index - dv.setUint32(i * BYTES_PER_FIELD_ELEMENT, index + i); - } - return blob; -} diff --git a/packages/beacon-node/test/unit/sync/range/batch.test.ts b/packages/beacon-node/test/unit/sync/range/batch.test.ts index 68d28c5c69eb..7a2b19a6c930 100644 --- a/packages/beacon-node/test/unit/sync/range/batch.test.ts +++ b/packages/beacon-node/test/unit/sync/range/batch.test.ts @@ -1,31 +1,271 @@ -import {config} from "@lodestar/config/default"; -import {SLOTS_PER_EPOCH} from "@lodestar/params"; +import {generateKeyPair} from "@libp2p/crypto/keys"; +import {ForkName} from "@lodestar/params"; import {ssz} from "@lodestar/types"; -import {describe, expect, it} from "vitest"; -import {BlockSource, getBlockInput} from "../../../../src/chain/blocks/types.js"; -import {EPOCHS_PER_BATCH} from "../../../../src/sync/constants.js"; +import {afterEach, beforeEach, describe, expect, it, vi} from "vitest"; +import {BlockInputPreData} from "../../../../src/chain/blocks/blockInput/blockInput.js"; +import {BlockInputSource} from "../../../../src/chain/blocks/blockInput/types.js"; +import {computeNodeIdFromPrivateKey} from "../../../../src/network/subnets/index.js"; import {Batch, BatchError, BatchErrorCode, BatchStatus} from "../../../../src/sync/range/batch.js"; +import {CustodyConfig} from "../../../../src/util/dataColumns.js"; +import {clock, config} from "../../../utils/blocksAndData.js"; import {expectThrowsLodestarError} from "../../../utils/errors.js"; import {validPeerIdStr} from "../../../utils/peer.js"; -describe("sync / range / batch", () => { + +/** + * Should not be called directly. Only exported for unit testing purposes + */ +// export function validateRequests({ +// config, +// daOutOfRange, +// blocksRequest, +// blobsRequest, +// columnsRequest, +// }: DownloadByRangeRequests & Pick): string { +// const startSlot = (blocksRequest?.startSlot ?? blobsRequest?.startSlot ?? columnsRequest?.startSlot) as number; +// const count = (blocksRequest?.count ?? blobsRequest?.count ?? 
columnsRequest?.count) as number; +// const slotRange = `${startSlot} - ${startSlot + count}`; +// const dataRequest = blobsRequest ?? columnsRequest; + +// if (!blocksRequest) { +// throw new DownloadByRangeError({ +// code: DownloadByRangeErrorCode.MISSING_BLOCKS_REQUEST, +// slotRange, +// }); +// } + +// if (daOutOfRange) { +// if (dataRequest) { +// throw new DownloadByRangeError( +// { +// code: DownloadByRangeErrorCode.INVALID_DATA_REQUEST, +// slotRange, +// }, +// "Cannot request data if it is outside of the availability range" +// ); +// } + +// return slotRange; +// } + +// if (!dataRequest) { +// throw new DownloadByRangeError( +// { +// code: DownloadByRangeErrorCode.MISSING_DATA_REQUEST, +// slotRange, +// }, +// "Must request data if it is available" +// ); +// } + +// if (blobsRequest && columnsRequest) { +// throw new DownloadByRangeError( +// { +// code: DownloadByRangeErrorCode.INVALID_DATA_REQUEST, +// slotRange, +// }, +// "Cannot request both blob and column data in the same slot range" +// ); +// } + +// const forkName = config.getForkName(startSlot); +// if (!isForkPostDeneb(forkName)) { +// throw new DownloadByRangeError( +// { +// code: DownloadByRangeErrorCode.INVALID_DATA_REQUEST, +// slotRange, +// }, +// "Cannot request data pre-deneb" +// ); +// } + +// if (isForkPostDeneb(forkName) && !isForkPostFulu(forkName) && !blobsRequest) { +// throw new DownloadByRangeError( +// { +// code: DownloadByRangeErrorCode.MISSING_BLOBS_REQUEST, +// slotRange, +// }, +// "Must request blobs for blob-only forks" +// ); +// } + +// if (isForkPostFulu(forkName) && !columnsRequest) { +// throw new DownloadByRangeError( +// { +// code: DownloadByRangeErrorCode.MISSING_COLUMNS_REQUEST, +// slotRange, +// }, +// "Must request columns for forks with columns" +// ); +// } + +// if (blocksRequest.startSlot !== dataRequest.startSlot) { +// throw new DownloadByRangeError({ +// code: DownloadByRangeErrorCode.START_SLOT_MISMATCH, +// blockStartSlot: blocksRequest.startSlot, +// dataStartSlot: dataRequest.startSlot, +// }); +// } + +// if (blocksRequest.count !== dataRequest.count) { +// throw new DownloadByRangeError({ +// code: DownloadByRangeErrorCode.COUNT_MISMATCH, +// blockCount: blocksRequest.count, +// dataCount: dataRequest.count, +// }); +// } + +// return slotRange; +// } + +describe("sync / range / batch", async () => { // Common mock data - const startEpoch = 0; + const privateKey = await generateKeyPair("secp256k1"); + const nodeId = computeNodeIdFromPrivateKey(privateKey); + const custodyConfig = new CustodyConfig({config, nodeId}); const peer = validPeerIdStr; - const blocksDownloaded = [ - getBlockInput.preData(config, ssz.phase0.SignedBeaconBlock.defaultValue(), BlockSource.byRange), - ]; - - it("Should return correct blockByRangeRequest", () => { - const batch = new Batch(startEpoch, config); - expect(batch.request).toEqual({ - startSlot: 0, - count: SLOTS_PER_EPOCH * EPOCHS_PER_BATCH, - step: 1, + + afterEach(() => { + vi.restoreAllMocks(); + }); + + describe("getRequests", () => { + describe("PreDeneb", () => { + let batch: Batch; + const startEpoch = config.CAPELLA_FORK_EPOCH + 1; + + it("should make default pre-deneb requests if no existing blocks are passed", () => { + batch = new Batch(startEpoch, config, clock, custodyConfig); + expect(batch.requests.blocksRequest).toEqual({startSlot: batch.startSlot, count: batch.count, step: 1}); + expect(batch.requests.blobsRequest).toBeUndefined(); + expect(batch.requests.columnsRequest).toBeUndefined(); + }); + + it("should 
have correct start slot to not re-download blocks", () => {}); + }); + + describe("ForkDABlobs", () => { + let batch: Batch; + const startEpoch = config.DENEB_FORK_EPOCH + 1; + + it("should make default ForkDABlobs requests if no existing blocks are passed", () => { + batch = new Batch(startEpoch, config, clock, custodyConfig); + + expect(batch.requests.blocksRequest).toEqual({startSlot: batch.startSlot, count: batch.count, step: 1}); + expect(batch.requests.blobsRequest).toEqual({startSlot: batch.startSlot, count: batch.count}); + expect(batch.requests.columnsRequest).toBeUndefined(); + }); + + it("should make default ForkDABlobs requests if current epoch is the last in request range", () => { + vi.spyOn(clock, "currentEpoch", "get").mockReturnValue( + startEpoch + config.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS + ); + batch = new Batch(startEpoch, config, clock, custodyConfig); + + expect(batch.requests.blocksRequest).toEqual({startSlot: batch.startSlot, count: batch.count, step: 1}); + expect(batch.requests.blobsRequest).toEqual({startSlot: batch.startSlot, count: batch.count}); + expect(batch.requests.columnsRequest).toBeUndefined(); + }); + + it("should not make ForkDABlobs requests if current epoch is ahead of request range", () => { + vi.spyOn(clock, "currentEpoch", "get").mockReturnValue( + startEpoch + config.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS + 1 + ); + batch = new Batch(startEpoch, config, clock, custodyConfig); + + expect(batch.requests.blocksRequest).toEqual({startSlot: batch.startSlot, count: batch.count, step: 1}); + expect(batch.requests.blobsRequest).toBeUndefined(); + expect(batch.requests.columnsRequest).toBeUndefined(); + }); + }); + + describe("ForkDAColumns", () => { + let batch: Batch; + const startEpoch = config.FULU_FORK_EPOCH + 1; + + beforeEach(() => { + batch = new Batch(startEpoch, config, clock, custodyConfig); + }); + + it("should make ForkDAColumns requests if no existing blocks are passed", () => { + expect(batch.requests.blocksRequest).toEqual({startSlot: batch.startSlot, count: batch.count, step: 1}); + expect(batch.requests.blobsRequest).toBeUndefined(); + expect(batch.requests.columnsRequest).toEqual({ + startSlot: batch.startSlot, + count: batch.count, + columns: custodyConfig.sampledColumns, + }); + }); + + it("should make ForkDAColumns requests if current epoch is the last in request range", () => { + vi.spyOn(clock, "currentEpoch", "get").mockReturnValue( + startEpoch + config.MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS + ); + batch = new Batch(startEpoch, config, clock, custodyConfig); + + expect(batch.requests.blocksRequest).toEqual({startSlot: batch.startSlot, count: batch.count, step: 1}); + expect(batch.requests.blobsRequest).toBeUndefined(); + expect(batch.requests.columnsRequest).toEqual({ + startSlot: batch.startSlot, + count: batch.count, + columns: custodyConfig.sampledColumns, + }); + }); + + it("should not make ForkDAColumns requests if current epoch is ahead of request range", () => { + vi.spyOn(clock, "currentEpoch", "get").mockReturnValue( + startEpoch + config.MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS + 1 + ); + batch = new Batch(startEpoch, config, clock, custodyConfig); + + expect(batch.requests.blocksRequest).toEqual({startSlot: batch.startSlot, count: batch.count, step: 1}); + expect(batch.requests.blobsRequest).toBeUndefined(); + expect(batch.requests.columnsRequest).toBeUndefined(); + }); + }); + + it("should not request data pre-deneb", () => { + const startEpoch = config.CAPELLA_FORK_EPOCH - 1; + const batch = new 
Batch(startEpoch, config, clock, custodyConfig); + expect(batch.requests.blocksRequest).toEqual({startSlot: batch.startSlot, count: batch.count, step: 1}); + expect(batch.requests.blobsRequest).toBeUndefined(); + expect(batch.requests.columnsRequest).toBeUndefined(); + }); + + it.todo("should not request data when before availability window"); + + // it("should request data within availability window", () => {}); + + // it("should only request blobs or columns, not both", () => {}); + + // it("should request blobs between post-deneb and pre-fulu", () => {}); + + it("should request columns post-fulu", () => { + const startEpoch = config.FULU_FORK_EPOCH + 1; + const batch = new Batch(startEpoch, config, clock, custodyConfig); + expect(batch.requests.blocksRequest).toEqual({startSlot: batch.startSlot, count: batch.count, step: 1}); + expect(batch.requests.blobsRequest).toBeUndefined(); + expect(batch.requests.columnsRequest).toEqual({ + startSlot: batch.startSlot, + count: batch.count, + columns: custodyConfig.sampledColumns, + }); + }); + + it("should have same start slot and count for blocks and data requests", () => { + const startEpoch = config.FULU_FORK_EPOCH + 1; + const batch = new Batch(startEpoch, config, clock, custodyConfig); + expect(batch.requests.blocksRequest?.startSlot).toEqual(batch.requests.columnsRequest?.startSlot); + expect(batch.requests.blocksRequest?.count).toEqual(batch.requests.columnsRequest?.count); + }); + }); + + describe("downloadingSuccess", () => { + it.todo("should handle blocks that are not in slot-wise order"); }); it("Complete state flow", () => { - const batch = new Batch(startEpoch, config); + const startEpoch = 0; + const batch = new Batch(startEpoch, config, clock, custodyConfig); // Instantion: AwaitingDownload expect(batch.state.status).toBe(BatchStatus.AwaitingDownload); @@ -35,7 +275,7 @@ expect(batch.state.status).toBe(BatchStatus.Downloading); // downloadingError: Downloading -> AwaitingDownload - batch.downloadingError(); + batch.downloadingError(peer); expect(batch.state.status).toBe(BatchStatus.AwaitingDownload); expect(batch.getFailedPeers()[0]).toBe(peer); @@ -45,7 +285,16 @@ // retry download: AwaitingDownload -> Downloading // downloadingSuccess: Downloading -> AwaitingProcessing batch.startDownloading(peer); - batch.downloadingSuccess({blocks: blocksDownloaded, pendingDataColumns: null}); + batch.downloadingSuccess(peer, [ + BlockInputPreData.createFromBlock({ + block: ssz.capella.SignedBeaconBlock.defaultValue(), + blockRootHex: "0x1234", + source: BlockInputSource.byRoot, + seenTimestampSec: Math.floor(Date.now() / 1000), + forkName: ForkName.capella, + daOutOfRange: false, + }), + ]); expect(batch.state.status).toBe(BatchStatus.AwaitingProcessing); // startProcessing: AwaitingProcessing -> Processing @@ -81,10 +330,11 @@ }); it("Should throw on inconsistent state - downloadingSuccess", () => { - const batch = new Batch(startEpoch, config); + const startEpoch = 0; + const batch = new Batch(startEpoch, config, clock, custodyConfig); expectThrowsLodestarError( - () => batch.downloadingSuccess({blocks: blocksDownloaded, pendingDataColumns: []}), + () => batch.downloadingSuccess(peer, []), new BatchError({ code: BatchErrorCode.WRONG_STATUS, startEpoch, @@ -95,7 +345,8 @@ }); it("Should throw on inconsistent state - startProcessing", () => { - const batch = new Batch(startEpoch, 
config); + const startEpoch = 0; + const batch = new Batch(startEpoch, config, clock, custodyConfig); expectThrowsLodestarError( () => batch.startProcessing(), @@ -109,7 +360,8 @@ describe("sync / range / batch", () => { }); it("Should throw on inconsistent state - processingSuccess", () => { - const batch = new Batch(startEpoch, config); + const startEpoch = 0; + const batch = new Batch(startEpoch, config, clock, custodyConfig); expectThrowsLodestarError( () => batch.processingSuccess(), diff --git a/packages/beacon-node/test/unit/sync/range/chain.test.ts b/packages/beacon-node/test/unit/sync/range/chain.test.ts index 7e4b03b11acd..c9e22c493ebf 100644 --- a/packages/beacon-node/test/unit/sync/range/chain.test.ts +++ b/packages/beacon-node/test/unit/sync/range/chain.test.ts @@ -4,7 +4,8 @@ import {computeStartSlotAtEpoch} from "@lodestar/state-transition"; import {Epoch, Slot, phase0, ssz} from "@lodestar/types"; import {Logger, fromHex} from "@lodestar/utils"; import {afterEach, describe, it} from "vitest"; -import {BlockInput, BlockSource, getBlockInput} from "../../../../src/chain/blocks/types.js"; +import {BlockInputPreData} from "../../../../src/chain/blocks/blockInput/blockInput.js"; +import {BlockInputSource, IBlockInput} from "../../../../src/chain/blocks/blockInput/types.js"; import {ZERO_HASH} from "../../../../src/constants/index.js"; import {ChainTarget, SyncChain, SyncChainFns} from "../../../../src/sync/range/chain.js"; import {RangeSyncType} from "../../../../src/sync/utils/remoteSyncType.js"; @@ -12,6 +13,7 @@ import {CustodyConfig} from "../../../../src/util/dataColumns.js"; import {linspace} from "../../../../src/util/numpy.js"; import {testLogger} from "../../../utils/logger.js"; import {validPeerIdStr} from "../../../utils/peer.js"; +import {Clock} from "../../../../src/util/clock.js"; describe("sync / range / chain", () => { const testCases: { @@ -75,6 +77,7 @@ describe("sync / range / chain", () => { custodyGroups: [], }; }; + const pruneBlockInputs: SyncChainFns["pruneBlockInputs"] = (_) => {}; afterEach(() => { if (interval !== null) clearInterval(interval); @@ -83,19 +86,16 @@ describe("sync / range / chain", () => { for (const {id, startEpoch, targetEpoch, badBlocks, skippedSlots} of testCases) { it(id, async () => { const processChainSegment: SyncChainFns["processChainSegment"] = async (blocks) => { - for (const {block} of blocks) { + for (const blockInput of blocks) { + const block = blockInput.getBlock(); if (block.signature === ACCEPT_BLOCK) continue; if (block.signature === REJECT_BLOCK) throw Error("REJECT_BLOCK"); } }; - const downloadBeaconBlocksByRange: SyncChainFns["downloadBeaconBlocksByRange"] = async ( - _peer, - request, - _partialDownload - ) => { - const blocks: BlockInput[] = []; - for (let i = request.startSlot; i < request.startSlot + request.count; i += request.step) { + const downloadByRange: SyncChainFns["downloadByRange"] = async (_peer, request, _partialDownload) => { + const blocks: IBlockInput[] = []; + for (let i = request.startSlot; i < request.startSlot + request.count; i += 1) { if (skippedSlots?.has(i)) { continue; // Skip } @@ -104,17 +104,20 @@ describe("sync / range / chain", () => { const shouldReject = badBlocks?.has(i); if (shouldReject) badBlocks?.delete(i); blocks.push( - getBlockInput.preData( - config, - { + BlockInputPreData.createFromBlock({ + block: { message: generateEmptyBlock(i), signature: shouldReject ? 
REJECT_BLOCK : ACCEPT_BLOCK, }, - BlockSource.byRange - ) + blockRootHex: "0x00", + forkName: config.getForkName(i), + daOutOfRange: false, + source: BlockInputSource.byRange, + seenTimestampSec: Math.floor(Date.now() / 1000), + }) ); } - return {blocks, pendingDataColumns: null}; + return {result: blocks, warnings: null}; }; const target: ChainTarget = {slot: computeStartSlotAtEpoch(targetEpoch), root: ZERO_HASH}; @@ -122,18 +125,20 @@ await new Promise<void>((resolve, reject) => { const onEnd: SyncChainFns["onEnd"] = (err) => (err ? reject(err) : resolve()); + const clock = new Clock({config, genesisTime: 0, signal: new AbortController().signal}); const initialSync = new SyncChain( startEpoch, target, syncType, logSyncChainFns(logger, { processChainSegment, - downloadBeaconBlocksByRange, + downloadByRange, getConnectedPeerSyncMeta, reportPeer, + pruneBlockInputs, onEnd, }), - {config, logger, custodyConfig, metrics: null} + {config, logger, clock, custodyConfig, metrics: null} ); const peers = [peer]; @@ -150,25 +155,24 @@ const peers = [peer]; const processChainSegment: SyncChainFns["processChainSegment"] = async () => {}; - const downloadBeaconBlocksByRange: SyncChainFns["downloadBeaconBlocksByRange"] = async ( - _peer, - request, - _partialDownload - ) => { - const blocks: BlockInput[] = []; - for (let i = request.startSlot; i < request.startSlot + request.count; i += request.step) { + const downloadByRange: SyncChainFns["downloadByRange"] = async (_peer, request, _partialDownload) => { + const blocks: IBlockInput[] = []; + for (let i = request.startSlot; i < request.startSlot + request.count; i += 1) { blocks.push( - getBlockInput.preData( - config, - { + BlockInputPreData.createFromBlock({ + block: { message: generateEmptyBlock(i), signature: ACCEPT_BLOCK, }, - BlockSource.byRange - ) + blockRootHex: "0x00", + forkName: config.getForkName(i), + seenTimestampSec: Math.floor(Date.now() / 1000), + daOutOfRange: false, + source: BlockInputSource.byRange, + }) ); } - return {blocks, pendingDataColumns: null}; + return {result: blocks, warnings: null}; }; const target: ChainTarget = {slot: computeStartSlotAtEpoch(targetEpoch), root: ZERO_HASH}; @@ -176,18 +180,20 @@ await new Promise<void>((resolve, reject) => { const onEnd: SyncChainFns["onEnd"] = (err) => (err ? 
reject(err) : resolve()); + const clock = new Clock({config, genesisTime: 0, signal: new AbortController().signal}); const initialSync = new SyncChain( startEpoch, target, syncType, logSyncChainFns(logger, { processChainSegment, - downloadBeaconBlocksByRange, + downloadByRange, reportPeer, + pruneBlockInputs, getConnectedPeerSyncMeta, onEnd, }), - {config, logger, custodyConfig, metrics: null} + {config, logger, clock, custodyConfig, metrics: null} ); // Add peers after some time @@ -213,12 +219,12 @@ function logSyncChainFns(logger: Logger, fns: SyncChainFns): SyncChainFns { return { processChainSegment(blocks, syncType) { - logger.debug("mock processChainSegment", {blocks: blocks.map((b) => b.block.message.slot).join(",")}); + logger.debug("mock processChainSegment", {blocks: blocks.map((b) => b.slot).join(",")}); return fns.processChainSegment(blocks, syncType); }, - downloadBeaconBlocksByRange(peer, request, _partialDownload, syncType) { - logger.debug("mock downloadBeaconBlocksByRange", request); - return fns.downloadBeaconBlocksByRange(peer, request, _partialDownload, syncType); + downloadByRange(peer, request, syncType) { + logger.debug("mock downloadByRange", request.state.status); + return fns.downloadByRange(peer, request, syncType); }, getConnectedPeerSyncMeta(peerId) { logger.debug("mock getConnectedPeerSyncMeta", peerId); @@ -228,6 +234,10 @@ function logSyncChainFns(logger: Logger, fns: SyncChainFns): SyncChainFns { logger.debug("mock reportPeer", {peer: peer.toString(), action, actionName}); return fns.reportPeer(peer, action, actionName); }, + pruneBlockInputs(blockInputs) { + logger.debug("mock pruneBlockInputs", {blockInputsLength: blockInputs.length}); + return fns.pruneBlockInputs(blockInputs); + }, onEnd(err, target) { logger.debug("mock onEnd", {target: target?.slot}, err ?? 
undefined); return fns.onEnd(err, target); diff --git a/packages/beacon-node/test/unit/sync/range/utils/batches.test.ts b/packages/beacon-node/test/unit/sync/range/utils/batches.test.ts index bb286a0e8c87..66c484998bd7 100644 --- a/packages/beacon-node/test/unit/sync/range/utils/batches.test.ts +++ b/packages/beacon-node/test/unit/sync/range/utils/batches.test.ts @@ -9,7 +9,9 @@ import { toBeDownloadedStartEpoch, validateBatchesStatus, } from "../../../../../src/sync/range/utils/batches.js"; +import {CustodyConfig} from "../../../../../src/util/dataColumns.js"; import {validPeerIdStr} from "../../../../utils/peer.js"; +import {clock} from "../../../../utils/blocksAndData.js"; describe("sync / range / batches", () => { const peer = validPeerIdStr; @@ -220,14 +222,14 @@ describe("sync / range / batches", () => { }); function createBatch(status: BatchStatus, startEpoch = 0): Batch { - const batch = new Batch(startEpoch, config); + const batch = new Batch(startEpoch, config, clock, new CustodyConfig({config, nodeId: Buffer.alloc(32)})); if (status === BatchStatus.AwaitingDownload) return batch; batch.startDownloading(peer); if (status === BatchStatus.Downloading) return batch; - batch.downloadingSuccess({blocks: [], pendingDataColumns: null}); + batch.downloadingSuccess(peer, []); if (status === BatchStatus.AwaitingProcessing) return batch; batch.startProcessing(); diff --git a/packages/beacon-node/test/unit/sync/range/utils/peerBalancer.test.ts b/packages/beacon-node/test/unit/sync/range/utils/peerBalancer.test.ts index 021391d60905..08306f53ab42 100644 --- a/packages/beacon-node/test/unit/sync/range/utils/peerBalancer.test.ts +++ b/packages/beacon-node/test/unit/sync/range/utils/peerBalancer.test.ts @@ -2,8 +2,10 @@ import {createChainForkConfig} from "@lodestar/config"; import {chainConfig} from "@lodestar/config/default"; import {ZERO_HASH} from "@lodestar/params"; import {computeStartSlotAtEpoch} from "@lodestar/state-transition"; +import {ssz} from "@lodestar/types"; import {describe, expect, it} from "vitest"; -import {BlockInput} from "../../../../../src/chain/blocks/types.js"; +import {BlockInputColumns} from "../../../../../src/chain/blocks/blockInput/blockInput.js"; +import {BlockInputSource} from "../../../../../src/chain/blocks/blockInput/types.js"; import {Batch} from "../../../../../src/sync/range/batch.js"; import {ChainTarget} from "../../../../../src/sync/range/chain.js"; import {ChainPeersBalancer, PeerSyncInfo} from "../../../../../src/sync/range/utils/peerBalancer.js"; @@ -11,7 +13,7 @@ import {RangeSyncType} from "../../../../../src/sync/utils/remoteSyncType.js"; import {CustodyConfig} from "../../../../../src/util/dataColumns.js"; import {PeerIdStr} from "../../../../../src/util/peerId.js"; import {getRandPeerSyncMeta} from "../../../../utils/peer.js"; -import {generateSignedBlockAtSlot} from "../../../../utils/typeGenerator.js"; +import {clock} from "../../../../utils/blocksAndData.js"; describe("sync / range / peerBalancer", () => { const custodyConfig = {sampledColumns: [0, 1, 2, 3]} as CustodyConfig; @@ -143,12 +145,12 @@ describe("sync / range / peerBalancer", () => { ? 
createChainForkConfig({...chainConfig, FULU_FORK_EPOCH: 0}) : createChainForkConfig(chainConfig); - const batch0 = new Batch(1, config); - const batch1 = new Batch(2, config); + const batch0 = new Batch(1, config, clock, custodyConfig); + const batch1 = new Batch(2, config, clock, custodyConfig); // Batch zero has a failedDownloadAttempt with peer1 batch0.startDownloading(peer1.peerId); - batch0.downloadingError(); + batch0.downloadingError(peer1.peerId); // peer2 is busy downloading batch1 batch1.startDownloading(peer2.peerId); @@ -166,13 +168,26 @@ it("should not retry the batch with a not as up-to-date peer", async () => { const config = createChainForkConfig({...chainConfig, FULU_FORK_EPOCH: 0}); - const batch0 = new Batch(1, config); + const batch0 = new Batch(1, config, clock, custodyConfig); + const blocksRequest = batch0.requests.blocksRequest as {startSlot: number; count: number}; // Batch zero has a failedDownloadAttempt with peer1 batch0.startDownloading(peer1.peerId); - const block: BlockInput = { - block: generateSignedBlockAtSlot(batch0.request.startSlot + batch0.request.count - 1), - } as BlockInput; - batch0.downloadingSuccess({blocks: [block], pendingDataColumns: [1, 2, 3]}); + const block = ssz.fulu.SignedBeaconBlock.defaultValue(); + block.message.slot = blocksRequest.startSlot + blocksRequest.count - 1; + block.message.body.blobKzgCommitments = [ssz.fulu.KZGCommitment.defaultValue()]; + const blockInput = BlockInputColumns.createFromBlock({ + block, + blockRootHex: "0x00", + source: BlockInputSource.gossip, + seenTimestampSec: Math.floor(Date.now() / 1000), + forkName: config.getForkName(block.message.slot), + daOutOfRange: false, + custodyColumns: [0, 1, 2, 3], + sampledColumns: [0, 1, 2, 3], + }); + batch0.downloadingSuccess(peer1.peerId, [blockInput]); // peer2 and peer3 are the same but peer3 has a lower target slot than the previous download const peerInfos: PeerSyncInfo[] = [ @@ -180,14 +195,14 @@ peerId: peer2.peerId, client: peer2.client, custodyGroups: [0, 1, 2, 3], - target: {slot: batch0.request.startSlot + batch0.request.count - 1, root: ZERO_HASH}, + target: {slot: blocksRequest.startSlot + blocksRequest.count - 1, root: ZERO_HASH}, earliestAvailableSlot: 0, }, { peerId: peer3.peerId, client: peer3.client, custodyGroups: [0, 1, 2, 3], - target: {slot: batch0.request.startSlot + batch0.request.count - 2, root: ZERO_HASH}, + target: {slot: blocksRequest.startSlot + blocksRequest.count - 2, root: ZERO_HASH}, earliestAvailableSlot: 0, }, ]; @@ -290,13 +305,13 @@ ? 
createChainForkConfig({...chainConfig, FULU_FORK_EPOCH: 0}) : createChainForkConfig(chainConfig); - const batch0 = new Batch(1, config); - const batch1 = new Batch(2, config); + const batch0 = new Batch(1, config, clock, custodyConfig); + const batch1 = new Batch(2, config, clock, custodyConfig); // peer1 and peer2 are busy downloading batch0.startDownloading(peer1.peerId); batch1.startDownloading(peer2.peerId); - const newBatch = new Batch(3, config); + const newBatch = new Batch(3, config, clock, custodyConfig); const peerBalancer = new ChainPeersBalancer(peerInfos, [batch0, batch1], custodyConfig, RangeSyncType.Head); const idlePeer = peerBalancer.idlePeerForBatch(newBatch); expect(idlePeer?.peerId).toBe(expected); diff --git a/packages/beacon-node/test/unit/sync/unknownBlock.test.ts b/packages/beacon-node/test/unit/sync/unknownBlock.test.ts index 5e549a5e5fc3..4a902834076e 100644 --- a/packages/beacon-node/test/unit/sync/unknownBlock.test.ts +++ b/packages/beacon-node/test/unit/sync/unknownBlock.test.ts @@ -3,31 +3,25 @@ import {toHexString} from "@chainsafe/ssz"; import {createChainForkConfig} from "@lodestar/config"; import {config as minimalConfig} from "@lodestar/config/default"; import {IForkChoice, ProtoBlock} from "@lodestar/fork-choice"; -import {ForkName, ZERO_HASH_HEX} from "@lodestar/params"; +import {ForkName} from "@lodestar/params"; import {ssz} from "@lodestar/types"; import {notNullish, sleep} from "@lodestar/utils"; import {afterEach, beforeEach, describe, expect, it, vi} from "vitest"; -import { - BlockInput, - BlockInputDataColumns, - BlockInputType, - BlockSource, - CachedDataColumns, - NullBlockInput, - getBlockInput, -} from "../../../src/chain/blocks/types.js"; +import {BlockInputColumns, BlockInputPreData} from "../../../src/chain/blocks/blockInput/blockInput.js"; +import {BlockInputSource} from "../../../src/chain/blocks/blockInput/types.js"; import {BlockError, BlockErrorCode} from "../../../src/chain/errors/blockError.js"; -import {IBeaconChain} from "../../../src/chain/index.js"; +import {ChainEvent, IBeaconChain} from "../../../src/chain/index.js"; import {SeenBlockProposers} from "../../../src/chain/seenCache/seenBlockProposers.js"; import {ZERO_HASH} from "../../../src/constants/constants.js"; -import {INetwork, NetworkEvent, NetworkEventBus, PeerAction} from "../../../src/network/index.js"; +import {INetwork, NetworkEventBus, PeerAction} from "../../../src/network/index.js"; import {PeerSyncMeta} from "../../../src/network/peers/peersData.js"; import {defaultSyncOptions} from "../../../src/sync/options.js"; -import {UnknownBlockPeerBalancer, UnknownBlockSync} from "../../../src/sync/unknownBlock.js"; +import {BlockInputSync, UnknownBlockPeerBalancer} from "../../../src/sync/unknownBlock.js"; import {CustodyConfig} from "../../../src/util/dataColumns.js"; import {PeerIdStr} from "../../../src/util/peerId.js"; import {ClockStopped} from "../../mocks/clock.js"; import {MockedBeaconChain, getMockedBeaconChain} from "../../mocks/mockedBeaconChain.js"; +import {generateBlockWithColumnSidecars} from "../../utils/blocksAndData.js"; import {testLogger} from "../../utils/logger.js"; import {getRandPeerIdStr, getRandPeerSyncMeta} from "../../utils/peer.js"; @@ -48,7 +42,7 @@ describe.skip( const testCases: { id: string; - event: NetworkEvent.unknownBlockParent | NetworkEvent.unknownBlock; + event: ChainEvent.unknownParent | ChainEvent.unknownBlockRoot; finalizedSlot: number; reportPeer?: boolean; seenBlock?: boolean; @@ -57,23 +51,23 @@ describe.skip( }[] = [ { id: 
"fetch and process multiple unknown blocks", - event: NetworkEvent.unknownBlock, + event: ChainEvent.unknownBlockRoot, finalizedSlot: 0, }, { id: "fetch and process multiple unknown block parents", - event: NetworkEvent.unknownBlockParent, + event: ChainEvent.unknownParent, finalizedSlot: 0, }, { id: "downloaded parent is before finalized slot", - event: NetworkEvent.unknownBlockParent, + event: ChainEvent.unknownParent, finalizedSlot: 2, reportPeer: true, }, { id: "unbundling attack", - event: NetworkEvent.unknownBlock, + event: ChainEvent.unknownBlockRoot, finalizedSlot: 0, seenBlock: true, }, @@ -86,12 +80,12 @@ describe.skip( // }, { id: "peer returns prefinalized block", - event: NetworkEvent.unknownBlock, + event: ChainEvent.unknownBlockRoot, finalizedSlot: 1, }, { id: "downloaded blocks only", - event: NetworkEvent.unknownBlockParent, + event: ChainEvent.unknownParent, finalizedSlot: 0, maxPendingBlocks: 1, }, @@ -182,7 +176,8 @@ describe.skip( const chain: Partial = { clock: new ClockStopped(0), forkChoice: forkChoice as IForkChoice, - processBlock: async ({block}, opts) => { + processBlock: async (blockInput, opts) => { + const block = blockInput.getBlock(); if (!forkChoice.hasBlock(block.message.parentRoot)) throw Error("Unknown parent"); const blockSlot = block.message.slot; if (blockSlot <= finalizedSlot && !opts?.ignoreIfFinalized) { @@ -200,28 +195,37 @@ describe.skip( const setTimeoutSpy = vi.spyOn(global, "setTimeout"); const processBlockSpy = vi.spyOn(chain, "processBlock"); - const syncService = new UnknownBlockSync(config, network as INetwork, chain as IBeaconChain, logger, null, { + const syncService = new BlockInputSync(config, network as INetwork, chain as IBeaconChain, logger, null, { ...defaultSyncOptions, maxPendingBlocks, }); syncService.subscribeToNetwork(); - if (event === NetworkEvent.unknownBlockParent) { - network.events?.emit(NetworkEvent.unknownBlockParent, { - blockInput: getBlockInput.preData(config, blockC, BlockSource.gossip), + if (event === ChainEvent.unknownParent) { + chain.emitter?.emit(ChainEvent.unknownParent, { + blockInput: BlockInputPreData.createFromBlock({ + block: blockC, + blockRootHex: blockRootHexC, + forkName: config.getForkName(blockC.message.slot), + daOutOfRange: false, + seenTimestampSec: Math.floor(Date.now() / 1000), + source: BlockInputSource.gossip, + }), peer, + source: BlockInputSource.gossip, }); } else { - network.events?.emit(NetworkEvent.unknownBlock, {rootHex: blockRootHexC, peer}); + chain.emitter?.emit(ChainEvent.unknownBlockRoot, { + rootHex: blockRootHexC, + peer, + source: BlockInputSource.gossip, + }); } if (wrongBlockRoot) { - const [_, requestedRoots] = await sendBeaconBlocksByRootPromise; + await sendBeaconBlocksByRootPromise; await sleep(200); // should not send the invalid root block to chain expect(processBlockSpy).toHaveBeenCalledOnce(); - for (const requestedRoot of requestedRoots) { - expect(syncService["pendingBlocks"].get(toHexString(requestedRoot))?.downloadAttempts).toEqual(1); - } } else if (reportPeer) { const err = await reportPeerPromise; expect(err[0]).toBe(peer); @@ -259,7 +263,7 @@ describe("UnknownBlockSync", () => { let network: INetwork; let chain: MockedBeaconChain; const logger = testLogger(); - let service: UnknownBlockSync; + let service: BlockInputSync; beforeEach(() => { network = { @@ -288,8 +292,8 @@ describe("UnknownBlockSync", () => { for (const {actions, expected} of testCases) { const testName = actions.map((action) => (action ? 
"subscribe" : "unsubscribe")).join(" - "); it(testName, () => { - const events = network.events as EventEmitter; - service = new UnknownBlockSync(minimalConfig, network, chain, logger, null, defaultSyncOptions); + const events = chain.emitter as EventEmitter; + service = new BlockInputSync(minimalConfig, network, chain, logger, null, defaultSyncOptions); for (const action of actions) { if (action) { service.subscribeToNetwork(); @@ -299,12 +303,12 @@ describe("UnknownBlockSync", () => { } if (expected) { - expect(events.listenerCount(NetworkEvent.unknownBlock)).toBe(1); - expect(events.listenerCount(NetworkEvent.unknownBlockParent)).toBe(1); + expect(events.listenerCount(ChainEvent.unknownBlockRoot)).toBe(1); + expect(events.listenerCount(ChainEvent.unknownParent)).toBe(1); expect(service.isSubscribedToNetwork()).toBe(true); } else { - expect(events.listenerCount(NetworkEvent.unknownBlock)).toBe(0); - expect(events.listenerCount(NetworkEvent.unknownBlockParent)).toBe(0); + expect(events.listenerCount(ChainEvent.unknownBlockRoot)).toBe(0); + expect(events.listenerCount(ChainEvent.unknownParent)).toBe(0); expect(service.isSubscribedToNetwork()).toBe(false); } }); @@ -361,7 +365,7 @@ describe("UnknownBlockPeerBalancer", async () => { let peerBalancer: UnknownBlockPeerBalancer; beforeEach(() => { - peerBalancer = new UnknownBlockPeerBalancer(custodyConfig); + peerBalancer = new UnknownBlockPeerBalancer(); for (const [peerId, peerMeta] of peersMeta.entries()) { peerBalancer.onPeerConnected(peerId, peerMeta); } @@ -373,23 +377,28 @@ describe("UnknownBlockPeerBalancer", async () => { } const signedBlock = ssz.fulu.SignedBeaconBlock.defaultValue(); - const cachedData: CachedDataColumns = { - cacheId: 2025, - fork: ForkName.fulu, - availabilityPromise: Promise.resolve({} as unknown as BlockInputDataColumns), - resolveAvailability: () => {}, - dataColumnsCache: new Map([ - [0, {dataColumn: ssz.fulu.DataColumnSidecar.defaultValue(), dataColumnBytes: null}], - [1, {dataColumn: ssz.fulu.DataColumnSidecar.defaultValue(), dataColumnBytes: null}], - ]), - calledRecover: false, - }; - const blockInput: BlockInput = { - block: signedBlock, - source: BlockSource.gossip, - type: BlockInputType.dataPromise, - cachedData, - }; + signedBlock.message.body.blobKzgCommitments = [ssz.fulu.KZGCommitment.defaultValue()]; + const {block, rootHex, columnSidecars} = generateBlockWithColumnSidecars({forkName: ForkName.fulu}); + const blockInput = BlockInputColumns.createFromBlock({ + block: block, + blockRootHex: rootHex, + forkName: ForkName.fulu, + daOutOfRange: false, + source: BlockInputSource.gossip, + seenTimestampSec: Math.floor(Date.now() / 1000), + custodyColumns: custodyConfig.custodyColumns, + sampledColumns: custodyConfig.sampledColumns, + }); + + // test cases rely on first 2 columns being known, the rest unknown + for (const sidecar of columnSidecars.slice(0, 2)) { + blockInput.addColumn({ + columnSidecar: sidecar, + blockRootHex: rootHex, + seenTimestampSec: Math.floor(Date.now() / 1000), + source: BlockInputSource.gossip, + }); + } it(`bestPeerForBlockInput - test case ${testCaseIndex}`, () => { for (const [i, activeRequest] of activeRequests.entries()) { @@ -419,62 +428,4 @@ describe("UnknownBlockPeerBalancer", async () => { } }); } // end for testCases - - it("bestPeerForBlockInput - NullBlockInput", () => { - // there is an edge case where the NullBlockInput has full custody groups but no block, make sure it can return any peers - // in case NullBlockInput has some pending columns, it falls on the above 
test cases - const signedBlock = ssz.fulu.SignedBeaconBlock.defaultValue(); - const cachedData: CachedDataColumns = { - cacheId: 2025, - fork: ForkName.fulu, - availabilityPromise: Promise.resolve({} as unknown as BlockInputDataColumns), - resolveAvailability: () => {}, - dataColumnsCache: new Map([ - [0, {dataColumn: ssz.fulu.DataColumnSidecar.defaultValue(), dataColumnBytes: null}], - [1, {dataColumn: ssz.fulu.DataColumnSidecar.defaultValue(), dataColumnBytes: null}], - [2, {dataColumn: ssz.fulu.DataColumnSidecar.defaultValue(), dataColumnBytes: null}], - [3, {dataColumn: ssz.fulu.DataColumnSidecar.defaultValue(), dataColumnBytes: null}], - ]), - calledRecover: false, - }; - const blockInput: BlockInput = { - block: signedBlock, - source: BlockSource.gossip, - type: BlockInputType.dataPromise, - cachedData, - }; - - const nullBlockInput: NullBlockInput = { - block: null, - blockRootHex: ZERO_HASH_HEX, - blockInputPromise: Promise.resolve(blockInput), - cachedData, - }; - - const excludedPeers = new Set(); - for (let i = 0; i < peers.length; i++) { - const peer = peerBalancer.bestPeerForBlockInput(nullBlockInput, excludedPeers); - expect(peer).not.toBeNull(); - if (peer == null) { - // should not happen, this is just to make the compiler happy - throw new Error("Unexpected null peer"); - } - excludedPeers.add(peer.peerId); - } - - // last round, no more peer should be returned because all are requested - const peer = peerBalancer.bestPeerForBlockInput(nullBlockInput, excludedPeers); - expect(peer).toBeNull(); - }); - - it("onRequest and onRequestCompleted", () => { - peerBalancer.onRequest(peers[0].peerId); - expect(peerBalancer.activeRequests.get(peers[0].peerId)).toBe(1); - peerBalancer.onRequest(peers[0].peerId); - expect(peerBalancer.activeRequests.get(peers[0].peerId)).toBe(2); - peerBalancer.onRequestCompleted(peers[0].peerId); - expect(peerBalancer.activeRequests.get(peers[0].peerId)).toBe(1); - peerBalancer.onRequestCompleted(peers[0].peerId); - expect(peerBalancer.activeRequests.get(peers[0].peerId)).toBe(0); - }); -}); diff --git a/packages/beacon-node/test/unit/sync/utils/downloadByRange.test.ts b/packages/beacon-node/test/unit/sync/utils/downloadByRange.test.ts new file mode 100644 index 000000000000..58393c8b8b37 --- /dev/null +++ b/packages/beacon-node/test/unit/sync/utils/downloadByRange.test.ts @@ -0,0 +1,363 @@ +import {ForkName} from "@lodestar/params"; +import {beforeEach, describe, expect, it} from "vitest"; +import {BlockInputPreData} from "../../../../src/chain/blocks/blockInput/blockInput.js"; +import {BlockInputSource, IBlockInput} from "../../../../src/chain/blocks/blockInput/types.js"; +import {ValidatedBlock, getBlocksForDataValidation} from "../../../../src/sync/utils/downloadByRange.js"; +import {generateChainOfBlockMaybeSidecars} from "../../../utils/blocksAndData.js"; + +/** + * Logic errors and gaps identified during test case creation: + * + * - validateBlockByRangeResponse: Commented out zero blocks check breaks during chain liveness issues (line 445-453) + * - validateBlobsByRangeResponse: Missing validation that blob sidecars are in consecutive (slot, index) order as per spec (see the commented sketch below) + * - validateColumnsByRangeResponse: Missing validation that column sidecars are in consecutive (slot, index) order + * - cacheByRangeResponses: Error handling for wrong chain only breaks loop but doesn't throw/propagate error properly + * - getBlocksForDataValidation: No validation that cached blocks are actually before current blocks 
as assumed in comments + * - validateResponses: Missing validation that blocks and data requests have matching/compatible slot ranges + * - downloadByRange: Original error details are lost when catching and re-throwing REQ_RESP_ERROR + * - validateBlobsByRangeResponse: Doesn't validate blob indices are sequential (0, 1, 2...) within each block + * - validateColumnsByRangeResponse: Logic assumes all requested columns present but doesn't validate properly + * - cacheByRangeResponses: Type checking for DAType mismatch happens after attempting operations + * - validateBlockByRangeResponse: Parent root validation only checks consecutive blocks, missing skip slot handling + * - requestByRange: No timeout handling for concurrent network requests + * - validateResponses: batchBlocks parameter can be undefined but not properly handled in all cases + */ + +// describe("downloadByRange", () => { +// const peerIdStr = "0x1234567890abcdef"; +// // let cache: SeenBlockInputCache; +// let network: INetwork; +// // const logger = getMockedLogger(); + +// const startSlot = slots.deneb; +// const count = 32; +// let requests!: DownloadByRangeRequests; +// let networkResponse!: { +// blocks: WithBytes[]; +// blobSidecars: deneb.BlobSidecars; +// columnSidecars: fulu.DataColumnSidecars; +// }; +// let expected!: DownloadByRangeResponses; + +// beforeAll(() => { +// // Test setup code here +// }); + +// describe("cacheByRangeResponses", () => { +// it("should cache blocks only when no data sidecars present"); +// it("should cache blocks with blob sidecars"); +// it("should cache blocks with column sidecars"); +// it("should add blocks to existing batch blocks"); +// it("should add blob sidecars to existing batch blocks"); +// it("should add column sidecars to existing batch blocks"); +// it("should create new block input when block doesn't exist in batch"); +// it("should create new block input from blob sidecars when block doesn't exist"); +// it("should create new block input from column sidecars when block doesn't exist"); +// it("should throw error when block input type mismatches for blobs"); +// it("should throw error when block input type mismatches for columns"); +// it("should handle wrong chain error for blocks in finalized sync"); +// it("should handle wrong chain error for blobs in finalized sync"); +// it("should handle wrong chain error for columns in finalized sync"); +// it("should not report peer for wrong chain in non-finalized sync"); +// it("should maintain slot ordering in returned block inputs"); +// it("should handle empty responses gracefully"); +// it("should handle duplicate blocks with throwOnDuplicateAdd false"); +// it("should handle duplicate blobs with throwOnDuplicateAdd false"); +// it("should handle duplicate columns with throwOnDuplicateAdd false"); +// }); + +// describe("downloadByRange", () => { +// it("should download and validate blocks only"); +// it("should download and validate blocks with blobs"); +// it("should download and validate blocks with columns"); +// it("should download blocks, blobs and columns concurrently"); +// it("should use cached batch blocks for data validation when no blocks request"); +// it("should throw REQ_RESP_ERROR when network request fails"); +// it("should handle empty responses from network"); +// it("should validate responses before returning"); +// it("should pass through validation errors"); +// it("should log verbose error before throwing"); +// }); + +// describe("requestByRange", () => { +// it("should make block requests"); +// 
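(editorial aside) A hypothetical, untested sketch of the consecutive (slot, index) ordering check flagged in the gap list above; the helper name and the structural sidecar type are assumptions, not part of this PR:
+//
+// function assertSidecarsInSlotIndexOrder(sidecars: {index: number; signedBlockHeader: {message: {slot: number}}}[]): void {
+//   for (let i = 1; i < sidecars.length; i++) {
+//     const prevSlot = sidecars[i - 1].signedBlockHeader.message.slot;
+//     const currSlot = sidecars[i].signedBlockHeader.message.slot;
+//     // sidecars must ascend by slot, and by index within the same slot
+//     if (currSlot < prevSlot || (currSlot === prevSlot && sidecars[i].index <= sidecars[i - 1].index)) {
+//       throw new Error(`sidecar out of (slot, index) order at position ${i}`);
+//     }
+//   }
+// }
+//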
it("should make blob requests"); +// it("should make column requests"); +// it("should make concurrent block/blob/column requests from the same peer"); +// it("should handle undefined responses properly"); +// it("should throw if one of the concurrent requests fails"); +// it("should not make requests for undefined request parameters"); +// it("should return empty object when no requests provided"); +// it("should handle network timeout errors"); +// it("should preserve response order for concurrent requests"); +// }); + +// describe("validateResponses", () => { +// it("should validate blocks when blocksRequest provided"); +// it("should validate blobs when blobsRequest provided with blocks"); +// it("should validate columns when columnsRequest provided with blocks"); +// it("should use batchBlocks for data validation when no blocksRequest"); +// it("should throw MISSING_BLOCKS when data request but no blocks available"); +// it("should throw MISSING_BLOBS_RESPONSE when blobsRequest but no blobSidecars"); +// it("should throw MISSING_COLUMNS_RESPONSE when columnsRequest but no columnSidecars"); +// it("should return empty responses when no requests provided"); +// it("should validate blocks before validating data sidecars"); +// it("should use validated blocks for data validation when both downloaded"); +// it("should handle mixed cached and downloaded blocks for validation"); +// it("should validate slot ranges match between blocks and data requests"); +// }); + +// describe("validateBlockByRangeResponse", () => { +// it("should accept valid chain of blocks"); +// it("should accept empty response during chain liveness issues"); +// it("should throw EXTRA_BLOCKS when more blocks than requested count"); +// it("should throw OUT_OF_RANGE_BLOCKS when block slot before startSlot"); +// it("should throw OUT_OF_RANGE_BLOCKS when block slot after lastValidSlot"); +// it("should throw OUT_OF_ORDER_BLOCKS when blocks not in ascending slot order"); +// it("should allow skip slots in block chain"); +// it("should validate parent root matches previous block root"); +// it("should throw PARENT_ROOT_MISMATCH when chain broken"); +// it("should handle single block response"); +// it("should handle maximum count blocks"); +// it("should compute block roots correctly for each fork"); +// it("should validate blocks at fork boundaries"); +// it("should handle blocks with same slot (reorgs)"); +// }); + +// describe("validateBlobsByRangeResponse", () => { +// it("should accept valid blob sidecars matching blocks"); +// it("should throw EXTRA_BLOBS when more blobs than expected"); +// it("should throw MISSING_BLOBS when fewer blobs than expected"); +// it("should validate blob count matches block kzg commitments"); +// it("should skip blocks with zero kzg commitments"); +// it("should validate blobs in consecutive (slot, index) order"); +// it("should validate blob indices are sequential within block"); +// it("should validate all blobs for a block are included"); +// it("should call validateBlockBlobSidecars for each block with blobs"); +// it("should handle blocks with different blob counts"); +// it("should validate blobs across multiple blocks"); +// it("should return validated blob sidecars grouped by block"); +// it("should handle maximum blob count per block"); +// it("should validate blob sidecars in parallel"); +// it("should propagate validation errors from validateBlockBlobSidecars"); +// }); + +// describe("validateColumnsByRangeResponse", () => { +// it("should accept valid column sidecars 
matching blocks"); +// it("should throw EXTRA_COLUMNS when more columns than expected"); +// it("should throw MISSING_COLUMNS when fewer columns than expected"); +// it("should validate column count matches requested columns times blocks with commitments"); +// it("should skip blocks with zero kzg commitments"); +// it("should validate columns in consecutive (slot, index) order"); +// it("should validate all requested column indices present for each block"); +// it("should validate column indices match requested columns array"); +// it("should validate columns are in order within each block"); +// it("should throw MISSING_COLUMNS when columns not in correct order"); +// it("should call validateBlockDataColumnSidecars for each block with columns"); +// it("should handle blocks with different commitment counts"); +// it("should validate columns across multiple blocks"); +// it("should return validated column sidecars grouped by block"); +// it("should handle partial column requests (subset of indices)"); +// it("should validate column sidecars in parallel"); +// it("should propagate validation errors from validateBlockDataColumnSidecars"); +// }); + +describe("getBlocksForDataValidation", () => { + const forkName = ForkName.capella; + let chainOfBlocks: ReturnType; + let blockInputs: IBlockInput[]; + let validatedBlocks: ValidatedBlock[]; + + beforeEach(() => { + chainOfBlocks = generateChainOfBlockMaybeSidecars({forkName, count: 32, oomProtection: true}); + blockInputs = chainOfBlocks.map(({block, rootHex}) => + BlockInputPreData.createFromBlock({ + block, + forkName, + blockRootHex: rootHex, + daOutOfRange: true, + seenTimestampSec: Date.now(), + source: BlockInputSource.gossip, + }) + ); + validatedBlocks = chainOfBlocks.map(({block, blockRoot}) => ({block, blockRoot})); + }); + + it("should return requested slot range from cached", () => { + // Request slots 10-20 from cached blocks (slots 0-31) + const dataRequest = {startSlot: 10, count: 10}; + const lastSlot = dataRequest.startSlot + dataRequest.count - 1; + + const result = getBlocksForDataValidation(dataRequest, blockInputs.slice(10, 20), undefined); + + expect(result).toHaveLength(10); + expect(result[0].block.message.slot).toBe(dataRequest.startSlot); + expect(result[dataRequest.count - 1].block.message.slot).toBe(lastSlot); + }); + + it("should filter out blocks before and after range from cached", () => { + // Request slots 10-20 but provide cached blocks from slots 5-25 + const dataRequest = {startSlot: 10, count: 10}; + const lastSlot = dataRequest.startSlot + dataRequest.count - 1; + const cached = blockInputs; + + const result = getBlocksForDataValidation(dataRequest, cached, undefined); + + expect(result).toHaveLength(10); + expect(result[0].block.message.slot).toBe(dataRequest.startSlot); + expect(result[dataRequest.count - 1].block.message.slot).toBe(lastSlot); + // Verify no blocks outside range + for (const block of result) { + expect(block.block.message.slot).toBeGreaterThanOrEqual(10); + expect(block.block.message.slot).toBeLessThan(20); + } + }); + + it("should return requested slot range from current", () => { + // Request slots 10-20 from current blocks (slots 0-31) + const dataRequest = {startSlot: 10, count: 10}; + const lastSlot = dataRequest.startSlot + dataRequest.count - 1; + const current = validatedBlocks.slice(10, 20); + + const result = getBlocksForDataValidation(dataRequest, undefined, current); + + expect(result).toHaveLength(10); + expect(result[0].block.message.slot).toBe(dataRequest.startSlot); + 
expect(result[dataRequest.count - 1].block.message.slot).toBe(lastSlot); + }); + + it("should filter out blocks before and after range from current", () => { + // Request slots 10-19 but provide current blocks for slots 0-31 + const dataRequest = {startSlot: 10, count: 10}; + const lastSlot = dataRequest.startSlot + dataRequest.count - 1; + const current = validatedBlocks; + + const result = getBlocksForDataValidation(dataRequest, undefined, current); + + expect(result).toHaveLength(10); + expect(result[0].block.message.slot).toBe(dataRequest.startSlot); + expect(result[dataRequest.count - 1].block.message.slot).toBe(lastSlot); + // Verify no blocks outside range + for (const block of result) { + expect(block.block.message.slot).toBeGreaterThanOrEqual(10); + expect(block.block.message.slot).toBeLessThan(20); + } + }); + + it("should return requested slot range from combination of cached and current", () => { + const dataRequest = {startSlot: 5, count: 25}; + const lastSlot = dataRequest.startSlot + dataRequest.count - 1; + const cached = blockInputs.slice(0, 15); + const current = validatedBlocks.slice(15); + + const result = getBlocksForDataValidation(dataRequest, cached, current); + + expect(result).toHaveLength(25); + expect(result[0].block.message.slot).toBe(dataRequest.startSlot); + + expect(result[dataRequest.count - 1].block.message.slot).toBe(lastSlot); + }); + + it("should always return ValidatedBlocks for mixed block source", () => { + const dataRequest = {startSlot: 5, count: 25}; + const cached = blockInputs.slice(0, 15); + const current = validatedBlocks.slice(15); + + const result = getBlocksForDataValidation(dataRequest, cached, current); + + // All results should be ValidatedBlock type with block and blockRoot + for (const validatedBlock of result) { + expect(validatedBlock).toHaveProperty("block"); + expect(validatedBlock).toHaveProperty("blockRoot"); + expect(validatedBlock.blockRoot).toBeInstanceOf(Uint8Array); + } + }); + + it("should maintain ascending slot order", () => { + const dataRequest = {startSlot: 5, count: 25}; + const cached = blockInputs.slice(0, 15); + const current = validatedBlocks.slice(15); + + const result = getBlocksForDataValidation(dataRequest, cached, current); + + // sort a copy; sorting result in place would compare the array with itself and always pass + const slots = result.map((b) => b.block.message.slot); + expect(slots).toEqual([...slots].sort((a, b) => a - b)); + }); + + it("should handle overlapping slot ranges between cached and current", () => { + // Both cached and current have blocks for slots 12-15 + const dataRequest = {startSlot: 10, count: 10}; + const lastSlot = dataRequest.startSlot + dataRequest.count - 1; + const cached = blockInputs.slice(0, 16); // slots 0-15 + const current = validatedBlocks.slice(12, 25); // slots 12-24 + + const result = getBlocksForDataValidation(dataRequest, cached, current); + + // Should not have duplicates, cached takes precedence + expect(result).toHaveLength(10); + expect(result[0].block.message.slot).toBe(dataRequest.startSlot); + expect(result[dataRequest.count - 1].block.message.slot).toBe(lastSlot); + // Verify no duplicate slots + const slots = result.map((b) => b.block.message.slot); + const uniqueSlots = new Set(slots); + expect(uniqueSlots.size).toBe(slots.length); + }); + + it("should return empty array when no blocks in range", () => { + const dataRequest = {startSlot: 100, count: 10}; + const cached = blockInputs.slice(0, 10); // slots 0-9 + const current = validatedBlocks.slice(10, 20); // slots 10-19 + + const result = getBlocksForDataValidation(dataRequest, cached, current); + + 
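// cached covers slots 0-9 and current covers slots 10-19, so a request for slots 100-109 matches nothing +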
expect(result).toHaveLength(0); + }); + + it("should tolerate skip slots in cached and current", () => { + const dataRequest = {startSlot: 0, count: 20}; + // Create sparse arrays with skip slots + const cached = [blockInputs[1], blockInputs[3], blockInputs[5], blockInputs[7]]; + const current = [validatedBlocks[10], validatedBlocks[12], validatedBlocks[15], validatedBlocks[18]]; + + const result = getBlocksForDataValidation(dataRequest, cached, current); + + expect(result).toHaveLength(cached.length + current.length); + const slots = result.map(({block}) => block.message.slot); + const expectedSlots = cached.map((b) => b.slot).concat(...current.map((b) => b.block.message.slot)); + expect(slots).toEqual(expectedSlots); + + // Verify ascending order is maintained despite skip slots + for (let i = 1; i < slots.length; i++) { + expect(slots[i]).toBeGreaterThan(slots[i - 1]); + } + }); +}); + +// describe("Error handling", () => { +// it("should build correct slot range string for blocks request"); +// it("should build correct slot range string for blobs request"); +// it("should build correct slot range string for columns request"); +// it("should handle missing request parameters in slot range string"); +// it("should create DownloadByRangeError with correct error codes"); +// it("should preserve error context in DownloadByRangeError"); +// it("should handle network errors appropriately"); +// it("should handle validation errors appropriately"); +// it("should handle cache errors appropriately"); +// }); + +// describe("Integration scenarios", () => { +// it("should handle full download and cache flow for blocks only"); +// it("should handle full download and cache flow for blocks with blobs"); +// it("should handle full download and cache flow for blocks with columns"); +// it("should handle partial responses within valid range"); +// it("should handle peer disconnection during download"); +// it("should handle fork transition during range download"); +// it("should handle reorg detection via parent root mismatch"); +// it("should handle maximum request size limits"); +// it("should handle minimum request size (count=1)"); +// it("should handle skip slots in epoch boundaries"); +// it("should handle genesis slot edge cases"); +// it("should handle far future slot requests"); +// }); +// }); diff --git a/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts b/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts new file mode 100644 index 000000000000..f2cf578912f9 --- /dev/null +++ b/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts @@ -0,0 +1,355 @@ +import {randomBytes} from "node:crypto"; +import {ForkName, NUMBER_OF_COLUMNS} from "@lodestar/params"; +import {ssz} from "@lodestar/types"; +import {afterAll, afterEach, beforeAll, beforeEach, describe, expect, it, vi} from "vitest"; +import {BlobMeta, MissingColumnMeta} from "../../../../src/chain/blocks/blockInput/types.js"; +import {BlobSidecarValidationError} from "../../../../src/chain/errors/blobSidecarError.js"; +import {INetwork} from "../../../../src/network/index.js"; +import { + DownloadByRootError, + fetchAndValidateBlobs, + fetchAndValidateBlock, + fetchAndValidateColumns, + fetchBlobsByRoot, + fetchColumnsByRoot, +} from "../../../../src/sync/utils/downloadByRoot.js"; +import {kzgCommitmentToVersionedHash} from "../../../../src/util/blobs.js"; +import {ROOT_SIZE} from "../../../../src/util/sszBytes.js"; +import { + config, + generateBlock, + generateBlockWithBlobSidecars, + 
+// });
diff --git a/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts b/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts
new file mode 100644
index 000000000000..f2cf578912f9
--- /dev/null
+++ b/packages/beacon-node/test/unit/sync/utils/downloadByRoot.test.ts
@@ -0,0 +1,355 @@
+import {randomBytes} from "node:crypto";
+import {ForkName, NUMBER_OF_COLUMNS} from "@lodestar/params";
+import {ssz} from "@lodestar/types";
+import {afterAll, afterEach, beforeAll, beforeEach, describe, expect, it, vi} from "vitest";
+import {BlobMeta, MissingColumnMeta} from "../../../../src/chain/blocks/blockInput/types.js";
+import {BlobSidecarValidationError} from "../../../../src/chain/errors/blobSidecarError.js";
+import {DataColumnSidecarValidationError} from "../../../../src/chain/errors/dataColumnSidecarError.js";
+import {INetwork} from "../../../../src/network/index.js";
+import {PeerSyncMeta} from "../../../../src/network/peers/peersData.js";
+import {
+  DownloadByRootError,
+  fetchAndValidateBlobs,
+  fetchAndValidateBlock,
+  fetchAndValidateColumns,
+  fetchBlobsByRoot,
+  fetchColumnsByRoot,
+} from "../../../../src/sync/utils/downloadByRoot.js";
+import {kzgCommitmentToVersionedHash} from "../../../../src/util/blobs.js";
+import {ROOT_SIZE} from "../../../../src/util/sszBytes.js";
+import {
+  config,
+  generateBlock,
+  generateBlockWithBlobSidecars,
+  generateBlockWithColumnSidecars,
+} from "../../../utils/blocksAndData.js";
+
+describe("downloadByRoot.ts", () => {
+  const peerIdStr = "1234567890abcdef1234567890abcdef";
+  const peerMeta: PeerSyncMeta = {
+    peerId: peerIdStr,
+    client: "N/A",
+    custodyGroups: Array.from({length: NUMBER_OF_COLUMNS}, (_, i) => i),
+    earliestAvailableSlot: 0,
+  };
+  let network: INetwork;
+
+  describe("fetchAndValidateBlock", () => {
+    let capellaBlock: ReturnType<typeof generateBlock>;
+    beforeAll(() => {
+      capellaBlock = generateBlock({forkName: ForkName.capella});
+    });
+    afterAll(() => {
+      vi.resetAllMocks();
+    });
+
+    it("should successfully fetch and validate block with matching root", async () => {
+      network = {
+        sendBeaconBlocksByRoot: vi.fn(() => [{data: capellaBlock.block}]),
+      } as unknown as INetwork;
+
+      const response = await fetchAndValidateBlock({
+        config,
+        network,
+        peerIdStr,
+        blockRoot: capellaBlock.blockRoot,
+      });
+
+      expect(response).toBe(capellaBlock.block);
+    });
+
+    it("should throw error when no block is returned from network", async () => {
+      network = {
+        sendBeaconBlocksByRoot: vi.fn(() => []),
+      } as unknown as INetwork;
+
+      await expect(
+        fetchAndValidateBlock({
+          config,
+          network,
+          peerIdStr,
+          blockRoot: capellaBlock.blockRoot,
+        })
+      ).rejects.toThrow(DownloadByRootError);
+    });
+
+    it("should throw error when block root doesn't match requested root", async () => {
+      network = {
+        sendBeaconBlocksByRoot: vi.fn(() => [{data: capellaBlock.block}]),
+      } as unknown as INetwork;
+
+      const invalidRoot = randomBytes(ROOT_SIZE);
+
+      await expect(
+        fetchAndValidateBlock({
+          config,
+          network,
+          peerIdStr,
+          blockRoot: invalidRoot,
+        })
+      ).rejects.toThrow(DownloadByRootError);
+    });
+  });
+
+  describe("fetchAndValidateBlobs", () => {
+    const forkName = ForkName.deneb;
+    let denebBlockWithBlobs: ReturnType<typeof generateBlockWithBlobSidecars>;
+    let blobMeta: BlobMeta[];
+
+    beforeEach(() => {
+      denebBlockWithBlobs = generateBlockWithBlobSidecars({forkName, count: 6});
+      blobMeta = denebBlockWithBlobs.versionedHashes.map((versionedHash, index) => ({
+        index,
+        blockRoot: denebBlockWithBlobs.blockRoot,
+        versionedHash,
+      }));
+    });
+
+    afterEach(() => {
+      vi.resetAllMocks();
+    });
+
+    it("should successfully fetch blobs from network only", async () => {
+      const sendBlobSidecarsByRootMock = vi.fn(() => Promise.resolve(denebBlockWithBlobs.blobSidecars));
+      network = {
+        sendBlobSidecarsByRoot: sendBlobSidecarsByRootMock,
+      } as unknown as INetwork;
+
+      const response = await fetchAndValidateBlobs({
+        config,
+        network,
+        forkName,
+        peerIdStr,
+        blockRoot: denebBlockWithBlobs.blockRoot,
+        block: denebBlockWithBlobs.block,
+        blobMeta,
+      });
+
+      expect(response).toEqual(denebBlockWithBlobs.blobSidecars);
+    });
+
+    it("should not error if unable to fetch all blobs from network", async () => {
+      const sendBlobSidecarsByRootMock = vi.fn(() =>
+        Promise.resolve([
+          denebBlockWithBlobs.blobSidecars[1],
+          denebBlockWithBlobs.blobSidecars[3],
+          denebBlockWithBlobs.blobSidecars[5],
+        ])
+      );
+      network = {
+        sendBlobSidecarsByRoot: sendBlobSidecarsByRootMock,
+      } as unknown as INetwork;
+
+      const response = await fetchAndValidateBlobs({
+        config,
+        network,
+        forkName,
+        peerIdStr,
+        blockRoot: denebBlockWithBlobs.blockRoot,
+        block: denebBlockWithBlobs.block,
+        blobMeta,
+      });
+
+      expect(sendBlobSidecarsByRootMock).toHaveBeenCalledExactlyOnceWith(
+        peerIdStr,
+        denebBlockWithBlobs.blobSidecars.map(({index}) => ({blockRoot: denebBlockWithBlobs.blockRoot, index}))
+      );
+
+      const returnedIndices = response.map((b) => b.index);
+      expect(returnedIndices).toEqual([1, 3, 5]);
+    });
+
+    it.todo("should throw error if no blobs are returned", async () => {
+      const sendBlobSidecarsByRootMock = vi.fn(() => Promise.resolve([]));
+      network = {
+        sendBlobSidecarsByRoot: sendBlobSidecarsByRootMock,
+      } as unknown as INetwork;
+
+      const requestedBlockRoot = randomBytes(ROOT_SIZE);
+
+      await expect(
+        fetchAndValidateBlobs({
+          config,
+          network,
+          forkName,
+          peerIdStr,
+          blockRoot: requestedBlockRoot,
+          block: denebBlockWithBlobs.block,
+          blobMeta,
+        })
+      ).rejects.toThrow(BlobSidecarValidationError);
+    });
+  });
+
+  describe("fetchBlobsByRoot", () => {
+    let denebBlockWithBlobs: ReturnType<typeof generateBlockWithBlobSidecars>;
+    let blockRoot: Uint8Array;
+    let blobMeta: BlobMeta[];
+    beforeAll(() => {
+      denebBlockWithBlobs = generateBlockWithBlobSidecars({forkName: ForkName.deneb, count: 6});
+      blockRoot = denebBlockWithBlobs.blockRoot;
+      blobMeta = denebBlockWithBlobs.blobSidecars.map((_, index) => ({blockRoot, index}) as BlobMeta);
+      network = {
+        sendBlobSidecarsByRoot: vi.fn(() => denebBlockWithBlobs.blobSidecars),
+      } as unknown as INetwork;
+    });
+    afterAll(() => {
+      vi.resetAllMocks();
+    });
+
+    it("should fetch missing blobSidecars ByRoot from network", async () => {
+      const response = await fetchBlobsByRoot({
+        network,
+        peerIdStr,
+        blobMeta,
+      });
+      expect(response).toEqual(denebBlockWithBlobs.blobSidecars);
+      expect(network.sendBlobSidecarsByRoot).toHaveBeenCalledOnce();
+      expect(network.sendBlobSidecarsByRoot).toHaveBeenCalledWith(peerIdStr, blobMeta);
+    });
+
+    it("should filter out blobs already in possession", async () => {
+      await fetchBlobsByRoot({
+        network,
+        peerIdStr,
+        blobMeta,
+        // biome-ignore lint/style/noNonNullAssertion: it's there
+        indicesInPossession: [0, denebBlockWithBlobs.blobSidecars.at(-1)?.index!],
+      });
+      expect(network.sendBlobSidecarsByRoot).toHaveBeenCalledOnce();
+      expect(network.sendBlobSidecarsByRoot).toHaveBeenCalledWith(peerIdStr, blobMeta.slice(1, -1));
+    });
+
+    it("should handle empty blob request when all blobs are in possession", async () => {
+      const response = await fetchBlobsByRoot({
+        network,
+        peerIdStr,
+        blobMeta,
+        indicesInPossession: blobMeta.map(({index}) => index),
+      });
+      expect(response).toEqual([]);
+      expect(network.sendBlobSidecarsByRoot).not.toHaveBeenCalled();
+    });
+  });
+
+  describe("fetchAndValidateColumns", () => {
+    const forkName = ForkName.fulu;
+    let fuluBlockWithColumns: ReturnType<typeof generateBlockWithColumnSidecars>;
+    let columnMeta: MissingColumnMeta;
+    let versionedHashes: Uint8Array[];
+
+    beforeEach(() => {
+      fuluBlockWithColumns = generateBlockWithColumnSidecars({forkName, returnBlobs: true});
+      versionedHashes = fuluBlockWithColumns.block.message.body.blobKzgCommitments.map((c) =>
+        kzgCommitmentToVersionedHash(c)
+      );
+      columnMeta = {
+        missing: [0, 1, 2, 3, 4, 5, 6, 7], // Sample a subset of columns
+        versionedHashes,
+      };
+    });
+
+    afterEach(() => {
+      vi.resetAllMocks();
+    });
+
+    it("should successfully fetch columns from network only", async () => {
+      const neededColumns = fuluBlockWithColumns.columnSidecars.filter((c) => columnMeta.missing.includes(c.index));
+      const sendDataColumnSidecarsByRootMock = vi.fn(() => Promise.resolve(neededColumns));
+      network = {
+        sendDataColumnSidecarsByRoot: sendDataColumnSidecarsByRootMock,
+        custodyConfig: {
+          custodyColumns: [0, 1, 2, 3, 4, 5],
+          sampledColumns: columnMeta.missing,
+        },
+        logger: {
+          error: vi.fn(),
+        },
+      } as unknown as INetwork;
+
+      const response = await fetchAndValidateColumns({
+        config,
+        network,
+        forkName,
+        peerMeta,
+        blockRoot: fuluBlockWithColumns.blockRoot,
+        block: fuluBlockWithColumns.block,
+        columnMeta,
+      });
+
+      expect(sendDataColumnSidecarsByRootMock).toHaveBeenCalledExactlyOnceWith(peerIdStr, [
+        {blockRoot: fuluBlockWithColumns.blockRoot, columns: columnMeta.missing},
+      ]);
+      expect(response.result.map((c) => c.index)).toEqual(columnMeta.missing);
+    });
+
+    it("should throw error if column validation fails", async () => {
+      // biome-ignore lint/style/noNonNullAssertion: exists
+      const invalidColumn = ssz.fulu.DataColumnSidecar.clone(fuluBlockWithColumns.columnSidecars.at(1)!);
+      // Corrupt the inclusion proof to make validation fail
+      invalidColumn.kzgCommitmentsInclusionProof[0] = new Uint8Array(32).fill(255);
+
+      // Spread the tail slice so the mock resolves a flat array of sidecars
+      const sendDataColumnSidecarsByRootMock = vi.fn(() =>
+        Promise.resolve([
+          fuluBlockWithColumns.columnSidecars[0],
+          invalidColumn,
+          ...fuluBlockWithColumns.columnSidecars.slice(2, 6),
+        ])
+      );
+      network = {
+        sendDataColumnSidecarsByRoot: sendDataColumnSidecarsByRootMock,
+        custodyConfig: {
+          custodyColumns: [0, 1, 2, 3, 4, 5],
+          sampledColumns: [0, 1, 2, 3, 4, 5],
+        },
+        logger: {
+          error: vi.fn(),
+        },
+      } as unknown as INetwork;
+
+      await expect(
+        fetchAndValidateColumns({
+          config,
+          network,
+          forkName,
+          peerMeta,
+          blockRoot: fuluBlockWithColumns.blockRoot,
+          block: fuluBlockWithColumns.block,
+          columnMeta: {
+            missing: [0, 1, 2, 3, 4, 5],
+            versionedHashes,
+          },
+        })
+      ).rejects.toThrow(DataColumnSidecarValidationError);
+    });
+  });
+
+  describe("fetchColumnsByRoot", () => {
+    let fuluBlockWithColumns: ReturnType<typeof generateBlockWithColumnSidecars>;
+    beforeAll(() => {
+      fuluBlockWithColumns = generateBlockWithColumnSidecars({forkName: ForkName.fulu});
+      network = {
+        sendDataColumnSidecarsByRoot: vi.fn(() => fuluBlockWithColumns.columnSidecars),
+      } as unknown as INetwork;
+    });
+    afterAll(() => {
+      vi.resetAllMocks();
+    });
+    it("should fetch missing columnSidecars ByRoot from network", async () => {
+      const blockRoot = fuluBlockWithColumns.blockRoot;
+      const missing = fuluBlockWithColumns.columnSidecars.map((c) => c.index);
+      const response = await fetchColumnsByRoot({
+        network,
+        peerMeta,
+        blockRoot,
+        columnMeta: {
+          missing,
+          versionedHashes: [],
+        },
+      });
+      expect(response).toEqual(fuluBlockWithColumns.columnSidecars);
+      expect(network.sendDataColumnSidecarsByRoot).toHaveBeenCalledOnce();
+      expect(network.sendDataColumnSidecarsByRoot).toHaveBeenCalledWith(peerIdStr, [{blockRoot, columns: missing}]);
+    });
+  });
+});
diff --git a/packages/beacon-node/test/unit/sync/utils/pendingBlocksTree.test.ts b/packages/beacon-node/test/unit/sync/utils/pendingBlocksTree.test.ts
index 9251c1159c64..5e76f31cc276 100644
--- a/packages/beacon-node/test/unit/sync/utils/pendingBlocksTree.test.ts
+++ b/packages/beacon-node/test/unit/sync/utils/pendingBlocksTree.test.ts
@@ -1,11 +1,18 @@
 import {RootHex} from "@lodestar/types";
 import {describe, expect, it} from "vitest";
-import {PendingBlock, PendingBlockStatus, UnknownAndAncestorBlocks} from "../../../../src/sync/index.js";
 import {
+  BlockInputSyncCacheItem,
+  PendingBlockInput,
+  PendingBlockInputStatus,
+  getBlockInputSyncCacheItemRootHex,
+} from "../../../../src/sync/types.js";
+import {
+  UnknownAndAncestorBlocks,
   getAllDescendantBlocks,
   getDescendantBlocks,
   getUnknownAndAncestorBlocks,
 } from "../../../../src/sync/utils/pendingBlocksTree.js";
+import {MockBlockInput} from "../../../utils/blockInput.js";
"../../../utils/blockInput.js"; describe("sync / pendingBlocksTree", () => { const testCases: { @@ -49,13 +56,15 @@ describe("sync / pendingBlocksTree", () => { ]; for (const testCase of testCases) { - const blocks = new Map(); + const blocks = new Map(); for (const block of testCase.blocks) { - blocks.set(block.block, { - blockRootHex: block.block, - parentBlockRootHex: block.parent, - status: block.parent == null ? PendingBlockStatus.pending : PendingBlockStatus.downloaded, - } as PendingBlock); + const pending: PendingBlockInput = { + status: block.parent === null ? PendingBlockInputStatus.pending : PendingBlockInputStatus.downloaded, + blockInput: new MockBlockInput({blockRootHex: block.block, parentRootHex: block.parent}), + peerIdStrings: new Set(), + timeAddedSec: 0, + }; + blocks.set(pending.blockInput.blockRootHex, pending); } describe(testCase.id, () => { @@ -78,13 +87,13 @@ describe("sync / pendingBlocksTree", () => { } }); -function toRes(blocks: PendingBlock[]): string[] { - return blocks.map((block) => block.blockRootHex); +function toRes(blocks: BlockInputSyncCacheItem[]): string[] { + return blocks.map((block) => getBlockInputSyncCacheItemRootHex(block)); } function toRes2(blocks: UnknownAndAncestorBlocks): {unknowns: string[]; ancestors: string[]} { return { - unknowns: blocks.unknowns.map((block) => block.blockRootHex), - ancestors: blocks.ancestors.map((block) => block.blockRootHex), + unknowns: blocks.unknowns.map((block) => getBlockInputSyncCacheItemRootHex(block)), + ancestors: blocks.ancestors.map((block) => getBlockInputSyncCacheItemRootHex(block)), }; } diff --git a/packages/beacon-node/test/unit/util/dataColumn.test.ts b/packages/beacon-node/test/unit/util/dataColumn.test.ts index 216a124d2eb0..6f29df80b723 100644 --- a/packages/beacon-node/test/unit/util/dataColumn.test.ts +++ b/packages/beacon-node/test/unit/util/dataColumn.test.ts @@ -5,7 +5,7 @@ import {ssz} from "@lodestar/types"; import {bigIntToBytes, fromHex} from "@lodestar/utils"; import {afterEach, beforeEach, describe, expect, it} from "vitest"; -import {validateDataColumnsSidecars} from "../../../src/chain/validation/dataColumnSidecar.js"; +import {validateBlockDataColumnSidecars} from "../../../src/chain/validation/dataColumnSidecar.js"; import { CustodyConfig, getDataColumnSidecarsFromBlock, @@ -175,7 +175,7 @@ describe("data column sidecars", () => { expect(columnSidecars[0].column.length).toEqual(blobs.length); await expect( - validateDataColumnsSidecars(slot, blockRoot, kzgCommitments, columnSidecars, null) + validateBlockDataColumnSidecars(slot, blockRoot, kzgCommitments.length, columnSidecars) ).resolves.toBeUndefined(); }); @@ -211,8 +211,8 @@ describe("data column sidecars", () => { expect(columnSidecars.length).toEqual(NUMBER_OF_COLUMNS); expect(columnSidecars[0].column.length).toEqual(blobs.length); - await expect(validateDataColumnsSidecars(slot, blockRoot, [], columnSidecars, null)).rejects.toThrow( - `Invalid data column sidecar slot=${slot}` + await expect(validateBlockDataColumnSidecars(slot, blockRoot, 0, columnSidecars)).rejects.toThrow( + "Block has no blob commitments but data column sidecars were provided" ); }); }); diff --git a/packages/beacon-node/test/unit/util/execution.test.ts b/packages/beacon-node/test/unit/util/execution.test.ts new file mode 100644 index 000000000000..cbdfbef7f1f6 --- /dev/null +++ b/packages/beacon-node/test/unit/util/execution.test.ts @@ -0,0 +1,230 @@ +// describe("fetchGetBlobsV1AndBuildSidecars", () => { +// let denebBlockWithBlobs: ReturnType; +// 
+// let blobsAndProofs: deneb.BlobAndProof[];
+// let blobMeta: BlobMeta[];
+// const forkName = ForkName.deneb;
+
+// beforeEach(() => {
+// denebBlockWithBlobs = generateBlockWithBlobSidecars({forkName, count: 6});
+// blobsAndProofs = denebBlockWithBlobs.blobSidecars.map(({blob, kzgProof}) => ({blob, proof: kzgProof}));
+// blobMeta = denebBlockWithBlobs.versionedHashes.map((versionedHash, index) => ({index, versionedHash}) as BlobMeta);
+// });
+
+// afterEach(() => {
+// vi.resetAllMocks();
+// });
+
+// it("should call getBlobs with the correct arguments", async () => {
+// const getBlobsMock = vi.fn(() => Promise.resolve(blobsAndProofs));
+// executionEngine = {
+// getBlobs: getBlobsMock,
+// } as unknown as IExecutionEngine;
+
+// await fetchGetBlobsV1AndBuildSidecars({
+// config,
+// forkName,
+// executionEngine,
+// block: denebBlockWithBlobs.block,
+// blobMeta: blobMeta,
+// });
+
+// expect(getBlobsMock).toHaveBeenCalledOnce();
+// expect(getBlobsMock).toHaveBeenCalledWith(forkName, denebBlockWithBlobs.versionedHashes);
+// });
+
+// it("should return empty array when execution engine returns no blobs", async () => {
+// const getBlobsMock = vi.fn(() => Promise.resolve([]));
+// executionEngine = {
+// getBlobs: getBlobsMock,
+// } as unknown as IExecutionEngine;
+
+// const response = await fetchGetBlobsV1AndBuildSidecars({
+// config,
+// forkName,
+// executionEngine,
+// block: denebBlockWithBlobs.block,
+// blobMeta: blobMeta,
+// });
+// expect(response).toEqual([]);
+// });
+
+// it("should build valid blob sidecars from execution engine response", async () => {
+// const getBlobsMock = vi.fn(() => Promise.resolve(blobsAndProofs));
+// executionEngine = {
+// getBlobs: getBlobsMock,
+// } as unknown as IExecutionEngine;
+
+// const response = await fetchGetBlobsV1AndBuildSidecars({
+// config,
+// forkName,
+// executionEngine,
+// block: denebBlockWithBlobs.block,
+// blobMeta: blobMeta,
+// });
+
+// expect(getBlobsMock).toHaveBeenCalledOnce();
+// expect(response).toBeDefined();
+// expect(response).toBeInstanceOf(Array);
+// expect(response.length).toEqual(blobsAndProofs.length);
+// for (const blobSidecar of response) {
+// expect(blobSidecar).toHaveProperty("index");
+// expect(blobSidecar.index).toBeTypeOf("number");
+
+// expect(blobSidecar).toHaveProperty("blob");
+// expect(blobSidecar.blob).toBeInstanceOf(Uint8Array);
+// expect(blobSidecar.blob.length).toEqual(BYTES_PER_BLOB);
+
+// expect(blobSidecar).toHaveProperty("kzgProof");
+// expect(blobSidecar.kzgProof).toBeInstanceOf(Uint8Array);
+// expect(blobSidecar.kzgProof.length).toEqual(BYTES_PER_PROOF);
+
+// expect(blobSidecar).toHaveProperty("kzgCommitment");
+// expect(blobSidecar.kzgCommitment).toBeInstanceOf(Uint8Array);
+// expect(blobSidecar.kzgCommitment.length).toEqual(BYTES_PER_COMMITMENT);
+
+// expect(blobSidecar).toHaveProperty("kzgCommitmentInclusionProof");
+// expect(blobSidecar.kzgCommitmentInclusionProof).toBeInstanceOf(Array);
+// blobSidecar.kzgCommitmentInclusionProof.map((proof) => expect(proof).toBeInstanceOf(Uint8Array));
+
+// expect(blobSidecar).toHaveProperty("signedBlockHeader");
+// expect(blobSidecar.signedBlockHeader.message.slot).toBe(denebBlockWithBlobs.block.message.slot);
+// expect(blobSidecar.signedBlockHeader.message.proposerIndex).toBe(denebBlockWithBlobs.block.message.proposerIndex);
+// expect(blobSidecar.signedBlockHeader.message.parentRoot).toEqual(denebBlockWithBlobs.block.message.parentRoot);
+// expect(blobSidecar.signedBlockHeader.message.stateRoot).toEqual(denebBlockWithBlobs.block.message.stateRoot);
+// }
+
+// await expect(
+// validateBlockBlobSidecars(
+// denebBlockWithBlobs.block.message.slot,
+// denebBlockWithBlobs.blockRoot,
+// denebBlockWithBlobs.block.message.body.blobKzgCommitments.length,
+// response
+// )
+// ).resolves.toBeUndefined();
+// });
+
+// it("should handle partial blob response from execution engine", async () => {
+// const engineResponse: (BlobAndProof | null)[] = [...blobsAndProofs];
+// engineResponse[2] = null;
+// engineResponse[4] = null;
+// const getBlobsMock = vi.fn(() => Promise.resolve(engineResponse));
+// executionEngine = {
+// getBlobs: getBlobsMock,
+// } as unknown as IExecutionEngine;
+
+// const response = await fetchGetBlobsV1AndBuildSidecars({
+// config,
+// forkName,
+// executionEngine,
+// block: denebBlockWithBlobs.block,
+// blobMeta: blobMeta,
+// });
+
+// expect(response.length).toEqual(4);
+// expect(response.map(({index}) => index)).toEqual([0, 1, 3, 5]);
+// });
+// });
+
+// describe("fetchGetBlobsV2AndBuildSidecars", () => {
+// let fuluBlockWithColumns: ReturnType<typeof generateBlockWithColumnSidecars>;
+// let blobAndProofs: fulu.BlobAndProofV2[];
+// let versionedHashes: Uint8Array[];
+
+// beforeEach(() => {
+// fuluBlockWithColumns = generateBlockWithColumnSidecars({forkName: ForkName.fulu, returnBlobs: true});
+// // biome-ignore lint/style/noNonNullAssertion: returnBlobs = true
+// const blobs = fuluBlockWithColumns.blobs!;
+// blobAndProofs = blobs.map((b) => kzg.computeCellsAndKzgProofs(b)).map(({proofs}, i) => ({proofs, blob: blobs[i]}));
+// versionedHashes = fuluBlockWithColumns.block.message.body.blobKzgCommitments.map((c) =>
+// kzgCommitmentToVersionedHash(c)
+// );
+// });
+
+// afterEach(() => {
+// vi.resetAllMocks();
+// });
+
+// it("should call getBlobs with the correct arguments", async () => {
+// const getBlobsMock = vi.fn(() => Promise.resolve(blobAndProofs));
+// executionEngine = {
+// getBlobs: getBlobsMock,
+// } as unknown as IExecutionEngine;
+
+// const columnMeta = {
+// missing: fuluBlockWithColumns.columnSidecars.map((c) => c.index),
+// versionedHashes,
+// };
+
+// await fetchGetBlobsV2AndBuildSidecars({
+// config,
+// executionEngine,
+// forkName: ForkName.fulu,
+// block: fuluBlockWithColumns.block,
+// columnMeta,
+// });
+
+// expect(getBlobsMock).toHaveBeenCalledOnce();
+// expect(getBlobsMock).toHaveBeenCalledWith(ForkName.fulu, versionedHashes);
+// });
+
+// it("should return empty array when execution engine returns no response", async () => {
+// const getBlobsMock = vi.fn(() => Promise.resolve(null));
+// executionEngine = {
+// getBlobs: getBlobsMock,
+// } as unknown as IExecutionEngine;
+
+// const columnMeta = {
+// missing: fuluBlockWithColumns.columnSidecars.map((c) => c.index),
+// versionedHashes,
+// };
+
+// const result = await fetchGetBlobsV2AndBuildSidecars({
+// config,
+// executionEngine,
+// forkName: ForkName.fulu,
+// block: fuluBlockWithColumns.block,
+// columnMeta,
+// });
+
+// expect(getBlobsMock).toHaveBeenCalledOnce();
+// expect(result).toEqual([]);
+// });
+
+// it("should build valid columnSidecars from execution engine blobs", async () => {
+// const getBlobsMock = vi.fn(() => Promise.resolve(blobAndProofs));
+// executionEngine = {
+// getBlobs: getBlobsMock,
+// } as unknown as IExecutionEngine;
+
+// const columnMeta = {
+// missing: fuluBlockWithColumns.columnSidecars.map((c) => c.index),
+// versionedHashes,
+// };
+
+// const result = await fetchGetBlobsV2AndBuildSidecars({
+// config,
+// executionEngine,
+// forkName: ForkName.fulu,
+// block: fuluBlockWithColumns.block,
+// columnMeta,
+// });
+
+// expect(getBlobsMock).toHaveBeenCalledOnce();
+// expect(result).toBeDefined();
+// expect(result).toBeInstanceOf(Array);
+// expect(result.length).toEqual(NUMBER_OF_COLUMNS);
+
+// // Verify the structure of the returned column sidecars
+// for (const [_, columnSidecar] of Object.entries(result)) {
+// await expect(
+// validateBlockDataColumnSidecars(
+// columnSidecar.signedBlockHeader.message.slot,
+// fuluBlockWithColumns.blockRoot,
+// fuluBlockWithColumns.block.message.body.blobKzgCommitments.length,
+// [columnSidecar]
+// )
+// ).resolves.toBeUndefined();
+// }
+// });
+// });
diff --git a/packages/beacon-node/test/unit/util/kzg.test.ts b/packages/beacon-node/test/unit/util/kzg.test.ts
index 3e2c46b8d69c..d008c1f246d0 100644
--- a/packages/beacon-node/test/unit/util/kzg.test.ts
+++ b/packages/beacon-node/test/unit/util/kzg.test.ts
@@ -3,8 +3,8 @@ import {NUMBER_OF_COLUMNS} from "@lodestar/params";
 import {signedBlockToSignedHeader} from "@lodestar/state-transition";
 import {deneb, fulu, ssz} from "@lodestar/types";
 import {afterEach, describe, expect, it} from "vitest";
-import {validateBlobSidecars, validateGossipBlobSidecar} from "../../../src/chain/validation/blobSidecar.js";
-import {getBlobSidecars, recoverDataColumnSidecars} from "../../../src/util/blobs.js";
+import {validateBlockBlobSidecars, validateGossipBlobSidecar} from "../../../src/chain/validation/blobSidecar.js";
+import {getBlobSidecars, dataColumnMatrixRecovery} from "../../../src/util/blobs.js";
 import {getDataColumnSidecarsFromBlock} from "../../../src/util/dataColumns.js";
 import {kzg} from "../../../src/util/kzg.js";
 import {shuffle} from "../../../src/util/shuffle.js";
@@ -62,7 +62,7 @@ describe("KZG", () => {
     expect(blobSidecars.length).toBe(2);
 
     // Full validation
-    await validateBlobSidecars(slot, blockRoot, kzgCommitments, blobSidecars);
+    await validateBlockBlobSidecars(slot, blockRoot, kzgCommitments.length, blobSidecars);
 
     for (const blobSidecar of blobSidecars) {
       try {
@@ -137,7 +137,7 @@ describe("KZG", () => {
       }
     }
 
-    const recoveredSidecars = await recoverDataColumnSidecars(shuffledPartial);
+    const recoveredSidecars = await dataColumnMatrixRecovery(shuffledPartial);
     expect(recoveredSidecars !== null).toBeTruthy();
     if (recoveredSidecars == null) {
       // should not happen
diff --git a/packages/beacon-node/test/utils/blockInput.ts b/packages/beacon-node/test/utils/blockInput.ts
new file mode 100644
index 000000000000..34b9d0747f07
--- /dev/null
+++ b/packages/beacon-node/test/utils/blockInput.ts
@@ -0,0 +1,103 @@
+import {ForkName} from "@lodestar/params";
+import {SignedBeaconBlock} from "@lodestar/types";
+import {
+  AddBlock,
+  BlockInputSource,
+  DAData,
+  DAType,
+  IBlockInput,
+  LogMetaBasic,
+  SourceMeta,
+} from "../../src/chain/blocks/blockInput/index.js";
+
+export type MockBlockInputProps = {
+  type?: DAType;
+  daOutOfRange?: boolean;
+  timeCreatedSec?: number;
+  forkName?: ForkName;
+  slot?: number;
+  blockRootHex?: string;
+  parentRootHex?: string | null;
+};
+
+export class MockBlockInput implements IBlockInput {
+  type: DAType;
+  daOutOfRange: boolean;
+  timeCreatedSec: number;
+  forkName: ForkName;
+  slot: number;
+  blockRootHex: string;
+  parentRootHex: string;
+
+  _block?: SignedBeaconBlock;
+  _blockSource?: BlockInputSource;
+  _blockSeenTimestampSec?: number;
+  _blockPeerIdStr?: string;
+
+  _timeCompleted?: number;
+
+  constructor({type, daOutOfRange, timeCreatedSec, forkName, slot, blockRootHex, parentRootHex}: MockBlockInputProps) {
+    this.type = type ?? DAType.PreData;
+    this.daOutOfRange = daOutOfRange ?? true;
+    this.timeCreatedSec = timeCreatedSec ?? 0;
+    this.forkName = forkName ?? ForkName.capella;
+    this.slot = slot ?? 0;
+    this.blockRootHex = blockRootHex ?? "0x0000000000000000000000000000000000000000000000000000000000000000";
+    this.parentRootHex = parentRootHex ?? "0x0000000000000000000000000000000000000000000000000000000000000000";
+  }
+
+  addBlock(
+    {block, blockRootHex, seenTimestampSec, source, peerIdStr}: AddBlock,
+    _opts?: {throwOnDuplicateAdd: boolean}
+  ): void {
+    this.blockRootHex = blockRootHex;
+
+    this._block = block;
+    this._blockSeenTimestampSec = seenTimestampSec;
+    this._blockSource = source;
+    this._blockPeerIdStr = peerIdStr;
+  }
+  hasBlock(): boolean {
+    // double negation: true once a block has been added
+    return !!this._block;
+  }
+  getBlock(): SignedBeaconBlock {
+    // biome-ignore lint/style/noNonNullAssertion: test fixture
+    return this._block!;
+  }
+  getBlockSource(): SourceMeta {
+    return {
+      seenTimestampSec: this._blockSeenTimestampSec ?? Date.now(),
+      source: this._blockSource ?? BlockInputSource.gossip,
+      peerIdStr: this._blockPeerIdStr ?? "0xTESTING_PEER_ID_STR",
+    };
+  }
+
+  hasAllData(): boolean {
+    return true;
+  }
+  hasBlockAndAllData(): boolean {
+    return !!this._block;
+  }
+
+  getLogMeta(): LogMetaBasic {
+    return {
+      blockRoot: this.blockRootHex,
+      slot: this.slot,
+      timeCreatedSec: this.timeCreatedSec,
+    };
+  }
+
+  getTimeComplete(): number {
+    return this._timeCompleted ?? 0;
+  }
+
+  waitForAllData(_timeout: number, _signal?: AbortSignal): Promise<DAData | null> {
+    return Promise.resolve(null);
+  }
+  waitForBlock(_timeout: number, _signal?: AbortSignal): Promise<SignedBeaconBlock> {
+    return Promise.resolve(this._block as SignedBeaconBlock);
+  }
+  waitForBlockAndAllData(_timeout: number, _signal?: AbortSignal): Promise<IBlockInput> {
+    return Promise.resolve(this);
+  }
+}
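+
+// Example usage (sketch): mirrors how pendingBlocksTree.test.ts consumes this fixture.
+// The root hex strings are arbitrary placeholders and `block` is any SignedBeaconBlock fixture.
+// const input = new MockBlockInput({blockRootHex: "0xaa...", parentRootHex: "0xbb...", slot: 1});
+// input.hasBlock(); // false until a block is added
+// input.addBlock({
+//   block,
+//   blockRootHex: input.blockRootHex,
+//   seenTimestampSec: Date.now() / 1000,
+//   source: BlockInputSource.gossip,
+// });
+// input.hasBlock(); // true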
diff --git a/packages/beacon-node/test/utils/blocksAndData.ts b/packages/beacon-node/test/utils/blocksAndData.ts
new file mode 100644
index 000000000000..fa90e0a25aea
--- /dev/null
+++ b/packages/beacon-node/test/utils/blocksAndData.ts
@@ -0,0 +1,364 @@
+import {randomBytes} from "node:crypto";
+import {SIGNATURE_LENGTH_UNCOMPRESSED} from "@chainsafe/blst";
+import {BYTES_PER_BLOB, BYTES_PER_FIELD_ELEMENT} from "@crate-crypto/node-eth-kzg";
+import {generateKeyPair} from "@libp2p/crypto/keys";
+import {createChainForkConfig, defaultChainConfig} from "@lodestar/config";
+import {
+  ForkPostCapella,
+  ForkPostDeneb,
+  ForkPostFulu,
+  NUMBER_OF_COLUMNS,
+  SLOTS_PER_EPOCH,
+  isForkPostDeneb,
+  isForkPostFulu,
+} from "@lodestar/params";
+import {computeStartSlotAtEpoch, signedBlockToSignedHeader} from "@lodestar/state-transition";
+import {SignedBeaconBlock, Slot, deneb, fulu, ssz} from "@lodestar/types";
+import {toRootHex} from "@lodestar/utils";
+import {VersionedHashes} from "../../src/execution/index.js";
+import {computeNodeIdFromPrivateKey} from "../../src/network/subnets/index.js";
+import {getBlobSidecars, kzgCommitmentToVersionedHash} from "../../src/util/blobs.js";
+import {Clock} from "../../src/util/clock.js";
+import {CustodyConfig, computePostFuluKzgCommitmentsInclusionProof} from "../../src/util/dataColumns.js";
+import {kzg} from "../../src/util/kzg.js";
+import {ROOT_SIZE} from "../../src/util/sszBytes.js";
+
+export const CAPELLA_FORK_EPOCH = 0;
+export const DENEB_FORK_EPOCH = 10;
+export const ELECTRA_FORK_EPOCH = 20;
+export const FULU_FORK_EPOCH = 30;
+export const GLOAS_FORK_EPOCH = 40;
+export const config = createChainForkConfig({
+  ...defaultChainConfig,
+  CAPELLA_FORK_EPOCH,
+  DENEB_FORK_EPOCH,
+  ELECTRA_FORK_EPOCH,
+  FULU_FORK_EPOCH,
+  GLOAS_FORK_EPOCH,
+});
+export const clock = new Clock({
+  config,
+  // For our testing we want the clock to be at head of the latest fork
+  genesisTime: Date.now() / 1000 - SLOTS_PER_EPOCH * GLOAS_FORK_EPOCH * config.SECONDS_PER_SLOT,
+  signal: new AbortController().signal,
+});
+export const privateKey = await generateKeyPair("secp256k1");
+export const nodeId = computeNodeIdFromPrivateKey(privateKey);
+export const custodyConfig = new CustodyConfig({config, nodeId});
+
+export const slots: Record<ForkPostCapella, Slot> = {
+  capella: computeStartSlotAtEpoch(CAPELLA_FORK_EPOCH),
+  deneb: computeStartSlotAtEpoch(DENEB_FORK_EPOCH),
+  electra: computeStartSlotAtEpoch(ELECTRA_FORK_EPOCH),
+  fulu: computeStartSlotAtEpoch(FULU_FORK_EPOCH),
+  gloas: computeStartSlotAtEpoch(GLOAS_FORK_EPOCH),
+};
+
+/**
+ * Value used in c-kzg. The big-endian encoding of the BLS12-381 scalar field modulus
+ * starts with byte 0x73 (115), so capping the leading byte of each 32-byte field element
+ * at 114 keeps every element canonical (strictly below the modulus).
+ * https://github.com/matthewkeil/c-kzg-4844/blob/cc7c4e90669efc777a92b375574036a64f8ae9ae/bindings/node.js/test/kzg.test.ts#L42
+ */
+const MAX_TOP_BYTE = 114;
+
+/**
+ * Generates a random blob of the correct length for the KZG library
+ * https://github.com/matthewkeil/c-kzg-4844/blob/cc7c4e90669efc777a92b375574036a64f8ae9ae/bindings/node.js/test/kzg.test.ts#L87
+ */
+export function generateRandomBlob(): Uint8Array {
+  return new Uint8Array(
+    randomBytes(BYTES_PER_BLOB).map((x, i) => {
+      // Set the top byte to be low enough that the field element doesn't overflow the BLS modulus
+      if (x > MAX_TOP_BYTE && i % BYTES_PER_FIELD_ELEMENT === 0) {
+        return Math.floor(Math.random() * MAX_TOP_BYTE);
+      }
+      return x;
+    })
+  );
+}
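+
+// Hypothetical sanity check (not part of the suite): the generator above guarantees the
+// leading byte of every 32-byte field element is <= MAX_TOP_BYTE, hence canonical.
+// const blob = generateRandomBlob();
+// for (let i = 0; i < blob.length; i += BYTES_PER_FIELD_ELEMENT) {
+//   if (blob[i] > MAX_TOP_BYTE) throw new Error(`non-canonical field element at byte ${i}`);
+// }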
+
+/**
+ * Generate a random number between min and max (inclusive)
+ */
+function generateRandomInt(min: number, max: number): number {
+  return Math.floor(Math.random() * (max - min + 1)) + min;
+}
+function generateProposerIndex(min = 0, max = 100_000): number {
+  return generateRandomInt(min, max);
+}
+
+export type GenerateBlockProps<F extends ForkPostCapella> = {
+  forkName: F;
+  slot?: Slot;
+  parentRoot?: Uint8Array;
+};
+
+function generateBeaconBlock<F extends ForkPostCapella>({
+  forkName,
+  slot,
+  parentRoot,
+}: GenerateBlockProps<F>): SignedBeaconBlock<F> {
+  const block = ssz[forkName].SignedBeaconBlock.defaultValue();
+  block.message.slot = slot ? slot : slots[forkName];
+  block.message.parentRoot = parentRoot ? parentRoot : Uint8Array.from(randomBytes(ROOT_SIZE));
+  block.message.stateRoot = Uint8Array.from(randomBytes(ROOT_SIZE));
+  block.message.proposerIndex = generateProposerIndex();
+  // signature is obviously not valid so can generate it now instead of after commitments are attached
+  block.signature = Uint8Array.from(randomBytes(SIGNATURE_LENGTH_UNCOMPRESSED));
+  return block;
+}
+
+function generateRoots<F extends ForkPostCapella>(
+  forkName: F,
+  block: SignedBeaconBlock<F>
+): {
+  blockRoot: Uint8Array;
+  rootHex: string;
+} {
+  const blockRoot = ssz[forkName].BeaconBlock.hashTreeRoot(block.message as any);
+  const rootHex = toRootHex(blockRoot);
+  return {
+    blockRoot,
+    rootHex,
+  };
+}
+
+function generateBlobSidecars<F extends ForkPostDeneb>(
+  block: SignedBeaconBlock<F>,
+  count: number,
+  oomProtection = false
+): {
+  block: SignedBeaconBlock<F>;
+  blobSidecars: deneb.BlobSidecars;
+  versionedHashes: VersionedHashes;
+} {
+  const blobs = Array.from({length: count}, () => generateRandomBlob());
+  const commitments = blobs.map((blob) => kzg.blobToKzgCommitment(blob));
+  const proofs = blobs.map((blob, i) => kzg.computeBlobKzgProof(blob, commitments[i]));
+
+  block.message.body.blobKzgCommitments = commitments;
+
+  const blobSidecars = getBlobSidecars(config, block, blobs, proofs);
+
+  if (oomProtection) {
+    // Replace each blob with a 1-byte placeholder to keep memory usage low. Note this must
+    // mutate the sidecars in place; a bare `.map()` whose result is discarded does nothing.
+    for (const sidecar of blobSidecars) {
+      sidecar.blob = new Uint8Array(1);
+    }
+  }
+
+  const versionedHashes = commitments.map((commitment) => kzgCommitmentToVersionedHash(commitment));
+
+  return {
+    block,
+    blobSidecars,
+    versionedHashes,
+  };
+}
+
+function generateColumnSidecars<F extends ForkPostFulu>(
+  forkName: F,
+  block: SignedBeaconBlock<F>,
+  numberOfBlobs: number,
+  oomProtection = false,
+  returnBlobs = false
+): {
+  block: SignedBeaconBlock<F>;
+  columnSidecars: fulu.DataColumnSidecars;
+  blobs?: deneb.Blob[];
+} {
+  const blobs = Array.from({length: numberOfBlobs}, () => generateRandomBlob());
+  const kzgCommitments = blobs.map((blob) => kzg.blobToKzgCommitment(blob));
+  block.message.body.blobKzgCommitments = kzgCommitments;
+
+  const signedBlockHeader = signedBlockToSignedHeader(config, block);
+  const cellsAndProofs = blobs.map((blob) => kzg.computeCellsAndKzgProofs(blob));
+  const kzgCommitmentsInclusionProof = computePostFuluKzgCommitmentsInclusionProof(forkName, block.message.body);
+
+  const columnSidecars = Array.from({length: NUMBER_OF_COLUMNS}, (_, columnIndex) => {
+    const column = oomProtection
+      ? []
+      : Array.from({length: blobs.length}, (_, rowNumber) => cellsAndProofs[rowNumber].cells[columnIndex]);
+    const kzgProofs = Array.from(
+      {length: blobs.length},
+      (_, rowNumber) => cellsAndProofs[rowNumber].proofs[columnIndex]
+    );
+    return {
+      index: columnIndex,
+      column,
+      kzgCommitments,
+      kzgProofs,
+      signedBlockHeader,
+      kzgCommitmentsInclusionProof,
+    };
+  });
+
+  return {
+    block,
+    columnSidecars,
+    blobs: returnBlobs ? blobs : undefined,
+  };
+}
+
+export type BlockTestSet<F extends ForkPostCapella> = {
+  block: SignedBeaconBlock<F>;
+  blockRoot: Uint8Array;
+  rootHex: string;
+};
+
+export function generateBlock<F extends ForkPostCapella>({
+  forkName,
+  parentRoot,
+  slot,
+}: GenerateBlockProps<F>): BlockTestSet<F> {
+  const block = generateBeaconBlock({
+    forkName,
+    slot,
+    parentRoot,
+  });
+  const {blockRoot, rootHex} = generateRoots(forkName, block);
+
+  return {
+    block,
+    rootHex,
+    blockRoot,
+  };
+}
+
+export function generateChainOfBlocks<F extends ForkPostCapella>({
+  forkName,
+  count,
+}: {
+  forkName: F;
+  count: number;
+}): BlockTestSet<F>[] {
+  let parentRoot = Uint8Array.from(randomBytes(ROOT_SIZE));
+  const startSlot = slots[forkName];
+  const blocks: BlockTestSet<F>[] = [];
+  for (let slot = startSlot; slot < startSlot + count; slot++) {
+    const {block, blockRoot, rootHex} = generateBlock({forkName, parentRoot, slot});
+    parentRoot = blockRoot;
+    blocks.push({
+      block,
+      blockRoot,
+      rootHex,
+    });
+  }
+  return blocks;
+}
+
+export type BlockWithBlobsTestSet<F extends ForkPostDeneb> = BlockTestSet<F> & {
+  blobSidecars: deneb.BlobSidecars;
+  versionedHashes: VersionedHashes;
+};
+
+export type BlockWithColumnsTestSet<F extends ForkPostFulu> = BlockTestSet<F> & {
+  columnSidecars: fulu.DataColumnSidecars;
+  blobs?: deneb.Blob[];
+};
+
+export function generateBlockWithBlobSidecars<F extends ForkPostDeneb>({
+  forkName,
+  slot,
+  count,
+  parentRoot,
+  oomProtection = false,
+}: {
+  forkName: F;
+  parentRoot?: Uint8Array;
+  count?: number;
+  slot?: Slot;
+  oomProtection?: boolean;
+}): BlockWithBlobsTestSet<F> {
+  const {block, blobSidecars, versionedHashes} = generateBlobSidecars(
+    generateBeaconBlock({forkName, parentRoot, slot}),
+    count ? count : generateRandomInt(1, 6),
+    oomProtection
+  );
+  const {blockRoot, rootHex} = generateRoots(forkName, block);
+  return {
+    block,
+    blobSidecars,
+    blockRoot,
+    rootHex,
+    versionedHashes,
+  };
+}
+
+export function generateBlockWithColumnSidecars<F extends ForkPostFulu>({
+  forkName,
+  slot,
+  parentRoot,
+  oomProtection = false,
+  returnBlobs = false,
+}: {
+  forkName: F;
+  parentRoot?: Uint8Array;
+  slot?: Slot;
+  oomProtection?: boolean;
+  returnBlobs?: boolean;
+}): BlockWithColumnsTestSet<F> {
+  const {block, columnSidecars, blobs} = generateColumnSidecars(
+    forkName,
+    generateBeaconBlock({forkName, parentRoot, slot}),
+    generateRandomInt(1, 6),
+    oomProtection,
+    returnBlobs
+  );
+  const {blockRoot, rootHex} = generateRoots(forkName, block);
+  return {
+    block,
+    blockRoot,
+    rootHex,
+    columnSidecars,
+    blobs: returnBlobs ? blobs : undefined,
+  };
+}
+
+export type BlockWithSidecars<F extends ForkPostDeneb> = F extends ForkPostFulu
+  ? BlockWithColumnsTestSet<F>
+  : BlockWithBlobsTestSet<F>;
+
+export function generateChainOfBlocksWithBlobs<F extends ForkPostDeneb>({
+  forkName,
+  count,
+  oomProtection = false,
+}: {
+  forkName: F;
+  count: number;
+  oomProtection?: boolean;
+}): BlockWithSidecars<F>[] {
+  let parentRoot = Uint8Array.from(randomBytes(ROOT_SIZE));
+  // Iterate from a fixed start slot: `for (; slot < slot + count;)` would never terminate
+  const startSlot = slots[forkName];
+  const blocks: BlockWithSidecars<F>[] = [];
+  for (let slot = startSlot; slot < startSlot + count; slot++) {
+    const blockWithSidecars = (
+      isForkPostFulu(forkName)
+        ? generateBlockWithColumnSidecars({forkName, parentRoot, slot, oomProtection})
+        : generateBlockWithBlobSidecars({
+            forkName,
+            parentRoot,
+            slot,
+            oomProtection,
+          })
+    ) as BlockWithSidecars<F>;
+    parentRoot = blockWithSidecars.blockRoot;
+    blocks.push(blockWithSidecars);
+  }
+  return blocks;
+}
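+
+// Example usage (sketch, hypothetical test code): build a short post-fulu chain where each
+// block carries column sidecars. `ForkName.fulu` is illustrative and would need the
+// @lodestar/params import in the consuming test file.
+// const chain = generateChainOfBlocksWithBlobs({forkName: ForkName.fulu, count: 3});
+// for (const {block, blockRoot, columnSidecars} of chain) {
+//   // feed block + sidecars into the component under test
+// }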
+
+export type ChainOfBlockMaybeSidecars<F extends ForkPostCapella> = F extends ForkPostDeneb
+  ? BlockWithSidecars<F>[]
+  : BlockTestSet<F>[];
+
+export function generateChainOfBlockMaybeSidecars<F extends ForkPostCapella>({
+  forkName,
+  count,
+  oomProtection = false,
+}: {
+  forkName: F;
+  count: number;
+  oomProtection?: boolean;
+}): ChainOfBlockMaybeSidecars<F> {
+  if (isForkPostDeneb(forkName)) {
+    return generateChainOfBlocksWithBlobs({forkName, count, oomProtection}) as ChainOfBlockMaybeSidecars<F>;
+  }
+  return generateChainOfBlocks({forkName, count}) as ChainOfBlockMaybeSidecars<F>;
+}
diff --git a/packages/spec-test-util/src/single.ts b/packages/spec-test-util/src/single.ts
index 645567e2ae65..ad131bf94f18 100644
--- a/packages/spec-test-util/src/single.ts
+++ b/packages/spec-test-util/src/single.ts
@@ -87,7 +87,7 @@ const defaultOptions: SpecTestOptions = {
 export function describeDirectorySpecTest<TestCase extends {meta?: any}, Result>(
   name: string,
   testCaseDirectoryPath: string,
-  testFunction: (testCase: TestCase, directoryName: string) => Result | Promise<Result>,
+  testFunction: (testCase: TestCase, directoryName: string, testCaseName: string) => Result | Promise<Result>,
   options: Partial<SpecTestOptions<TestCase, Result>>
 ): void {
   options = {...defaultOptions, ...options};
@@ -124,12 +124,12 @@
       if (options.shouldError?.(testCase)) {
        try {
-          await testFunction(testCase, name);
+          await testFunction(testCase, name, testSubDirname);
        } catch (_e) {
          return;
        }
      } else {
-        const result = await testFunction(testCase, name);
+        const result = await testFunction(testCase, name, testSubDirname);
        if (!options.getExpected) throw Error("getExpected is not defined");
        if (!options.expectFunc) throw Error("expectFunc is not defined");
        const expected = options.getExpected(testCase);