chore: post-merge peerDAS branch review (#8187)

Did another sanity check of the peerDAS changes after merging them; fixed a few minor
things I picked up along the way.
Nico Flaig
2025-08-12 15:35:45 +01:00
committed by GitHub
parent 25bf0b65b8
commit 1244f930ff
14 changed files with 59 additions and 49 deletions

View File

@@ -23,7 +23,7 @@ RUN cd packages/cli && GIT_COMMIT=${COMMIT} yarn write-git-data
# Note: This step is redundant for the host arch
FROM node:22-slim AS build_deps
WORKDIR /usr/app
-RUN apt-get update && apt-get install -y git g++ make python3 python3-setuptools && apt-get clean && rm -rf /var/lib/apt/lists/*
+RUN apt-get update && apt-get install -y g++ make python3 python3-setuptools && apt-get clean && rm -rf /var/lib/apt/lists/*
COPY --from=build_src /usr/app .

View File

@@ -1,5 +1,5 @@
import {ChainForkConfig} from "@lodestar/config";
-import {ForkPostBellatrix, ForkPostDeneb, ForkSeq, isForkPostAltair, isForkPostBellatrix} from "@lodestar/params";
+import {ForkPostBellatrix, ForkSeq, isForkPostAltair, isForkPostBellatrix} from "@lodestar/params";
import {
CachedBeaconStateAllForks,
CachedBeaconStateBellatrix,
@@ -393,7 +393,7 @@ export async function produceBlockBody<T extends BlockType>(
}
if (this.opts.sanityCheckExecutionEngineBlobs) {
-await validateBlobsAndKzgCommitments(fork as ForkPostDeneb, executionPayload, blobsBundle, cells);
+await validateBlobsAndKzgCommitments(fork, executionPayload, blobsBundle, cells);
}
(blockBody as deneb.BeaconBlockBody).blobKzgCommitments = blobsBundle.commitments;

View File

@@ -1,4 +1,4 @@
-import {CELLS_PER_EXT_BLOB, ForkPostDeneb, ForkSeq} from "@lodestar/params";
+import {CELLS_PER_EXT_BLOB, ForkName, ForkSeq, isForkPostDeneb} from "@lodestar/params";
import {ExecutionPayload, fulu} from "@lodestar/types";
import {BlobsBundle} from "../../execution/index.js";
import {kzg} from "../../util/kzg.js";
@@ -9,11 +9,15 @@ import {kzg} from "../../util/kzg.js";
*/
export async function validateBlobsAndKzgCommitments(
-fork: ForkPostDeneb,
+fork: ForkName,
_payload: ExecutionPayload,
blobsBundle: BlobsBundle,
cells?: fulu.Cell[][]
): Promise<void> {
+if (!isForkPostDeneb(fork)) {
+throw Error(`validateBlobsAndKzgCommitments called with pre-deneb fork=${fork}`);
+}
if (blobsBundle.blobs.length !== blobsBundle.commitments.length) {
throw Error(
`Blobs bundle blobs len ${blobsBundle.blobs.length} != commitments len ${blobsBundle.commitments.length}`
@@ -35,7 +39,7 @@ export async function validateBlobsAndKzgCommitments(
}
if (!cells) {
-cells = blobsBundle.blobs.map((blob) => kzg.computeCells(blob));
+throw Error(`Missing cells for post-fulu fork=${fork}`);
}
const expectedProofsLength = blobsBundle.blobs.length * CELLS_PER_EXT_BLOB;

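Review note: the function now accepts the broad ForkName and guards at runtime, which is what let the `fork as ForkPostDeneb` cast in the previous file be dropped, and missing cells post-fulu is now a hard error instead of being silently recomputed. A minimal self-contained sketch of the pattern (ForkSeq/isForkPostDeneb are redefined here for illustration; this is not the @lodestar/params implementation):

enum ForkSeq {phase0 = 0, altair = 1, bellatrix = 2, capella = 3, deneb = 4, electra = 5, fulu = 6}
type ForkName = keyof typeof ForkSeq;

function isForkPostDeneb(fork: ForkName): boolean {
  return ForkSeq[fork] >= ForkSeq.deneb;
}

function validateSketch(fork: ForkName, cells?: Uint8Array[][]): void {
  // fail fast instead of trusting an unsafe cast at the call site
  if (!isForkPostDeneb(fork)) {
    throw Error(`validateBlobsAndKzgCommitments called with pre-deneb fork=${fork}`);
  }
  // post-fulu, cells must be provided by the caller; recomputing them here
  // would mask a caller bug, hence the hard error in the diff above
  if (ForkSeq[fork] >= ForkSeq.fulu && cells === undefined) {
    throw Error(`Missing cells for post-fulu fork=${fork}`);
  }
}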
View File

@@ -231,7 +231,7 @@ export class NetworkCore implements INetworkCore {
clock,
peerRpcScores,
events,
-networkConfig: networkConfig,
+networkConfig,
peersData,
statusCache,
},
@@ -258,7 +258,7 @@ export class NetworkCore implements INetworkCore {
attnetsService,
syncnetsService,
peerManager,
-networkConfig: networkConfig,
+networkConfig,
peersData,
metadata,
logger,

View File

@@ -559,13 +559,13 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand
}: GossipHandlerParamGeneric<GossipType.data_column_sidecar>) => {
const {serializedData} = gossipData;
const dataColumnSidecar = sszDeserialize(topic, serializedData);
-const blobSlot = dataColumnSidecar.signedBlockHeader.message.slot;
+const dataColumnSlot = dataColumnSidecar.signedBlockHeader.message.slot;
const index = dataColumnSidecar.index;
-if (config.getForkSeq(blobSlot) < ForkSeq.deneb) {
-throw new GossipActionError(GossipAction.REJECT, {code: "PRE_DENEB_BLOCK"});
+if (config.getForkSeq(dataColumnSlot) < ForkSeq.fulu) {
+throw new GossipActionError(GossipAction.REJECT, {code: "PRE_FULU_BLOCK"});
}
-const delaySec = chain.clock.secFromSlot(blobSlot, seenTimestampSec);
+const delaySec = chain.clock.secFromSlot(dataColumnSlot, seenTimestampSec);
metrics?.dataColumns.elapsedTimeTillReceived.observe({source: DataColumnsSource.gossip}, delaySec);
const blockInput = await validateBeaconDataColumn(
dataColumnSidecar,
@@ -576,28 +576,28 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand
);
if (blockInput.block !== null) {
if (blockInput.type === BlockInputType.dataPromise) {
chain.logger.debug("Block corresponding to blob is available but waiting for data availability", {
blobSlot,
chain.logger.debug("Block corresponding to data column is available but waiting for data availability", {
dataColumnSlot,
index,
});
await raceWithCutoff(
chain,
-blobSlot,
+dataColumnSlot,
blockInput.cachedData.availabilityPromise as Promise<BlockInputAvailableData>,
BLOCK_AVAILABILITY_CUTOFF_MS
).catch((_e) => {
chain.logger.debug("Block under processing not yet fully available adding to unknownBlockInput", {
-blobSlot,
+dataColumnSlot,
});
events.emit(NetworkEvent.unknownBlockInput, {blockInput, peer: peerIdStr});
});
}
} else {
// wait for the block to arrive till some cutoff else emit unknownBlockInput event
chain.logger.debug("Block not yet available, racing with cutoff", {blobSlot, index});
chain.logger.debug("Block not yet available, racing with cutoff", {dataColumnSlot, index});
const normalBlockInput = await raceWithCutoff(
chain,
-blobSlot,
+dataColumnSlot,
blockInput.blockInputPromise,
BLOCK_AVAILABILITY_CUTOFF_MS
).catch((_e) => {
@@ -606,26 +606,35 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand
if (normalBlockInput !== null) {
if (normalBlockInput.type === BlockInputType.dataPromise) {
chain.logger.debug("Block corresponding to blob is now available but waiting for data availability", {
blobSlot,
index,
});
chain.logger.debug(
"Block corresponding to data column is now available but waiting for data availability",
{
dataColumnSlot,
index,
}
);
await raceWithCutoff(
chain,
-blobSlot,
+dataColumnSlot,
normalBlockInput.cachedData.availabilityPromise as Promise<BlockInputAvailableData>,
BLOCK_AVAILABILITY_CUTOFF_MS
).catch((_e) => {
chain.logger.debug("Block under processing not yet fully available adding to unknownBlockInput", {
-blobSlot,
+dataColumnSlot,
});
events.emit(NetworkEvent.unknownBlockInput, {blockInput: normalBlockInput, peer: peerIdStr});
});
} else {
chain.logger.debug("Block corresponding to blob is now available for processing", {blobSlot, index});
chain.logger.debug("Block corresponding to data column is now available for processing", {
dataColumnSlot,
index,
});
}
} else {
chain.logger.debug("Block not available till BLOCK_AVAILABILITY_CUTOFF_MS", {blobSlot, index});
chain.logger.debug("Block not available till BLOCK_AVAILABILITY_CUTOFF_MS", {
dataColumnSlot,
index,
});
events.emit(NetworkEvent.unknownBlockInput, {blockInput, peer: peerIdStr});
}
}

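Review note: besides the blobSlot → dataColumnSlot rename (this handler was evidently derived from the blob sidecar handler), the fork gate correctly moves from deneb to fulu, since DataColumnSidecar only exists once peerDAS activates. The handler races availability against BLOCK_AVAILABILITY_CUTOFF_MS in three places; a sketch of the assumed shape of that race (illustrative, not the codebase's raceWithCutoff):

// Resolve with the promise if it settles before cutoffMs, otherwise reject
// so the caller can fall back to emitting unknownBlockInput.
async function raceWithCutoffSketch<T>(promise: Promise<T>, cutoffMs: number): Promise<T> {
  let timer: ReturnType<typeof setTimeout> | undefined;
  const cutoff = new Promise<never>((_, reject) => {
    timer = setTimeout(() => reject(new Error("BLOCK_AVAILABILITY_CUTOFF_MS reached")), cutoffMs);
  });
  try {
    return await Promise.race([promise, cutoff]);
  } finally {
    clearTimeout(timer);
  }
}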
View File

@@ -1,9 +1,9 @@
import {getV4Crypto} from "@chainsafe/enr";
-import {fromHexString} from "@chainsafe/ssz";
import type {PeerId, PrivateKey} from "@libp2p/interface";
import {peerIdFromPrivateKey} from "@libp2p/peer-id";
import {ForkBoundary} from "@lodestar/config";
import {Bytes32, Slot, SubnetID, ValidatorIndex} from "@lodestar/types";
+import {fromHex} from "@lodestar/utils";
import {GossipTopic} from "../gossip/interface.js";
import {RequestedSubnet} from "../peers/utils/index.js";
@@ -62,5 +62,5 @@ export function computeNodeId(peerId: PeerId): Uint8Array {
throw Error(`Undefined publicKey peerId=${peerId.toString()}`);
}
const nodeIdHex = getV4Crypto().nodeId(peerId.publicKey.raw);
-return fromHexString(nodeIdHex);
+return fromHex(nodeIdHex);
}

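Review note: this file and several below migrate hex conversion from @chainsafe/ssz (fromHexString/toHexString) to the @lodestar/utils equivalents (fromHex, toRootHex), dropping a needless dependency on the ssz package for plain byte/hex conversion. For illustration, a self-contained sketch of what such helpers do (not the @lodestar/utils implementation):

// Parse hex with an optional 0x prefix into raw bytes.
function fromHexSketch(hex: string): Uint8Array {
  const s = hex.startsWith("0x") ? hex.slice(2) : hex;
  if (s.length % 2 !== 0) throw Error(`Invalid hex length: ${s.length}`);
  const bytes = new Uint8Array(s.length / 2);
  for (let i = 0; i < bytes.length; i++) {
    bytes[i] = parseInt(s.slice(i * 2, i * 2 + 2), 16);
  }
  return bytes;
}

// Serialize a 32-byte root as a 0x-prefixed lowercase hex string.
function toRootHexSketch(root: Uint8Array): string {
  return "0x" + Array.from(root, (b) => b.toString(16).padStart(2, "0")).join("");
}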
View File

@@ -1,4 +1,3 @@
-import {fromHexString, toHexString} from "@chainsafe/ssz";
import {ChainForkConfig} from "@lodestar/config";
import {ForkName, INTERVALS_PER_SLOT, NUMBER_OF_COLUMNS} from "@lodestar/params";
import {ColumnIndex, Root, RootHex, deneb} from "@lodestar/types";
@@ -184,9 +183,7 @@ export class UnknownBlockSync {
} else {
if (blockInputOrRootHex.block !== null) {
const {block} = blockInputOrRootHex;
-blockRootHex = toHexString(
-this.config.getForkTypes(block.message.slot).BeaconBlock.hashTreeRoot(block.message)
-);
+blockRootHex = toRootHex(this.config.getForkTypes(block.message.slot).BeaconBlock.hashTreeRoot(block.message));
unknownBlockType = PendingBlockType.UNKNOWN_DATA;
} else {
unknownBlockType = PendingBlockType.UNKNOWN_BLOCKINPUT;
@@ -298,7 +295,7 @@ export class UnknownBlockSync {
if (block.blockInput === null) {
connectedPeers = allPeers;
// we only have block root, and nothing else
-res = await wrapError(this.fetchUnknownBlockRoot(fromHexString(block.blockRootHex), connectedPeers));
+res = await wrapError(this.fetchUnknownBlockRoot(fromHex(block.blockRootHex), connectedPeers));
} else {
const {cachedData} = block.blockInput;
if (cachedData.fork === ForkName.fulu) {
@@ -376,7 +373,7 @@ export class UnknownBlockSync {
...block,
status: PendingBlockStatus.downloaded,
blockInput,
-parentBlockRootHex: toHexString(blockInput.block.message.parentRoot),
+parentBlockRootHex: toRootHex(blockInput.block.message.parentRoot),
};
this.pendingBlocks.set(block.blockRootHex, block);
const blockSlot = blockInput.block.message.slot;
@@ -408,7 +405,7 @@ export class UnknownBlockSync {
this.logger.debug("Downloaded block is before finalized slot", {
finalizedSlot,
blockSlot,
-parentRoot: toHexString(blockRoot),
+parentRoot: toRootHex(blockRoot),
unknownBlockType,
});
this.removeAndDownscoreAllDescendants(block);

View File

@@ -81,7 +81,7 @@ export function computeDataColumnSidecars(
): fulu.DataColumnSidecars {
const blobKzgCommitments = (signedBlock as deneb.SignedBeaconBlock).message.body.blobKzgCommitments;
if (blobKzgCommitments === undefined) {
throw Error("Invalid block with missing blobKzgCommitments for computeBlobSidecars");
throw Error("Invalid block with missing blobKzgCommitments for computeDataColumnSidecars");
}
if (blobKzgCommitments.length === 0) {
return [];

View File

@@ -38,7 +38,7 @@ export enum RecoverResult {
NotAttemptedFull = "not_attempted_full",
// the recover is a success and it helps resolve availability
SuccessResolved = "success_resolved",
-// the redover is a success but it's late, availability is already resolved by either gossip or getBlobsV2
+// the recover is a success but it's late, availability is already resolved by either gossip or getBlobsV2
SuccessLate = "success_late",
// the recover failed
Failed = "failed",
@@ -172,7 +172,7 @@ export function getValidatorsCustodyRequirement(config: ChainForkConfig, effecti
*/
export function computeColumnsForCustodyGroup(custodyIndex: CustodyIndex): ColumnIndex[] {
if (custodyIndex > NUMBER_OF_CUSTODY_GROUPS) {
-custodyIndex = NUMBER_OF_CUSTODY_GROUPS;
+throw Error(`Invalid custody index ${custodyIndex} > ${NUMBER_OF_CUSTODY_GROUPS}`);
}
const columnsPerCustodyGroup = Number(NUMBER_OF_COLUMNS / NUMBER_OF_CUSTODY_GROUPS);
const columnIndexes = [];

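Review note: throwing on an out-of-range custody index is the right fix; the old clamping silently mapped distinct indices to the same column set and hid caller bugs. For reference, a sketch of the stride-based layout this function computes, following the peerDAS spec's compute_columns_for_custody_group (constants are assumed mainnet values; the sketch uses >= since valid zero-based indices run 0..NUMBER_OF_CUSTODY_GROUPS-1):

const NUMBER_OF_COLUMNS = 128; // assumed mainnet preset
const NUMBER_OF_CUSTODY_GROUPS = 128; // assumed mainnet preset

function computeColumnsForCustodyGroupSketch(custodyIndex: number): number[] {
  if (custodyIndex >= NUMBER_OF_CUSTODY_GROUPS) {
    // fail loudly: clamping would map different indices to identical columns
    throw Error(`Invalid custody index ${custodyIndex} >= ${NUMBER_OF_CUSTODY_GROUPS}`);
  }
  const columnsPerGroup = NUMBER_OF_COLUMNS / NUMBER_OF_CUSTODY_GROUPS;
  const columns: number[] = [];
  for (let i = 0; i < columnsPerGroup; i++) {
    // column (NUMBER_OF_CUSTODY_GROUPS * i + custodyIndex) belongs to this group
    columns.push(NUMBER_OF_CUSTODY_GROUPS * i + custodyIndex);
  }
  return columns;
}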
View File

@@ -105,6 +105,7 @@ describe("validateBlobsAndKzgCommitments", () => {
blobs,
proofs: [new Uint8Array(48).fill(1)], // Only one proof when we need CELLS_PER_EXT_BLOB
};
+const cells = blobsBundle.blobs.map((blob) => kzg.computeCells(blob));
// Create a mock ExecutionPayload
const mockPayload = {
@@ -113,7 +114,7 @@ describe("validateBlobsAndKzgCommitments", () => {
parentHash: new Uint8Array(32),
} as ExecutionPayload;
-await expect(validateBlobsAndKzgCommitments(ForkName.fulu, mockPayload, blobsBundle)).rejects.toThrow(
+await expect(validateBlobsAndKzgCommitments(ForkName.fulu, mockPayload, blobsBundle, cells)).rejects.toThrow(
`Invalid proofs length for BlobsBundleV2 format: expected ${CELLS_PER_EXT_BLOB}, got 1`
);
});
@@ -171,7 +172,7 @@ describe("validateBlobsAndKzgCommitments", () => {
await expect(validateBlobsAndKzgCommitments(ForkName.fulu, mockPayload, blobsBundle, cells)).resolves.not.toThrow();
});
it("should validate V2 blobs bundle when cells are not passed", async () => {
it("should throw when cells are not passed post-fulu", async () => {
const blobs = [generateRandomBlob()];
// Compute commitments and proofs for each blob
@@ -197,6 +198,6 @@ describe("validateBlobsAndKzgCommitments", () => {
parentHash: new Uint8Array(32),
} as ExecutionPayload;
-await expect(validateBlobsAndKzgCommitments(ForkName.fulu, mockPayload, blobsBundle)).resolves.not.toThrow();
+await expect(validateBlobsAndKzgCommitments(ForkName.fulu, mockPayload, blobsBundle)).rejects.toThrow();
});
});

View File

@@ -1,9 +1,8 @@
-import {fromHexString} from "@chainsafe/ssz";
import {config} from "@lodestar/config/default";
import {SLOTS_PER_EPOCH} from "@lodestar/params";
import {computeStartSlotAtEpoch} from "@lodestar/state-transition";
import {Epoch, Slot, phase0, ssz} from "@lodestar/types";
-import {Logger} from "@lodestar/utils";
+import {Logger, fromHex} from "@lodestar/utils";
import {afterEach, describe, it} from "vitest";
import {BlockInput, BlockSource, getBlockInput} from "../../../../src/chain/blocks/types.js";
import {ZERO_HASH} from "../../../../src/constants/index.js";
@@ -62,7 +61,7 @@ describe("sync / range / chain", () => {
const REJECT_BLOCK = Buffer.alloc(96, 1);
const zeroBlockBody = ssz.phase0.BeaconBlockBody.defaultValue();
const interval: NodeJS.Timeout | null = null;
-const nodeId = fromHexString("cdbee32dc3c50e9711d22be5565c7e44ff6108af663b2dc5abd2df573d2fa83f");
+const nodeId = fromHex("cdbee32dc3c50e9711d22be5565c7e44ff6108af663b2dc5abd2df573d2fa83f");
const custodyConfig = new CustodyConfig({
nodeId,
config,

View File

@@ -99,7 +99,7 @@ describe("computeDataColumnSidecars", () => {
blobs,
kzgProofs,
})
).toThrow("Invalid block with missing blobKzgCommitments for computeBlobSidecars");
).toThrow("Invalid block with missing blobKzgCommitments for computeDataColumnSidecars");
});
});

View File

@@ -1,9 +1,8 @@
-import {fromHexString} from "@chainsafe/ssz";
import {createBeaconConfig, createChainForkConfig, defaultChainConfig} from "@lodestar/config";
import {ChainForkConfig} from "@lodestar/config";
import {NUMBER_OF_COLUMNS, NUMBER_OF_CUSTODY_GROUPS} from "@lodestar/params";
import {ssz} from "@lodestar/types";
-import {bigIntToBytes} from "@lodestar/utils";
+import {bigIntToBytes, fromHex} from "@lodestar/utils";
import {afterEach, beforeEach, describe, expect, it} from "vitest";
import {validateDataColumnsSidecars} from "../../../src/chain/validation/dataColumnSidecar.js";
@@ -60,7 +59,7 @@ describe("getValidatorsCustodyRequirement", () => {
describe("CustodyConfig", () => {
let config: ChainForkConfig;
-const nodeId = fromHexString("cdbee32dc3c50e9711d22be5565c7e44ff6108af663b2dc5abd2df573d2fa83f");
+const nodeId = fromHex("cdbee32dc3c50e9711d22be5565c7e44ff6108af663b2dc5abd2df573d2fa83f");
beforeEach(() => {
// Create a proper config using createChainForkConfig
@@ -121,7 +120,7 @@ describe("getDataColumns", () => {
];
for (const [nodeIdHex, numSubnets, custodyColumns] of testCases) {
it(`${nodeIdHex} / ${numSubnets}`, async () => {
-const nodeId = nodeIdHex.length === 64 ? fromHexString(nodeIdHex) : bigIntToBytes(BigInt(nodeIdHex), 32, "be");
+const nodeId = nodeIdHex.length === 64 ? fromHex(nodeIdHex) : bigIntToBytes(BigInt(nodeIdHex), 32, "be");
const columnIndexs = getDataColumns(nodeId, numSubnets);
expect(columnIndexs).toEqual(custodyColumns);

View File

@@ -95,6 +95,7 @@ export type BeaconPreset = {
PENDING_CONSOLIDATIONS_LIMIT: number;
MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD: number;
WHISTLEBLOWER_REWARD_QUOTIENT_ELECTRA: number;
+// FULU
///////////
FIELD_ELEMENTS_PER_CELL: number;