Merge branch 'develop' into batch-publish-data-columns

This commit is contained in:
kasey
2025-12-30 12:27:15 -07:00
committed by GitHub
22 changed files with 96 additions and 141 deletions

View File

@@ -193,7 +193,7 @@ nogo(
"//tools/analyzers/featureconfig:go_default_library",
"//tools/analyzers/gocognit:go_default_library",
"//tools/analyzers/ineffassign:go_default_library",
"//tools/analyzers/httperror:go_default_library",
"//tools/analyzers/httpwriter:go_default_library",
"//tools/analyzers/interfacechecker:go_default_library",
"//tools/analyzers/logcapitalization:go_default_library",
"//tools/analyzers/logruswitherror:go_default_library",

View File

@@ -22,7 +22,7 @@ import (
// The caller of this function must have a lock on forkchoice.
func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) state.ReadOnlyBeaconState {
headEpoch := slots.ToEpoch(s.HeadSlot())
if c.Epoch < headEpoch || c.Epoch == 0 {
if c.Epoch+1 < headEpoch || c.Epoch == 0 {
return nil
}
// Only use head state if the head state is compatible with the target checkpoint.
@@ -30,11 +30,13 @@ func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) st
if err != nil {
return nil
}
headDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(headRoot), c.Epoch-1)
// headEpoch - 1 equals c.Epoch if c is from the previous epoch and equals c.Epoch - 1 if c is from the current epoch.
// We don't use the smaller c.Epoch - 1 because forkchoice would not have the data to answer that.
headDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(headRoot), headEpoch-1)
if err != nil {
return nil
}
targetDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(c.Root), c.Epoch-1)
targetDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(c.Root), headEpoch-1)
if err != nil {
return nil
}
@@ -43,7 +45,7 @@ func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) st
}
// If the head state alone is enough, we can return it directly read only.
if c.Epoch == headEpoch {
if c.Epoch <= headEpoch {
st, err := s.HeadStateReadOnly(ctx)
if err != nil {
return nil

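The relaxed epoch guard is easiest to see with concrete numbers. Below is a minimal standalone sketch (hypothetical epoch values) of the accept/reject arithmetic; the dependent-root comparison at `headEpoch-1` shown in the hunk must still pass before the head state is actually used.

```go
package main

import "fmt"

// usableFromHead mirrors the new guard: the head state may serve checkpoints
// from the current or the previous epoch, whereas the old guard only allowed
// the current epoch. Epoch 0 is still rejected.
func usableFromHead(checkpointEpoch, headEpoch uint64) bool {
	if checkpointEpoch == 0 {
		return false
	}
	return checkpointEpoch+1 >= headEpoch // old guard was checkpointEpoch >= headEpoch
}

func main() {
	head := uint64(10)
	fmt.Println(usableFromHead(10, head)) // current epoch: true (as before)
	fmt.Println(usableFromHead(9, head))  // previous epoch: now true
	fmt.Println(usableFromHead(8, head))  // two epochs behind: still false
}
```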
View File

@@ -170,12 +170,13 @@ func TestService_GetRecentPreState(t *testing.T) {
err = s.SetFinalizedCheckpoint(cp0)
require.NoError(t, err)
st, root, err := prepareForkchoiceState(ctx, 31, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
st, blk, err := prepareForkchoiceState(ctx, 31, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
service.head = &head{
root: [32]byte(ckRoot),
state: s,
block: blk,
slot: 31,
}
require.NotNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{Epoch: 1, Root: ckRoot}))
@@ -197,12 +198,13 @@ func TestService_GetRecentPreState_Old_Checkpoint(t *testing.T) {
err = s.SetFinalizedCheckpoint(cp0)
require.NoError(t, err)
st, root, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
st, blk, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
service.head = &head{
root: [32]byte(ckRoot),
state: s,
block: blk,
slot: 33,
}
require.IsNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{}))
@@ -227,6 +229,7 @@ func TestService_GetRecentPreState_Same_DependentRoots(t *testing.T) {
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 64, [32]byte{'T'}, blk.Root(), [32]byte{}, cp0, cp0)
require.NoError(t, err)
headBlock := blk
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 33, [32]byte{'U'}, [32]byte(ckRoot), [32]byte{}, cp0, cp0)
require.NoError(t, err)
@@ -235,8 +238,9 @@ func TestService_GetRecentPreState_Same_DependentRoots(t *testing.T) {
service.head = &head{
root: [32]byte{'T'},
state: s,
block: headBlock,
slot: 64,
state: s,
}
require.NotNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{Epoch: 2, Root: cpRoot[:]}))
}
@@ -263,6 +267,7 @@ func TestService_GetRecentPreState_Different_DependentRoots(t *testing.T) {
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 64, [32]byte{'U'}, blk.Root(), [32]byte{}, cp0, cp0)
require.NoError(t, err)
headBlock := blk
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 33, [32]byte{'V'}, [32]byte(ckRoot), [32]byte{}, cp0, cp0)
require.NoError(t, err)
@@ -270,7 +275,8 @@ func TestService_GetRecentPreState_Different_DependentRoots(t *testing.T) {
cpRoot := blk.Root()
service.head = &head{
root: [32]byte{'T'},
root: [32]byte{'U'},
block: headBlock,
state: s,
slot: 64,
}
@@ -287,12 +293,13 @@ func TestService_GetRecentPreState_Different(t *testing.T) {
err = s.SetFinalizedCheckpoint(cp0)
require.NoError(t, err)
st, root, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
st, blk, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
service.head = &head{
root: [32]byte(ckRoot),
state: s,
block: blk,
slot: 33,
}
require.IsNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{}))

View File

@@ -295,14 +295,6 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
return errors.Wrap(err, "could not set optimistic block to valid")
}
}
arg := &fcuConfig{
headState: preState,
headRoot: lastBR,
headBlock: lastB,
}
if _, err := s.notifyForkchoiceUpdate(ctx, arg); err != nil {
return err
}
return s.saveHeadNoDB(ctx, lastB, lastBR, preState, !isValidPayload)
}

View File

@@ -820,24 +820,6 @@ func TestOnBlock_NilBlock(t *testing.T) {
require.Equal(t, true, IsInvalidBlock(err))
}
func TestOnBlock_InvalidSignature(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
gs, keys := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
blk, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
blk.Signature = []byte{'a'} // Mutate the signature.
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
_, err = service.validateStateTransition(ctx, preState, wsb)
require.Equal(t, true, IsInvalidBlock(err))
}
func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()

View File

@@ -290,52 +290,3 @@ func TestProcessBlockHeader_OK(t *testing.T) {
}
assert.Equal(t, true, proto.Equal(nsh, expected), "Expected %v, received %v", expected, nsh)
}
func TestBlockSignatureSet_OK(t *testing.T) {
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
for i := range validators {
validators[i] = &ethpb.Validator{
PublicKey: make([]byte, 32),
WithdrawalCredentials: make([]byte, 32),
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
Slashed: true,
}
}
state, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, state.SetValidators(validators))
require.NoError(t, state.SetSlot(10))
require.NoError(t, state.SetLatestBlockHeader(util.HydrateBeaconHeader(&ethpb.BeaconBlockHeader{
Slot: 9,
ProposerIndex: 0,
})))
latestBlockSignedRoot, err := state.LatestBlockHeader().HashTreeRoot()
require.NoError(t, err)
currentEpoch := time.CurrentEpoch(state)
priv, err := bls.RandKey()
require.NoError(t, err)
pID, err := helpers.BeaconProposerIndex(t.Context(), state)
require.NoError(t, err)
block := util.NewBeaconBlock()
block.Block.Slot = 10
block.Block.ProposerIndex = pID
block.Block.Body.RandaoReveal = bytesutil.PadTo([]byte{'A', 'B', 'C'}, 96)
block.Block.ParentRoot = latestBlockSignedRoot[:]
block.Signature, err = signing.ComputeDomainAndSign(state, currentEpoch, block.Block, params.BeaconConfig().DomainBeaconProposer, priv)
require.NoError(t, err)
proposerIdx, err := helpers.BeaconProposerIndex(t.Context(), state)
require.NoError(t, err)
validators[proposerIdx].Slashed = false
validators[proposerIdx].PublicKey = priv.PublicKey().Marshal()
err = state.UpdateValidatorAtIndex(proposerIdx, validators[proposerIdx])
require.NoError(t, err)
set, err := blocks.BlockSignatureBatch(state, block.Block.ProposerIndex, block.Signature, block.Block.HashTreeRoot)
require.NoError(t, err)
verified, err := set.Verify()
require.NoError(t, err)
assert.Equal(t, true, verified, "Block signature set returned a set which was unable to be verified")
}

View File

@@ -122,24 +122,6 @@ func VerifyBlockSignatureUsingCurrentFork(beaconState state.ReadOnlyBeaconState,
return nil
}
// BlockSignatureBatch retrieves the block signature batch from the provided block and its corresponding state.
func BlockSignatureBatch(beaconState state.ReadOnlyBeaconState,
proposerIndex primitives.ValidatorIndex,
sig []byte,
rootFunc func() ([32]byte, error)) (*bls.SignatureBatch, error) {
currentEpoch := slots.ToEpoch(beaconState.Slot())
domain, err := signing.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorsRoot())
if err != nil {
return nil, err
}
proposer, err := beaconState.ValidatorAtIndex(proposerIndex)
if err != nil {
return nil, err
}
proposerPubKey := proposer.PublicKey
return signing.BlockSignatureBatch(proposerPubKey, sig, domain, rootFunc)
}
// RandaoSignatureBatch retrieves the relevant randao specific signature batch object
// from a block and its corresponding state.
func RandaoSignatureBatch(

View File

@@ -182,12 +182,6 @@ func ProcessBlockNoVerifyAnySig(
return nil, nil, err
}
sig := signed.Signature()
bSet, err := b.BlockSignatureBatch(st, blk.ProposerIndex(), sig[:], blk.HashTreeRoot)
if err != nil {
tracing.AnnotateError(span, err)
return nil, nil, errors.Wrap(err, "could not retrieve block signature set")
}
randaoReveal := signed.Block().Body().RandaoReveal()
rSet, err := b.RandaoSignatureBatch(ctx, st, randaoReveal[:])
if err != nil {
@@ -201,7 +195,7 @@ func ProcessBlockNoVerifyAnySig(
// Merge beacon block, randao and attestations signatures into a set.
set := bls.NewSet()
set.Join(bSet).Join(rSet).Join(aSet)
set.Join(rSet).Join(aSet)
if blk.Version() >= version.Capella {
changes, err := signed.Block().Body().BLSToExecutionChanges()

View File

@@ -157,9 +157,8 @@ func TestProcessBlockNoVerify_SigSetContainsDescriptions(t *testing.T) {
set, _, err := transition.ProcessBlockNoVerifyAnySig(t.Context(), beaconState, wsb)
require.NoError(t, err)
assert.Equal(t, len(set.Signatures), len(set.Descriptions), "Signatures and descriptions do not match up")
assert.Equal(t, "block signature", set.Descriptions[0])
assert.Equal(t, "randao signature", set.Descriptions[1])
assert.Equal(t, "attestation signature", set.Descriptions[2])
assert.Equal(t, "randao signature", set.Descriptions[0])
assert.Equal(t, "attestation signature", set.Descriptions[1])
}
func TestProcessOperationsNoVerifyAttsSigs_OK(t *testing.T) {

View File

@@ -67,9 +67,9 @@ func NewSyncNeeds(current CurrentSlotter, oldestSlotFlagPtr *primitives.Slot, bl
// Override spec minimum block retention with user-provided flag only if it is lower than the spec minimum.
sn.blockRetention = primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests)
if oldestSlotFlagPtr != nil {
oldestEpoch := slots.ToEpoch(*oldestSlotFlagPtr)
if oldestEpoch < sn.blockRetention {
if *oldestSlotFlagPtr <= syncEpochOffset(current(), sn.blockRetention) {
sn.validOldestSlotPtr = oldestSlotFlagPtr
} else {
log.WithField("backfill-oldest-slot", *oldestSlotFlagPtr).

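For context, a minimal standalone sketch (assumed mainnet constants, hypothetical slot values) contrasting the two checks. The old code compared the flag's epoch against the size of the retention window itself, so every slot newer than 1056767 was rejected regardless of where the head was; the new code compares the flag against the spec-minimum cutoff slot, which is what `syncEpochOffset(current(), sn.blockRetention)` is assumed to compute.

```go
package main

import "fmt"

const (
	slotsPerEpoch             = 32
	minEpochsForBlockRequests = 33024 // spec minimum block retention (assumed mainnet value)
)

// oldValid reproduces the previous check: flag epoch < retention window size.
func oldValid(flagSlot uint64) bool {
	return flagSlot/slotsPerEpoch < minEpochsForBlockRequests
}

// newValid models the new check: the flag must sit at or before the cutoff
// slot implied by the spec minimum, i.e. it may only extend retention.
func newValid(flagSlot, currentSlot uint64) bool {
	cutoff := currentSlot - minEpochsForBlockRequests*slotsPerEpoch
	return flagSlot <= cutoff
}

func main() {
	currentSlot := uint64(13_000_000)             // hypothetical head slot
	denebSlot := uint64(8_626_176)                // mainnet Deneb fork start slot
	fmt.Println(oldValid(denebSlot))              // false: rejected by the old check
	fmt.Println(newValid(denebSlot, currentSlot)) // true: earlier than the cutoff
}
```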
View File

@@ -128,6 +128,9 @@ func TestSyncNeedsInitialize(t *testing.T) {
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
minBlobEpochs := params.BeaconConfig().MinEpochsForBlobsSidecarsRequest
minColEpochs := params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest
denebSlot := slots.UnsafeEpochStart(params.BeaconConfig().DenebForkEpoch)
fuluSlot := slots.UnsafeEpochStart(params.BeaconConfig().FuluForkEpoch)
minSlots := slots.UnsafeEpochStart(primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests))
currentSlot := primitives.Slot(10000)
currentFunc := func() primitives.Slot { return currentSlot }
@@ -141,6 +144,7 @@ func TestSyncNeedsInitialize(t *testing.T) {
expectedCol primitives.Epoch
name string
input SyncNeeds
current func() primitives.Slot
}{
{
name: "basic initialization with no flags",
@@ -174,13 +178,13 @@ func TestSyncNeedsInitialize(t *testing.T) {
{
name: "valid oldestSlotFlagPtr (earlier than spec minimum)",
blobRetentionFlag: 0,
oldestSlotFlagPtr: func() *primitives.Slot {
slot := primitives.Slot(10)
return &slot
}(),
oldestSlotFlagPtr: &denebSlot,
expectValidOldest: true,
expectedBlob: minBlobEpochs,
expectedCol: minColEpochs,
current: func() primitives.Slot {
return fuluSlot + minSlots
},
},
{
name: "invalid oldestSlotFlagPtr (later than spec minimum)",
@@ -210,6 +214,9 @@ func TestSyncNeedsInitialize(t *testing.T) {
{
name: "both blob retention flag and oldest slot set",
blobRetentionFlag: minBlobEpochs + 5,
current: func() primitives.Slot {
return fuluSlot + minSlots
},
oldestSlotFlagPtr: func() *primitives.Slot {
slot := primitives.Slot(100)
return &slot
@@ -232,16 +239,27 @@ func TestSyncNeedsInitialize(t *testing.T) {
expectedBlob: 5000,
expectedCol: 5000,
},
{
name: "regression for deneb start",
blobRetentionFlag: 8212500,
expectValidOldest: true,
oldestSlotFlagPtr: &denebSlot,
current: func() primitives.Slot {
return fuluSlot + minSlots
},
expectedBlob: 8212500,
expectedCol: 8212500,
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
result, err := NewSyncNeeds(currentFunc, tc.oldestSlotFlagPtr, tc.blobRetentionFlag)
if tc.current == nil {
tc.current = currentFunc
}
result, err := NewSyncNeeds(tc.current, tc.oldestSlotFlagPtr, tc.blobRetentionFlag)
require.NoError(t, err)
// Check that current, deneb, fulu are set correctly
require.Equal(t, currentSlot, result.current())
// Check retention calculations
require.Equal(t, tc.expectedBlob, result.blobRetention)
require.Equal(t, tc.expectedCol, result.colRetention)

View File

@@ -14,6 +14,7 @@ import (
"github.com/OffchainLabs/prysm/v7/api"
"github.com/OffchainLabs/prysm/v7/api/server/structs"
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
coreblocks "github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
corehelpers "github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/filters"
@@ -957,6 +958,13 @@ func (s *Server) validateConsensus(ctx context.Context, b *eth.GenericSignedBeac
}
}
}
blockRoot, err := blk.Block().HashTreeRoot()
if err != nil {
return errors.Wrap(err, "could not hash block")
}
if err := coreblocks.VerifyBlockSignatureUsingCurrentFork(parentState, blk, blockRoot); err != nil {
return errors.Wrap(err, "could not verify block signature")
}
_, err = transition.ExecuteStateTransition(ctx, parentState, blk)
if err != nil {
return errors.Wrap(err, "could not execute state transition")

View File

@@ -40,6 +40,7 @@ func GetForkSchedule(w http.ResponseWriter, r *http.Request) {
httputil.WriteJson(w, &structs.GetForkScheduleResponse{
Data: data,
})
return
}
previous := schedule[0]
for _, entry := range schedule {

View File

@@ -0,0 +1,2 @@
### Fixed
- Fix validation of the `--backfill-oldest-slot` flag, which previously rejected any slot newer than 1056767.

View File

@@ -0,0 +1,2 @@
### Ignored
- Do not send FCU on block batches.

View File

@@ -0,0 +1,3 @@
### Changed
- Do not check the block signature as part of the state transition.

View File

@@ -0,0 +1,3 @@
### Added
- Use the head state to validate attestations for the previous epoch when the head is compatible with the target checkpoint.

View File

@@ -0,0 +1,3 @@
### Changed
- Extend the `httperror` analyzer (renamed to `httpwriter`) to cover more `httputil` functions.

View File

@@ -0,0 +1,3 @@
### Fixed
- Avoid a panic when the fork schedule is empty. [#16175](https://github.com/OffchainLabs/prysm/pull/16175)

View File

@@ -43,6 +43,7 @@ func WriteJson(w http.ResponseWriter, v any) {
func WriteSsz(w http.ResponseWriter, respSsz []byte) {
w.Header().Set("Content-Length", strconv.Itoa(len(respSsz)))
w.Header().Set("Content-Type", api.OctetStreamMediaType)
w.WriteHeader(http.StatusOK)
if _, err := io.Copy(w, io.NopCloser(bytes.NewReader(respSsz))); err != nil {
log.WithError(err).Error("Could not write response message")
}

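The added `w.WriteHeader(http.StatusOK)` commits the status line and headers before the body copy. A minimal sketch (hypothetical handler) of the `net/http` ordering rule this relies on: header values set after the status is committed are silently ignored.

```go
package main

import "net/http"

func demo(w http.ResponseWriter, _ *http.Request) {
	w.Header().Set("Content-Type", "application/octet-stream") // honored: set before commit
	w.WriteHeader(http.StatusOK)                               // commits status + headers
	w.Header().Set("X-Too-Late", "1")                          // ignored: set after commit
	_, _ = w.Write([]byte("body"))                             // without WriteHeader, this first Write would imply 200
}

func main() {
	http.HandleFunc("/demo", demo)
	_ = http.ListenAndServe("localhost:8080", nil)
}
```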
View File

@@ -3,7 +3,7 @@ load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["analyzer.go"],
importpath = "github.com/OffchainLabs/prysm/v7/tools/analyzers/httperror",
importpath = "github.com/OffchainLabs/prysm/v7/tools/analyzers/httpwriter",
visibility = ["//visibility:public"],
deps = [
"@org_golang_x_tools//go/analysis:go_default_library",

View File

@@ -1,4 +1,4 @@
package httperror
package httpwriter
import (
"go/ast"
@@ -9,8 +9,8 @@ import (
)
var Analyzer = &analysis.Analyzer{
Name: "httperror",
Doc: "Ensures calls to httputil.HandleError are immediately followed by a return statement.",
Name: "httpwriter",
Doc: "Ensures that httputil functions which make use of the writer are immediately followed by a return statement.",
Requires: []*analysis.Analyzer{
inspect.Analyzer,
},
@@ -99,7 +99,7 @@ func checkBlock(pass *analysis.Pass, fn *ast.FuncDecl, block *ast.BlockStmt, nex
// Now check the current statement itself: is it (or does it contain) a direct call to httputil.HandleError?
// We only consider ExprStmt that are direct CallExpr to httputil.HandleError.
call := findHandleErrorCall(stmt)
call, name := findHandleErrorCall(stmt)
if call == nil {
continue
}
@@ -121,7 +121,7 @@ func checkBlock(pass *analysis.Pass, fn *ast.FuncDecl, block *ast.BlockStmt, nex
continue
}
// otherwise it's not a return (even if it's an if/for etc) -> violation
pass.Reportf(stmt.Pos(), "call to httputil.HandleError must be immediately followed by a return statement")
pass.Reportf(stmt.Pos(), "call to httputil.%s must be immediately followed by a return statement", name)
continue
}
@@ -133,31 +133,33 @@ func checkBlock(pass *analysis.Pass, fn *ast.FuncDecl, block *ast.BlockStmt, nex
}
// Non-void function and it's the last statement → violation
pass.Reportf(stmt.Pos(), "call to httputil.HandleError must be followed by return because function has return values")
pass.Reportf(stmt.Pos(), "call to httputil.%s must be immediately followed by a return statement", name)
}
}
// findHandleErrorCall returns the call expression and selector name if stmt is a direct call to one of the httputil writer functions,
// otherwise nil and "". We only match direct ExprStmt -> CallExpr -> SelectorExpr where the receiver package is httputil.
func findHandleErrorCall(stmt ast.Stmt) *ast.CallExpr {
func findHandleErrorCall(stmt ast.Stmt) (*ast.CallExpr, string) {
es, ok := stmt.(*ast.ExprStmt)
if !ok {
return nil
return nil, ""
}
call, ok := es.X.(*ast.CallExpr)
if !ok {
return nil
return nil, ""
}
sel, ok := call.Fun.(*ast.SelectorExpr)
if !ok {
return nil
return nil, ""
}
pkgIdent, ok := sel.X.(*ast.Ident)
if !ok {
return nil
return nil, ""
}
if pkgIdent.Name == "httputil" && sel.Sel.Name == "HandleError" {
return call
selectorName := sel.Sel.Name
if pkgIdent.Name == "httputil" &&
(selectorName == "HandleError" || selectorName == "WriteError" || selectorName == "WriteJson" || selectorName == "WriteSSZ") {
return call, selectorName
}
return nil
return nil, ""
}
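Together with the `GetForkSchedule` change above, the extended analyzer enforces one shape in HTTP handlers. A minimal sketch (hypothetical handler and `lookup` helper; the `httputil` import path and `HandleError` signature are assumptions) of code that passes it:

```go
package handlersdemo

import (
	"net/http"

	"github.com/OffchainLabs/prysm/v7/network/httputil" // assumed import path
)

// getThing illustrates the rule the httpwriter analyzer enforces: every call
// to an httputil writer (HandleError, WriteError, WriteJson, WriteSSZ) must be
// immediately followed by a return, unless it is the final statement of a
// function with no return values.
func getThing(w http.ResponseWriter, r *http.Request) {
	thing, err := lookup(r)
	if err != nil {
		httputil.HandleError(w, err.Error(), http.StatusBadRequest)
		return // omitting this return is now reported by the analyzer
	}
	httputil.WriteJson(w, thing) // last statement of a void handler: no return required
}

// lookup is a hypothetical stand-in for real handler logic.
func lookup(_ *http.Request) (any, error) { return "thing", nil }
```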