Mirror of https://github.com/OffchainLabs/prysm.git, synced 2026-01-11 06:18:05 -05:00

Compare commits: no-batchin...fix-16223 (2 commits: 226cde6969, e0466737d1)
CHANGELOG.md (19 changed lines)
@@ -4,25 +4,6 @@ All notable changes to this project will be documented in this file.
The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.

## [v7.1.2](https://github.com/prysmaticlabs/prysm/compare/v7.1.1...v7.1.2) - 2026-01-07

Happy new year! This patch release is very small. The main improvement is better management of pending attestation aggregation via [PR 16153](https://github.com/OffchainLabs/prysm/pull/16153).

### Added

- `primitives.BuilderIndex`: SSZ `uint64` wrapper for builder registry indices. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16169)

### Changed

- The /eth/v2/beacon/pool/attestations and /eth/v1/beacon/pool/sync_committees endpoints now return a 503 error if the node is still syncing, and the REST API now broadcasts immediately, in the same way as gRPC. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16152)
- `validateDataColumn`: Remove error logs. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16157)
- Pending aggregates: When multiple aggregated attestations differing only by the aggregator index are in the pending queue, only process one of them. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16153)

### Fixed

- Fix the missing fork version object mapping for Fulu in light client p2p. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16151)
- Do not process slots and copy states for next epoch proposers after Fulu. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16168)

## [v7.1.1](https://github.com/prysmaticlabs/prysm/compare/v7.1.0...v7.1.1) - 2025-12-18

Release highlights:
WORKSPACE (10 changed lines)
@@ -273,16 +273,16 @@ filegroup(
    url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)

consensus_spec_version = "v1.7.0-alpha.0"
consensus_spec_version = "v1.6.0"

load("@prysm//tools:download_spectests.bzl", "consensus_spec_tests")

consensus_spec_tests(
    name = "consensus_spec_tests",
    flavors = {
        "general": "sha256-b+rJOuVqq+Dy53quPcNYcQwPFoMU7Wp7tdUVe7n0g8w=",
        "minimal": "sha256-qxRIxtjPxVsVCY90WsBJKhk0027XDSmhjnRvRN14V1c=",
        "mainnet": "sha256-NsuOQG3LzeiEE1TrWuvQ6vu6BboHv7h7f/RTS0pWkCs=",
        "general": "sha256-54hTaUNF9nLg+hRr3oHoq0yjZpW3MNiiUUuCQu6Rajk=",
        "minimal": "sha256-1JHIGg3gVMjvcGYRHR5cwdDgOvX47oR/MWp6gyAeZfA=",
        "mainnet": "sha256-292h3W2Ffts0YExgDTyxYe9Os7R0bZIXuAaMO8P6kl4=",
    },
    version = consensus_spec_version,
)
@@ -298,7 +298,7 @@ filegroup(
    visibility = ["//visibility:public"],
)
""",
    integrity = "sha256-hwNdUBgdBrkk6pWIpNYbzbwswUuOu6AMD2exN8uv+QQ=",
    integrity = "sha256-VzBgrEokvYSMIIXVnSA5XS9I3m9oxpvToQGxC1N5lzw=",
    strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
    url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)
@@ -212,8 +212,7 @@ func ProcessWithdrawals(st state.BeaconState, executionData interfaces.Execution
		if err != nil {
			return nil, errors.Wrap(err, "could not get next withdrawal validator index")
		}
		bound := min(uint64(st.NumValidators()), params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep)
		nextValidatorIndex += primitives.ValidatorIndex(bound)
		nextValidatorIndex += primitives.ValidatorIndex(params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep)
		nextValidatorIndex = nextValidatorIndex % primitives.ValidatorIndex(st.NumValidators())
	} else {
		nextValidatorIndex = expectedWithdrawals[len(expectedWithdrawals)-1].ValidatorIndex + 1
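The new `bound` mirrors the `process_withdrawals` / `get_expected_withdrawals` spec text quoted further down this page: when fewer than a full payload of withdrawals is produced, the sweep index advances by `min(len(validators), MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP)` and wraps modulo the validator count. A minimal sketch of that advance rule (hypothetical standalone helper, not the Prysm function):

```go
package main

import "fmt"

// nextSweepStart condenses the sweep-advance rule from the spec text quoted
// later on this page. Illustration only; this is not the Prysm implementation.
func nextSweepStart(current, numValidators, maxSweep, lastWithdrawnIndex uint64, fullPayload bool) uint64 {
	if fullPayload {
		// A full payload of withdrawals: resume right after the last withdrawn validator.
		return (lastWithdrawnIndex + 1) % numValidators
	}
	// Otherwise advance by the sweep bound and wrap around the registry.
	bound := min(numValidators, maxSweep)
	return (current + bound) % numValidators
}

func main() {
	// With 1,000 validators and a sweep limit of 16,384, the bound is capped at 1,000,
	// so a partial sweep wraps back to where it started instead of landing at an
	// arbitrary offset, which is the behavioral difference the diff above introduces.
	fmt.Println(nextSweepStart(250, 1000, 16384, 0, false)) // 250
}
```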
@@ -56,7 +56,9 @@ func (r StateRoots) MarshalSSZTo(dst []byte) ([]byte, error) {
func (r StateRoots) MarshalSSZ() ([]byte, error) {
	marshalled := make([]byte, fieldparams.StateRootsLength*32)
	for i, r32 := range r {
		copy(marshalled[i*32:(i+1)*32], r32[:])
		for j, rr := range r32 {
			marshalled[i*32+j] = rr
		}
	}
	return marshalled, nil
}
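The deleted changelog entry further down ("use copy() instead of byte-by-byte loop") describes the motivation for this change. A small benchmark sketch of the two copying strategies in isolation, if one wanted to measure the difference (illustrative only; the size constant and package name are assumptions, not Prysm code):

```go
package stateroots_test

import "testing"

const rootsLen = 8192 // illustrative size; Prysm uses fieldparams.StateRootsLength

var roots [rootsLen][32]byte

// BenchmarkByteLoop copies each root one byte at a time, as the old code did.
func BenchmarkByteLoop(b *testing.B) {
	out := make([]byte, rootsLen*32)
	for n := 0; n < b.N; n++ {
		for i, r32 := range roots {
			for j, rr := range r32 {
				out[i*32+j] = rr
			}
		}
	}
}

// BenchmarkCopy uses the builtin copy, as the new code does.
func BenchmarkCopy(b *testing.B) {
	out := make([]byte, rootsLen*32)
	for n := 0; n < b.N; n++ {
		for i, r32 := range roots {
			copy(out[i*32:(i+1)*32], r32[:])
		}
	}
}
```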
@@ -148,7 +148,7 @@ func (b batch) ensureParent(expected [32]byte) error {
func (b batch) blockRequest() *eth.BeaconBlocksByRangeRequest {
	return &eth.BeaconBlocksByRangeRequest{
		StartSlot: b.begin,
		Count:     uint64(b.end - b.begin),
		Count:     uint64(b.end.FlooredSubSlot(b.begin)),
		Step:      1,
	}
}
@@ -156,7 +156,7 @@ func (b batch) blockRequest() *eth.BeaconBlocksByRangeRequest {
func (b batch) blobRequest() *eth.BlobSidecarsByRangeRequest {
	return &eth.BlobSidecarsByRangeRequest{
		StartSlot: b.begin,
		Count:     uint64(b.end - b.begin),
		Count:     uint64(b.end.FlooredSubSlot(b.begin)),
	}
}
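`FlooredSubSlot` protects the `Count` field against unsigned underflow when `end` is not past `begin`; the new test cases below exercise exactly that. A minimal sketch of the idea with a plain uint64-backed slot type (assumed stand-in, not the Prysm primitives package):

```go
package main

import "fmt"

// Slot is a stand-in for a uint64-backed slot type; illustration only.
type Slot uint64

// flooredSub returns a-b, flooring at zero instead of wrapping around on underflow.
func flooredSub(a, b Slot) Slot {
	if a < b {
		return 0
	}
	return a - b
}

func main() {
	fmt.Println(uint64(flooredSub(200, 100))) // 100
	fmt.Println(uint64(flooredSub(100, 200))) // 0, not ~1.8e19 as plain uint64 subtraction would give
}
```

The capped slice allocation in SendBeaconBlocksByRangeRequest further down defends against the same class of oversized Count values on the receiving side.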
@@ -10,6 +10,93 @@ import (
	"github.com/pkg/errors"
)

func TestBlockRequest(t *testing.T) {
	cases := []struct {
		name          string
		begin         primitives.Slot
		end           primitives.Slot
		expectedCount uint64
	}{
		{
			name:          "normal case",
			begin:         100,
			end:           200,
			expectedCount: 100,
		},
		{
			name:          "end equals begin",
			begin:         100,
			end:           100,
			expectedCount: 0,
		},
		{
			name:          "end less than begin (would underflow without check)",
			begin:         200,
			end:           100,
			expectedCount: 0,
		},
		{
			name:          "zero values",
			begin:         0,
			end:           0,
			expectedCount: 0,
		},
		{
			name:          "single slot",
			begin:         0,
			end:           1,
			expectedCount: 1,
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			b := batch{begin: tc.begin, end: tc.end}
			req := b.blockRequest()
			require.Equal(t, tc.expectedCount, req.Count)
			require.Equal(t, tc.begin, req.StartSlot)
			require.Equal(t, uint64(1), req.Step)
		})
	}
}

func TestBlobRequest(t *testing.T) {
	cases := []struct {
		name          string
		begin         primitives.Slot
		end           primitives.Slot
		expectedCount uint64
	}{
		{
			name:          "normal case",
			begin:         100,
			end:           200,
			expectedCount: 100,
		},
		{
			name:          "end equals begin",
			begin:         100,
			end:           100,
			expectedCount: 0,
		},
		{
			name:          "end less than begin (would underflow without check)",
			begin:         200,
			end:           100,
			expectedCount: 0,
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			b := batch{begin: tc.begin, end: tc.end}
			req := b.blobRequest()
			require.Equal(t, tc.expectedCount, req.Count)
			require.Equal(t, tc.begin, req.StartSlot)
		})
	}
}

func TestSortBatchDesc(t *testing.T) {
	orderIn := []primitives.Slot{100, 10000, 1}
	orderOut := []primitives.Slot{10000, 100, 1}
@@ -4,6 +4,9 @@ import (
	"context"
	"time"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/verification"
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v7/crypto/bls"
	"github.com/OffchainLabs/prysm/v7/monitoring/tracing"
@@ -53,6 +56,32 @@ func (s *Service) verifierRoutine() {
	}
}

// A routine that runs in the background to perform batch
// KZG verifications by draining the channel and processing all pending requests.
func (s *Service) kzgVerifierRoutine() {
	for {
		kzgBatch := make([]*kzgVerifier, 0, 1)
		select {
		case <-s.ctx.Done():
			return
		case kzg := <-s.kzgChan:
			kzgBatch = append(kzgBatch, kzg)
		}
		for {
			select {
			case <-s.ctx.Done():
				return
			case kzg := <-s.kzgChan:
				kzgBatch = append(kzgBatch, kzg)
				continue
			default:
				verifyKzgBatch(kzgBatch)
			}
			break
		}
	}
}

func (s *Service) validateWithBatchVerifier(ctx context.Context, message string, set *bls.SignatureBatch) (pubsub.ValidationResult, error) {
	_, span := trace.StartSpan(ctx, "sync.validateWithBatchVerifier")
	defer span.End()
@@ -125,3 +154,71 @@ func performBatchAggregation(aggSet *bls.SignatureBatch) (*bls.SignatureBatch, e
	}
	return aggSet, nil
}

func (s *Service) validateWithKzgBatchVerifier(ctx context.Context, dataColumns []blocks.RODataColumn) (pubsub.ValidationResult, error) {
	_, span := trace.StartSpan(ctx, "sync.validateWithKzgBatchVerifier")
	defer span.End()

	timeout := time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second

	resChan := make(chan error, 1)
	verificationSet := &kzgVerifier{dataColumns: dataColumns, resChan: resChan}
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()

	select {
	case s.kzgChan <- verificationSet:
	case <-ctx.Done():
		return pubsub.ValidationIgnore, ctx.Err()
	}

	select {
	case <-ctx.Done():
		return pubsub.ValidationIgnore, ctx.Err() // parent context canceled, give up
	case err := <-resChan:
		if err != nil {
			log.WithError(err).Trace("Could not perform batch verification")
			tracing.AnnotateError(span, err)
			return s.validateUnbatchedColumnsKzg(ctx, dataColumns)
		}
	}
	return pubsub.ValidationAccept, nil
}

func (s *Service) validateUnbatchedColumnsKzg(ctx context.Context, columns []blocks.RODataColumn) (pubsub.ValidationResult, error) {
	_, span := trace.StartSpan(ctx, "sync.validateUnbatchedColumnsKzg")
	defer span.End()
	start := time.Now()
	if err := peerdas.VerifyDataColumnsSidecarKZGProofs(columns); err != nil {
		err = errors.Wrap(err, "could not verify")
		tracing.AnnotateError(span, err)
		return pubsub.ValidationReject, err
	}
	verification.DataColumnBatchKZGVerificationHistogram.WithLabelValues("fallback").Observe(float64(time.Since(start).Milliseconds()))
	return pubsub.ValidationAccept, nil
}

func verifyKzgBatch(kzgBatch []*kzgVerifier) {
	if len(kzgBatch) == 0 {
		return
	}

	allDataColumns := make([]blocks.RODataColumn, 0, len(kzgBatch))
	for _, kzgVerifier := range kzgBatch {
		allDataColumns = append(allDataColumns, kzgVerifier.dataColumns...)
	}

	var verificationErr error
	start := time.Now()
	err := peerdas.VerifyDataColumnsSidecarKZGProofs(allDataColumns)
	if err != nil {
		verificationErr = errors.Wrap(err, "batch KZG verification failed")
	} else {
		verification.DataColumnBatchKZGVerificationHistogram.WithLabelValues("batch").Observe(float64(time.Since(start).Milliseconds()))
	}

	// Send the same result to all verifiers in the batch
	for _, verifier := range kzgBatch {
		verifier.resChan <- verificationErr
	}
}
@@ -1,14 +1,339 @@
package sync

import (
	"context"
	"sync"
	"testing"
	"time"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v7/testing/assert"
	"github.com/OffchainLabs/prysm/v7/testing/require"
	"github.com/OffchainLabs/prysm/v7/testing/util"
	pubsub "github.com/libp2p/go-libp2p-pubsub"
)

func TestValidateWithKzgBatchVerifier(t *testing.T) {
	err := kzg.Start()
	require.NoError(t, err)

	tests := []struct {
		name           string
		dataColumns    []blocks.RODataColumn
		expectedResult pubsub.ValidationResult
		expectError    bool
	}{
		{
			name:           "single valid data column",
			dataColumns:    createValidTestDataColumns(t, 1),
			expectedResult: pubsub.ValidationAccept,
			expectError:    false,
		},
		{
			name:           "multiple valid data columns",
			dataColumns:    createValidTestDataColumns(t, 3),
			expectedResult: pubsub.ValidationAccept,
			expectError:    false,
		},
		{
			name:           "single invalid data column",
			dataColumns:    createInvalidTestDataColumns(t, 1),
			expectedResult: pubsub.ValidationReject,
			expectError:    true,
		},
		{
			name:           "empty data column slice",
			dataColumns:    []blocks.RODataColumn{},
			expectedResult: pubsub.ValidationAccept,
			expectError:    false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			ctx := t.Context()

			service := &Service{
				ctx:     ctx,
				kzgChan: make(chan *kzgVerifier, 100),
			}
			go service.kzgVerifierRoutine()

			result, err := service.validateWithKzgBatchVerifier(ctx, tt.dataColumns)

			require.Equal(t, tt.expectedResult, result)
			if tt.expectError {
				assert.NotNil(t, err)
			} else {
				assert.NoError(t, err)
			}
		})
	}
}

func TestVerifierRoutine(t *testing.T) {
	err := kzg.Start()
	require.NoError(t, err)

	t.Run("processes single request", func(t *testing.T) {
		ctx := t.Context()

		service := &Service{
			ctx:     ctx,
			kzgChan: make(chan *kzgVerifier, 100),
		}
		go service.kzgVerifierRoutine()

		dataColumns := createValidTestDataColumns(t, 1)
		resChan := make(chan error, 1)
		service.kzgChan <- &kzgVerifier{dataColumns: dataColumns, resChan: resChan}

		select {
		case err := <-resChan:
			require.NoError(t, err)
		case <-time.After(time.Second):
			t.Fatal("timeout waiting for verification result")
		}
	})

	t.Run("batches multiple requests", func(t *testing.T) {
		ctx := t.Context()

		service := &Service{
			ctx:     ctx,
			kzgChan: make(chan *kzgVerifier, 100),
		}
		go service.kzgVerifierRoutine()

		const numRequests = 5
		resChans := make([]chan error, numRequests)

		for i := range numRequests {
			dataColumns := createValidTestDataColumns(t, 1)
			resChan := make(chan error, 1)
			resChans[i] = resChan
			service.kzgChan <- &kzgVerifier{dataColumns: dataColumns, resChan: resChan}
		}

		for i := range numRequests {
			select {
			case err := <-resChans[i]:
				require.NoError(t, err)
			case <-time.After(time.Second):
				t.Fatalf("timeout waiting for verification result %d", i)
			}
		}
	})

	t.Run("context cancellation stops routine", func(t *testing.T) {
		ctx, cancel := context.WithCancel(context.Background())

		service := &Service{
			ctx:     ctx,
			kzgChan: make(chan *kzgVerifier, 100),
		}

		routineDone := make(chan struct{})
		go func() {
			service.kzgVerifierRoutine()
			close(routineDone)
		}()

		cancel()

		select {
		case <-routineDone:
		case <-time.After(time.Second):
			t.Fatal("timeout waiting for routine to exit")
		}
	})
}

func TestVerifyKzgBatch(t *testing.T) {
	err := kzg.Start()
	require.NoError(t, err)

	t.Run("all valid data columns succeed", func(t *testing.T) {
		dataColumns := createValidTestDataColumns(t, 3)
		resChan := make(chan error, 1)
		kzgVerifiers := []*kzgVerifier{{dataColumns: dataColumns, resChan: resChan}}

		verifyKzgBatch(kzgVerifiers)

		select {
		case err := <-resChan:
			require.NoError(t, err)
		case <-time.After(time.Second):
			t.Fatal("timeout waiting for batch verification")
		}
	})

	t.Run("invalid proofs fail entire batch", func(t *testing.T) {
		validColumns := createValidTestDataColumns(t, 1)
		invalidColumns := createInvalidTestDataColumns(t, 1)
		allColumns := append(validColumns, invalidColumns...)

		resChan := make(chan error, 1)
		kzgVerifiers := []*kzgVerifier{{dataColumns: allColumns, resChan: resChan}}

		verifyKzgBatch(kzgVerifiers)

		select {
		case err := <-resChan:
			assert.NotNil(t, err)
		case <-time.After(time.Second):
			t.Fatal("timeout waiting for batch verification")
		}
	})

	t.Run("empty batch handling", func(t *testing.T) {
		verifyKzgBatch([]*kzgVerifier{})
	})
}

func TestKzgBatchVerifierConcurrency(t *testing.T) {
	err := kzg.Start()
	require.NoError(t, err)

	ctx := t.Context()

	service := &Service{
		ctx:     ctx,
		kzgChan: make(chan *kzgVerifier, 100),
	}
	go service.kzgVerifierRoutine()

	const numGoroutines = 10
	const numRequestsPerGoroutine = 5

	var wg sync.WaitGroup
	wg.Add(numGoroutines)

	// Multiple goroutines sending verification requests simultaneously
	for i := range numGoroutines {
		go func(goroutineID int) {
			defer wg.Done()

			for range numRequestsPerGoroutine {
				dataColumns := createValidTestDataColumns(t, 1)
				result, err := service.validateWithKzgBatchVerifier(ctx, dataColumns)
				require.Equal(t, pubsub.ValidationAccept, result)
				require.NoError(t, err)
			}
		}(i)
	}

	wg.Wait()
}

func TestKzgBatchVerifierFallback(t *testing.T) {
	err := kzg.Start()
	require.NoError(t, err)

	t.Run("fallback handles mixed valid/invalid batch correctly", func(t *testing.T) {
		ctx := t.Context()

		service := &Service{
			ctx:     ctx,
			kzgChan: make(chan *kzgVerifier, 100),
		}
		go service.kzgVerifierRoutine()

		validColumns := createValidTestDataColumns(t, 1)
		invalidColumns := createInvalidTestDataColumns(t, 1)

		result, err := service.validateWithKzgBatchVerifier(ctx, validColumns)
		require.Equal(t, pubsub.ValidationAccept, result)
		require.NoError(t, err)

		result, err = service.validateWithKzgBatchVerifier(ctx, invalidColumns)
		require.Equal(t, pubsub.ValidationReject, result)
		assert.NotNil(t, err)
	})

	t.Run("empty data columns fallback", func(t *testing.T) {
		ctx := t.Context()

		service := &Service{
			ctx:     ctx,
			kzgChan: make(chan *kzgVerifier, 100),
		}
		go service.kzgVerifierRoutine()

		result, err := service.validateWithKzgBatchVerifier(ctx, []blocks.RODataColumn{})
		require.Equal(t, pubsub.ValidationAccept, result)
		require.NoError(t, err)
	})
}

func TestValidateWithKzgBatchVerifier_DeadlockOnTimeout(t *testing.T) {
	err := kzg.Start()
	require.NoError(t, err)

	params.SetupTestConfigCleanup(t)
	cfg := params.BeaconConfig().Copy()
	cfg.SecondsPerSlot = 0
	params.OverrideBeaconConfig(cfg)

	ctx, cancel := context.WithCancel(t.Context())
	defer cancel()

	service := &Service{
		ctx:     ctx,
		kzgChan: make(chan *kzgVerifier),
	}
	go service.kzgVerifierRoutine()

	result, err := service.validateWithKzgBatchVerifier(context.Background(), nil)
	require.Equal(t, pubsub.ValidationIgnore, result)
	require.ErrorIs(t, err, context.DeadlineExceeded)

	done := make(chan struct{})
	go func() {
		_, _ = service.validateWithKzgBatchVerifier(context.Background(), nil)
		close(done)
	}()

	select {
	case <-done:
	case <-time.After(500 * time.Millisecond):
		t.Fatal("validateWithKzgBatchVerifier blocked")
	}
}

func TestValidateWithKzgBatchVerifier_ContextCanceledBeforeSend(t *testing.T) {
	cancelledCtx, cancel := context.WithCancel(t.Context())
	cancel()

	service := &Service{
		ctx:     context.Background(),
		kzgChan: make(chan *kzgVerifier),
	}

	done := make(chan struct{})
	go func() {
		result, err := service.validateWithKzgBatchVerifier(cancelledCtx, nil)
		require.Equal(t, pubsub.ValidationIgnore, result)
		require.ErrorIs(t, err, context.Canceled)
		close(done)
	}()

	select {
	case <-done:
	case <-time.After(500 * time.Millisecond):
		t.Fatal("validateWithKzgBatchVerifier did not return after context cancellation")
	}

	select {
	case <-service.kzgChan:
		t.Fatal("verificationSet was sent to kzgChan despite canceled context")
	default:
	}
}

func createValidTestDataColumns(t *testing.T, count int) []blocks.RODataColumn {
	_, roSidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, count)
	if len(roSidecars) >= count {
@@ -77,8 +77,13 @@ func SendBeaconBlocksByRangeRequest(
	}
	defer closeStream(stream, log)

	// Cap the slice capacity to MaxRequestBlock to prevent panic from invalid Count values.
	// This guards against upstream bugs that may produce astronomically large Count values
	// (e.g., due to unsigned integer underflow).
	sliceCap := min(req.Count, params.MaxRequestBlock(slots.ToEpoch(tor.CurrentSlot())))

	// Augment block processing function, if non-nil block processor is provided.
	blocks := make([]interfaces.ReadOnlySignedBeaconBlock, 0, req.Count)
	blocks := make([]interfaces.ReadOnlySignedBeaconBlock, 0, sliceCap)
	process := func(blk interfaces.ReadOnlySignedBeaconBlock) error {
		blocks = append(blocks, blk)
		if blockProcessor != nil {
@@ -165,6 +165,7 @@ type Service struct {
	syncContributionBitsOverlapLock  sync.RWMutex
	syncContributionBitsOverlapCache *lru.Cache
	signatureChan                    chan *signatureVerifier
	kzgChan                          chan *kzgVerifier
	clockWaiter                      startup.ClockWaiter
	initialSyncComplete              chan struct{}
	verifierWaiter                   *verification.InitializerWaiter
@@ -205,7 +206,10 @@ func NewService(ctx context.Context, opts ...Option) *Service {
	}
	// Initialize signature channel with configured limit
	r.signatureChan = make(chan *signatureVerifier, r.cfg.batchVerifierLimit)

	// Initialize KZG channel with fixed buffer size of 100.
	// This buffer size is designed to handle burst traffic of data column gossip messages:
	// - Data columns arrive less frequently than attestations (default batchVerifierLimit=1000)
	r.kzgChan = make(chan *kzgVerifier, 100)
	// Correctly remove it from our seen pending block map.
	// The eviction method always assumes that the mutex is held.
	r.slotToPendingBlocks.OnEvicted(func(s string, i any) {
@@ -258,6 +262,7 @@ func (s *Service) Start() {
	s.newColumnsVerifier = newDataColumnsVerifierFromInitializer(v)

	go s.verifierRoutine()
	go s.kzgVerifierRoutine()
	go s.startDiscoveryAndSubscriptions()
	go s.processDataColumnLogs()
@@ -144,9 +144,12 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs
	}

	// [REJECT] The sidecar's column data is valid as verified by `verify_data_column_sidecar_kzg_proofs(sidecar)`.
	if err := verifier.SidecarKzgProofVerified(); err != nil {
		return pubsub.ValidationReject, err
	validationResult, err := s.validateWithKzgBatchVerifier(ctx, roDataColumns)
	if validationResult != pubsub.ValidationAccept {
		return validationResult, err
	}
	// Mark KZG verification as satisfied since we did it via batch verifier
	verifier.SatisfyRequirement(verification.RequireSidecarKzgProofVerified)

	// [IGNORE] The sidecar is the first sidecar for the tuple `(block_header.slot, block_header.proposer_index, sidecar.index)`
	// with valid header signature, sidecar inclusion proof, and kzg proof.

@@ -71,7 +71,10 @@ func TestValidateDataColumn(t *testing.T) {
		ctx:                 ctx,
		newColumnsVerifier:  newDataColumnsVerifier,
		seenDataColumnCache: newSlotAwareCache(seenDataColumnSize),
		kzgChan:             make(chan *kzgVerifier, 100),
	}
	// Start the KZG verifier routine for batch verification
	go service.kzgVerifierRoutine()

	// Encode a `beaconBlock` message instead of expected.
	buf := new(bytes.Buffer)
changelog/bastin_fix-lcp2p-bug.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Fixed

- Fix the missing fork version object mapping for Fulu in light client p2p.

changelog/builder-index.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Added

- `primitives.BuilderIndex`: SSZ `uint64` wrapper for builder registry indices.

changelog/james-prysm_align-atter-pool-apis.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Changed

- The /eth/v2/beacon/pool/attestations and /eth/v1/beacon/pool/sync_committees endpoints now return a 503 error if the node is still syncing, and the REST API now broadcasts immediately, in the same way as gRPC.
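For illustration, a client submitting to these endpoints on a still-syncing node would now see the 503 instead of an immediate accept. A minimal client sketch (hypothetical usage, assuming a node listening on the default local REST port; not part of this change):

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// Submit an (empty) attestation list; the endpoint path comes from the
	// changelog entry above, the host and port are assumptions.
	resp, err := http.Post(
		"http://127.0.0.1:3500/eth/v2/beacon/pool/attestations",
		"application/json",
		strings.NewReader(`[]`),
	)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	if resp.StatusCode == http.StatusServiceUnavailable {
		fmt.Println("node is still syncing; retry later")
	}
}
```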
changelog/manu-agg.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Changed

- Pending aggregates: When multiple aggregated attestations differing only by the aggregator index are in the pending queue, only process one of them.

@@ -1,3 +0,0 @@
### Removed

- Batching of KZG verification for data column sidecars incoming via gossip

changelog/manu-remove-error-logs.md (new file, 2 lines)
@@ -0,0 +1,2 @@
### Changed
- `validateDataColumn`: Remove error logs.

changelog/potuz_next_epoch_attributes.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Fixed

- Do not process slots and copy states for next epoch proposers after Fulu

@@ -1,2 +0,0 @@
### Added
- Update spectests to v1.7.0-alpha.0

@@ -1,3 +0,0 @@
### Ignored

- Updated changelog for v7.1.2

changelog/pvl-fix-16223.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Changed

- Added some defensive checks to prevent overflows in block batch requests.

@@ -1,3 +0,0 @@
### Changed

- Performance improvement in state (MarshalSSZTo): use copy() instead of a byte-by-byte loop, which isn't required.
@@ -55,8 +55,7 @@ var placeholderFields = []string{
	"MAX_REQUEST_BLOB_SIDECARS_FULU",
	"MAX_REQUEST_INCLUSION_LIST",
	"MAX_REQUEST_PAYLOADS", // Compile time constant on BeaconBlockBody.ExecutionRequests
	"MIN_BUILDER_WITHDRAWABILITY_DELAY",
	"NUMBER_OF_COLUMNS", // Configured as a constant in config/fieldparams/mainnet.go
	"NUMBER_OF_COLUMNS", // Configured as a constant in config/fieldparams/mainnet.go
	"PAYLOAD_ATTESTATION_DUE_BPS",
	"PROPOSER_INCLUSION_LIST_CUTOFF",
	"PROPOSER_INCLUSION_LIST_CUTOFF_BPS",

@@ -1,4 +1,4 @@
version: v1.7.0-alpha.0
version: v1.6.0
style: full

specrefs:
@@ -57,12 +57,6 @@ exceptions:
|
||||
- PAYLOAD_STATUS_EMPTY#gloas
|
||||
- PAYLOAD_STATUS_FULL#gloas
|
||||
- PAYLOAD_STATUS_PENDING#gloas
|
||||
- ATTESTATION_TIMELINESS_INDEX#gloas
|
||||
- BUILDER_INDEX_FLAG#gloas
|
||||
- BUILDER_INDEX_SELF_BUILD#gloas
|
||||
- DOMAIN_PROPOSER_PREFERENCES#gloas
|
||||
- NUM_BLOCK_TIMELINESS_DEADLINES#gloas
|
||||
- PTC_TIMELINESS_INDEX#gloas
|
||||
|
||||
configs:
|
||||
# Not implemented (placeholders)
|
||||
@@ -82,7 +76,6 @@ exceptions:
|
||||
- MAX_REQUEST_PAYLOADS#gloas
|
||||
- PAYLOAD_ATTESTATION_DUE_BPS#gloas
|
||||
- SYNC_MESSAGE_DUE_BPS_GLOAS#gloas
|
||||
- MIN_BUILDER_WITHDRAWABILITY_DELAY#gloas
|
||||
|
||||
ssz_objects:
|
||||
# Not implemented
|
||||
@@ -110,9 +103,6 @@ exceptions:
|
||||
- PayloadAttestationMessage#gloas
|
||||
- SignedExecutionPayloadEnvelope#gloas
|
||||
- SignedExecutionPayloadBid#gloas
|
||||
- Builder#gloas
|
||||
- ProposerPreferences#gloas
|
||||
- SignedProposerPreferences#gloas
|
||||
|
||||
dataclasses:
|
||||
# Not implemented
|
||||
@@ -341,8 +331,10 @@ exceptions:
|
||||
- get_ptc#gloas
|
||||
- get_ptc_assignment#gloas
|
||||
- get_weight#gloas
|
||||
- has_builder_withdrawal_credential#gloas
|
||||
- has_compounding_withdrawal_credential#gloas
|
||||
- is_attestation_same_slot#gloas
|
||||
- is_builder_payment_withdrawable#gloas
|
||||
- is_builder_withdrawal_credential#gloas
|
||||
- is_merge_transition_complete#gloas
|
||||
- is_parent_block_full#gloas
|
||||
@@ -366,6 +358,7 @@ exceptions:
|
||||
- process_proposer_slashing#gloas
|
||||
- process_slot#gloas
|
||||
- process_withdrawals#gloas
|
||||
- remove_flag#gloas
|
||||
- should_extend_payload#gloas
|
||||
- update_latest_messages#gloas
|
||||
- upgrade_to_gloas#gloas
|
||||
@@ -375,55 +368,3 @@ exceptions:
|
||||
- verify_data_column_sidecar_inclusion_proof#gloas
|
||||
- verify_execution_payload_envelope_signature#gloas
|
||||
- verify_execution_payload_bid_signature#gloas
|
||||
- add_builder_to_registry#gloas
|
||||
- apply_deposit_for_builder#gloas
|
||||
- apply_withdrawals#capella
|
||||
- apply_withdrawals#gloas
|
||||
- can_builder_cover_bid#gloas
|
||||
- compute_proposer_score#phase0
|
||||
- convert_builder_index_to_validator_index#gloas
|
||||
- convert_validator_index_to_builder_index#gloas
|
||||
- get_attestation_score#gloas
|
||||
- get_attestation_score#phase0
|
||||
- get_balance_after_withdrawals#capella
|
||||
- get_builder_from_deposit#gloas
|
||||
- get_builder_withdrawals#gloas
|
||||
- get_builders_sweep_withdrawals#gloas
|
||||
- get_index_for_new_builder#gloas
|
||||
- get_pending_balance_to_withdraw_for_builder#gloas
|
||||
- get_pending_partial_withdrawals#electra
|
||||
- get_proposer_preferences_signature#gloas
|
||||
- get_upcoming_proposal_slots#gloas
|
||||
- get_validators_sweep_withdrawals#capella
|
||||
- get_validators_sweep_withdrawals#electra
|
||||
- initiate_builder_exit#gloas
|
||||
- is_active_builder#gloas
|
||||
- is_builder_index#gloas
|
||||
- is_eligible_for_partial_withdrawals#electra
|
||||
- is_head_late#gloas
|
||||
- is_head_weak#gloas
|
||||
- is_parent_strong#gloas
|
||||
- is_proposer_equivocation#phase0
|
||||
- is_valid_proposal_slot#gloas
|
||||
- process_deposit_request#gloas
|
||||
- process_voluntary_exit#gloas
|
||||
- record_block_timeliness#gloas
|
||||
- record_block_timeliness#phase0
|
||||
- should_apply_proposer_boost#gloas
|
||||
- update_builder_pending_withdrawals#gloas
|
||||
- update_next_withdrawal_builder_index#gloas
|
||||
- update_next_withdrawal_index#capella
|
||||
- update_next_withdrawal_validator_index#capella
|
||||
- update_payload_expected_withdrawals#gloas
|
||||
- update_pending_partial_withdrawals#electra
|
||||
- update_proposer_boost_root#gloas
|
||||
- update_proposer_boost_root#phase0
|
||||
|
||||
presets:
|
||||
- CELLS_PER_EXT_BLOB#fulu
|
||||
- BUILDER_PENDING_WITHDRAWALS_LIMIT#gloas
|
||||
- BUILDER_REGISTRY_LIMIT#gloas
|
||||
- MAX_BUILDERS_PER_WITHDRAWALS_SWEEP#gloas
|
||||
- MAX_PAYLOAD_ATTESTATIONS#gloas
|
||||
- PTC_SIZE#gloas
|
||||
- UPDATE_TIMEOUT#altair
|
||||
|
||||
@@ -304,6 +304,16 @@
|
||||
GENESIS_SLOT: Slot = 0
|
||||
</spec>
|
||||
|
||||
- name: INTERVALS_PER_SLOT
|
||||
sources:
|
||||
- file: config/params/config.go
|
||||
search: IntervalsPerSlot\s+.*yaml:"INTERVALS_PER_SLOT"
|
||||
regex: true
|
||||
spec: |
|
||||
<spec constant_var="INTERVALS_PER_SLOT" fork="phase0" hash="3352e419">
|
||||
INTERVALS_PER_SLOT: uint64 = 3
|
||||
</spec>
|
||||
|
||||
- name: JUSTIFICATION_BITS_LENGTH
|
||||
sources:
|
||||
- file: config/params/config.go
|
||||
|
||||
@@ -698,7 +698,7 @@
|
||||
- name: compute_matrix
|
||||
sources: []
|
||||
spec: |
|
||||
<spec fn="compute_matrix" fork="fulu" hash="0b88eac1">
|
||||
<spec fn="compute_matrix" fork="fulu" hash="b39370ca">
|
||||
def compute_matrix(blobs: Sequence[Blob]) -> Sequence[MatrixEntry]:
|
||||
"""
|
||||
Return the full, flattened sequence of matrix entries.
|
||||
@@ -714,8 +714,8 @@
|
||||
MatrixEntry(
|
||||
cell=cell,
|
||||
kzg_proof=proof,
|
||||
column_index=cell_index,
|
||||
row_index=blob_index,
|
||||
column_index=cell_index,
|
||||
)
|
||||
)
|
||||
return matrix
|
||||
@@ -739,7 +739,7 @@
|
||||
- file: beacon-chain/rpc/prysm/v1alpha1/validator/proposer_attestations_electra.go
|
||||
search: func computeOnChainAggregate(
|
||||
spec: |
|
||||
<spec fn="compute_on_chain_aggregate" fork="electra" hash="f020af4c">
|
||||
<spec fn="compute_on_chain_aggregate" fork="electra" hash="128055d6">
|
||||
def compute_on_chain_aggregate(network_aggregates: Sequence[Attestation]) -> Attestation:
|
||||
aggregates = sorted(
|
||||
network_aggregates, key=lambda a: get_committee_indices(a.committee_bits)[0]
|
||||
@@ -760,8 +760,8 @@
|
||||
return Attestation(
|
||||
aggregation_bits=aggregation_bits,
|
||||
data=data,
|
||||
signature=signature,
|
||||
committee_bits=committee_bits,
|
||||
signature=signature,
|
||||
)
|
||||
</spec>
|
||||
|
||||
@@ -2366,18 +2366,40 @@
|
||||
- file: beacon-chain/state/state-native/getters_withdrawal.go
|
||||
search: func (b *BeaconState) ExpectedWithdrawals(
|
||||
spec: |
|
||||
<spec fn="get_expected_withdrawals" fork="capella" hash="d6a98c14">
|
||||
def get_expected_withdrawals(state: BeaconState) -> Tuple[Sequence[Withdrawal], uint64]:
|
||||
<spec fn="get_expected_withdrawals" fork="capella" hash="09191977">
|
||||
def get_expected_withdrawals(state: BeaconState) -> Sequence[Withdrawal]:
|
||||
epoch = get_current_epoch(state)
|
||||
withdrawal_index = state.next_withdrawal_index
|
||||
validator_index = state.next_withdrawal_validator_index
|
||||
withdrawals: List[Withdrawal] = []
|
||||
|
||||
# Get validators sweep withdrawals
|
||||
validators_sweep_withdrawals, withdrawal_index, processed_validators_sweep_count = (
|
||||
get_validators_sweep_withdrawals(state, withdrawal_index, withdrawals)
|
||||
)
|
||||
withdrawals.extend(validators_sweep_withdrawals)
|
||||
|
||||
return withdrawals, processed_validators_sweep_count
|
||||
bound = min(len(state.validators), MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP)
|
||||
for _ in range(bound):
|
||||
validator = state.validators[validator_index]
|
||||
balance = state.balances[validator_index]
|
||||
if is_fully_withdrawable_validator(validator, balance, epoch):
|
||||
withdrawals.append(
|
||||
Withdrawal(
|
||||
index=withdrawal_index,
|
||||
validator_index=validator_index,
|
||||
address=ExecutionAddress(validator.withdrawal_credentials[12:]),
|
||||
amount=balance,
|
||||
)
|
||||
)
|
||||
withdrawal_index += WithdrawalIndex(1)
|
||||
elif is_partially_withdrawable_validator(validator, balance):
|
||||
withdrawals.append(
|
||||
Withdrawal(
|
||||
index=withdrawal_index,
|
||||
validator_index=validator_index,
|
||||
address=ExecutionAddress(validator.withdrawal_credentials[12:]),
|
||||
amount=balance - MAX_EFFECTIVE_BALANCE,
|
||||
)
|
||||
)
|
||||
withdrawal_index += WithdrawalIndex(1)
|
||||
if len(withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD:
|
||||
break
|
||||
validator_index = ValidatorIndex((validator_index + 1) % len(state.validators))
|
||||
return withdrawals
|
||||
</spec>
|
||||
|
||||
- name: get_expected_withdrawals#electra
|
||||
@@ -2385,26 +2407,80 @@
|
||||
- file: beacon-chain/state/state-native/getters_withdrawal.go
|
||||
search: func (b *BeaconState) ExpectedWithdrawals(
|
||||
spec: |
|
||||
<spec fn="get_expected_withdrawals" fork="electra" hash="cfce862b">
|
||||
def get_expected_withdrawals(state: BeaconState) -> Tuple[Sequence[Withdrawal], uint64, uint64]:
|
||||
<spec fn="get_expected_withdrawals" fork="electra" hash="060932cd">
|
||||
def get_expected_withdrawals(state: BeaconState) -> Tuple[Sequence[Withdrawal], uint64]:
|
||||
epoch = get_current_epoch(state)
|
||||
withdrawal_index = state.next_withdrawal_index
|
||||
validator_index = state.next_withdrawal_validator_index
|
||||
withdrawals: List[Withdrawal] = []
|
||||
processed_partial_withdrawals_count = 0
|
||||
|
||||
# [New in Electra:EIP7251]
|
||||
# Get partial withdrawals
|
||||
partial_withdrawals, withdrawal_index, processed_partial_withdrawals_count = (
|
||||
get_pending_partial_withdrawals(state, withdrawal_index, withdrawals)
|
||||
)
|
||||
withdrawals.extend(partial_withdrawals)
|
||||
# Consume pending partial withdrawals
|
||||
for withdrawal in state.pending_partial_withdrawals:
|
||||
if (
|
||||
withdrawal.withdrawable_epoch > epoch
|
||||
or len(withdrawals) == MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP
|
||||
):
|
||||
break
|
||||
|
||||
# Get validators sweep withdrawals
|
||||
validators_sweep_withdrawals, withdrawal_index, processed_validators_sweep_count = (
|
||||
get_validators_sweep_withdrawals(state, withdrawal_index, withdrawals)
|
||||
)
|
||||
withdrawals.extend(validators_sweep_withdrawals)
|
||||
validator = state.validators[withdrawal.validator_index]
|
||||
has_sufficient_effective_balance = validator.effective_balance >= MIN_ACTIVATION_BALANCE
|
||||
total_withdrawn = sum(
|
||||
w.amount for w in withdrawals if w.validator_index == withdrawal.validator_index
|
||||
)
|
||||
balance = state.balances[withdrawal.validator_index] - total_withdrawn
|
||||
has_excess_balance = balance > MIN_ACTIVATION_BALANCE
|
||||
if (
|
||||
validator.exit_epoch == FAR_FUTURE_EPOCH
|
||||
and has_sufficient_effective_balance
|
||||
and has_excess_balance
|
||||
):
|
||||
withdrawable_balance = min(balance - MIN_ACTIVATION_BALANCE, withdrawal.amount)
|
||||
withdrawals.append(
|
||||
Withdrawal(
|
||||
index=withdrawal_index,
|
||||
validator_index=withdrawal.validator_index,
|
||||
address=ExecutionAddress(validator.withdrawal_credentials[12:]),
|
||||
amount=withdrawable_balance,
|
||||
)
|
||||
)
|
||||
withdrawal_index += WithdrawalIndex(1)
|
||||
|
||||
# [Modified in Electra:EIP7251]
|
||||
return withdrawals, processed_partial_withdrawals_count, processed_validators_sweep_count
|
||||
processed_partial_withdrawals_count += 1
|
||||
|
||||
# Sweep for remaining.
|
||||
bound = min(len(state.validators), MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP)
|
||||
for _ in range(bound):
|
||||
validator = state.validators[validator_index]
|
||||
# [Modified in Electra:EIP7251]
|
||||
total_withdrawn = sum(w.amount for w in withdrawals if w.validator_index == validator_index)
|
||||
balance = state.balances[validator_index] - total_withdrawn
|
||||
if is_fully_withdrawable_validator(validator, balance, epoch):
|
||||
withdrawals.append(
|
||||
Withdrawal(
|
||||
index=withdrawal_index,
|
||||
validator_index=validator_index,
|
||||
address=ExecutionAddress(validator.withdrawal_credentials[12:]),
|
||||
amount=balance,
|
||||
)
|
||||
)
|
||||
withdrawal_index += WithdrawalIndex(1)
|
||||
elif is_partially_withdrawable_validator(validator, balance):
|
||||
withdrawals.append(
|
||||
Withdrawal(
|
||||
index=withdrawal_index,
|
||||
validator_index=validator_index,
|
||||
address=ExecutionAddress(validator.withdrawal_credentials[12:]),
|
||||
# [Modified in Electra:EIP7251]
|
||||
amount=balance - get_max_effective_balance(validator),
|
||||
)
|
||||
)
|
||||
withdrawal_index += WithdrawalIndex(1)
|
||||
if len(withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD:
|
||||
break
|
||||
validator_index = ValidatorIndex((validator_index + 1) % len(state.validators))
|
||||
return withdrawals, processed_partial_withdrawals_count
|
||||
</spec>
|
||||
|
||||
- name: get_filtered_block_tree
|
||||
@@ -2977,7 +3053,7 @@
|
||||
- name: get_proposer_head
|
||||
sources: []
|
||||
spec: |
|
||||
<spec fn="get_proposer_head" fork="phase0" hash="99e8fc05">
|
||||
<spec fn="get_proposer_head" fork="phase0" hash="15d44290">
|
||||
def get_proposer_head(store: Store, head_root: Root, slot: Slot) -> Root:
|
||||
head_block = store.blocks[head_root]
|
||||
parent_root = head_block.parent_root
|
||||
@@ -3008,10 +3084,7 @@
|
||||
head_weak = is_head_weak(store, head_root)
|
||||
|
||||
# Check that the missing votes are assigned to the parent and not being hoarded.
|
||||
parent_strong = is_parent_strong(store, head_root)
|
||||
|
||||
# Re-org more aggressively if there is a proposer equivocation in the previous slot.
|
||||
proposer_equivocation = is_proposer_equivocation(store, head_root)
|
||||
parent_strong = is_parent_strong(store, parent_root)
|
||||
|
||||
if all(
|
||||
[
|
||||
@@ -3027,8 +3100,6 @@
|
||||
):
|
||||
# We can re-org the current head by building upon its parent block.
|
||||
return parent_root
|
||||
elif all([head_weak, current_time_ok, proposer_equivocation]):
|
||||
return parent_root
|
||||
else:
|
||||
return head_root
|
||||
</spec>
|
||||
@@ -3046,10 +3117,11 @@
|
||||
- name: get_proposer_score
|
||||
sources: []
|
||||
spec: |
|
||||
<spec fn="get_proposer_score" fork="phase0" hash="2c8d8a27">
|
||||
<spec fn="get_proposer_score" fork="phase0" hash="164b8de0">
|
||||
def get_proposer_score(store: Store) -> Gwei:
|
||||
justified_checkpoint_state = store.checkpoint_states[store.justified_checkpoint]
|
||||
return compute_proposer_score(justified_checkpoint_state)
|
||||
committee_weight = get_total_active_balance(justified_checkpoint_state) // SLOTS_PER_EPOCH
|
||||
return (committee_weight * PROPOSER_SCORE_BOOST) // 100
|
||||
</spec>
|
||||
|
||||
- name: get_randao_mix
|
||||
@@ -3437,10 +3509,26 @@
|
||||
- file: beacon-chain/forkchoice/doubly-linked-tree/forkchoice.go
|
||||
search: func (f *ForkChoice) Weight(
|
||||
spec: |
|
||||
<spec fn="get_weight" fork="phase0" hash="b18bf25c">
|
||||
<spec fn="get_weight" fork="phase0" hash="f2e4e8ef">
|
||||
def get_weight(store: Store, root: Root) -> Gwei:
|
||||
state = store.checkpoint_states[store.justified_checkpoint]
|
||||
attestation_score = get_attestation_score(store, root, state)
|
||||
unslashed_and_active_indices = [
|
||||
i
|
||||
for i in get_active_validator_indices(state, get_current_epoch(state))
|
||||
if not state.validators[i].slashed
|
||||
]
|
||||
attestation_score = Gwei(
|
||||
sum(
|
||||
state.validators[i].effective_balance
|
||||
for i in unslashed_and_active_indices
|
||||
if (
|
||||
i in store.latest_messages
|
||||
and i not in store.equivocating_indices
|
||||
and get_ancestor(store, store.latest_messages[i].root, store.blocks[root].slot)
|
||||
== root
|
||||
)
|
||||
)
|
||||
)
|
||||
if store.proposer_boost_root == Root():
|
||||
# Return only attestation score if ``proposer_boost_root`` is not set
|
||||
return attestation_score
|
||||
@@ -3527,7 +3615,7 @@
|
||||
- file: beacon-chain/core/transition/state.go
|
||||
search: func GenesisBeaconState(
|
||||
spec: |
|
||||
<spec fn="initialize_beacon_state_from_eth1" fork="phase0" hash="d3a0ddd4">
|
||||
<spec fn="initialize_beacon_state_from_eth1" fork="phase0" hash="c69537d6">
|
||||
def initialize_beacon_state_from_eth1(
|
||||
eth1_block_hash: Hash32, eth1_timestamp: uint64, deposits: Sequence[Deposit]
|
||||
) -> BeaconState:
|
||||
@@ -3539,7 +3627,7 @@
|
||||
state = BeaconState(
|
||||
genesis_time=eth1_timestamp + GENESIS_DELAY,
|
||||
fork=fork,
|
||||
eth1_data=Eth1Data(deposit_count=uint64(len(deposits)), block_hash=eth1_block_hash),
|
||||
eth1_data=Eth1Data(block_hash=eth1_block_hash, deposit_count=uint64(len(deposits))),
|
||||
latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())),
|
||||
randao_mixes=[eth1_block_hash]
|
||||
* EPOCHS_PER_HISTORICAL_VECTOR, # Seed RANDAO with Eth1 entropy
|
||||
@@ -4074,11 +4162,10 @@
|
||||
- name: is_parent_strong
|
||||
sources: []
|
||||
spec: |
|
||||
<spec fn="is_parent_strong" fork="phase0" hash="02a3fd0b">
|
||||
def is_parent_strong(store: Store, root: Root) -> bool:
|
||||
<spec fn="is_parent_strong" fork="phase0" hash="e06641a8">
|
||||
def is_parent_strong(store: Store, parent_root: Root) -> bool:
|
||||
justified_state = store.checkpoint_states[store.justified_checkpoint]
|
||||
parent_threshold = calculate_committee_fraction(justified_state, REORG_PARENT_WEIGHT_THRESHOLD)
|
||||
parent_root = store.blocks[root].parent_root
|
||||
parent_weight = get_weight(store, parent_root)
|
||||
return parent_weight > parent_threshold
|
||||
</spec>
|
||||
@@ -4596,7 +4683,7 @@
|
||||
- file: beacon-chain/blockchain/receive_block.go
|
||||
search: func (s *Service) ReceiveBlock(
|
||||
spec: |
|
||||
<spec fn="on_block" fork="phase0" hash="5f45947a">
|
||||
<spec fn="on_block" fork="phase0" hash="aff24b59">
|
||||
def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
|
||||
block = signed_block.message
|
||||
# Parent block must be known
|
||||
@@ -4626,8 +4713,19 @@
|
||||
# Add new state for this block to the store
|
||||
store.block_states[block_root] = state
|
||||
|
||||
record_block_timeliness(store, block_root)
|
||||
update_proposer_boost_root(store, block_root)
|
||||
# Add block timeliness to the store
|
||||
seconds_since_genesis = store.time - store.genesis_time
|
||||
time_into_slot_ms = seconds_to_milliseconds(seconds_since_genesis) % SLOT_DURATION_MS
|
||||
epoch = get_current_store_epoch(store)
|
||||
attestation_threshold_ms = get_attestation_due_ms(epoch)
|
||||
is_before_attesting_interval = time_into_slot_ms < attestation_threshold_ms
|
||||
is_timely = get_current_slot(store) == block.slot and is_before_attesting_interval
|
||||
store.block_timeliness[hash_tree_root(block)] = is_timely
|
||||
|
||||
# Add proposer score boost if the block is timely and not conflicting with an existing block
|
||||
is_first_block = store.proposer_boost_root == Root()
|
||||
if is_timely and is_first_block:
|
||||
store.proposer_boost_root = hash_tree_root(block)
|
||||
|
||||
# Update checkpoints in store if necessary
|
||||
update_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint)
|
||||
@@ -4641,7 +4739,7 @@
|
||||
- file: beacon-chain/blockchain/receive_block.go
|
||||
search: func (s *Service) ReceiveBlock(
|
||||
spec: |
|
||||
<spec fn="on_block" fork="bellatrix" hash="e81d01c3">
|
||||
<spec fn="on_block" fork="bellatrix" hash="a3193d92">
|
||||
def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
|
||||
"""
|
||||
Run ``on_block`` upon receiving a new block.
|
||||
@@ -4682,8 +4780,19 @@
|
||||
# Add new state for this block to the store
|
||||
store.block_states[block_root] = state
|
||||
|
||||
record_block_timeliness(store, block_root)
|
||||
update_proposer_boost_root(store, block_root)
|
||||
# Add block timeliness to the store
|
||||
seconds_since_genesis = store.time - store.genesis_time
|
||||
time_into_slot_ms = seconds_to_milliseconds(seconds_since_genesis) % SLOT_DURATION_MS
|
||||
epoch = get_current_store_epoch(store)
|
||||
attestation_threshold_ms = get_attestation_due_ms(epoch)
|
||||
is_before_attesting_interval = time_into_slot_ms < attestation_threshold_ms
|
||||
is_timely = get_current_slot(store) == block.slot and is_before_attesting_interval
|
||||
store.block_timeliness[hash_tree_root(block)] = is_timely
|
||||
|
||||
# Add proposer score boost if the block is timely and not conflicting with an existing block
|
||||
is_first_block = store.proposer_boost_root == Root()
|
||||
if is_timely and is_first_block:
|
||||
store.proposer_boost_root = hash_tree_root(block)
|
||||
|
||||
# Update checkpoints in store if necessary
|
||||
update_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint)
|
||||
@@ -4697,7 +4806,7 @@
|
||||
- file: beacon-chain/blockchain/receive_block.go
|
||||
search: func (s *Service) ReceiveBlock(
|
||||
spec: |
|
||||
<spec fn="on_block" fork="capella" hash="7450531c">
|
||||
<spec fn="on_block" fork="capella" hash="560056ad">
|
||||
def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
|
||||
"""
|
||||
Run ``on_block`` upon receiving a new block.
|
||||
@@ -4730,8 +4839,19 @@
|
||||
# Add new state for this block to the store
|
||||
store.block_states[block_root] = state
|
||||
|
||||
record_block_timeliness(store, block_root)
|
||||
update_proposer_boost_root(store, block_root)
|
||||
# Add block timeliness to the store
|
||||
seconds_since_genesis = store.time - store.genesis_time
|
||||
time_into_slot_ms = seconds_to_milliseconds(seconds_since_genesis) % SLOT_DURATION_MS
|
||||
epoch = get_current_store_epoch(store)
|
||||
attestation_threshold_ms = get_attestation_due_ms(epoch)
|
||||
is_before_attesting_interval = time_into_slot_ms < attestation_threshold_ms
|
||||
is_timely = get_current_slot(store) == block.slot and is_before_attesting_interval
|
||||
store.block_timeliness[hash_tree_root(block)] = is_timely
|
||||
|
||||
# Add proposer score boost if the block is timely and not conflicting with an existing block
|
||||
is_first_block = store.proposer_boost_root == Root()
|
||||
if is_timely and is_first_block:
|
||||
store.proposer_boost_root = hash_tree_root(block)
|
||||
|
||||
# Update checkpoints in store if necessary
|
||||
update_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint)
|
||||
@@ -4745,7 +4865,7 @@
|
||||
- file: beacon-chain/blockchain/receive_block.go
|
||||
search: func (s *Service) ReceiveBlock(
|
||||
spec: |
|
||||
<spec fn="on_block" fork="deneb" hash="bbad196e">
|
||||
<spec fn="on_block" fork="deneb" hash="9565acee">
|
||||
def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
|
||||
"""
|
||||
Run ``on_block`` upon receiving a new block.
|
||||
@@ -4783,8 +4903,19 @@
|
||||
# Add new state for this block to the store
|
||||
store.block_states[block_root] = state
|
||||
|
||||
record_block_timeliness(store, block_root)
|
||||
update_proposer_boost_root(store, block_root)
|
||||
# Add block timeliness to the store
|
||||
seconds_since_genesis = store.time - store.genesis_time
|
||||
time_into_slot_ms = seconds_to_milliseconds(seconds_since_genesis) % SLOT_DURATION_MS
|
||||
epoch = get_current_store_epoch(store)
|
||||
attestation_threshold_ms = get_attestation_due_ms(epoch)
|
||||
is_before_attesting_interval = time_into_slot_ms < attestation_threshold_ms
|
||||
is_timely = get_current_slot(store) == block.slot and is_before_attesting_interval
|
||||
store.block_timeliness[hash_tree_root(block)] = is_timely
|
||||
|
||||
# Add proposer score boost if the block is timely and not conflicting with an existing block
|
||||
is_first_block = store.proposer_boost_root == Root()
|
||||
if is_timely and is_first_block:
|
||||
store.proposer_boost_root = hash_tree_root(block)
|
||||
|
||||
# Update checkpoints in store if necessary
|
||||
update_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint)
|
||||
@@ -4798,7 +4929,7 @@
|
||||
- file: beacon-chain/blockchain/receive_block.go
|
||||
search: func (s *Service) ReceiveBlock(
|
||||
spec: |
|
||||
<spec fn="on_block" fork="fulu" hash="b8f279b9">
|
||||
<spec fn="on_block" fork="fulu" hash="4f955de9">
|
||||
def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
|
||||
"""
|
||||
Run ``on_block`` upon receiving a new block.
|
||||
@@ -4836,8 +4967,19 @@
|
||||
# Add new state for this block to the store
|
||||
store.block_states[block_root] = state
|
||||
|
||||
record_block_timeliness(store, block_root)
|
||||
update_proposer_boost_root(store, block_root)
|
||||
# Add block timeliness to the store
|
||||
seconds_since_genesis = store.time - store.genesis_time
|
||||
time_into_slot_ms = seconds_to_milliseconds(seconds_since_genesis) % SLOT_DURATION_MS
|
||||
epoch = get_current_store_epoch(store)
|
||||
attestation_threshold_ms = get_attestation_due_ms(epoch)
|
||||
is_before_attesting_interval = time_into_slot_ms < attestation_threshold_ms
|
||||
is_timely = get_current_slot(store) == block.slot and is_before_attesting_interval
|
||||
store.block_timeliness[hash_tree_root(block)] = is_timely
|
||||
|
||||
# Add proposer score boost if the block is timely and not conflicting with an existing block
|
||||
is_first_block = store.proposer_boost_root == Root()
|
||||
if is_timely and is_first_block:
|
||||
store.proposer_boost_root = hash_tree_root(block)
|
||||
|
||||
# Update checkpoints in store if necessary
|
||||
update_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint)
|
||||
@@ -4932,7 +5074,7 @@
|
||||
- name: prepare_execution_payload#capella
|
||||
sources: []
|
||||
spec: |
|
||||
<spec fn="prepare_execution_payload" fork="capella" hash="c258893e">
|
||||
<spec fn="prepare_execution_payload" fork="capella" hash="28db1590">
|
||||
def prepare_execution_payload(
|
||||
state: BeaconState,
|
||||
safe_block_hash: Hash32,
|
||||
@@ -4945,15 +5087,12 @@
|
||||
parent_hash = state.latest_execution_payload_header.block_hash
|
||||
|
||||
# Set the forkchoice head and initiate the payload build process
|
||||
# [New in Capella]
|
||||
withdrawals, _ = get_expected_withdrawals(state)
|
||||
|
||||
payload_attributes = PayloadAttributes(
|
||||
timestamp=compute_time_at_slot(state, state.slot),
|
||||
prev_randao=get_randao_mix(state, get_current_epoch(state)),
|
||||
suggested_fee_recipient=suggested_fee_recipient,
|
||||
# [New in Capella]
|
||||
withdrawals=withdrawals,
|
||||
withdrawals=get_expected_withdrawals(state),
|
||||
)
|
||||
return execution_engine.notify_forkchoice_updated(
|
||||
head_block_hash=parent_hash,
|
||||
@@ -4966,7 +5105,7 @@
|
||||
- name: prepare_execution_payload#deneb
|
||||
sources: []
|
||||
spec: |
|
||||
<spec fn="prepare_execution_payload" fork="deneb" hash="59f61f3a">
|
||||
<spec fn="prepare_execution_payload" fork="deneb" hash="f3387ec6">
|
||||
def prepare_execution_payload(
|
||||
state: BeaconState,
|
||||
safe_block_hash: Hash32,
|
||||
@@ -4978,13 +5117,11 @@
|
||||
parent_hash = state.latest_execution_payload_header.block_hash
|
||||
|
||||
# Set the forkchoice head and initiate the payload build process
|
||||
withdrawals, _ = get_expected_withdrawals(state)
|
||||
|
||||
payload_attributes = PayloadAttributes(
|
||||
timestamp=compute_time_at_slot(state, state.slot),
|
||||
prev_randao=get_randao_mix(state, get_current_epoch(state)),
|
||||
suggested_fee_recipient=suggested_fee_recipient,
|
||||
withdrawals=withdrawals,
|
||||
withdrawals=get_expected_withdrawals(state),
|
||||
# [New in Deneb:EIP4788]
|
||||
parent_beacon_block_root=hash_tree_root(state.latest_block_header),
|
||||
)
|
||||
@@ -4999,7 +5136,7 @@
|
||||
- name: prepare_execution_payload#electra
|
||||
sources: []
|
||||
spec: |
|
||||
<spec fn="prepare_execution_payload" fork="electra" hash="5414b883">
|
||||
<spec fn="prepare_execution_payload" fork="electra" hash="567b3739">
|
||||
def prepare_execution_payload(
|
||||
state: BeaconState,
|
||||
safe_block_hash: Hash32,
|
||||
@@ -5012,7 +5149,7 @@
|
||||
|
||||
# [Modified in EIP7251]
|
||||
# Set the forkchoice head and initiate the payload build process
|
||||
withdrawals, _, _ = get_expected_withdrawals(state)
|
||||
withdrawals, _ = get_expected_withdrawals(state)
|
||||
|
||||
payload_attributes = PayloadAttributes(
|
||||
timestamp=compute_time_at_slot(state, state.slot),
|
||||
@@ -5034,7 +5171,7 @@
- file: beacon-chain/core/blocks/attestation.go
search: func ProcessAttestationNoVerifySignature(
spec: |
<spec fn="process_attestation" fork="phase0" hash="d8e86aa9">
<spec fn="process_attestation" fork="phase0" hash="6ac78cd0">
def process_attestation(state: BeaconState, attestation: Attestation) -> None:
    data = attestation.data
    assert data.target.epoch in (get_previous_epoch(state), get_current_epoch(state))
@@ -5046,8 +5183,8 @@
    assert len(attestation.aggregation_bits) == len(committee)

    pending_attestation = PendingAttestation(
        aggregation_bits=attestation.aggregation_bits,
        data=data,
        aggregation_bits=attestation.aggregation_bits,
        inclusion_delay=state.slot - data.slot,
        proposer_index=get_beacon_proposer_index(state),
    )
@@ -7071,18 +7208,31 @@
- file: beacon-chain/core/blocks/withdrawals.go
search: func ProcessWithdrawals(
spec: |
<spec fn="process_withdrawals" fork="capella" hash="901f9fc4">
<spec fn="process_withdrawals" fork="capella" hash="ed6a9c5a">
def process_withdrawals(state: BeaconState, payload: ExecutionPayload) -> None:
    # Get expected withdrawals
    withdrawals, processed_validators_sweep_count = get_expected_withdrawals(state)
    assert payload.withdrawals == withdrawals
    expected_withdrawals = get_expected_withdrawals(state)
    assert payload.withdrawals == expected_withdrawals

    # Apply expected withdrawals
    apply_withdrawals(state, withdrawals)
    for withdrawal in expected_withdrawals:
        decrease_balance(state, withdrawal.validator_index, withdrawal.amount)

    # Update withdrawals fields in the state
    update_next_withdrawal_index(state, withdrawals)
    update_next_withdrawal_validator_index(state, processed_validators_sweep_count)
    # Update the next withdrawal index if this block contained withdrawals
    if len(expected_withdrawals) != 0:
        latest_withdrawal = expected_withdrawals[-1]
        state.next_withdrawal_index = WithdrawalIndex(latest_withdrawal.index + 1)

    # Update the next validator index to start the next withdrawal sweep
    if len(expected_withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD:
        # Next sweep starts after the latest withdrawal's validator index
        next_validator_index = ValidatorIndex(
            (expected_withdrawals[-1].validator_index + 1) % len(state.validators)
        )
        state.next_withdrawal_validator_index = next_validator_index
    else:
        # Advance sweep by the max length of the sweep if there was not a full set of withdrawals
        next_index = state.next_withdrawal_validator_index + MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP
        next_validator_index = ValidatorIndex(next_index % len(state.validators))
        state.next_withdrawal_validator_index = next_validator_index
</spec>

- name: process_withdrawals#electra
@@ -7090,23 +7240,39 @@
- file: beacon-chain/core/blocks/withdrawals.go
search: func ProcessWithdrawals(
spec: |
<spec fn="process_withdrawals" fork="electra" hash="67870972">
<spec fn="process_withdrawals" fork="electra" hash="dd99a91f">
def process_withdrawals(state: BeaconState, payload: ExecutionPayload) -> None:
    # [Modified in Electra:EIP7251]
    # Get expected withdrawals
    withdrawals, processed_partial_withdrawals_count, processed_validators_sweep_count = (
        get_expected_withdrawals(state)
    )
    assert payload.withdrawals == withdrawals
    expected_withdrawals, processed_partial_withdrawals_count = get_expected_withdrawals(state)

    # Apply expected withdrawals
    apply_withdrawals(state, withdrawals)
    assert payload.withdrawals == expected_withdrawals

    for withdrawal in expected_withdrawals:
        decrease_balance(state, withdrawal.validator_index, withdrawal.amount)

    # Update withdrawals fields in the state
    update_next_withdrawal_index(state, withdrawals)
    # [New in Electra:EIP7251]
    update_pending_partial_withdrawals(state, processed_partial_withdrawals_count)
    update_next_withdrawal_validator_index(state, processed_validators_sweep_count)
    # Update pending partial withdrawals
    state.pending_partial_withdrawals = state.pending_partial_withdrawals[
        processed_partial_withdrawals_count:
    ]

    # Update the next withdrawal index if this block contained withdrawals
    if len(expected_withdrawals) != 0:
        latest_withdrawal = expected_withdrawals[-1]
        state.next_withdrawal_index = WithdrawalIndex(latest_withdrawal.index + 1)

    # Update the next validator index to start the next withdrawal sweep
    if len(expected_withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD:
        # Next sweep starts after the latest withdrawal's validator index
        next_validator_index = ValidatorIndex(
            (expected_withdrawals[-1].validator_index + 1) % len(state.validators)
        )
        state.next_withdrawal_validator_index = next_validator_index
    else:
        # Advance sweep by the max length of the sweep if there was not a full set of withdrawals
        next_index = state.next_withdrawal_validator_index + MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP
        next_validator_index = ValidatorIndex(next_index % len(state.validators))
        state.next_withdrawal_validator_index = next_validator_index
</spec>

- name: queue_excess_active_balance
@@ -7137,7 +7303,7 @@
- name: recover_matrix
sources: []
spec: |
<spec fn="recover_matrix" fork="fulu" hash="3db21f50">
<spec fn="recover_matrix" fork="fulu" hash="9b01f005">
def recover_matrix(
    partial_matrix: Sequence[MatrixEntry], blob_count: uint64
) -> Sequence[MatrixEntry]:
@@ -7157,8 +7323,8 @@
            MatrixEntry(
                cell=cell,
                kzg_proof=proof,
                column_index=cell_index,
                row_index=blob_index,
                column_index=cell_index,
            )
        )
    return matrix
@@ -7207,7 +7373,7 @@
- file: beacon-chain/forkchoice/ro.go
search: func (ro *ROForkChoice) ShouldOverrideFCU(
spec: |
<spec fn="should_override_forkchoice_update" fork="bellatrix" hash="c055d92a">
<spec fn="should_override_forkchoice_update" fork="bellatrix" hash="9a8043af">
def should_override_forkchoice_update(store: Store, head_root: Root) -> bool:
    head_block = store.blocks[head_root]
    parent_root = head_block.parent_root
@@ -7248,7 +7414,7 @@
    # `store.time` early, or by counting queued attestations during the head block's slot.
    if current_slot > head_block.slot:
        head_weak = is_head_weak(store, head_root)
        parent_strong = is_parent_strong(store, head_root)
        parent_strong = is_parent_strong(store, parent_root)
    else:
        head_weak = True
        parent_strong = True

@@ -278,6 +278,7 @@ go_test(
        "//testing/spectest/shared/fulu/rewards:go_default_library",
        "//testing/spectest/shared/fulu/sanity:go_default_library",
        "//testing/spectest/shared/fulu/ssz_static:go_default_library",
        "//testing/spectest/shared/gloas/ssz_static:go_default_library",
        "//testing/spectest/shared/phase0/epoch_processing:go_default_library",
        "//testing/spectest/shared/phase0/finality:go_default_library",
        "//testing/spectest/shared/phase0/operations:go_default_library",

@@ -2,9 +2,10 @@ package mainnet

import (
    "testing"

    "github.com/OffchainLabs/prysm/v7/testing/spectest/shared/gloas/ssz_static"
)

func TestMainnet_Gloas_SSZStatic(t *testing.T) {
    t.Skip("Gloas is not implemented")
    // ssz_static.RunSSZStaticTests(t, "mainnet")
    ssz_static.RunSSZStaticTests(t, "mainnet")
}

@@ -288,6 +288,7 @@ go_test(
        "//testing/spectest/shared/fulu/rewards:go_default_library",
        "//testing/spectest/shared/fulu/sanity:go_default_library",
        "//testing/spectest/shared/fulu/ssz_static:go_default_library",
        "//testing/spectest/shared/gloas/ssz_static:go_default_library",
        "//testing/spectest/shared/phase0/epoch_processing:go_default_library",
        "//testing/spectest/shared/phase0/finality:go_default_library",
        "//testing/spectest/shared/phase0/operations:go_default_library",

@@ -2,9 +2,10 @@ package minimal

import (
    "testing"

    "github.com/OffchainLabs/prysm/v7/testing/spectest/shared/gloas/ssz_static"
)

func TestMinimal_Gloas_SSZStatic(t *testing.T) {
    t.Skip("Gloas is not implemented")
    // ssz_static.RunSSZStaticTests(t, "minimal")
    ssz_static.RunSSZStaticTests(t, "minimal")
}

@@ -62,17 +62,8 @@ func runTest(t *testing.T, config string, fork int, basePath string) { // nolint
    if len(testFolders) == 0 {
        t.Fatalf("No test folders found for %s/%s/%s", config, version.String(fork), folderPath)
    }
    var skipTests = map[string]bool{
        // Skipping because of #4807 backporting issues
        "voting_source_beyond_two_epoch": true,
        "justified_update_always_if_better": true,
        "justified_update_not_realized_finality": true,
    }

    for _, folder := range testFolders {
        if skipTests[folder.Name()] {
            t.Logf("Skipping test %s due to known issues", folder.Name())
            continue
        }
        t.Run(folder.Name(), func(t *testing.T) {
            helpers.ClearCache()
            preStepsFile, err := util.BazelFileBytes(testsFolderPath, folder.Name(), "steps.yaml")