Compare commits

..

3 Commits

Author        SHA1         Message                                          Date
terence       8011396a48   James feedback 2                                 2026-02-04 17:17:15 -08:00
terence       d9dae87c74   James feedback 1                                 2026-02-04 08:10:03 -08:00
terence tsao  f75c3082d5   gloas: implement modified process withdrawals    2026-02-01 19:41:59 -08:00
190 changed files with 3247 additions and 7545 deletions

View File

@@ -12,11 +12,11 @@ jobs:
- name: Check version consistency
run: |
WORKSPACE_VERSION=$(grep 'consensus_spec_version = ' WORKSPACE | sed 's/.*"\(.*\)"/\1/')
ETHSPECIFY_VERSION=$(grep '^version:' .ethspecify.yml | sed 's/version: //')
ETHSPECIFY_VERSION=$(grep '^version:' specrefs/.ethspecify.yml | sed 's/version: //')
if [ "$WORKSPACE_VERSION" != "$ETHSPECIFY_VERSION" ]; then
echo "Version mismatch between WORKSPACE and ethspecify"
echo " WORKSPACE: $WORKSPACE_VERSION"
echo " .ethspecify.yml: $ETHSPECIFY_VERSION"
echo " specrefs/.ethspecify.yml: $ETHSPECIFY_VERSION"
exit 1
else
echo "Versions match: $WORKSPACE_VERSION"
@@ -26,7 +26,7 @@ jobs:
run: python3 -mpip install ethspecify
- name: Update spec references
run: ethspecify
run: ethspecify process --path=specrefs
- name: Check for differences
run: |
@@ -40,4 +40,4 @@ jobs:
fi
- name: Check spec references
run: ethspecify check
run: ethspecify check --path=specrefs

View File

@@ -2,7 +2,7 @@ name: Go
on:
push:
branches: [ master, develop ]
branches: [ master ]
pull_request:
branches: [ '*' ]
merge_group:

View File

@@ -33,8 +33,9 @@ formatters:
generated: lax
paths:
- validator/web/site_data.go
- .*_test.go
- proto
- tools/analyzers
- third_party$
- builtin$
- examples$
- examples$

View File

@@ -1,19 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"fallback.go",
"log.go",
],
importpath = "github.com/OffchainLabs/prysm/v7/api/fallback",
visibility = ["//visibility:public"],
deps = ["@com_github_sirupsen_logrus//:go_default_library"],
)
go_test(
name = "go_default_test",
srcs = ["fallback_test.go"],
embed = [":go_default_library"],
deps = ["//testing/assert:go_default_library"],
)

View File

@@ -1,66 +0,0 @@
package fallback
import (
"context"
"github.com/sirupsen/logrus"
)
// HostProvider is the subset of connection-provider methods that EnsureReady
// needs. Both grpc.GrpcConnectionProvider and rest.RestConnectionProvider
// satisfy this interface.
type HostProvider interface {
Hosts() []string
CurrentHost() string
SwitchHost(index int) error
}
// ReadyChecker can report whether the current endpoint is ready.
// iface.NodeClient satisfies this implicitly.
type ReadyChecker interface {
IsReady(ctx context.Context) bool
}
// EnsureReady iterates through the configured hosts and returns true as soon as
// one responds as ready. It starts from the provider's current host and wraps
// around using modular arithmetic, performing failover when a host is not ready.
func EnsureReady(ctx context.Context, provider HostProvider, checker ReadyChecker) bool {
hosts := provider.Hosts()
numHosts := len(hosts)
startingHost := provider.CurrentHost()
var attemptedHosts []string
// Find current index
currentIdx := 0
for i, h := range hosts {
if h == startingHost {
currentIdx = i
break
}
}
for i := range numHosts {
if checker.IsReady(ctx) {
if len(attemptedHosts) > 0 {
log.WithFields(logrus.Fields{
"previous": startingHost,
"current": provider.CurrentHost(),
"tried": attemptedHosts,
}).Info("Switched to responsive beacon node")
}
return true
}
attemptedHosts = append(attemptedHosts, provider.CurrentHost())
// Try next host if not the last iteration
if i < numHosts-1 {
nextIdx := (currentIdx + i + 1) % numHosts
if err := provider.SwitchHost(nextIdx); err != nil {
log.WithError(err).Error("Failed to switch host")
}
}
}
log.WithField("tried", attemptedHosts).Warn("No responsive beacon node found")
return false
}
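
A minimal sketch of how EnsureReady is wired up, assuming the rest.RestConnectionProvider that appears later in this compare as the HostProvider; the readyChecker type below is a hypothetical stand-in for anything exposing IsReady, such as the iface.NodeClient mentioned above.

package main

import (
	"context"
	"log"
	"time"

	"github.com/OffchainLabs/prysm/v7/api/fallback"
	"github.com/OffchainLabs/prysm/v7/api/rest"
)

// readyChecker is a hypothetical stand-in for iface.NodeClient; anything with
// an IsReady(ctx) bool method satisfies fallback.ReadyChecker.
type readyChecker struct{}

func (readyChecker) IsReady(_ context.Context) bool { return true }

func main() {
	// rest.RestConnectionProvider exposes Hosts, CurrentHost, and SwitchHost,
	// so it satisfies fallback.HostProvider.
	provider, err := rest.NewRestConnectionProvider(
		"http://host1:3500,http://host2:3500",
		rest.WithHttpTimeout(10*time.Second),
	)
	if err != nil {
		log.Fatal(err)
	}
	if !fallback.EnsureReady(context.Background(), provider, readyChecker{}) {
		// Every configured endpoint failed the readiness probe; back off and retry.
		log.Println("no responsive beacon node found")
	}
}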

View File

@@ -1,94 +0,0 @@
package fallback
import (
"context"
"testing"
"github.com/OffchainLabs/prysm/v7/testing/assert"
)
// mockHostProvider is a minimal HostProvider for unit tests.
type mockHostProvider struct {
hosts []string
hostIndex int
}
func (m *mockHostProvider) Hosts() []string { return m.hosts }
func (m *mockHostProvider) CurrentHost() string {
return m.hosts[m.hostIndex%len(m.hosts)]
}
func (m *mockHostProvider) SwitchHost(index int) error { m.hostIndex = index; return nil }
// mockReadyChecker records per-call IsReady results in sequence.
type mockReadyChecker struct {
results []bool
idx int
}
func (m *mockReadyChecker) IsReady(_ context.Context) bool {
if m.idx >= len(m.results) {
return false
}
r := m.results[m.idx]
m.idx++
return r
}
func TestEnsureReady_SingleHostReady(t *testing.T) {
provider := &mockHostProvider{hosts: []string{"http://host1:3500"}, hostIndex: 0}
checker := &mockReadyChecker{results: []bool{true}}
assert.Equal(t, true, EnsureReady(t.Context(), provider, checker))
assert.Equal(t, 0, provider.hostIndex)
}
func TestEnsureReady_SingleHostNotReady(t *testing.T) {
provider := &mockHostProvider{hosts: []string{"http://host1:3500"}, hostIndex: 0}
checker := &mockReadyChecker{results: []bool{false}}
assert.Equal(t, false, EnsureReady(t.Context(), provider, checker))
}
func TestEnsureReady_SingleHostError(t *testing.T) {
provider := &mockHostProvider{hosts: []string{"http://host1:3500"}, hostIndex: 0}
checker := &mockReadyChecker{results: []bool{false}}
assert.Equal(t, false, EnsureReady(t.Context(), provider, checker))
}
func TestEnsureReady_MultipleHostsFirstReady(t *testing.T) {
provider := &mockHostProvider{
hosts: []string{"http://host1:3500", "http://host2:3500"},
hostIndex: 0,
}
checker := &mockReadyChecker{results: []bool{true}}
assert.Equal(t, true, EnsureReady(t.Context(), provider, checker))
assert.Equal(t, 0, provider.hostIndex)
}
func TestEnsureReady_MultipleHostsFailoverToSecond(t *testing.T) {
provider := &mockHostProvider{
hosts: []string{"http://host1:3500", "http://host2:3500"},
hostIndex: 0,
}
checker := &mockReadyChecker{results: []bool{false, true}}
assert.Equal(t, true, EnsureReady(t.Context(), provider, checker))
assert.Equal(t, 1, provider.hostIndex)
}
func TestEnsureReady_MultipleHostsNoneReady(t *testing.T) {
provider := &mockHostProvider{
hosts: []string{"http://host1:3500", "http://host2:3500", "http://host3:3500"},
hostIndex: 0,
}
checker := &mockReadyChecker{results: []bool{false, false, false}}
assert.Equal(t, false, EnsureReady(t.Context(), provider, checker))
}
func TestEnsureReady_WrapAroundFromNonZeroIndex(t *testing.T) {
provider := &mockHostProvider{
hosts: []string{"http://host0:3500", "http://host1:3500", "http://host2:3500"},
hostIndex: 1,
}
// host1 (start) fails, host2 fails, host0 succeeds
checker := &mockReadyChecker{results: []bool{false, false, true}}
assert.Equal(t, true, EnsureReady(t.Context(), provider, checker))
assert.Equal(t, 0, provider.hostIndex)
}

View File

@@ -1,9 +0,0 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package fallback
import "github.com/sirupsen/logrus"
// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "api/fallback")

View File

@@ -3,16 +3,13 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"grpc_connection_provider.go",
"grpcutils.go",
"log.go",
"mock_grpc_provider.go",
"parameters.go",
],
importpath = "github.com/OffchainLabs/prysm/v7/api/grpc",
visibility = ["//visibility:public"],
deps = [
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@org_golang_google_grpc//:go_default_library",
"@org_golang_google_grpc//metadata:go_default_library",
@@ -21,17 +18,12 @@ go_library(
go_test(
name = "go_default_test",
srcs = [
"grpc_connection_provider_test.go",
"grpcutils_test.go",
],
srcs = ["grpcutils_test.go"],
embed = [":go_default_library"],
deps = [
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
"@org_golang_google_grpc//:go_default_library",
"@org_golang_google_grpc//credentials/insecure:go_default_library",
"@org_golang_google_grpc//metadata:go_default_library",
],
)

View File

@@ -1,186 +0,0 @@
package grpc
import (
"context"
"strings"
"sync"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"google.golang.org/grpc"
)
// GrpcConnectionProvider manages gRPC connections for failover support.
// It allows switching between different beacon node endpoints when the current one becomes unavailable.
// Only one connection is maintained at a time - when switching hosts, the old connection is closed.
type GrpcConnectionProvider interface {
// CurrentConn returns the currently active gRPC connection.
// The connection is created lazily on first call.
// Returns nil if the provider has been closed.
CurrentConn() *grpc.ClientConn
// CurrentHost returns the address of the currently active endpoint.
CurrentHost() string
// Hosts returns all configured endpoint addresses.
Hosts() []string
// SwitchHost switches to the endpoint at the given index.
// The new connection is created lazily on next CurrentConn() call.
SwitchHost(index int) error
// ConnectionCounter returns a monotonically increasing counter that increments
// each time SwitchHost changes the active endpoint. This allows consumers to
// detect connection changes even when the host string returns to a previous value
// (e.g., host0 → host1 → host0).
ConnectionCounter() uint64
// Close closes the current connection.
Close()
}
type grpcConnectionProvider struct {
// Immutable after construction - no lock needed for reads
endpoints []string
ctx context.Context
dialOpts []grpc.DialOption
// Current connection state (protected by mutex)
currentIndex uint64
conn *grpc.ClientConn
connCounter uint64
mu sync.Mutex
closed bool
}
// NewGrpcConnectionProvider creates a new connection provider that manages gRPC connections.
// The endpoint parameter can be a comma-separated list of addresses (e.g., "host1:4000,host2:4000").
// Only one connection is maintained at a time, created lazily on first use.
func NewGrpcConnectionProvider(
ctx context.Context,
endpoint string,
dialOpts []grpc.DialOption,
) (GrpcConnectionProvider, error) {
endpoints := parseEndpoints(endpoint)
if len(endpoints) == 0 {
return nil, errors.New("no gRPC endpoints provided")
}
log.WithFields(logrus.Fields{
"endpoints": endpoints,
"count": len(endpoints),
}).Info("Initialized gRPC connection provider")
return &grpcConnectionProvider{
endpoints: endpoints,
ctx: ctx,
dialOpts: dialOpts,
}, nil
}
// parseEndpoints splits a comma-separated endpoint string into individual endpoints.
func parseEndpoints(endpoint string) []string {
if endpoint == "" {
return nil
}
endpoints := make([]string, 0, 1)
for p := range strings.SplitSeq(endpoint, ",") {
if p = strings.TrimSpace(p); p != "" {
endpoints = append(endpoints, p)
}
}
return endpoints
}
func (p *grpcConnectionProvider) CurrentConn() *grpc.ClientConn {
p.mu.Lock()
defer p.mu.Unlock()
if p.closed {
return nil
}
// Return existing connection if available
if p.conn != nil {
return p.conn
}
// Create connection lazily
ep := p.endpoints[p.currentIndex]
conn, err := grpc.DialContext(p.ctx, ep, p.dialOpts...)
if err != nil {
log.WithError(err).WithField("endpoint", ep).Error("Failed to create gRPC connection")
return nil
}
p.conn = conn
log.WithField("endpoint", ep).Debug("Created gRPC connection")
return conn
}
func (p *grpcConnectionProvider) CurrentHost() string {
p.mu.Lock()
defer p.mu.Unlock()
return p.endpoints[p.currentIndex]
}
func (p *grpcConnectionProvider) Hosts() []string {
// Return a copy to maintain immutability
hosts := make([]string, len(p.endpoints))
copy(hosts, p.endpoints)
return hosts
}
func (p *grpcConnectionProvider) SwitchHost(index int) error {
if index < 0 || index >= len(p.endpoints) {
return errors.Errorf("invalid host index %d, must be between 0 and %d", index, len(p.endpoints)-1)
}
p.mu.Lock()
defer p.mu.Unlock()
if uint64(index) == p.currentIndex {
return nil // Already on this host
}
oldHost := p.endpoints[p.currentIndex]
oldConn := p.conn
p.conn = nil // Clear immediately - new connection created lazily
p.currentIndex = uint64(index)
p.connCounter++
// Close old connection asynchronously to avoid blocking the caller
if oldConn != nil {
go func() {
if err := oldConn.Close(); err != nil {
log.WithError(err).WithField("endpoint", oldHost).Debug("Failed to close previous connection")
}
}()
}
log.WithFields(logrus.Fields{
"previousHost": oldHost,
"newHost": p.endpoints[index],
}).Debug("Switched gRPC endpoint")
return nil
}
func (p *grpcConnectionProvider) ConnectionCounter() uint64 {
p.mu.Lock()
defer p.mu.Unlock()
return p.connCounter
}
func (p *grpcConnectionProvider) Close() {
p.mu.Lock()
defer p.mu.Unlock()
if p.closed {
return
}
p.closed = true
if p.conn != nil {
if err := p.conn.Close(); err != nil {
log.WithError(err).WithField("endpoint", p.endpoints[p.currentIndex]).Debug("Failed to close gRPC connection")
}
p.conn = nil
}
}
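
A short usage sketch of the connection provider above, relying only on the constructor and interface shown here; the endpoint addresses are illustrative.

package main

import (
	"context"
	"log"

	prysmgrpc "github.com/OffchainLabs/prysm/v7/api/grpc"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	dialOpts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
	provider, err := prysmgrpc.NewGrpcConnectionProvider(context.Background(), "host1:4000,host2:4000", dialOpts)
	if err != nil {
		log.Fatal(err)
	}
	defer provider.Close()

	// The connection is dialed lazily on first use.
	if conn := provider.CurrentConn(); conn == nil {
		log.Fatal("could not create gRPC connection")
	}

	// Fail over to the second endpoint: the old connection is closed in the
	// background and a new one is dialed on the next CurrentConn call.
	if err := provider.SwitchHost(1); err != nil {
		log.Println(err)
	}
	log.Println(provider.CurrentHost(), provider.ConnectionCounter())
}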

View File

@@ -1,207 +0,0 @@
package grpc
import (
"context"
"net"
"reflect"
"strings"
"testing"
"github.com/OffchainLabs/prysm/v7/testing/assert"
"github.com/OffchainLabs/prysm/v7/testing/require"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
)
func TestParseEndpoints(t *testing.T) {
tests := []struct {
name string
input string
expected []string
}{
{"single endpoint", "localhost:4000", []string{"localhost:4000"}},
{"multiple endpoints", "host1:4000,host2:4000,host3:4000", []string{"host1:4000", "host2:4000", "host3:4000"}},
{"endpoints with spaces", "host1:4000, host2:4000 , host3:4000", []string{"host1:4000", "host2:4000", "host3:4000"}},
{"empty string", "", nil},
{"only commas", ",,,", []string{}},
{"trailing comma", "host1:4000,host2:4000,", []string{"host1:4000", "host2:4000"}},
{"leading comma", ",host1:4000,host2:4000", []string{"host1:4000", "host2:4000"}},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := parseEndpoints(tt.input)
if !reflect.DeepEqual(tt.expected, got) {
t.Errorf("parseEndpoints(%q) = %v, want %v", tt.input, got, tt.expected)
}
})
}
}
func TestNewGrpcConnectionProvider_Errors(t *testing.T) {
t.Run("no endpoints", func(t *testing.T) {
dialOpts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
_, err := NewGrpcConnectionProvider(context.Background(), "", dialOpts)
require.ErrorContains(t, "no gRPC endpoints provided", err)
})
}
func TestGrpcConnectionProvider_LazyConnection(t *testing.T) {
// Start only one server but configure provider with two endpoints
lis, err := net.Listen("tcp", "127.0.0.1:0")
require.NoError(t, err)
server := grpc.NewServer()
go func() { _ = server.Serve(lis) }()
defer server.Stop()
validAddr := lis.Addr().String()
invalidAddr := "127.0.0.1:1" // Port 1 is unlikely to be listening
// Provider should succeed even though second endpoint is invalid (lazy connections)
endpoint := validAddr + "," + invalidAddr
ctx := context.Background()
dialOpts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
provider, err := NewGrpcConnectionProvider(ctx, endpoint, dialOpts)
require.NoError(t, err, "Provider creation should succeed with lazy connections")
defer func() { provider.Close() }()
// First endpoint should work
conn := provider.CurrentConn()
assert.NotNil(t, conn, "First connection should be created lazily")
}
func TestGrpcConnectionProvider_SingleConnectionModel(t *testing.T) {
// Create provider with 3 endpoints
var addrs []string
var servers []*grpc.Server
for range 3 {
lis, err := net.Listen("tcp", "127.0.0.1:0")
require.NoError(t, err)
server := grpc.NewServer()
go func() { _ = server.Serve(lis) }()
addrs = append(addrs, lis.Addr().String())
servers = append(servers, server)
}
defer func() {
for _, s := range servers {
s.Stop()
}
}()
endpoint := strings.Join(addrs, ",")
ctx := context.Background()
dialOpts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
provider, err := NewGrpcConnectionProvider(ctx, endpoint, dialOpts)
require.NoError(t, err)
defer func() { provider.Close() }()
// Access the internal state to verify single connection behavior
p := provider.(*grpcConnectionProvider)
// Initially no connection
p.mu.Lock()
assert.Equal(t, (*grpc.ClientConn)(nil), p.conn, "Connection should be nil before access")
p.mu.Unlock()
// Access connection - should create one
conn0 := provider.CurrentConn()
assert.NotNil(t, conn0)
p.mu.Lock()
assert.NotNil(t, p.conn, "Connection should be created after CurrentConn()")
firstConn := p.conn
p.mu.Unlock()
// Call CurrentConn again - should return same connection
conn0Again := provider.CurrentConn()
assert.Equal(t, conn0, conn0Again, "Should return same connection")
// Switch to different host - old connection should be closed, new one created lazily
require.NoError(t, provider.SwitchHost(1))
p.mu.Lock()
assert.Equal(t, (*grpc.ClientConn)(nil), p.conn, "Connection should be nil after SwitchHost (lazy)")
p.mu.Unlock()
// Get new connection
conn1 := provider.CurrentConn()
assert.NotNil(t, conn1)
assert.NotEqual(t, firstConn, conn1, "Should be a different connection after switching hosts")
}
// testProvider creates a provider with n test servers and returns cleanup function.
func testProvider(t *testing.T, n int) (GrpcConnectionProvider, []string, func()) {
var addrs []string
var cleanups []func()
for range n {
lis, err := net.Listen("tcp", "127.0.0.1:0")
require.NoError(t, err)
server := grpc.NewServer()
go func() { _ = server.Serve(lis) }()
addrs = append(addrs, lis.Addr().String())
cleanups = append(cleanups, server.Stop)
}
endpoint := strings.Join(addrs, ",")
ctx := context.Background()
dialOpts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
provider, err := NewGrpcConnectionProvider(ctx, endpoint, dialOpts)
require.NoError(t, err)
cleanup := func() {
provider.Close()
for _, c := range cleanups {
c()
}
}
return provider, addrs, cleanup
}
func TestGrpcConnectionProvider(t *testing.T) {
provider, addrs, cleanup := testProvider(t, 3)
defer cleanup()
t.Run("initial state", func(t *testing.T) {
assert.Equal(t, 3, len(provider.Hosts()))
assert.Equal(t, addrs[0], provider.CurrentHost())
assert.NotNil(t, provider.CurrentConn())
})
t.Run("SwitchHost", func(t *testing.T) {
require.NoError(t, provider.SwitchHost(1))
assert.Equal(t, addrs[1], provider.CurrentHost())
assert.NotNil(t, provider.CurrentConn()) // New connection created lazily
require.NoError(t, provider.SwitchHost(0))
assert.Equal(t, addrs[0], provider.CurrentHost())
require.ErrorContains(t, "invalid host index", provider.SwitchHost(-1))
require.ErrorContains(t, "invalid host index", provider.SwitchHost(3))
})
t.Run("SwitchHost circular", func(t *testing.T) {
// Test round-robin style switching using SwitchHost with manual index
indices := []int{1, 2, 0, 1} // Simulate circular switching
for i, idx := range indices {
require.NoError(t, provider.SwitchHost(idx))
assert.Equal(t, addrs[idx], provider.CurrentHost(), "iteration %d", i)
}
})
t.Run("Hosts returns copy", func(t *testing.T) {
hosts := provider.Hosts()
original := hosts[0]
hosts[0] = "modified"
assert.Equal(t, original, provider.Hosts()[0])
})
}
func TestGrpcConnectionProvider_Close(t *testing.T) {
provider, _, cleanup := testProvider(t, 1)
defer cleanup()
assert.NotNil(t, provider.CurrentConn())
provider.Close()
assert.Equal(t, (*grpc.ClientConn)(nil), provider.CurrentConn())
provider.Close() // Double close is safe
}

View File

@@ -1,27 +0,0 @@
package grpc
import "google.golang.org/grpc"
// MockGrpcProvider implements GrpcConnectionProvider for testing.
type MockGrpcProvider struct {
MockConn *grpc.ClientConn
MockHosts []string
CurrentIndex int
ConnCounter uint64
}
func (m *MockGrpcProvider) CurrentConn() *grpc.ClientConn { return m.MockConn }
func (m *MockGrpcProvider) CurrentHost() string {
if len(m.MockHosts) > 0 {
return m.MockHosts[m.CurrentIndex]
}
return ""
}
func (m *MockGrpcProvider) Hosts() []string { return m.MockHosts }
func (m *MockGrpcProvider) SwitchHost(idx int) error {
m.CurrentIndex = idx
m.ConnCounter++
return nil
}
func (m *MockGrpcProvider) ConnectionCounter() uint64 { return m.ConnCounter }
func (m *MockGrpcProvider) Close() {}

View File

@@ -1,34 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"log.go",
"mock_rest_provider.go",
"rest_connection_provider.go",
"rest_handler.go",
],
importpath = "github.com/OffchainLabs/prysm/v7/api/rest",
visibility = ["//visibility:public"],
deps = [
"//api:go_default_library",
"//api/apiutil:go_default_library",
"//api/client:go_default_library",
"//config/params:go_default_library",
"//network/httputil:go_default_library",
"//runtime/version:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opentelemetry_go_contrib_instrumentation_net_http_otelhttp//:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["rest_connection_provider_test.go"],
embed = [":go_default_library"],
deps = [
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
],
)

View File

@@ -1,9 +0,0 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package rest
import "github.com/sirupsen/logrus"
// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "api/rest")

View File

@@ -1,46 +0,0 @@
package rest
import (
"bytes"
"context"
"net/http"
)
// MockRestProvider implements RestConnectionProvider for testing.
type MockRestProvider struct {
MockClient *http.Client
MockHandler Handler
MockHosts []string
HostIndex int
}
func (m *MockRestProvider) HttpClient() *http.Client { return m.MockClient }
func (m *MockRestProvider) Handler() Handler { return m.MockHandler }
func (m *MockRestProvider) CurrentHost() string {
if len(m.MockHosts) > 0 {
return m.MockHosts[m.HostIndex%len(m.MockHosts)]
}
return ""
}
func (m *MockRestProvider) Hosts() []string { return m.MockHosts }
func (m *MockRestProvider) SwitchHost(index int) error { m.HostIndex = index; return nil }
// MockHandler implements Handler for testing.
type MockHandler struct {
MockHost string
}
func (m *MockHandler) Get(_ context.Context, _ string, _ any) error { return nil }
func (m *MockHandler) GetStatusCode(_ context.Context, _ string) (int, error) {
return http.StatusOK, nil
}
func (m *MockHandler) GetSSZ(_ context.Context, _ string) ([]byte, http.Header, error) {
return nil, nil, nil
}
func (m *MockHandler) Post(_ context.Context, _ string, _ map[string]string, _ *bytes.Buffer, _ any) error {
return nil
}
func (m *MockHandler) PostSSZ(_ context.Context, _ string, _ map[string]string, _ *bytes.Buffer) ([]byte, http.Header, error) {
return nil, nil, nil
}
func (m *MockHandler) Host() string { return m.MockHost }

View File

@@ -1,158 +0,0 @@
package rest
import (
"net/http"
"strings"
"sync/atomic"
"time"
"github.com/OffchainLabs/prysm/v7/api/client"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)
// RestConnectionProvider manages HTTP client configuration for REST API with failover support.
// It allows switching between different beacon node REST endpoints when the current one becomes unavailable.
type RestConnectionProvider interface {
// HttpClient returns the configured HTTP client with headers, timeout, and optional tracing.
HttpClient() *http.Client
// Handler returns the REST handler for making API requests.
Handler() Handler
// CurrentHost returns the current REST API endpoint URL.
CurrentHost() string
// Hosts returns all configured REST API endpoint URLs.
Hosts() []string
// SwitchHost switches to the endpoint at the given index.
SwitchHost(index int) error
}
// RestConnectionProviderOption is a functional option for configuring the REST connection provider.
type RestConnectionProviderOption func(*restConnectionProvider)
// WithHttpTimeout sets the HTTP client timeout.
func WithHttpTimeout(timeout time.Duration) RestConnectionProviderOption {
return func(p *restConnectionProvider) {
p.timeout = timeout
}
}
// WithHttpHeaders sets custom HTTP headers to include in all requests.
func WithHttpHeaders(headers map[string][]string) RestConnectionProviderOption {
return func(p *restConnectionProvider) {
p.headers = headers
}
}
// WithTracing enables OpenTelemetry tracing for HTTP requests.
func WithTracing() RestConnectionProviderOption {
return func(p *restConnectionProvider) {
p.enableTracing = true
}
}
type restConnectionProvider struct {
endpoints []string
httpClient *http.Client
restHandler *handler
currentIndex atomic.Uint64
timeout time.Duration
headers map[string][]string
enableTracing bool
}
// NewRestConnectionProvider creates a new REST connection provider that manages HTTP client configuration.
// The endpoint parameter can be a comma-separated list of URLs (e.g., "http://host1:3500,http://host2:3500").
func NewRestConnectionProvider(endpoint string, opts ...RestConnectionProviderOption) (RestConnectionProvider, error) {
endpoints := parseEndpoints(endpoint)
if len(endpoints) == 0 {
return nil, errors.New("no REST API endpoints provided")
}
p := &restConnectionProvider{
endpoints: endpoints,
}
for _, opt := range opts {
opt(p)
}
// Build the HTTP transport chain
var transport http.RoundTripper = http.DefaultTransport
// Add custom headers if configured
if len(p.headers) > 0 {
transport = client.NewCustomHeadersTransport(transport, p.headers)
}
// Add tracing if enabled
if p.enableTracing {
transport = otelhttp.NewTransport(transport)
}
p.httpClient = &http.Client{
Timeout: p.timeout,
Transport: transport,
}
// Create the REST handler with the HTTP client and initial host
p.restHandler = newHandler(*p.httpClient, endpoints[0])
log.WithFields(logrus.Fields{
"endpoints": endpoints,
"count": len(endpoints),
}).Info("Initialized REST connection provider")
return p, nil
}
// parseEndpoints splits a comma-separated endpoint string into individual endpoints.
func parseEndpoints(endpoint string) []string {
if endpoint == "" {
return nil
}
endpoints := make([]string, 0, 1)
for p := range strings.SplitSeq(endpoint, ",") {
if p = strings.TrimSpace(p); p != "" {
endpoints = append(endpoints, p)
}
}
return endpoints
}
func (p *restConnectionProvider) HttpClient() *http.Client {
return p.httpClient
}
func (p *restConnectionProvider) Handler() Handler {
return p.restHandler
}
func (p *restConnectionProvider) CurrentHost() string {
return p.endpoints[p.currentIndex.Load()]
}
func (p *restConnectionProvider) Hosts() []string {
// Return a copy to maintain immutability
hosts := make([]string, len(p.endpoints))
copy(hosts, p.endpoints)
return hosts
}
func (p *restConnectionProvider) SwitchHost(index int) error {
if index < 0 || index >= len(p.endpoints) {
return errors.Errorf("invalid host index %d, must be between 0 and %d", index, len(p.endpoints)-1)
}
oldIdx := p.currentIndex.Load()
p.currentIndex.Store(uint64(index))
// Update the rest handler's host
p.restHandler.SwitchHost(p.endpoints[index])
log.WithFields(logrus.Fields{
"previousHost": p.endpoints[oldIdx],
"newHost": p.endpoints[index],
}).Debug("Switched REST endpoint")
return nil
}
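
A brief sketch of the options above, assuming only what the constructor shows: the custom-headers transport wraps http.DefaultTransport, the otelhttp transport wraps that, and SwitchHost retargets the underlying REST handler. The token and URLs are placeholders.

package main

import (
	"log"
	"time"

	"github.com/OffchainLabs/prysm/v7/api/rest"
)

func main() {
	provider, err := rest.NewRestConnectionProvider(
		"http://host1:3500,http://host2:3500",
		rest.WithHttpHeaders(map[string][]string{"Authorization": {"Bearer <token>"}}),
		rest.WithHttpTimeout(30*time.Second),
		rest.WithTracing(),
	)
	if err != nil {
		log.Fatal(err)
	}
	// SwitchHost also updates the handler's target host, so subsequent requests
	// made through Handler() go to the new endpoint.
	if err := provider.SwitchHost(1); err != nil {
		log.Fatal(err)
	}
	log.Println(provider.CurrentHost()) // http://host2:3500
}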

View File

@@ -1,80 +0,0 @@
package rest
import (
"reflect"
"testing"
"github.com/OffchainLabs/prysm/v7/testing/assert"
"github.com/OffchainLabs/prysm/v7/testing/require"
)
func TestParseEndpoints(t *testing.T) {
tests := []struct {
name string
input string
expected []string
}{
{"single endpoint", "http://localhost:3500", []string{"http://localhost:3500"}},
{"multiple endpoints", "http://host1:3500,http://host2:3500,http://host3:3500", []string{"http://host1:3500", "http://host2:3500", "http://host3:3500"}},
{"endpoints with spaces", "http://host1:3500, http://host2:3500 , http://host3:3500", []string{"http://host1:3500", "http://host2:3500", "http://host3:3500"}},
{"empty string", "", nil},
{"only commas", ",,,", []string{}},
{"trailing comma", "http://host1:3500,http://host2:3500,", []string{"http://host1:3500", "http://host2:3500"}},
{"leading comma", ",http://host1:3500,http://host2:3500", []string{"http://host1:3500", "http://host2:3500"}},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := parseEndpoints(tt.input)
if !reflect.DeepEqual(tt.expected, got) {
t.Errorf("parseEndpoints(%q) = %v, want %v", tt.input, got, tt.expected)
}
})
}
}
func TestNewRestConnectionProvider_Errors(t *testing.T) {
t.Run("no endpoints", func(t *testing.T) {
_, err := NewRestConnectionProvider("")
require.ErrorContains(t, "no REST API endpoints provided", err)
})
}
func TestRestConnectionProvider(t *testing.T) {
provider, err := NewRestConnectionProvider("http://host1:3500,http://host2:3500,http://host3:3500")
require.NoError(t, err)
t.Run("initial state", func(t *testing.T) {
assert.Equal(t, 3, len(provider.Hosts()))
assert.Equal(t, "http://host1:3500", provider.CurrentHost())
assert.NotNil(t, provider.HttpClient())
})
t.Run("SwitchHost", func(t *testing.T) {
require.NoError(t, provider.SwitchHost(1))
assert.Equal(t, "http://host2:3500", provider.CurrentHost())
require.NoError(t, provider.SwitchHost(0))
assert.Equal(t, "http://host1:3500", provider.CurrentHost())
require.ErrorContains(t, "invalid host index", provider.SwitchHost(-1))
require.ErrorContains(t, "invalid host index", provider.SwitchHost(3))
})
t.Run("Hosts returns copy", func(t *testing.T) {
hosts := provider.Hosts()
original := hosts[0]
hosts[0] = "modified"
assert.Equal(t, original, provider.Hosts()[0])
})
}
func TestRestConnectionProvider_WithOptions(t *testing.T) {
headers := map[string][]string{"Authorization": {"Bearer token"}}
provider, err := NewRestConnectionProvider(
"http://localhost:3500",
WithHttpHeaders(headers),
WithHttpTimeout(30000000000), // 30 seconds in nanoseconds
WithTracing(),
)
require.NoError(t, err)
assert.NotNil(t, provider.HttpClient())
assert.Equal(t, "http://localhost:3500", provider.CurrentHost())
}

View File

@@ -85,7 +85,6 @@ go_library(
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//io/logs:go_default_library",
"//math:go_default_library",
"//monitoring/tracing:go_default_library",
"//monitoring/tracing/trace:go_default_library",

View File

@@ -10,7 +10,6 @@ import (
consensus_types "github.com/OffchainLabs/prysm/v7/consensus-types"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v7/io/logs"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
prysmTime "github.com/OffchainLabs/prysm/v7/time"
@@ -88,45 +87,36 @@ func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte, justified, finalized *ethpb.Checkpoint, receivedTime time.Time, genesis time.Time, daWaitedTime time.Duration) error {
startTime, err := slots.StartTime(genesis, block.Slot())
if err != nil {
return errors.Wrap(err, "failed to get slot start time")
return err
}
parentRoot := block.ParentRoot()
blkRoot := fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8])
finalizedRoot := fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8])
sinceSlotStartTime := prysmTime.Now().Sub(startTime)
lessFields := logrus.Fields{
"slot": block.Slot(),
"block": blkRoot,
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": finalizedRoot,
"epoch": slots.ToEpoch(block.Slot()),
"sinceSlotStartTime": sinceSlotStartTime,
}
moreFields := logrus.Fields{
"slot": block.Slot(),
"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
"block": blkRoot,
"epoch": slots.ToEpoch(block.Slot()),
"justifiedEpoch": justified.Epoch,
"justifiedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": finalizedRoot,
"parentRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(parentRoot[:])[:8]),
"version": version.String(block.Version()),
"sinceSlotStartTime": sinceSlotStartTime,
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime) - daWaitedTime,
"dataAvailabilityWaitedTime": daWaitedTime,
}
level := logs.PackageVerbosity("beacon-chain/blockchain")
level := log.Logger.GetLevel()
if level >= logrus.DebugLevel {
log.WithFields(moreFields).Info("Synced new block")
return nil
parentRoot := block.ParentRoot()
lf := logrus.Fields{
"slot": block.Slot(),
"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"epoch": slots.ToEpoch(block.Slot()),
"justifiedEpoch": justified.Epoch,
"justifiedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
"parentRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(parentRoot[:])[:8]),
"version": version.String(block.Version()),
"sinceSlotStartTime": prysmTime.Now().Sub(startTime),
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime) - daWaitedTime,
"dataAvailabilityWaitedTime": daWaitedTime,
}
log.WithFields(lf).Debug("Synced new block")
} else {
log.WithFields(logrus.Fields{
"slot": block.Slot(),
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
"epoch": slots.ToEpoch(block.Slot()),
}).Info("Synced new block")
}
log.WithFields(lessFields).WithField(logs.LogTargetField, logs.LogTargetUser).Info("Synced new block")
log.WithFields(moreFields).WithField(logs.LogTargetField, logs.LogTargetEphemeral).Info("Synced new block")
return nil
}

View File

@@ -7,6 +7,7 @@ go_library(
"payload_attestation.go",
"pending_payment.go",
"proposer_slashing.go",
"withdrawal.go",
],
importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas",
visibility = ["//visibility:public"],
@@ -38,6 +39,7 @@ go_test(
"payload_attestation_test.go",
"pending_payment_test.go",
"proposer_slashing_test.go",
"withdrawal_test.go",
],
embed = [":go_default_library"],
deps = [

View File

@@ -17,50 +17,27 @@ import (
)
// ProcessExecutionPayloadBid processes a signed execution payload bid in the Gloas fork.
// Spec v1.7.0-alpha.0 (pseudocode):
// process_execution_payload_bid(state: BeaconState, block: BeaconBlock):
//
// <spec fn="process_execution_payload_bid" fork="gloas" hash="6dc696bb">
// def process_execution_payload_bid(state: BeaconState, block: BeaconBlock) -> None:
// signed_bid = block.body.signed_execution_payload_bid
// bid = signed_bid.message
// builder_index = bid.builder_index
// amount = bid.value
//
// # For self-builds, amount must be zero regardless of withdrawal credential prefix
// if builder_index == BUILDER_INDEX_SELF_BUILD:
// assert amount == 0
// assert signed_bid.signature == bls.G2_POINT_AT_INFINITY
// else:
// # Verify that the builder is active
// assert is_active_builder(state, builder_index)
// # Verify that the builder has funds to cover the bid
// assert can_builder_cover_bid(state, builder_index, amount)
// # Verify that the bid signature is valid
// assert verify_execution_payload_bid_signature(state, signed_bid)
//
// # Verify that the bid is for the current slot
// assert bid.slot == block.slot
// # Verify that the bid is for the right parent block
// assert bid.parent_block_hash == state.latest_block_hash
// assert bid.parent_block_root == block.parent_root
// assert bid.prev_randao == get_randao_mix(state, get_current_epoch(state))
//
// # Record the pending payment if there is some payment
// if amount > 0:
// pending_payment = BuilderPendingPayment(
// weight=0,
// withdrawal=BuilderPendingWithdrawal(
// fee_recipient=bid.fee_recipient,
// amount=amount,
// builder_index=builder_index,
// ),
// )
// state.builder_pending_payments[SLOTS_PER_EPOCH + bid.slot % SLOTS_PER_EPOCH] = (
// pending_payment
// )
//
// # Cache the signed execution payload bid
// state.latest_execution_payload_bid = bid
// </spec>
// signed_bid = block.body.signed_execution_payload_bid
// bid = signed_bid.message
// builder_index = bid.builder_index
// amount = bid.value
// if builder_index == BUILDER_INDEX_SELF_BUILD:
// assert amount == 0
// assert signed_bid.signature == G2_POINT_AT_INFINITY
// else:
// assert is_active_builder(state, builder_index)
// assert can_builder_cover_bid(state, builder_index, amount)
// assert verify_execution_payload_bid_signature(state, signed_bid)
// assert bid.slot == block.slot
// assert bid.parent_block_hash == state.latest_block_hash
// assert bid.parent_block_root == block.parent_root
// assert bid.prev_randao == get_randao_mix(state, get_current_epoch(state))
// if amount > 0:
// state.builder_pending_payments[...] = BuilderPendingPayment(weight=0, withdrawal=BuilderPendingWithdrawal(fee_recipient=bid.fee_recipient, amount=amount, builder_index=builder_index))
// state.latest_execution_payload_bid = bid
func ProcessExecutionPayloadBid(st state.BeaconState, block interfaces.ReadOnlyBeaconBlock) error {
signedBid, err := block.Body().SignedExecutionPayloadBid()
if err != nil {

View File

@@ -24,21 +24,14 @@ import (
)
// ProcessPayloadAttestations validates payload attestations in a block body.
// Spec v1.7.0-alpha.0 (pseudocode):
// process_payload_attestation(state: BeaconState, payload_attestation: PayloadAttestation):
//
// <spec fn="process_payload_attestation" fork="gloas" hash="f46bf0b0">
// def process_payload_attestation(
// state: BeaconState, payload_attestation: PayloadAttestation
// ) -> None:
// data = payload_attestation.data
//
// # Check that the attestation is for the parent beacon block
// assert data.beacon_block_root == state.latest_block_header.parent_root
// # Check that the attestation is for the previous slot
// assert data.slot + 1 == state.slot
// # Verify signature
// indexed_payload_attestation = get_indexed_payload_attestation(state, payload_attestation)
// assert is_valid_indexed_payload_attestation(state, indexed_payload_attestation)
// </spec>
// data = payload_attestation.data
// assert data.beacon_block_root == state.latest_block_header.parent_root
// assert data.slot + 1 == state.slot
// indexed = get_indexed_payload_attestation(state, data.slot, payload_attestation)
// assert is_valid_indexed_payload_attestation(state, indexed)
func ProcessPayloadAttestations(ctx context.Context, st state.BeaconState, body interfaces.ReadOnlyBeaconBlockBody) error {
atts, err := body.PayloadAttestations()
if err != nil {
@@ -97,24 +90,17 @@ func indexedPayloadAttestation(ctx context.Context, st state.ReadOnlyBeaconState
}
// payloadCommittee returns the payload timeliness committee for a given slot for the state.
// Spec v1.7.0-alpha.0 (pseudocode):
// get_ptc(state: BeaconState, slot: Slot) -> Vector[ValidatorIndex, PTC_SIZE]:
//
// <spec fn="get_ptc" fork="gloas" hash="ae15f761">
// def get_ptc(state: BeaconState, slot: Slot) -> Vector[ValidatorIndex, PTC_SIZE]:
// """
// Get the payload timeliness committee for the given ``slot``.
// """
// epoch = compute_epoch_at_slot(slot)
// seed = hash(get_seed(state, epoch, DOMAIN_PTC_ATTESTER) + uint_to_bytes(slot))
// indices: List[ValidatorIndex] = []
// # Concatenate all committees for this slot in order
// committees_per_slot = get_committee_count_per_slot(state, epoch)
// for i in range(committees_per_slot):
// committee = get_beacon_committee(state, slot, CommitteeIndex(i))
// indices.extend(committee)
// return compute_balance_weighted_selection(
// state, indices, seed, size=PTC_SIZE, shuffle_indices=False
// )
// </spec>
// epoch = compute_epoch_at_slot(slot)
// seed = hash(get_seed(state, epoch, DOMAIN_PTC_ATTESTER) + uint_to_bytes(slot))
// indices = []
// committees_per_slot = get_committee_count_per_slot(state, epoch)
// for i in range(committees_per_slot):
// committee = get_beacon_committee(state, slot, CommitteeIndex(i))
// indices.extend(committee)
// return compute_balance_weighted_selection(state, indices, seed, size=PTC_SIZE, shuffle_indices=False)
func payloadCommittee(ctx context.Context, st state.ReadOnlyBeaconState, slot primitives.Slot) ([]primitives.ValidatorIndex, error) {
epoch := slots.ToEpoch(slot)
seed, err := ptcSeed(st, epoch, slot)
@@ -166,35 +152,17 @@ func ptcSeed(st state.ReadOnlyBeaconState, epoch primitives.Epoch, slot primitiv
}
// selectByBalance selects a balance-weighted subset of input candidates.
// Spec v1.7.0-alpha.0 (pseudocode):
// compute_balance_weighted_selection(state, indices, seed, size, shuffle_indices):
// Note: shuffle_indices is false for PTC.
//
// <spec fn="compute_balance_weighted_selection" fork="gloas" hash="2c9f1c23">
// def compute_balance_weighted_selection(
// state: BeaconState,
// indices: Sequence[ValidatorIndex],
// seed: Bytes32,
// size: uint64,
// shuffle_indices: bool,
// ) -> Sequence[ValidatorIndex]:
// """
// Return ``size`` indices sampled by effective balance, using ``indices``
// as candidates. If ``shuffle_indices`` is ``True``, candidate indices
// are themselves sampled from ``indices`` by shuffling it, otherwise
// ``indices`` is traversed in order.
// """
// total = uint64(len(indices))
// assert total > 0
// selected: List[ValidatorIndex] = []
// i = uint64(0)
// while len(selected) < size:
// next_index = i % total
// if shuffle_indices:
// next_index = compute_shuffled_index(next_index, total, seed)
// candidate_index = indices[next_index]
// if compute_balance_weighted_acceptance(state, candidate_index, seed, i):
// selected.append(candidate_index)
// i += 1
// return selected
// </spec>
// total = len(indices); selected = []; i = 0
// while len(selected) < size:
// next = i % total
// if shuffle_indices: next = compute_shuffled_index(next, total, seed)
// if compute_balance_weighted_acceptance(state, indices[next], seed, i):
// selected.append(indices[next])
// i += 1
func selectByBalanceFill(
ctx context.Context,
st state.ReadOnlyBeaconState,
@@ -231,22 +199,15 @@ func selectByBalanceFill(
}
// acceptByBalance determines if a validator is accepted based on its effective balance.
// Spec v1.7.0-alpha.0 (pseudocode):
// compute_balance_weighted_acceptance(state, index, seed, i):
//
// <spec fn="compute_balance_weighted_acceptance" fork="gloas" hash="9954dcd0">
// def compute_balance_weighted_acceptance(
// state: BeaconState, index: ValidatorIndex, seed: Bytes32, i: uint64
// ) -> bool:
// """
// Return whether to accept the selection of the validator ``index``, with probability
// proportional to its ``effective_balance``, and randomness given by ``seed`` and ``i``.
// """
// MAX_RANDOM_VALUE = 2**16 - 1
// random_bytes = hash(seed + uint_to_bytes(i // 16))
// offset = i % 16 * 2
// random_value = bytes_to_uint64(random_bytes[offset : offset + 2])
// effective_balance = state.validators[index].effective_balance
// return effective_balance * MAX_RANDOM_VALUE >= MAX_EFFECTIVE_BALANCE_ELECTRA * random_value
// </spec>
// MAX_RANDOM_VALUE = 2**16 - 1
// random_bytes = hash(seed + uint_to_bytes(i // 16))
// offset = i % 16 * 2
// random_value = bytes_to_uint64(random_bytes[offset:offset+2])
// effective_balance = state.validators[index].effective_balance
// return effective_balance * MAX_RANDOM_VALUE >= MAX_EFFECTIVE_BALANCE_ELECTRA * random_value
func acceptByBalance(st state.ReadOnlyBeaconState, idx primitives.ValidatorIndex, seedBuf []byte, hashFunc func([]byte) [32]byte, maxBalance uint64, round uint64) (bool, error) {
// Reuse the seed buffer by overwriting the last 8 bytes with the round counter.
binary.LittleEndian.PutUint64(seedBuf[len(seedBuf)-8:], round/16)
@@ -263,26 +224,16 @@ func acceptByBalance(st state.ReadOnlyBeaconState, idx primitives.ValidatorIndex
}
// validIndexedPayloadAttestation verifies the signature of an indexed payload attestation.
// Spec v1.7.0-alpha.0 (pseudocode):
// is_valid_indexed_payload_attestation(state: BeaconState, indexed_payload_attestation: IndexedPayloadAttestation) -> bool:
//
// <spec fn="is_valid_indexed_payload_attestation" fork="gloas" hash="cf1e65b5">
// def is_valid_indexed_payload_attestation(
// state: BeaconState, indexed_payload_attestation: IndexedPayloadAttestation
// ) -> bool:
// """
// Check if ``indexed_payload_attestation`` is non-empty, has sorted indices, and has
// a valid aggregate signature.
// """
// # Verify indices are non-empty and sorted
// indices = indexed_payload_attestation.attesting_indices
// if len(indices) == 0 or not indices == sorted(indices):
// return False
//
// # Verify aggregate signature
// pubkeys = [state.validators[i].pubkey for i in indices]
// domain = get_domain(state, DOMAIN_PTC_ATTESTER, None)
// signing_root = compute_signing_root(indexed_payload_attestation.data, domain)
// return bls.FastAggregateVerify(pubkeys, signing_root, indexed_payload_attestation.signature)
// </spec>
// indices = indexed_payload_attestation.attesting_indices
// return len(indices) > 0 and indices == sorted(indices) and
// bls.FastAggregateVerify(
// [state.validators[i].pubkey for i in indices],
// compute_signing_root(indexed_payload_attestation.data, get_domain(state, DOMAIN_PTC_ATTESTER, compute_epoch_at_slot(indexed_payload_attestation.data.slot))),
// indexed_payload_attestation.signature,
// )
func validIndexedPayloadAttestation(st state.ReadOnlyBeaconState, att *consensus_types.IndexedPayloadAttestation) error {
indices := att.AttestingIndices
if len(indices) == 0 || !slices.IsSorted(indices) {
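
The compute_balance_weighted_acceptance check above accepts a candidate with probability roughly effective_balance / MAX_EFFECTIVE_BALANCE_ELECTRA. A self-contained sketch of that check, with the balances passed in as parameters because the concrete constants are not restated here; the values in main are purely illustrative.

package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

// acceptance mirrors the compute_balance_weighted_acceptance pseudocode: a
// 16-bit random value is derived from hash(seed ++ i//16) at offset (i%16)*2,
// and the candidate is accepted when
// effectiveBalance * MAX_RANDOM_VALUE >= maxEffectiveBalance * randomValue.
func acceptance(seed [32]byte, i, effectiveBalance, maxEffectiveBalance uint64) bool {
	const maxRandomValue = 1<<16 - 1
	buf := make([]byte, 40)
	copy(buf, seed[:])
	binary.LittleEndian.PutUint64(buf[32:], i/16)
	randomBytes := sha256.Sum256(buf)
	offset := (i % 16) * 2
	randomValue := uint64(binary.LittleEndian.Uint16(randomBytes[offset : offset+2]))
	return effectiveBalance*maxRandomValue >= maxEffectiveBalance*randomValue
}

func main() {
	var seed [32]byte
	// Illustrative balances in Gwei: a candidate at half the maximum effective
	// balance should be accepted in roughly half of the rounds.
	accepted := 0
	for i := uint64(0); i < 1000; i++ {
		if acceptance(seed, i, 1024_000_000_000, 2048_000_000_000) {
			accepted++
		}
	}
	fmt.Println("accepted", accepted, "of 1000 rounds")
}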

View File

@@ -10,21 +10,17 @@ import (
)
// ProcessBuilderPendingPayments processes the builder pending payments from the previous epoch.
// Spec v1.7.0-alpha.0 (pseudocode):
// def process_builder_pending_payments(state: BeaconState) -> None:
//
// <spec fn="process_builder_pending_payments" fork="gloas" hash="10da48dd">
// def process_builder_pending_payments(state: BeaconState) -> None:
// """
// Processes the builder pending payments from the previous epoch.
// """
// quorum = get_builder_payment_quorum_threshold(state)
// for payment in state.builder_pending_payments[:SLOTS_PER_EPOCH]:
// if payment.weight >= quorum:
// state.builder_pending_withdrawals.append(payment.withdrawal)
// quorum = get_builder_payment_quorum_threshold(state)
// for payment in state.builder_pending_payments[:SLOTS_PER_EPOCH]:
// if payment.weight >= quorum:
// state.builder_pending_withdrawals.append(payment.withdrawal)
//
// old_payments = state.builder_pending_payments[SLOTS_PER_EPOCH:]
// new_payments = [BuilderPendingPayment() for _ in range(SLOTS_PER_EPOCH)]
// state.builder_pending_payments = old_payments + new_payments
// </spec>
// old_payments = state.builder_pending_payments[SLOTS_PER_EPOCH:]
// new_payments = [BuilderPendingPayment() for _ in range(SLOTS_PER_EPOCH)]
// state.builder_pending_payments = old_payments + new_payments
func ProcessBuilderPendingPayments(state state.BeaconState) error {
quorum, err := builderQuorumThreshold(state)
if err != nil {
@@ -57,16 +53,12 @@ func ProcessBuilderPendingPayments(state state.BeaconState) error {
}
// builderQuorumThreshold calculates the quorum threshold for builder payments.
// Spec v1.7.0-alpha.0 (pseudocode):
// def get_builder_payment_quorum_threshold(state: BeaconState) -> uint64:
//
// <spec fn="get_builder_payment_quorum_threshold" fork="gloas" hash="a64b7ffb">
// def get_builder_payment_quorum_threshold(state: BeaconState) -> uint64:
// """
// Calculate the quorum threshold for builder payments.
// """
// per_slot_balance = get_total_active_balance(state) // SLOTS_PER_EPOCH
// quorum = per_slot_balance * BUILDER_PAYMENT_THRESHOLD_NUMERATOR
// return uint64(quorum // BUILDER_PAYMENT_THRESHOLD_DENOMINATOR)
// </spec>
// per_slot_balance = get_total_active_balance(state) // SLOTS_PER_EPOCH
// quorum = per_slot_balance * BUILDER_PAYMENT_THRESHOLD_NUMERATOR
// return uint64(quorum // BUILDER_PAYMENT_THRESHOLD_DENOMINATOR)
func builderQuorumThreshold(state state.ReadOnlyBeaconState) (primitives.Gwei, error) {
activeBalance, err := helpers.TotalActiveBalance(state)
if err != nil {
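
A worked illustration of get_builder_payment_quorum_threshold above: the threshold is a fixed fraction of the per-slot share of total active balance. All inputs below are parameters, and the numbers in main are hypothetical, including the assumed 6/10 ratio.

package main

import "fmt"

// quorumThreshold mirrors the pseudocode above; the constants are passed in
// because their concrete values are not restated in this diff.
func quorumThreshold(totalActiveBalance, slotsPerEpoch, numerator, denominator uint64) uint64 {
	perSlotBalance := totalActiveBalance / slotsPerEpoch
	return perSlotBalance * numerator / denominator
}

func main() {
	// Hypothetical example: 1,000,000 validators at 32 ETH effective balance
	// (in Gwei), 32 slots per epoch, and an assumed 6/10 threshold ratio.
	total := uint64(1_000_000) * 32_000_000_000
	fmt.Println(quorumThreshold(total, 32, 6, 10)) // 600000000000000 Gwei = 600,000 ETH
}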

View File

@@ -11,20 +11,16 @@ import (
)
// RemoveBuilderPendingPayment removes the pending builder payment for the proposal slot.
// Spec v1.7.0 (pseudocode):
//
// <spec fn="process_proposer_slashing" fork="gloas" lines="22-32" hash="4da721ef">
// # [New in Gloas:EIP7732]
// # Remove the BuilderPendingPayment corresponding to
// # this proposal if it is still in the 2-epoch window.
// slot = header_1.slot
// proposal_epoch = compute_epoch_at_slot(slot)
// if proposal_epoch == get_current_epoch(state):
// payment_index = SLOTS_PER_EPOCH + slot % SLOTS_PER_EPOCH
// state.builder_pending_payments[payment_index] = BuilderPendingPayment()
// payment_index = SLOTS_PER_EPOCH + slot % SLOTS_PER_EPOCH
// state.builder_pending_payments[payment_index] = BuilderPendingPayment()
// elif proposal_epoch == get_previous_epoch(state):
// payment_index = slot % SLOTS_PER_EPOCH
// state.builder_pending_payments[payment_index] = BuilderPendingPayment()
// </spec>
// payment_index = slot % SLOTS_PER_EPOCH
// state.builder_pending_payments[payment_index] = BuilderPendingPayment()
func RemoveBuilderPendingPayment(st state.BeaconState, header *eth.BeaconBlockHeader) error {
proposalEpoch := slots.ToEpoch(header.Slot)
currentEpoch := time.CurrentEpoch(st)

View File

@@ -0,0 +1,105 @@
package gloas
import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/pkg/errors"
)
// ProcessWithdrawals applies withdrawals to the state for Gloas.
//
// Spec v1.7.0-alpha.1 (pseudocode):
//
// def process_withdrawals(
//
// state: BeaconState,
// # [Modified in Gloas:EIP7732]
// # Removed `payload`
//
// ) -> None:
//
// # [New in Gloas:EIP7732]
// # Return early if the parent block is empty
// if not is_parent_block_full(state):
// return
//
// # Get expected withdrawals
// expected = get_expected_withdrawals(state)
//
// # Apply expected withdrawals
// apply_withdrawals(state, expected.withdrawals)
//
// # Update withdrawals fields in the state
// update_next_withdrawal_index(state, expected.withdrawals)
// # [New in Gloas:EIP7732]
// update_payload_expected_withdrawals(state, expected.withdrawals)
// # [New in Gloas:EIP7732]
// update_builder_pending_withdrawals(state, expected.processed_builder_withdrawals_count)
// update_pending_partial_withdrawals(state, expected.processed_partial_withdrawals_count)
// # [New in Gloas:EIP7732]
// update_next_withdrawal_builder_index(state, expected.processed_builders_sweep_count)
// update_next_withdrawal_validator_index(state, expected.withdrawals)
func ProcessWithdrawals(st state.BeaconState) error {
full, err := st.IsParentBlockFull()
if err != nil {
return errors.Wrap(err, "could not get parent block full status")
}
if !full {
return nil
}
expected, err := st.ExpectedWithdrawalsGloas()
if err != nil {
return errors.Wrap(err, "could not get expected withdrawals")
}
if err := st.DecreaseWithdrawalBalances(expected.Withdrawals); err != nil {
return errors.Wrap(err, "could not decrease withdrawal balances")
}
if len(expected.Withdrawals) > 0 {
if err := st.SetNextWithdrawalIndex(expected.Withdrawals[len(expected.Withdrawals)-1].Index + 1); err != nil {
return errors.Wrap(err, "could not set next withdrawal index")
}
}
err = st.SetPayloadExpectedWithdrawals(expected.Withdrawals)
if err != nil {
return errors.Wrap(err, "could not set payload expected withdrawals")
}
err = st.DequeueBuilderPendingWithdrawals(expected.ProcessedBuilderWithdrawalsCount)
if err != nil {
return errors.Wrap(err, "unable to dequeue builder pending withdrawals from state")
}
if err := st.DequeuePendingPartialWithdrawals(expected.ProcessedPartialWithdrawalsCount); err != nil {
return errors.Wrap(err, "unable to dequeue partial withdrawals from state")
}
err = st.SetNextWithdrawalBuilderIndex(expected.NextWithdrawalBuilderIndex)
if err != nil {
return errors.Wrap(err, "could not set next withdrawal builder index")
}
var nextValidatorIndex primitives.ValidatorIndex
if uint64(len(expected.Withdrawals)) < params.BeaconConfig().MaxWithdrawalsPerPayload {
nextValidatorIndex, err = st.NextWithdrawalValidatorIndex()
if err != nil {
return errors.Wrap(err, "could not get next withdrawal validator index")
}
nextValidatorIndex += primitives.ValidatorIndex(params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep)
nextValidatorIndex = nextValidatorIndex % primitives.ValidatorIndex(st.NumValidators())
} else {
nextValidatorIndex = expected.Withdrawals[len(expected.Withdrawals)-1].ValidatorIndex + 1
if nextValidatorIndex == primitives.ValidatorIndex(st.NumValidators()) {
nextValidatorIndex = 0
}
}
if err := st.SetNextWithdrawalValidatorIndex(nextValidatorIndex); err != nil {
return errors.Wrap(err, "could not set next withdrawal validator index")
}
return nil
}
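
The tail of ProcessWithdrawals above picks where the next withdrawals sweep starts: jump by the sweep bound when the payload was not full, otherwise continue right after the last withdrawn validator. A small sketch of that arithmetic; the payload capacity, sweep bound, and validator count below are hypothetical stand-ins for the config values.

package main

import "fmt"

// nextSweepStart mirrors the index update at the end of ProcessWithdrawals: if
// the payload was not full, advance the sweep cursor by the sweep bound modulo
// the validator set size; otherwise continue after the last withdrawn
// validator, wrapping to zero at the end of the set.
func nextSweepStart(current, lastWithdrawnValidator, withdrawalCount, maxPerPayload, sweepBound, numValidators uint64) uint64 {
	if withdrawalCount < maxPerPayload {
		return (current + sweepBound) % numValidators
	}
	next := lastWithdrawnValidator + 1
	if next == numValidators {
		return 0
	}
	return next
}

func main() {
	// Hypothetical values: 500,000 validators, payload capacity 16, sweep bound 16,384.
	fmt.Println(nextSweepStart(490_000, 0, 3, 16, 16_384, 500_000))        // 6384: partial payload, cursor wraps around the set
	fmt.Println(nextSweepStart(490_000, 123_456, 16, 16, 16_384, 500_000)) // 123457: full payload, continue after the last withdrawal
}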

View File

@@ -0,0 +1,34 @@
package gloas
import (
"testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/require"
)
func TestProcessWithdrawals_ParentBlockNotFull(t *testing.T) {
state, err := state_native.InitializeFromProtoUnsafeGloas(&ethpb.BeaconStateGloas{})
require.NoError(t, err)
st := &withdrawalsState{BeaconState: state}
require.NoError(t, ProcessWithdrawals(st))
require.Equal(t, false, st.expectedCalled)
}
type withdrawalsState struct {
state.BeaconState
expectedCalled bool
decreaseCalled bool
}
func (w *withdrawalsState) IsParentBlockFull() (bool, error) {
return false, nil
}
func (w *withdrawalsState) ExpectedWithdrawalsGloas() (state.ExpectedWithdrawalsGloasResult, error) {
w.expectedCalled = true
return state.ExpectedWithdrawalsGloasResult{}, nil
}

View File

@@ -143,11 +143,10 @@ func ProcessSlot(ctx context.Context, state state.BeaconState) (state.BeaconStat
return nil, err
}
// <spec fn="process_slot" fork="gloas" lines="11-13" hash="62b28839">
// Spec v1.6.1 (pseudocode):
// # [New in Gloas:EIP7732]
// # Unset the next payload availability
// state.execution_payload_availability[(state.slot + 1) % SLOTS_PER_HISTORICAL_ROOT] = 0b0
// </spec>
if state.Version() >= version.Gloas {
index := uint64((state.Slot() + 1) % params.BeaconConfig().SlotsPerHistoricalRoot)
if err := state.UpdateExecutionPayloadAvailabilityAtIndex(index, 0x0); err != nil {

View File

@@ -67,6 +67,7 @@ func getSubscriptionStatusFromDB(t *testing.T, db *Store) bool {
return subscribed
}
func TestUpdateCustodyInfo(t *testing.T) {
ctx := t.Context()

View File

@@ -26,15 +26,6 @@ var ErrNotFoundMetadataSeqNum = errors.Wrap(ErrNotFound, "metadata sequence numb
// but the database was created without state-diff support.
var ErrStateDiffIncompatible = errors.New("state-diff feature enabled but database was created without state-diff support")
// ErrStateDiffCorrupted is returned when state-diff metadata or data is missing or invalid.
var ErrStateDiffCorrupted = errors.New("state-diff database corrupted")
// ErrStateDiffExponentMismatch is returned when configured exponents differ from stored metadata.
var ErrStateDiffExponentMismatch = errors.New("state-diff exponents mismatch")
// ErrStateDiffMissingSnapshot is returned when the offset snapshot is missing.
var ErrStateDiffMissingSnapshot = errors.New("state-diff offset snapshot missing")
var errEmptyBlockSlice = errors.New("[]blocks.ROBlock is empty")
var errIncorrectBlockParent = errors.New("unexpected missing or forked blocks in a []ROBlock")
var errFinalizedChildNotFound = errors.New("unable to find finalized root descending from backfill batch")

View File

@@ -7,11 +7,9 @@ import (
"fmt"
"os"
"path"
"slices"
"time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/iface"
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
"github.com/OffchainLabs/prysm/v7/config/features"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
@@ -23,7 +21,6 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
prombolt "github.com/prysmaticlabs/prombbolt"
logrus "github.com/sirupsen/logrus"
bolt "go.etcd.io/bbolt"
)
@@ -226,42 +223,8 @@ func (kv *Store) startStateDiff(ctx context.Context) error {
}
if hasOffset {
storedExponents, err := kv.loadStateDiffExponents()
if err != nil {
return fmt.Errorf("%w: state-diff metadata missing or invalid; re-sync required: %v", ErrStateDiffCorrupted, err)
}
currentExponents := flags.Get().StateDiffExponents
if !slices.Equal(storedExponents, currentExponents) {
return errors.Wrapf(
ErrStateDiffExponentMismatch,
"state-diff exponents changed; database incompatible. "+
"Database was initialized with: %v. "+
"Current configuration: %v. "+
"Options: use original exponents (--state-diff-exponents=%s) or delete database and re-sync from genesis/checkpoint.",
storedExponents,
currentExponents,
formatStateDiffExponents(storedExponents),
)
}
offset, err := kv.loadOffset()
if err != nil {
return err
}
cache, err := populateStateDiffCacheFromDB(kv, offset)
if err != nil {
return err
}
kv.stateDiffCache = cache
if flags.Get().StateDiffValidateOnStartup {
if err := validateStateDiffCache(ctx, kv, cache); err != nil {
return err
}
}
log.WithFields(logrus.Fields{
"offset": offset,
"exponents": storedExponents,
}).Info("State-diff cache initialized from existing database")
return nil
// Existing state-diff database - restarts not yet supported.
return errors.New("restarting with existing state-diff database not yet supported")
}
// Check if this is a new database (no head block).

View File

@@ -3,15 +3,12 @@ package kv
import (
"bytes"
"context"
"encoding/binary"
"fmt"
"testing"
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
"github.com/OffchainLabs/prysm/v7/config/features"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/testing/util"
bolt "go.etcd.io/bbolt"
@@ -30,108 +27,6 @@ func setupDB(t testing.TB) *Store {
return db
}
func TestStartStateDiff_ExponentMismatch(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{EnableStateDiff: true})
defer resetCfg()
setDefaultStateDiffExponents()
store := setupDB(t)
require.NoError(t, store.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(stateDiffBucket)
if bucket == nil {
return bolt.ErrBucketNotFound
}
offsetBytes := make([]byte, 8)
binary.LittleEndian.PutUint64(offsetBytes, 0)
if err := bucket.Put(offsetKey, offsetBytes); err != nil {
return err
}
encoded, err := encodeStateDiffExponents([]int{20, 10})
if err != nil {
return err
}
return bucket.Put(exponentsKey, encoded)
}))
ctx := t.Context()
err := store.startStateDiff(ctx)
require.ErrorContains(t, "state-diff exponents changed", err)
}
func TestStartStateDiff_MissingOffsetSnapshot(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{EnableStateDiff: true})
defer resetCfg()
setDefaultStateDiffExponents()
store := setupDB(t)
require.NoError(t, store.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(stateDiffBucket)
if bucket == nil {
return bolt.ErrBucketNotFound
}
offsetBytes := make([]byte, 8)
binary.LittleEndian.PutUint64(offsetBytes, 0)
if err := bucket.Put(offsetKey, offsetBytes); err != nil {
return err
}
encoded, err := encodeStateDiffExponents(flags.Get().StateDiffExponents)
if err != nil {
return err
}
return bucket.Put(exponentsKey, encoded)
}))
ctx := t.Context()
err := store.startStateDiff(ctx)
require.ErrorContains(t, "missing offset snapshot", err)
}
func TestStartStateDiff_ValidateOnStartup(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{EnableStateDiff: true})
defer resetCfg()
setDefaultStateDiffExponents()
globalFlags := flags.GlobalFlags{
StateDiffExponents: flags.Get().StateDiffExponents,
StateDiffValidateOnStartup: true,
}
flags.Init(&globalFlags)
store := setupDB(t)
require.NoError(t, store.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(stateDiffBucket)
if bucket == nil {
return bolt.ErrBucketNotFound
}
st, _ := createState(t, 0, version.Phase0)
stateBytes, err := st.MarshalSSZ()
if err != nil {
return err
}
enc, err := addKey(st.Version(), stateBytes)
if err != nil {
return err
}
offsetBytes := make([]byte, 8)
binary.LittleEndian.PutUint64(offsetBytes, 0)
if err := bucket.Put(offsetKey, offsetBytes); err != nil {
return err
}
encoded, err := encodeStateDiffExponents(flags.Get().StateDiffExponents)
if err != nil {
return err
}
if err := bucket.Put(exponentsKey, encoded); err != nil {
return err
}
key := makeKeyForStateDiffTree(0, 0)
return bucket.Put(key, enc)
}))
err := store.startStateDiff(t.Context())
require.NoError(t, err)
}
func Test_setupBlockStorageType(t *testing.T) {
ctx := t.Context()
t.Run("fresh database with feature enabled to store full blocks should store full blocks", func(t *testing.T) {

View File

@@ -132,9 +132,6 @@ func (s *Store) saveHdiff(lvl int, anchor, st state.ReadOnlyBeaconState) error {
return err
}
}
if err := s.stateDiffCache.setLevelHasData(lvl); err != nil {
return err
}
return nil
}
@@ -174,9 +171,6 @@ func (s *Store) saveFullSnapshot(st state.ReadOnlyBeaconState) error {
if err != nil {
return err
}
if err := s.stateDiffCache.setLevelHasData(0); err != nil {
return err
}
return nil
}

View File

@@ -1,131 +1,19 @@
package kv
import (
"context"
"encoding/binary"
"errors"
"sync"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
pkgerrors "github.com/pkg/errors"
"go.etcd.io/bbolt"
)
type stateDiffCache struct {
sync.RWMutex
anchors []state.ReadOnlyBeaconState
levelsWithData []bool
offset uint64
}
func populateStateDiffCacheFromDB(s *Store, offset uint64) (*stateDiffCache, error) {
cache := &stateDiffCache{
anchors: make([]state.ReadOnlyBeaconState, len(flags.Get().StateDiffExponents)-1),
levelsWithData: make([]bool, len(flags.Get().StateDiffExponents)),
offset: offset,
}
if err := s.db.View(func(tx *bbolt.Tx) error {
bucket := tx.Bucket(stateDiffBucket)
if bucket == nil {
return bbolt.ErrBucketNotFound
}
for level := range cache.levelsWithData {
if level == 0 {
if bucket.Get(makeKeyForStateDiffTree(0, offset)) != nil {
cache.levelsWithData[level] = true
}
continue
}
cursor := bucket.Cursor()
prefix := []byte{byte(level)}
key, _ := cursor.Seek(prefix)
if key != nil && key[0] == byte(level) {
slot, ok := slotFromStateDiffKey(key)
if !ok {
return ErrStateDiffCorrupted
}
if slot < offset {
return ErrStateDiffCorrupted
}
if level == 0 && slot != offset {
return ErrStateDiffCorrupted
}
if computeLevel(offset, primitives.Slot(slot)) != level {
return ErrStateDiffCorrupted
}
cache.levelsWithData[level] = true
}
}
return nil
}); err != nil {
return nil, err
}
anchor0, err := s.getFullSnapshot(offset)
if err != nil {
return nil, pkgerrors.Wrapf(ErrStateDiffMissingSnapshot, "state diff cache: missing offset snapshot at %d", offset)
}
cache.anchors[0] = anchor0
cache.levelsWithData[0] = true
return cache, nil
}
func validateStateDiffCache(ctx context.Context, s *Store, cache *stateDiffCache) error {
for level, hasData := range cache.levelsWithData {
if !hasData || level == 0 {
continue
}
maxSlot, err := latestSlotForLevel(s, level)
if err != nil {
return err
}
if _, err := s.stateByDiff(ctx, primitives.Slot(maxSlot)); err != nil {
return pkgerrors.Wrapf(ErrStateDiffCorrupted, "state diff validation failed for level %d slot %d: %v", level, maxSlot, err)
}
}
return nil
}
func latestSlotForLevel(s *Store, level int) (uint64, error) {
var maxSlot uint64
found := false
err := s.db.View(func(tx *bbolt.Tx) error {
bucket := tx.Bucket(stateDiffBucket)
if bucket == nil {
return bbolt.ErrBucketNotFound
}
cursor := bucket.Cursor()
prefix := []byte{byte(level)}
for key, _ := cursor.Seek(prefix); key != nil && key[0] == byte(level); key, _ = cursor.Next() {
slot, ok := slotFromStateDiffKey(key)
if !ok {
return ErrStateDiffCorrupted
}
if !found || slot > maxSlot {
maxSlot = slot
found = true
}
}
return nil
})
if err != nil {
return 0, err
}
if !found {
return 0, ErrStateDiffCorrupted
}
return maxSlot, nil
}
func slotFromStateDiffKey(key []byte) (uint64, bool) {
if len(key) < 9 {
return 0, false
}
return binary.LittleEndian.Uint64(key[1:9]), true
anchors []state.ReadOnlyBeaconState
offset uint64
}
func newStateDiffCache(s *Store) (*stateDiffCache, error) {
@@ -149,9 +37,8 @@ func newStateDiffCache(s *Store) (*stateDiffCache, error) {
}
return &stateDiffCache{
anchors: make([]state.ReadOnlyBeaconState, len(flags.Get().StateDiffExponents)-1), // -1 because last level doesn't need to be cached
levelsWithData: make([]bool, len(flags.Get().StateDiffExponents)),
offset: offset,
anchors: make([]state.ReadOnlyBeaconState, len(flags.Get().StateDiffExponents)-1), // -1 because last level doesn't need to be cached
offset: offset,
}, nil
}
@@ -171,25 +58,6 @@ func (c *stateDiffCache) setAnchor(level int, anchor state.ReadOnlyBeaconState)
return nil
}
func (c *stateDiffCache) levelHasData(level int) bool {
c.RLock()
defer c.RUnlock()
if level < 0 || level >= len(c.levelsWithData) {
return false
}
return c.levelsWithData[level]
}
func (c *stateDiffCache) setLevelHasData(level int) error {
c.Lock()
defer c.Unlock()
if level < 0 || level >= len(c.levelsWithData) {
return errors.New("state diff cache: level data index out of range")
}
c.levelsWithData[level] = true
return nil
}
func (c *stateDiffCache) getOffset() uint64 {
c.RLock()
defer c.RUnlock()

View File

@@ -5,7 +5,6 @@ import (
"encoding/binary"
"errors"
"fmt"
"strings"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
statenative "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
@@ -22,78 +21,9 @@ import (
var (
offsetKey = []byte("offset")
exponentsKey = []byte("exponents")
ErrSlotBeforeOffset = errors.New("slot is before state-diff root offset")
)
func encodeStateDiffExponents(exponents []int) ([]byte, error) {
if len(exponents) == 0 {
return nil, errors.New("state diff exponents cannot be empty")
}
if len(exponents) > 255 {
return nil, fmt.Errorf("state diff exponents length %d exceeds max 255", len(exponents))
}
encoded := make([]byte, len(exponents)+1)
encoded[0] = byte(len(exponents))
for i, exp := range exponents {
if exp < 2 || exp > flags.MaxStateDiffExponent {
return nil, fmt.Errorf("state diff exponent %d out of range for encoding", exp)
}
encoded[i+1] = byte(exp)
}
return encoded, nil
}
func decodeStateDiffExponents(encoded []byte) ([]int, error) {
if len(encoded) == 0 {
return nil, errors.New("state diff exponents missing length prefix")
}
count := int(encoded[0])
if count == 0 {
return nil, errors.New("state diff exponents length cannot be zero")
}
if len(encoded) != count+1 {
return nil, fmt.Errorf("state diff exponents length mismatch: expected %d got %d", count, len(encoded)-1)
}
exponents := make([]int, count)
for i := range count {
exponents[i] = int(encoded[i+1])
}
return exponents, nil
}
func formatStateDiffExponents(exponents []int) string {
if len(exponents) == 0 {
return ""
}
parts := make([]string, len(exponents))
for i, exp := range exponents {
parts[i] = fmt.Sprintf("%d", exp)
}
return strings.Join(parts, ",")
}
func (s *Store) loadStateDiffExponents() ([]int, error) {
var encoded []byte
err := s.db.View(func(tx *bbolt.Tx) error {
bucket := tx.Bucket(stateDiffBucket)
if bucket == nil {
return bbolt.ErrBucketNotFound
}
value := bucket.Get(exponentsKey)
if value == nil {
return errors.New("state diff exponents not found")
}
encoded = make([]byte, len(value))
copy(encoded, value)
return nil
})
if err != nil {
return nil, err
}
return decodeStateDiffExponents(encoded)
}
func makeKeyForStateDiffTree(level int, slot uint64) []byte {
buf := make([]byte, 16)
buf[0] = byte(level)
@@ -194,29 +124,6 @@ func (s *Store) getOffset() uint64 {
return s.stateDiffCache.getOffset()
}
func (s *Store) loadOffset() (uint64, error) {
var offset uint64
err := s.db.View(func(tx *bbolt.Tx) error {
bucket := tx.Bucket(stateDiffBucket)
if bucket == nil {
return bbolt.ErrBucketNotFound
}
offsetBytes := bucket.Get(offsetKey)
if offsetBytes == nil {
return errors.New("state diff offset not found")
}
if len(offsetBytes) != 8 {
return fmt.Errorf("state diff offset has invalid length %d", len(offsetBytes))
}
offset = binary.LittleEndian.Uint64(offsetBytes)
return nil
})
if err != nil {
return 0, err
}
return offset, nil
}
// hasStateDiffOffset checks if the state-diff offset has been set in the database.
// This is used to detect if an existing database has state-diff enabled.
func (s *Store) hasStateDiffOffset() (bool, error) {
@@ -246,13 +153,8 @@ func (s *Store) initializeStateDiff(slot primitives.Slot, initialState state.Rea
return nil
}
}
exponentsBytes, err := encodeStateDiffExponents(flags.Get().StateDiffExponents)
if err != nil {
return pkgerrors.Wrap(err, "failed to encode state diff exponents")
}
// Write metadata directly to the database (without using cache which doesn't exist yet).
err = s.db.Update(func(tx *bbolt.Tx) error {
// Write offset directly to the database (without using cache which doesn't exist yet).
err := s.db.Update(func(tx *bbolt.Tx) error {
bucket := tx.Bucket(stateDiffBucket)
if bucket == nil {
return bbolt.ErrBucketNotFound
@@ -260,10 +162,7 @@ func (s *Store) initializeStateDiff(slot primitives.Slot, initialState state.Rea
offsetBytes := make([]byte, 8)
binary.LittleEndian.PutUint64(offsetBytes, uint64(slot))
if err := bucket.Put(offsetKey, offsetBytes); err != nil {
return err
}
return bucket.Put(exponentsKey, exponentsBytes)
return bucket.Put(offsetKey, offsetBytes)
})
if err != nil {
return pkgerrors.Wrap(err, "failed to set offset")
@@ -394,12 +293,7 @@ func (s *Store) getBaseAndDiffChain(offset uint64, slot primitives.Slot) (state.
if diffSlot == lastSeenAnchorSlot {
continue
}
level := i + 1
if s.stateDiffCache != nil && !s.stateDiffCache.levelHasData(level) {
lastSeenAnchorSlot = diffSlot
continue
}
diffChainItems = append(diffChainItems, diffItem{level: level, slot: diffSlot + offset})
diffChainItems = append(diffChainItems, diffItem{level: i + 1, slot: diffSlot + offset})
lastSeenAnchorSlot = diffSlot
}

View File

@@ -9,7 +9,6 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
"github.com/OffchainLabs/prysm/v7/config/features"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/math"
@@ -35,81 +34,6 @@ func TestStateDiff_LoadOrInitOffset(t *testing.T) {
require.Equal(t, uint64(10), offset)
}
func TestStateDiff_LoadOffset(t *testing.T) {
setDefaultStateDiffExponents()
db := setupDB(t)
_, err := db.loadOffset()
require.ErrorContains(t, "offset not found", err)
err = setOffsetInDB(db, 10)
require.NoError(t, err)
offset, err := db.loadOffset()
require.NoError(t, err)
require.Equal(t, uint64(10), offset)
}
func TestStateDiff_EncodeDecodeExponents(t *testing.T) {
t.Run("roundtrip", func(t *testing.T) {
exponents := []int{21, 18, 16, 13}
encoded, err := encodeStateDiffExponents(exponents)
require.NoError(t, err)
decoded, err := decodeStateDiffExponents(encoded)
require.NoError(t, err)
require.DeepEqual(t, exponents, decoded)
})
t.Run("encode-empty", func(t *testing.T) {
_, err := encodeStateDiffExponents(nil)
require.ErrorContains(t, "cannot be empty", err)
})
t.Run("encode-negative", func(t *testing.T) {
_, err := encodeStateDiffExponents([]int{21, -1})
require.ErrorContains(t, "out of range", err)
})
t.Run("encode-too-large", func(t *testing.T) {
_, err := encodeStateDiffExponents([]int{flags.MaxStateDiffExponent + 1})
require.ErrorContains(t, "out of range", err)
})
t.Run("decode-empty", func(t *testing.T) {
_, err := decodeStateDiffExponents(nil)
require.ErrorContains(t, "missing length prefix", err)
})
t.Run("decode-zero-length", func(t *testing.T) {
_, err := decodeStateDiffExponents([]byte{0})
require.ErrorContains(t, "length cannot be zero", err)
})
t.Run("decode-length-mismatch", func(t *testing.T) {
_, err := decodeStateDiffExponents([]byte{2, 10})
require.ErrorContains(t, "length mismatch", err)
})
}
func TestStateDiff_InitializeStoresExponents(t *testing.T) {
setDefaultStateDiffExponents()
resetCfg := features.InitWithReset(&features.Flags{EnableStateDiff: true})
defer resetCfg()
db := setupDB(t)
st, _ := createState(t, 0, version.Phase0)
require.NoError(t, db.initializeStateDiff(0, st))
stored, err := db.loadStateDiffExponents()
require.NoError(t, err)
require.DeepEqual(t, flags.Get().StateDiffExponents, stored)
}
func TestStateDiff_LoadExponentsMissing(t *testing.T) {
db := setupDB(t)
_, err := db.loadStateDiffExponents()
require.ErrorContains(t, "exponents not found", err)
}
func TestStateDiff_ComputeLevel(t *testing.T) {
db := setupDB(t)
setDefaultStateDiffExponents()
@@ -230,94 +154,6 @@ func TestStateDiff_SaveFullSnapshot(t *testing.T) {
}
}
func TestStateDiff_PopulateStateDiffCacheFromDB(t *testing.T) {
setDefaultStateDiffExponents()
db := setupDB(t)
_, err := populateStateDiffCacheFromDB(db, 0)
require.ErrorContains(t, "missing offset snapshot", err)
st, _ := createState(t, 0, version.Phase0)
require.NoError(t, setOffsetInDB(db, 0))
require.NoError(t, db.saveStateByDiff(context.Background(), st))
err = db.db.Update(func(tx *bbolt.Tx) error {
bucket := tx.Bucket(stateDiffBucket)
if bucket == nil {
return bbolt.ErrBucketNotFound
}
key := makeKeyForStateDiffTree(2, math.PowerOf2(16))
return bucket.Put(append(key, stateSuffix...), []byte{1})
})
require.NoError(t, err)
cache, err := populateStateDiffCacheFromDB(db, 0)
require.NoError(t, err)
require.NotNil(t, cache)
require.Equal(t, uint64(0), cache.getOffset())
require.NotNil(t, cache.getAnchor(0))
require.Equal(t, true, cache.levelHasData(0))
require.Equal(t, false, cache.levelHasData(1))
require.Equal(t, true, cache.levelHasData(2))
}
func TestStateDiff_PopulateStateDiffCacheFromDB_InvalidLevelKey(t *testing.T) {
setDefaultStateDiffExponents()
db := setupDB(t)
st, _ := createState(t, 0, version.Phase0)
require.NoError(t, setOffsetInDB(db, 0))
require.NoError(t, db.saveStateByDiff(context.Background(), st))
require.NoError(t, db.db.Update(func(tx *bbolt.Tx) error {
bucket := tx.Bucket(stateDiffBucket)
if bucket == nil {
return bbolt.ErrBucketNotFound
}
key := makeKeyForStateDiffTree(2, 1)
return bucket.Put(append(key, stateSuffix...), []byte{1})
}))
_, err := populateStateDiffCacheFromDB(db, 0)
require.ErrorIs(t, ErrStateDiffCorrupted, err)
}
func TestStateDiff_GetBaseAndDiffChainSkipsEmptyLevels(t *testing.T) {
setDefaultStateDiffExponents()
db := setupDB(t)
require.NoError(t, setOffsetInDB(db, 0))
st, _ := createState(t, 0, version.Phase0)
require.NoError(t, db.saveFullSnapshot(st))
cache, err := populateStateDiffCacheFromDB(db, 0)
require.NoError(t, err)
cache.levelsWithData[0] = true
cache.levelsWithData[1] = false
cache.levelsWithData[2] = true
db.stateDiffCache = cache
slot := primitives.Slot(math.PowerOf2(18) + math.PowerOf2(16))
key := makeKeyForStateDiffTree(2, uint64(slot))
require.NoError(t, db.db.Update(func(tx *bbolt.Tx) error {
bucket := tx.Bucket(stateDiffBucket)
if bucket == nil {
return bbolt.ErrBucketNotFound
}
if err := bucket.Put(append(key, stateSuffix...), []byte{1}); err != nil {
return err
}
if err := bucket.Put(append(key, validatorSuffix...), []byte{2}); err != nil {
return err
}
return bucket.Put(append(key, balancesSuffix...), []byte{3})
}))
_, diffChain, err := db.getBaseAndDiffChain(0, slot)
require.NoError(t, err)
require.Equal(t, 1, len(diffChain))
}
func TestStateDiff_SaveAndReadFullSnapshot(t *testing.T) {
setDefaultStateDiffExponents()

View File

@@ -32,6 +32,7 @@ func New() *ForkChoice {
finalizedCheckpoint: &forkchoicetypes.Checkpoint{},
proposerBoostRoot: [32]byte{},
nodeByRoot: make(map[[fieldparams.RootLength]byte]*Node),
nodeByPayload: make(map[[fieldparams.RootLength]byte]*Node),
slashedIndices: make(map[primitives.ValidatorIndex]bool),
receivedBlocksLastEpoch: [fieldparams.SlotsPerEpoch]primitives.Slot{},
}

View File

@@ -94,5 +94,6 @@ func (s *Store) removeNodeAndChildren(ctx context.Context, node *Node, invalidRo
s.previousProposerBoostScore = 0
}
delete(s.nodeByRoot, node.root)
delete(s.nodeByPayload, node.payloadHash)
return invalidRoots, nil
}

View File

@@ -113,6 +113,7 @@ func (s *Store) insert(ctx context.Context,
}
}
s.nodeByPayload[payloadHash] = n
s.nodeByRoot[root] = n
if parent == nil {
if s.treeRootNode == nil {
@@ -121,6 +122,7 @@ func (s *Store) insert(ctx context.Context,
s.highestReceivedNode = n
} else {
delete(s.nodeByRoot, root)
delete(s.nodeByPayload, payloadHash)
return nil, errInvalidParentRoot
}
} else {
@@ -189,6 +191,7 @@ func (s *Store) pruneFinalizedNodeByRootMap(ctx context.Context, node, finalized
node.children = nil
delete(s.nodeByRoot, node.root)
delete(s.nodeByPayload, node.payloadHash)
return nil
}

View File

@@ -128,9 +128,10 @@ func TestStore_Insert(t *testing.T) {
// The new node does not have a parent.
treeRootNode := &Node{slot: 0, root: indexToHash(0)}
nodeByRoot := map[[32]byte]*Node{indexToHash(0): treeRootNode}
nodeByPayload := map[[32]byte]*Node{indexToHash(0): treeRootNode}
jc := &forkchoicetypes.Checkpoint{Epoch: 0}
fc := &forkchoicetypes.Checkpoint{Epoch: 0}
s := &Store{nodeByRoot: nodeByRoot, treeRootNode: treeRootNode, justifiedCheckpoint: jc, finalizedCheckpoint: fc, highestReceivedNode: &Node{}}
s := &Store{nodeByRoot: nodeByRoot, treeRootNode: treeRootNode, nodeByPayload: nodeByPayload, justifiedCheckpoint: jc, finalizedCheckpoint: fc, highestReceivedNode: &Node{}}
payloadHash := [32]byte{'a'}
ctx := t.Context()
_, blk, err := prepareForkchoiceState(ctx, 100, indexToHash(100), indexToHash(0), payloadHash, 1, 1)
@@ -237,6 +238,7 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
s.finalizedCheckpoint.Root = indexToHash(1)
require.NoError(t, s.prune(t.Context()))
require.Equal(t, len(s.nodeByRoot), 1)
require.Equal(t, len(s.nodeByPayload), 1)
}
// This test starts with the following branching diagram
@@ -317,6 +319,8 @@ func TestStore_PruneMapsNodes(t *testing.T) {
s.finalizedCheckpoint.Root = indexToHash(1)
require.NoError(t, s.prune(t.Context()))
require.Equal(t, len(s.nodeByRoot), 1)
require.Equal(t, len(s.nodeByPayload), 1)
}
func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {

View File

@@ -36,6 +36,7 @@ type Store struct {
treeRootNode *Node // the root node of the store tree.
headNode *Node // last head Node
nodeByRoot map[[fieldparams.RootLength]byte]*Node // nodes indexed by roots.
nodeByPayload map[[fieldparams.RootLength]byte]*Node // nodes indexed by payload Hash
slashedIndices map[primitives.ValidatorIndex]bool // the list of equivocating validator indices
originRoot [fieldparams.RootLength]byte // The genesis block root
genesisTime time.Time

View File

@@ -134,20 +134,10 @@ type BeaconNode struct {
// New creates a new node instance, sets up configuration options, and registers
// every required service to the node.
func New(cliCtx *cli.Context, cancel context.CancelFunc, optFuncs []func(*cli.Context) ([]Option, error), opts ...Option) (*BeaconNode, error) {
func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*BeaconNode, error) {
if err := configureBeacon(cliCtx); err != nil {
return nil, errors.Wrap(err, "could not set beacon configuration options")
}
for _, of := range optFuncs {
ofo, err := of(cliCtx)
if err != nil {
return nil, err
}
if ofo != nil {
opts = append(opts, ofo...)
}
}
ctx := cliCtx.Context
beacon := &BeaconNode{
@@ -550,12 +540,6 @@ func openDB(ctx context.Context, dbPath string, clearer *dbClearer) (*kv.Store,
cfg := features.Get()
cfg.EnableStateDiff = false
features.Init(cfg)
} else if errors.Is(err, kv.ErrStateDiffExponentMismatch) {
log.WithError(err).Error("State-diff configuration mismatch; restart aborted. Use the stored exponents or re-sync the database.")
return nil, err
} else if errors.Is(err, kv.ErrStateDiffMissingSnapshot) || errors.Is(err, kv.ErrStateDiffCorrupted) {
log.WithError(err).Error("State-diff database corrupted; restart aborted. Delete database and re-sync from genesis/checkpoint.")
return nil, err
} else if err != nil {
return nil, errors.Wrapf(err, "could not create database at %s", dbPath)
}

View File

@@ -59,7 +59,7 @@ func TestNodeClose_OK(t *testing.T) {
WithDataColumnStorage(filesystem.NewEphemeralDataColumnStorage(t)),
}
node, err := New(ctx, cancel, nil, options...)
node, err := New(ctx, cancel, options...)
require.NoError(t, err)
node.Close()
@@ -87,7 +87,7 @@ func TestNodeStart_Ok(t *testing.T) {
WithDataColumnStorage(filesystem.NewEphemeralDataColumnStorage(t)),
}
node, err := New(ctx, cancel, nil, options...)
node, err := New(ctx, cancel, options...)
require.NoError(t, err)
require.NotNil(t, node.lcStore)
node.services = &runtime.ServiceRegistry{}
@@ -116,7 +116,7 @@ func TestNodeStart_SyncChecker(t *testing.T) {
WithDataColumnStorage(filesystem.NewEphemeralDataColumnStorage(t)),
}
node, err := New(ctx, cancel, nil, options...)
node, err := New(ctx, cancel, options...)
require.NoError(t, err)
go func() {
node.Start()
@@ -151,7 +151,7 @@ func TestClearDB(t *testing.T) {
WithDataColumnStorage(filesystem.NewEphemeralDataColumnStorage(t)),
}
_, err = New(context, cancel, nil, options...)
_, err = New(context, cancel, options...)
require.NoError(t, err)
require.LogsContain(t, hook, "Removing database")
}

View File

@@ -26,8 +26,8 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/voluntaryexits/mock"
p2pMock "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/core"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
mockSync "github.com/OffchainLabs/prysm/v7/beacon-chain/sync/initial-sync/testing"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/crypto/bls"

View File

@@ -84,6 +84,7 @@ func TestGetSpec(t *testing.T) {
config.FuluForkVersion = []byte("FuluForkVersion")
config.FuluForkEpoch = 109
config.GloasForkEpoch = 110
config.MaxBuildersPerWithdrawalsSweep = 112
config.BLSWithdrawalPrefixByte = byte('b')
config.ETH1AddressWithdrawalPrefixByte = byte('c')
config.GenesisDelay = 24
@@ -220,7 +221,7 @@ func TestGetSpec(t *testing.T) {
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp))
data, ok := resp.Data.(map[string]any)
require.Equal(t, true, ok)
assert.Equal(t, 192, len(data))
assert.Equal(t, 193, len(data))
for k, v := range data {
t.Run(k, func(t *testing.T) {
switch k {
@@ -302,6 +303,8 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, "109", v)
case "GLOAS_FORK_EPOCH":
assert.Equal(t, "110", v)
case "MAX_BUILDERS_PER_WITHDRAWALS_SWEEP":
assert.Equal(t, "112", v)
case "MIN_ANCHOR_POW_BLOCK_DIFFICULTY":
assert.Equal(t, "1000", v)
case "BLS_WITHDRAWAL_PREFIX":

View File

@@ -3,6 +3,7 @@ package state
import (
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)
@@ -13,6 +14,10 @@ type writeOnlyGloasFields interface {
RotateBuilderPendingPayments() error
AppendBuilderPendingWithdrawals([]*ethpb.BuilderPendingWithdrawal) error
UpdateExecutionPayloadAvailabilityAtIndex(idx uint64, val byte) error
SetPayloadExpectedWithdrawals(withdrawals []*enginev1.Withdrawal) error
DecreaseWithdrawalBalances(withdrawals []*enginev1.Withdrawal) error
DequeueBuilderPendingWithdrawals(num uint64) error
SetNextWithdrawalBuilderIndex(idx primitives.BuilderIndex) error
}
type readOnlyGloasFields interface {
@@ -21,4 +26,15 @@ type readOnlyGloasFields interface {
CanBuilderCoverBid(primitives.BuilderIndex, primitives.Gwei) (bool, error)
LatestBlockHash() ([32]byte, error)
BuilderPendingPayments() ([]*ethpb.BuilderPendingPayment, error)
IsParentBlockFull() (bool, error)
ExpectedWithdrawalsGloas() (ExpectedWithdrawalsGloasResult, error)
}
// ExpectedWithdrawalsGloasResult bundles the expected withdrawals and related counters
// for the Gloas fork to avoid positional return mistakes.
type ExpectedWithdrawalsGloasResult struct {
Withdrawals []*enginev1.Withdrawal
ProcessedBuilderWithdrawalsCount uint64
ProcessedPartialWithdrawalsCount uint64
NextWithdrawalBuilderIndex primitives.BuilderIndex
}
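A brief illustration of the "avoid positional return mistakes" point above: a caller reads the bundled result by field name, so the counters cannot be silently swapped the way several positional return values could be. The gloasWithdrawalsReader interface below is a hypothetical narrowing used only for this example; it is not part of the state package.

package sketch

import "github.com/OffchainLabs/prysm/v7/beacon-chain/state"

// gloasWithdrawalsReader is a hypothetical, narrowed view of the state used
// only for this illustration.
type gloasWithdrawalsReader interface {
	ExpectedWithdrawalsGloas() (state.ExpectedWithdrawalsGloasResult, error)
}

// summarizeExpectedWithdrawals reads each value by field name, so reordering
// the struct's fields can never silently swap the builder and partial counts
// the way positional returns could.
func summarizeExpectedWithdrawals(st gloasWithdrawalsReader) (int, uint64, error) {
	res, err := st.ExpectedWithdrawalsGloas()
	if err != nil {
		return 0, 0, err
	}
	processed := res.ProcessedBuilderWithdrawalsCount + res.ProcessedPartialWithdrawalsCount
	return len(res.Withdrawals), processed, nil
}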

View File

@@ -1,13 +1,17 @@
package state_native
import (
"bytes"
"fmt"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/time/slots"
)
// LatestBlockHash returns the hash of the latest execution block.
@@ -46,20 +50,14 @@ func (b *BeaconState) BuilderPubkey(builderIndex primitives.BuilderIndex) ([fiel
}
// IsActiveBuilder returns true if the builder's placement is finalized and the builder has not initiated an exit.
// Spec v1.7.0-alpha.0 (pseudocode):
// def is_active_builder(state: BeaconState, builder_index: BuilderIndex) -> bool:
//
// <spec fn="is_active_builder" fork="gloas" hash="1a599fb2">
// def is_active_builder(state: BeaconState, builder_index: BuilderIndex) -> bool:
// """
// Check if the builder at ``builder_index`` is active for the given ``state``.
// """
// builder = state.builders[builder_index]
// return (
// # Placement in builder list is finalized
// builder.deposit_epoch < state.finalized_checkpoint.epoch
// # Has not initiated exit
// and builder.withdrawable_epoch == FAR_FUTURE_EPOCH
// )
// </spec>
// builder = state.builders[builder_index]
// return (
// builder.deposit_epoch < state.finalized_checkpoint.epoch
// and builder.withdrawable_epoch == FAR_FUTURE_EPOCH
// )
func (b *BeaconState) IsActiveBuilder(builderIndex primitives.BuilderIndex) (bool, error) {
if b.version < version.Gloas {
return false, errNotSupported("IsActiveBuilder", b.version)
@@ -78,18 +76,15 @@ func (b *BeaconState) IsActiveBuilder(builderIndex primitives.BuilderIndex) (boo
}
// CanBuilderCoverBid returns true if the builder has enough balance to cover the given bid amount.
// Spec v1.7.0-alpha.0 (pseudocode):
// def can_builder_cover_bid(state: BeaconState, builder_index: BuilderIndex, bid_amount: Gwei) -> bool:
//
// <spec fn="can_builder_cover_bid" fork="gloas" hash="9e3f2d7c">
// def can_builder_cover_bid(
// state: BeaconState, builder_index: BuilderIndex, bid_amount: Gwei
// ) -> bool:
// builder_balance = state.builders[builder_index].balance
// pending_withdrawals_amount = get_pending_balance_to_withdraw_for_builder(state, builder_index)
// min_balance = MIN_DEPOSIT_AMOUNT + pending_withdrawals_amount
// if builder_balance < min_balance:
// return False
// return builder_balance - min_balance >= bid_amount
// </spec>
// builder_balance = state.builders[builder_index].balance
// pending_withdrawals_amount = get_pending_balance_to_withdraw_for_builder(state, builder_index)
// min_balance = MIN_DEPOSIT_AMOUNT + pending_withdrawals_amount
// if builder_balance < min_balance:
// return False
// return builder_balance - min_balance >= bid_amount
func (b *BeaconState) CanBuilderCoverBid(builderIndex primitives.BuilderIndex, bidAmount primitives.Gwei) (bool, error) {
if b.version < version.Gloas {
return false, errNotSupported("CanBuilderCoverBid", b.version)
@@ -156,3 +151,220 @@ func (b *BeaconState) BuilderPendingPayments() ([]*ethpb.BuilderPendingPayment,
return b.builderPendingPaymentsVal(), nil
}
// IsParentBlockFull returns true if the last committed payload bid was fulfilled with a payload,
// which can only happen when both the beacon block and the payload were present.
// This function must be called on a beacon state before processing the execution payload bid in the block.
// Spec v1.7.0-alpha.2 (pseudocode):
// def is_parent_block_full(state: BeaconState) -> bool:
//
// return state.latest_execution_payload_bid.block_hash == state.latest_block_hash
func (b *BeaconState) IsParentBlockFull() (bool, error) {
if b.version < version.Gloas {
return false, errNotSupported("IsParentBlockFull", b.version)
}
b.lock.RLock()
defer b.lock.RUnlock()
if b.latestExecutionPayloadBid == nil {
return false, nil
}
return bytes.Equal(b.latestExecutionPayloadBid.BlockHash, b.latestBlockHash), nil
}
// ExpectedWithdrawalsGloas returns the withdrawals that a proposer will need to pack in the next block
// applied to the current state. It is also used by validators to check that the execution payload carried
// the right number of withdrawals.
//
// Spec v1.7.0-alpha.1:
//
// def get_expected_withdrawals(state: BeaconState) -> ExpectedWithdrawals:
// withdrawal_index = state.next_withdrawal_index
// withdrawals: List[Withdrawal] = []
//
// # [New in Gloas:EIP7732]
// # Get builder withdrawals
// builder_withdrawals, withdrawal_index, processed_builder_withdrawals_count = (
// get_builder_withdrawals(state, withdrawal_index, withdrawals)
// )
// withdrawals.extend(builder_withdrawals)
//
// # Get partial withdrawals
// partial_withdrawals, withdrawal_index, processed_partial_withdrawals_count = (
// get_pending_partial_withdrawals(state, withdrawal_index, withdrawals)
// )
// withdrawals.extend(partial_withdrawals)
//
// # [New in Gloas:EIP7732]
// # Get builders sweep withdrawals
// builders_sweep_withdrawals, withdrawal_index, processed_builders_sweep_count = (
// get_builders_sweep_withdrawals(state, withdrawal_index, withdrawals)
// )
// withdrawals.extend(builders_sweep_withdrawals)
//
// # Get validators sweep withdrawals
// validators_sweep_withdrawals, withdrawal_index, processed_validators_sweep_count = (
// get_validators_sweep_withdrawals(state, withdrawal_index, withdrawals)
// )
// withdrawals.extend(validators_sweep_withdrawals)
//
// return ExpectedWithdrawals(
// withdrawals,
// # [New in Gloas:EIP7732]
// processed_builder_withdrawals_count,
// processed_partial_withdrawals_count,
// # [New in Gloas:EIP7732]
// processed_builders_sweep_count,
// processed_validators_sweep_count,
// )
func (b *BeaconState) ExpectedWithdrawalsGloas() (state.ExpectedWithdrawalsGloasResult, error) {
if b.version < version.Gloas {
return state.ExpectedWithdrawalsGloasResult{}, errNotSupported("ExpectedWithdrawalsGloas", b.version)
}
b.lock.RLock()
defer b.lock.RUnlock()
cfg := params.BeaconConfig()
withdrawals := make([]*enginev1.Withdrawal, 0, cfg.MaxWithdrawalsPerPayload)
withdrawalIndex := b.nextWithdrawalIndex
withdrawalIndex, processedBuilderWithdrawalsCount, err := b.appendBuilderWithdrawals(withdrawalIndex, &withdrawals)
if err != nil {
return state.ExpectedWithdrawalsGloasResult{}, err
}
withdrawalIndex, processedPartialWithdrawalsCount, err := b.appendPendingPartialWithdrawals(withdrawalIndex, &withdrawals)
if err != nil {
return state.ExpectedWithdrawalsGloasResult{}, err
}
withdrawalIndex, nextBuilderIndex, err := b.appendBuildersSweepWithdrawals(withdrawalIndex, &withdrawals)
if err != nil {
return state.ExpectedWithdrawalsGloasResult{}, err
}
err = b.appendValidatorsSweepWithdrawals(withdrawalIndex, &withdrawals)
if err != nil {
return state.ExpectedWithdrawalsGloasResult{}, err
}
return state.ExpectedWithdrawalsGloasResult{
Withdrawals: withdrawals,
ProcessedBuilderWithdrawalsCount: processedBuilderWithdrawalsCount,
ProcessedPartialWithdrawalsCount: processedPartialWithdrawalsCount,
NextWithdrawalBuilderIndex: nextBuilderIndex,
}, nil
}
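To show how this read side and the new write-only methods might fit together, here is a hedged sketch of one plausible consumer. It is not Prysm's ProcessWithdrawals (only the tail of that function appears at the top of this diff), and it omits the partial-withdrawal and validator-index bookkeeping; the gloasWithdrawalsState interface is a local narrowing assembled from the interfaces shown earlier in the diff.

package sketch

import (
	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
)

// gloasWithdrawalsState is a hypothetical narrowed interface combining the
// Gloas read and write methods declared above, for illustration only.
type gloasWithdrawalsState interface {
	IsParentBlockFull() (bool, error)
	ExpectedWithdrawalsGloas() (state.ExpectedWithdrawalsGloasResult, error)
	SetPayloadExpectedWithdrawals([]*enginev1.Withdrawal) error
	DecreaseWithdrawalBalances([]*enginev1.Withdrawal) error
	DequeueBuilderPendingWithdrawals(uint64) error
	SetNextWithdrawalBuilderIndex(primitives.BuilderIndex) error
}

// applyExpectedWithdrawals is an illustrative consumer, not Prysm's actual
// ProcessWithdrawals, and it skips several of that function's steps.
func applyExpectedWithdrawals(st gloasWithdrawalsState) error {
	full, err := st.IsParentBlockFull()
	if err != nil {
		return err
	}
	if !full {
		return nil // empty parent slot: nothing to withdraw
	}
	res, err := st.ExpectedWithdrawalsGloas()
	if err != nil {
		return err
	}
	// Record what the next payload must carry, then apply the state changes.
	if err := st.SetPayloadExpectedWithdrawals(res.Withdrawals); err != nil {
		return err
	}
	if err := st.DecreaseWithdrawalBalances(res.Withdrawals); err != nil {
		return err
	}
	if err := st.DequeueBuilderPendingWithdrawals(res.ProcessedBuilderWithdrawalsCount); err != nil {
		return err
	}
	return st.SetNextWithdrawalBuilderIndex(res.NextWithdrawalBuilderIndex)
}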
// appendBuilderWithdrawals appends builder pending withdrawals to the provided slice and returns
// the updated withdrawal index and the processed count, following spec v1.7.0-alpha.2:
//
// def get_builder_withdrawals(state, withdrawal_index, prior_withdrawals):
// withdrawals_limit = MAX_WITHDRAWALS_PER_PAYLOAD - 1
// assert len(prior_withdrawals) <= withdrawals_limit
// processed_count = 0
// withdrawals = []
// for withdrawal in state.builder_pending_withdrawals:
// if len(prior_withdrawals + withdrawals) >= withdrawals_limit:
// break
// withdrawals.append(Withdrawal(
// index=withdrawal_index,
// validator_index=convert_builder_index_to_validator_index(withdrawal.builder_index),
// address=withdrawal.fee_recipient,
// amount=withdrawal.amount,
// ))
// withdrawal_index += 1
// processed_count += 1
// return withdrawals, withdrawal_index, processed_count
func (b *BeaconState) appendBuilderWithdrawals(withdrawalIndex uint64, withdrawals *[]*enginev1.Withdrawal) (uint64, uint64, error) {
cfg := params.BeaconConfig()
withdrawalsLimit := int(cfg.MaxWithdrawalsPerPayload - 1)
ws := *withdrawals
if len(ws) > withdrawalsLimit {
return withdrawalIndex, 0, fmt.Errorf("prior withdrawals length %d exceeds limit %d", len(ws), withdrawalsLimit)
}
var processedCount uint64
for _, w := range b.builderPendingWithdrawals {
if len(ws) >= withdrawalsLimit {
break
}
ws = append(ws, &enginev1.Withdrawal{
Index: withdrawalIndex,
ValidatorIndex: w.BuilderIndex.ToValidatorIndex(),
Address: w.FeeRecipient,
Amount: uint64(w.Amount),
})
withdrawalIndex++
processedCount++
}
*withdrawals = ws
return withdrawalIndex, processedCount, nil
}
// appendBuildersSweepWithdrawals appends builder sweep withdrawals to the provided slice and returns
// the updated withdrawal index and the next builder index for the sweep, following spec v1.7.0-alpha.2:
//
// def get_builders_sweep_withdrawals(state, withdrawal_index, prior_withdrawals):
// epoch = get_current_epoch(state)
// builders_limit = min(len(state.builders), MAX_BUILDERS_PER_WITHDRAWALS_SWEEP)
// withdrawals_limit = MAX_WITHDRAWALS_PER_PAYLOAD - 1
// assert len(prior_withdrawals) <= withdrawals_limit
// processed_count = 0
// withdrawals = []
// builder_index = state.next_withdrawal_builder_index
// for _ in range(builders_limit):
// if len(prior_withdrawals + withdrawals) >= withdrawals_limit:
// break
// builder = state.builders[builder_index]
// if builder.withdrawable_epoch <= epoch and builder.balance > 0:
// withdrawals.append(Withdrawal(
// index=withdrawal_index,
// validator_index=convert_builder_index_to_validator_index(builder_index),
// address=builder.execution_address,
// amount=builder.balance,
// ))
// withdrawal_index += 1
// builder_index = BuilderIndex((builder_index + 1) % len(state.builders))
// processed_count += 1
// return withdrawals, withdrawal_index, processed_count
func (b *BeaconState) appendBuildersSweepWithdrawals(withdrawalIndex uint64, withdrawals *[]*enginev1.Withdrawal) (uint64, primitives.BuilderIndex, error) {
cfg := params.BeaconConfig()
withdrawalsLimit := int(cfg.MaxWithdrawalsPerPayload - 1)
if len(*withdrawals) > withdrawalsLimit {
return withdrawalIndex, 0, fmt.Errorf("prior withdrawals length %d exceeds limit %d", len(*withdrawals), withdrawalsLimit)
}
ws := *withdrawals
buildersLimit := min(len(b.builders), int(cfg.MaxBuildersPerWithdrawalsSweep))
builderIndex := b.nextWithdrawalBuilderIndex
epoch := slots.ToEpoch(b.slot)
for range buildersLimit {
if len(ws) >= withdrawalsLimit {
break
}
builder := b.builders[builderIndex]
if builder != nil && builder.WithdrawableEpoch <= epoch && builder.Balance > 0 {
ws = append(ws, &enginev1.Withdrawal{
Index: withdrawalIndex,
ValidatorIndex: builderIndex.ToValidatorIndex(),
Address: builder.ExecutionAddress,
Amount: uint64(builder.Balance),
})
withdrawalIndex++
}
builderIndex = primitives.BuilderIndex((uint64(builderIndex) + 1) % uint64(len(b.builders)))
}
*withdrawals = ws
return withdrawalIndex, builderIndex, nil
}
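A quick standalone check of the circular sweep arithmetic above: with three builders and the sweep starting at index 2, the visits go 2, 0, 1 and the index wraps back to 2, which is what the sweep test further below expects. Plain Go with no Prysm types; it assumes the sweep cap allows visiting all three builders.

package main

import "fmt"

func main() {
	// Illustrative wrap-around check: three builders, sweep starting at index 2.
	const numBuilders = 3
	builderIndex := uint64(2) // state.next_withdrawal_builder_index
	for range numBuilders {
		fmt.Println("visit builder", builderIndex)
		builderIndex = (builderIndex + 1) % numBuilders
	}
	fmt.Println("next sweep starts at", builderIndex) // wraps back to 2
}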

View File

@@ -1,26 +1,27 @@
package state_native_test
package state_native
import (
"bytes"
"testing"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/testing/util"
"github.com/OffchainLabs/prysm/v7/time/slots"
)
func TestLatestBlockHash(t *testing.T) {
t.Run("returns error before gloas", func(t *testing.T) {
st, _ := util.DeterministicGenesisState(t, 1)
st := &BeaconState{version: version.Fulu}
_, err := st.LatestBlockHash()
require.ErrorContains(t, "is not supported", err)
})
t.Run("returns zero hash when unset", func(t *testing.T) {
st, err := state_native.InitializeFromProtoGloas(&ethpb.BeaconStateGloas{})
st, err := InitializeFromProtoGloas(&ethpb.BeaconStateGloas{})
require.NoError(t, err)
got, err := st.LatestBlockHash()
@@ -33,7 +34,7 @@ func TestLatestBlockHash(t *testing.T) {
var want [32]byte
copy(want[:], hashBytes)
st, err := state_native.InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
st, err := InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
LatestBlockHash: hashBytes,
})
require.NoError(t, err)
@@ -46,17 +47,14 @@ func TestLatestBlockHash(t *testing.T) {
func TestBuilderPubkey(t *testing.T) {
t.Run("returns error before gloas", func(t *testing.T) {
stIface, _ := util.DeterministicGenesisState(t, 1)
native, ok := stIface.(*state_native.BeaconState)
require.Equal(t, true, ok)
_, err := native.BuilderPubkey(0)
st := &BeaconState{version: version.Fulu}
_, err := st.BuilderPubkey(0)
require.ErrorContains(t, "is not supported", err)
})
t.Run("returns pubkey copy", func(t *testing.T) {
pubkey := bytes.Repeat([]byte{0xAA}, 48)
stIface, err := state_native.InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
stIface, err := InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
Builders: []*ethpb.Builder{
{
Pubkey: pubkey,
@@ -80,12 +78,12 @@ func TestBuilderPubkey(t *testing.T) {
})
t.Run("out of range returns error", func(t *testing.T) {
stIface, err := state_native.InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
stIface, err := InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
Builders: []*ethpb.Builder{},
})
require.NoError(t, err)
st := stIface.(*state_native.BeaconState)
st := stIface.(*BeaconState)
_, err = st.BuilderPubkey(1)
require.ErrorContains(t, "out of range", err)
})
@@ -93,7 +91,7 @@ func TestBuilderPubkey(t *testing.T) {
func TestBuilderHelpers(t *testing.T) {
t.Run("is active builder", func(t *testing.T) {
st, err := state_native.InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
st, err := InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
Builders: []*ethpb.Builder{
{
Balance: 10,
@@ -120,7 +118,7 @@ func TestBuilderHelpers(t *testing.T) {
},
FinalizedCheckpoint: &ethpb.Checkpoint{Epoch: 2},
}
stInactive, err := state_native.InitializeFromProtoGloas(stProto)
stInactive, err := InitializeFromProtoGloas(stProto)
require.NoError(t, err)
active, err = stInactive.IsActiveBuilder(0)
@@ -129,7 +127,7 @@ func TestBuilderHelpers(t *testing.T) {
})
t.Run("can builder cover bid", func(t *testing.T) {
stIface, err := state_native.InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
stIface, err := InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
Builders: []*ethpb.Builder{
{
Balance: primitives.Gwei(params.BeaconConfig().MinDepositAmount + 50),
@@ -147,7 +145,7 @@ func TestBuilderHelpers(t *testing.T) {
})
require.NoError(t, err)
st := stIface.(*state_native.BeaconState)
st := stIface.(*BeaconState)
ok, err := st.CanBuilderCoverBid(0, 20)
require.NoError(t, err)
require.Equal(t, true, ok)
@@ -159,10 +157,245 @@ func TestBuilderHelpers(t *testing.T) {
}
func TestBuilderPendingPayments_UnsupportedVersion(t *testing.T) {
stIface, err := state_native.InitializeFromProtoElectra(&ethpb.BeaconStateElectra{})
stIface, err := InitializeFromProtoElectra(&ethpb.BeaconStateElectra{})
require.NoError(t, err)
st := stIface.(*state_native.BeaconState)
st := stIface.(*BeaconState)
_, err = st.BuilderPendingPayments()
require.ErrorContains(t, "BuilderPendingPayments", err)
}
func TestIsParentBlockFull(t *testing.T) {
t.Run("returns error before gloas", func(t *testing.T) {
st := &BeaconState{version: version.Fulu}
_, err := st.IsParentBlockFull()
require.ErrorContains(t, "is not supported", err)
})
t.Run("returns false when bid is nil", func(t *testing.T) {
st := &BeaconState{version: version.Gloas}
got, err := st.IsParentBlockFull()
require.NoError(t, err)
require.Equal(t, false, got)
})
t.Run("returns true when hashes match", func(t *testing.T) {
hash := bytes.Repeat([]byte{0xAB}, 32)
st := &BeaconState{
version: version.Gloas,
latestExecutionPayloadBid: &ethpb.ExecutionPayloadBid{
BlockHash: hash,
},
latestBlockHash: hash,
}
got, err := st.IsParentBlockFull()
require.NoError(t, err)
require.Equal(t, true, got)
})
t.Run("returns false when hashes differ", func(t *testing.T) {
hash := bytes.Repeat([]byte{0xAB}, 32)
other := bytes.Repeat([]byte{0xCD}, 32)
st := &BeaconState{
version: version.Gloas,
latestExecutionPayloadBid: &ethpb.ExecutionPayloadBid{
BlockHash: hash,
},
latestBlockHash: other,
}
got, err := st.IsParentBlockFull()
require.NoError(t, err)
require.Equal(t, false, got)
})
}
func TestAppendBuilderWithdrawals(t *testing.T) {
t.Run("errors when prior withdrawals exceed limit", func(t *testing.T) {
st := &BeaconState{}
limit := params.BeaconConfig().MaxWithdrawalsPerPayload - 1
withdrawals := make([]*enginev1.Withdrawal, limit+1)
nextIndex, processed, err := st.appendBuilderWithdrawals(5, &withdrawals)
require.ErrorContains(t, "exceeds limit", err)
require.Equal(t, uint64(5), nextIndex)
require.Equal(t, uint64(0), processed)
require.Equal(t, int(limit+1), len(withdrawals))
})
t.Run("appends builder withdrawals and increments index", func(t *testing.T) {
st := &BeaconState{
builderPendingWithdrawals: []*ethpb.BuilderPendingWithdrawal{
{BuilderIndex: 1, FeeRecipient: []byte{0x01}, Amount: 11},
{BuilderIndex: 2, FeeRecipient: []byte{0x02}, Amount: 22},
{BuilderIndex: 3, FeeRecipient: []byte{0x03}, Amount: 33},
},
}
withdrawals := []*enginev1.Withdrawal{
{Index: 7, ValidatorIndex: 9, Address: []byte{0xAA}, Amount: 99},
}
nextIndex, processed, err := st.appendBuilderWithdrawals(10, &withdrawals)
require.NoError(t, err)
require.Equal(t, uint64(13), nextIndex)
require.Equal(t, uint64(3), processed)
require.Equal(t, 4, len(withdrawals))
require.DeepEqual(t, &enginev1.Withdrawal{
Index: 10,
ValidatorIndex: primitives.BuilderIndex(1).ToValidatorIndex(),
Address: []byte{0x01},
Amount: 11,
}, withdrawals[1])
require.DeepEqual(t, &enginev1.Withdrawal{
Index: 11,
ValidatorIndex: primitives.BuilderIndex(2).ToValidatorIndex(),
Address: []byte{0x02},
Amount: 22,
}, withdrawals[2])
require.DeepEqual(t, &enginev1.Withdrawal{
Index: 12,
ValidatorIndex: primitives.BuilderIndex(3).ToValidatorIndex(),
Address: []byte{0x03},
Amount: 33,
}, withdrawals[3])
})
t.Run("respects per-payload limit", func(t *testing.T) {
limit := params.BeaconConfig().MaxWithdrawalsPerPayload - 1
st := &BeaconState{
builderPendingWithdrawals: []*ethpb.BuilderPendingWithdrawal{
{BuilderIndex: 4, FeeRecipient: []byte{0x04}, Amount: 44},
{BuilderIndex: 5, FeeRecipient: []byte{0x05}, Amount: 55},
},
}
withdrawals := make([]*enginev1.Withdrawal, limit-1)
nextIndex, processed, err := st.appendBuilderWithdrawals(20, &withdrawals)
require.NoError(t, err)
require.Equal(t, uint64(21), nextIndex)
require.Equal(t, uint64(1), processed)
require.Equal(t, int(limit), len(withdrawals))
require.DeepEqual(t, &enginev1.Withdrawal{
Index: 20,
ValidatorIndex: primitives.BuilderIndex(4).ToValidatorIndex(),
Address: []byte{0x04},
Amount: 44,
}, withdrawals[len(withdrawals)-1])
})
t.Run("does not append when already at limit", func(t *testing.T) {
limit := params.BeaconConfig().MaxWithdrawalsPerPayload - 1
if limit == 0 {
t.Skip("withdrawals limit too small")
}
st := &BeaconState{
builderPendingWithdrawals: []*ethpb.BuilderPendingWithdrawal{
{BuilderIndex: 6, FeeRecipient: []byte{0x06}, Amount: 66},
},
}
withdrawals := make([]*enginev1.Withdrawal, limit)
nextIndex, processed, err := st.appendBuilderWithdrawals(30, &withdrawals)
require.NoError(t, err)
require.Equal(t, uint64(30), nextIndex)
require.Equal(t, uint64(0), processed)
require.Equal(t, int(limit), len(withdrawals))
})
}
func TestAppendBuildersSweepWithdrawals(t *testing.T) {
t.Run("errors when prior withdrawals exceed limit", func(t *testing.T) {
st := &BeaconState{}
limit := params.BeaconConfig().MaxWithdrawalsPerPayload - 1
withdrawals := make([]*enginev1.Withdrawal, limit+1)
nextIndex, nextBuilderIndex, err := st.appendBuildersSweepWithdrawals(5, &withdrawals)
require.ErrorContains(t, "exceeds limit", err)
require.Equal(t, uint64(5), nextIndex)
require.Equal(t, primitives.BuilderIndex(0), nextBuilderIndex)
require.Equal(t, int(limit+1), len(withdrawals))
})
t.Run("appends eligible builders, skips ineligible", func(t *testing.T) {
epoch := primitives.Epoch(3)
st := &BeaconState{
slot: slots.UnsafeEpochStart(epoch),
nextWithdrawalBuilderIndex: 2,
builders: []*ethpb.Builder{
{ExecutionAddress: []byte{0x01}, Balance: 0, WithdrawableEpoch: epoch},
{ExecutionAddress: []byte{0x02}, Balance: 10, WithdrawableEpoch: epoch + 1},
{ExecutionAddress: []byte{0x03}, Balance: 20, WithdrawableEpoch: epoch},
},
}
withdrawals := []*enginev1.Withdrawal{}
nextIndex, nextBuilderIndex, err := st.appendBuildersSweepWithdrawals(100, &withdrawals)
require.NoError(t, err)
require.Equal(t, uint64(101), nextIndex)
require.Equal(t, primitives.BuilderIndex(2), nextBuilderIndex)
require.Equal(t, 1, len(withdrawals))
require.DeepEqual(t, &enginev1.Withdrawal{
Index: 100,
ValidatorIndex: primitives.BuilderIndex(2).ToValidatorIndex(),
Address: []byte{0x03},
Amount: 20,
}, withdrawals[0])
})
t.Run("respects max builders per sweep", func(t *testing.T) {
cfg := params.BeaconConfig()
max := int(cfg.MaxBuildersPerWithdrawalsSweep)
epoch := primitives.Epoch(1)
builders := make([]*ethpb.Builder, max+2)
for i := range builders {
builders[i] = &ethpb.Builder{
ExecutionAddress: []byte{byte(i + 1)},
Balance: 1,
WithdrawableEpoch: epoch,
}
}
start := len(builders) - 1
st := &BeaconState{
slot: slots.UnsafeEpochStart(epoch),
nextWithdrawalBuilderIndex: primitives.BuilderIndex(start),
builders: builders,
}
withdrawals := []*enginev1.Withdrawal{}
nextIndex, nextBuilderIndex, err := st.appendBuildersSweepWithdrawals(7, &withdrawals)
require.NoError(t, err)
limit := int(cfg.MaxWithdrawalsPerPayload - 1)
expectedCount := min(max, limit)
require.Equal(t, uint64(7)+uint64(expectedCount), nextIndex)
require.Equal(t, expectedCount, len(withdrawals))
expectedNext := primitives.BuilderIndex((uint64(start) + uint64(expectedCount)) % uint64(len(builders)))
require.Equal(t, expectedNext, nextBuilderIndex)
})
t.Run("stops when payload limit reached", func(t *testing.T) {
cfg := params.BeaconConfig()
limit := cfg.MaxWithdrawalsPerPayload - 1
if limit < 1 {
t.Skip("withdrawals limit too small")
}
epoch := primitives.Epoch(2)
builders := []*ethpb.Builder{
{ExecutionAddress: []byte{0x0A}, Balance: 3, WithdrawableEpoch: epoch},
{ExecutionAddress: []byte{0x0B}, Balance: 4, WithdrawableEpoch: epoch},
}
st := &BeaconState{
slot: slots.UnsafeEpochStart(epoch),
nextWithdrawalBuilderIndex: 0,
builders: builders,
}
withdrawals := make([]*enginev1.Withdrawal, limit)
nextIndex, nextBuilderIndex, err := st.appendBuildersSweepWithdrawals(20, &withdrawals)
require.NoError(t, err)
require.Equal(t, uint64(20), nextIndex)
require.Equal(t, int(limit), len(withdrawals))
require.Equal(t, primitives.BuilderIndex(0), nextBuilderIndex)
})
}

View File

@@ -133,11 +133,22 @@ func (b *BeaconState) appendPendingPartialWithdrawals(withdrawalIndex uint64, wi
return withdrawalIndex, 0, nil
}
cfg := params.BeaconConfig()
withdrawalsLimit := min(
len(*withdrawals)+int(cfg.MaxPendingPartialsPerWithdrawalsSweep),
int(cfg.MaxWithdrawalsPerPayload-1),
)
if len(*withdrawals) > withdrawalsLimit {
return withdrawalIndex, 0, fmt.Errorf("prior withdrawals length %d exceeds limit %d", len(*withdrawals), withdrawalsLimit)
}
ws := *withdrawals
epoch := slots.ToEpoch(b.slot)
var processedPartialWithdrawalsCount uint64
for _, w := range b.pendingPartialWithdrawals {
if w.WithdrawableEpoch > epoch || len(ws) >= int(params.BeaconConfig().MaxPendingPartialsPerWithdrawalsSweep) {
isWithdrawable := w.WithdrawableEpoch <= epoch
hasReachedLimit := len(ws) >= withdrawalsLimit
if !isWithdrawable || hasReachedLimit {
break
}
@@ -149,7 +160,7 @@ func (b *BeaconState) appendPendingPartialWithdrawals(withdrawalIndex uint64, wi
if err != nil {
return withdrawalIndex, 0, fmt.Errorf("could not retrieve balance at index %d: %w", w.Index, err)
}
hasSufficientEffectiveBalance := v.EffectiveBalance() >= params.BeaconConfig().MinActivationBalance
hasSufficientEffectiveBalance := v.EffectiveBalance() >= cfg.MinActivationBalance
var totalWithdrawn uint64
for _, wi := range ws {
if wi.ValidatorIndex == w.Index {
@@ -160,9 +171,9 @@ func (b *BeaconState) appendPendingPartialWithdrawals(withdrawalIndex uint64, wi
if err != nil {
return withdrawalIndex, 0, errors.Wrapf(err, "failed to subtract balance %d with total withdrawn %d", vBal, totalWithdrawn)
}
hasExcessBalance := balance > params.BeaconConfig().MinActivationBalance
if v.ExitEpoch() == params.BeaconConfig().FarFutureEpoch && hasSufficientEffectiveBalance && hasExcessBalance {
amount := min(balance-params.BeaconConfig().MinActivationBalance, w.Amount)
hasExcessBalance := balance > cfg.MinActivationBalance
if v.ExitEpoch() == cfg.FarFutureEpoch && hasSufficientEffectiveBalance && hasExcessBalance {
amount := min(balance-cfg.MinActivationBalance, w.Amount)
ws = append(ws, &enginev1.Withdrawal{
Index: withdrawalIndex,
ValidatorIndex: w.Index,
@@ -183,7 +194,7 @@ func (b *BeaconState) appendValidatorsSweepWithdrawals(withdrawalIndex uint64, w
validatorIndex := b.nextWithdrawalValidatorIndex
validatorsLen := b.validatorsLen()
epoch := slots.ToEpoch(b.slot)
bound := min(uint64(validatorsLen), params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep)
bound := min(validatorsLen, int(params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep))
for range bound {
val, err := b.validatorAtIndexReadOnly(validatorIndex)
if err != nil {
@@ -222,7 +233,7 @@ func (b *BeaconState) appendValidatorsSweepWithdrawals(withdrawalIndex uint64, w
})
withdrawalIndex++
}
if uint64(len(ws)) == params.BeaconConfig().MaxWithdrawalsPerPayload {
if len(ws) == int(params.BeaconConfig().MaxWithdrawalsPerPayload) {
break
}
validatorIndex += 1
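For a concrete read of the new withdrawalsLimit above, here is a tiny worked example with illustrative numbers (not necessarily the active preset):

package main

import "fmt"

func main() {
	// Illustrative numbers: 2 withdrawals already queued, a partials-per-sweep
	// cap of 8, and a per-payload cap of 16 (the code above subtracts one from
	// the per-payload cap).
	prior, partialsCap, payloadCap := 2, 8, 16
	limit := min(prior+partialsCap, payloadCap-1)
	fmt.Println(limit) // 10: the partial pass may grow the list to 10 entries
}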

View File

@@ -1,6 +1,7 @@
package state_native
import (
"errors"
"fmt"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native/types"
@@ -8,8 +9,10 @@ import (
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
pkgerrors "github.com/pkg/errors"
)
// RotateBuilderPendingPayments rotates the queue by dropping slots per epoch payments from the
@@ -161,3 +164,154 @@ func (b *BeaconState) UpdateExecutionPayloadAvailabilityAtIndex(idx uint64, val
b.markFieldAsDirty(types.ExecutionPayloadAvailability)
return nil
}
// SetPayloadExpectedWithdrawals stores the expected withdrawals for the next payload.
func (b *BeaconState) SetPayloadExpectedWithdrawals(withdrawals []*enginev1.Withdrawal) error {
if b.version < version.Gloas {
return errNotSupported("SetPayloadExpectedWithdrawals", b.version)
}
b.lock.Lock()
defer b.lock.Unlock()
b.payloadExpectedWithdrawals = withdrawals
b.markFieldAsDirty(types.PayloadExpectedWithdrawals)
return nil
}
// DequeueBuilderPendingWithdrawals removes processed builder withdrawals from the front of the queue.
func (b *BeaconState) DequeueBuilderPendingWithdrawals(n uint64) error {
if b.version < version.Gloas {
return errNotSupported("DequeueBuilderPendingWithdrawals", b.version)
}
if n == 0 {
return nil
}
b.lock.Lock()
defer b.lock.Unlock()
if n > uint64(len(b.builderPendingWithdrawals)) {
return errors.New("cannot dequeue more builder withdrawals than are in the queue")
}
if b.sharedFieldReferences[types.BuilderPendingWithdrawals].Refs() > 1 {
withdrawals := make([]*ethpb.BuilderPendingWithdrawal, len(b.builderPendingWithdrawals))
copy(withdrawals, b.builderPendingWithdrawals)
b.builderPendingWithdrawals = withdrawals
b.sharedFieldReferences[types.BuilderPendingWithdrawals].MinusRef()
b.sharedFieldReferences[types.BuilderPendingWithdrawals] = stateutil.NewRef(1)
}
b.builderPendingWithdrawals = b.builderPendingWithdrawals[n:]
b.markFieldAsDirty(types.BuilderPendingWithdrawals)
b.rebuildTrie[types.BuilderPendingWithdrawals] = true
return nil
}
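The Refs() > 1 branch above is the state package's copy-on-write discipline: a slice still shared with other state copies is duplicated before being truncated, so sibling states keep their original view. A toy, standalone analogy follows; refCount is not Prysm's stateutil reference type.

package main

import "fmt"

// refCount is a toy stand-in for a shared-field reference counter.
type refCount struct{ n int }

func (r *refCount) refs() int { return r.n }
func (r *refCount) minus()    { r.n-- }

func main() {
	queue := []int{1, 2, 3}
	shared := &refCount{n: 2} // two state copies currently share the slice

	// Copy-on-write before truncating, as DequeueBuilderPendingWithdrawals does:
	// duplicate the slice so the other state keeps its original view.
	if shared.refs() > 1 {
		dup := make([]int, len(queue))
		copy(dup, queue)
		queue = dup
		shared.minus()
		shared = &refCount{n: 1}
	}
	queue = queue[1:]
	fmt.Println(queue, shared.refs()) // [2 3] 1; the sibling still sees 1 2 3
}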
// SetNextWithdrawalBuilderIndex sets the next builder index for the withdrawals sweep.
func (b *BeaconState) SetNextWithdrawalBuilderIndex(index primitives.BuilderIndex) error {
if b.version < version.Gloas {
return errNotSupported("SetNextWithdrawalBuilderIndex", b.version)
}
b.lock.Lock()
defer b.lock.Unlock()
b.nextWithdrawalBuilderIndex = index
b.markFieldAsDirty(types.NextWithdrawalBuilderIndex)
return nil
}
// DecreaseWithdrawalBalances applies withdrawal balance decreases for validators and builders.
// This method holds the state lock for the full batch to avoid lock churn.
func (b *BeaconState) DecreaseWithdrawalBalances(withdrawals []*enginev1.Withdrawal) error {
if b.version < version.Gloas {
return errNotSupported("DecreaseWithdrawalBalances", b.version)
}
if len(withdrawals) == 0 {
return nil
}
b.lock.Lock()
defer b.lock.Unlock()
var (
balanceIndices []uint64
builderIndices []uint64
)
for _, withdrawal := range withdrawals {
if withdrawal == nil {
return errors.New("withdrawal is nil")
}
if withdrawal.Amount == 0 {
continue
}
if withdrawal.ValidatorIndex.IsBuilderIndex() {
builderIndex := withdrawal.ValidatorIndex.ToBuilderIndex()
if err := b.decreaseBuilderBalanceLockFree(builderIndex, withdrawal.Amount); err != nil {
return err
}
builderIndices = append(builderIndices, uint64(builderIndex))
continue
}
balAtIdx, err := b.balanceAtIndex(withdrawal.ValidatorIndex)
if err != nil {
return err
}
newBal := decreaseBalanceWithVal(balAtIdx, withdrawal.Amount)
if err := b.balancesMultiValue.UpdateAt(b, uint64(withdrawal.ValidatorIndex), newBal); err != nil {
return pkgerrors.Wrap(err, "could not update balances")
}
balanceIndices = append(balanceIndices, uint64(withdrawal.ValidatorIndex))
}
if len(balanceIndices) > 0 {
b.markFieldAsDirty(types.Balances)
b.addDirtyIndices(types.Balances, balanceIndices)
}
if len(builderIndices) > 0 {
b.markFieldAsDirty(types.Builders)
b.addDirtyIndices(types.Builders, builderIndices)
}
return nil
}
func (b *BeaconState) decreaseBuilderBalanceLockFree(builderIndex primitives.BuilderIndex, amount uint64) error {
idx := uint64(builderIndex)
if idx >= uint64(len(b.builders)) {
return fmt.Errorf("builder index %d out of range (len=%d)", builderIndex, len(b.builders))
}
if b.sharedFieldReferences[types.Builders].Refs() > 1 {
builders := make([]*ethpb.Builder, len(b.builders))
copy(builders, b.builders)
b.builders = builders
b.sharedFieldReferences[types.Builders].MinusRef()
b.sharedFieldReferences[types.Builders] = stateutil.NewRef(1)
}
builder := b.builders[idx]
bal := uint64(builder.Balance)
if amount >= bal {
builder.Balance = 0
} else {
builder.Balance = primitives.Gwei(bal - amount)
}
return nil
}
func decreaseBalanceWithVal(currBalance, delta uint64) uint64 {
if delta > currBalance {
return 0
}
return currBalance - delta
}

View File

@@ -8,6 +8,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stateutil"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/testing/require"
@@ -229,6 +230,235 @@ func TestRotateBuilderPendingPayments_UnsupportedVersion(t *testing.T) {
require.ErrorContains(t, "RotateBuilderPendingPayments", err)
}
func TestSetPayloadExpectedWithdrawals(t *testing.T) {
t.Run("previous fork returns expected error", func(t *testing.T) {
st := &BeaconState{version: version.Fulu}
err := st.SetPayloadExpectedWithdrawals([]*enginev1.Withdrawal{})
require.ErrorContains(t, "SetPayloadExpectedWithdrawals", err)
})
t.Run("allows nil input and marks dirty", func(t *testing.T) {
st := &BeaconState{
version: version.Gloas,
dirtyFields: make(map[types.FieldIndex]bool),
}
require.NoError(t, st.SetPayloadExpectedWithdrawals(nil))
require.Equal(t, true, st.payloadExpectedWithdrawals == nil)
require.Equal(t, true, st.dirtyFields[types.PayloadExpectedWithdrawals])
})
t.Run("sets and marks dirty", func(t *testing.T) {
st := &BeaconState{
version: version.Gloas,
dirtyFields: make(map[types.FieldIndex]bool),
payloadExpectedWithdrawals: []*enginev1.Withdrawal{{Index: 1}, {Index: 2}},
}
withdrawals := []*enginev1.Withdrawal{{Index: 3}}
require.NoError(t, st.SetPayloadExpectedWithdrawals(withdrawals))
require.DeepEqual(t, withdrawals, st.payloadExpectedWithdrawals)
require.Equal(t, true, st.dirtyFields[types.PayloadExpectedWithdrawals])
})
}
func TestDecreaseWithdrawalBalances(t *testing.T) {
t.Run("previous fork returns expected error", func(t *testing.T) {
st := &BeaconState{version: version.Fulu}
err := st.DecreaseWithdrawalBalances([]*enginev1.Withdrawal{{}})
require.ErrorContains(t, "DecreaseWithdrawalBalances", err)
})
t.Run("rejects nil withdrawal", func(t *testing.T) {
st := &BeaconState{version: version.Gloas}
err := st.DecreaseWithdrawalBalances([]*enginev1.Withdrawal{nil})
require.ErrorContains(t, "withdrawal is nil", err)
})
t.Run("no-op on empty input", func(t *testing.T) {
st := &BeaconState{
version: version.Gloas,
dirtyFields: make(map[types.FieldIndex]bool),
dirtyIndices: make(map[types.FieldIndex][]uint64),
rebuildTrie: make(map[types.FieldIndex]bool),
}
require.NoError(t, st.DecreaseWithdrawalBalances(nil))
require.Equal(t, 0, len(st.dirtyFields))
require.Equal(t, 0, len(st.dirtyIndices))
})
t.Run("updates validator and builder balances and tracks dirty indices", func(t *testing.T) {
st := &BeaconState{
version: version.Gloas,
dirtyFields: make(map[types.FieldIndex]bool),
dirtyIndices: make(map[types.FieldIndex][]uint64),
rebuildTrie: make(map[types.FieldIndex]bool),
sharedFieldReferences: map[types.FieldIndex]*stateutil.Reference{
types.Builders: stateutil.NewRef(1),
},
balancesMultiValue: NewMultiValueBalances([]uint64{100, 200, 300}),
builders: []*ethpb.Builder{
{Balance: 1000},
{Balance: 50},
},
}
withdrawals := []*enginev1.Withdrawal{
{ValidatorIndex: primitives.ValidatorIndex(1), Amount: 20},
{ValidatorIndex: primitives.BuilderIndex(1).ToValidatorIndex(), Amount: 30},
{ValidatorIndex: primitives.ValidatorIndex(2), Amount: 400},
{ValidatorIndex: primitives.BuilderIndex(0).ToValidatorIndex(), Amount: 2000},
{ValidatorIndex: primitives.ValidatorIndex(0), Amount: 0},
}
require.NoError(t, st.DecreaseWithdrawalBalances(withdrawals))
require.DeepEqual(t, []uint64{100, 180, 0}, st.Balances())
require.Equal(t, primitives.Gwei(0), st.builders[0].Balance)
require.Equal(t, primitives.Gwei(20), st.builders[1].Balance)
require.Equal(t, true, st.dirtyFields[types.Balances])
require.Equal(t, true, st.dirtyFields[types.Builders])
require.DeepEqual(t, []uint64{1, 2}, st.dirtyIndices[types.Balances])
require.DeepEqual(t, []uint64{1, 0}, st.dirtyIndices[types.Builders])
})
t.Run("returns error on builder index out of range", func(t *testing.T) {
st := &BeaconState{
version: version.Gloas,
dirtyFields: make(map[types.FieldIndex]bool),
dirtyIndices: make(map[types.FieldIndex][]uint64),
rebuildTrie: make(map[types.FieldIndex]bool),
sharedFieldReferences: map[types.FieldIndex]*stateutil.Reference{
types.Builders: stateutil.NewRef(1),
},
builders: []*ethpb.Builder{{Balance: 5}},
}
err := st.DecreaseWithdrawalBalances([]*enginev1.Withdrawal{
{ValidatorIndex: primitives.BuilderIndex(2).ToValidatorIndex(), Amount: 1},
})
require.ErrorContains(t, "out of range", err)
require.Equal(t, false, st.dirtyFields[types.Builders])
require.Equal(t, 0, len(st.dirtyIndices[types.Builders]))
})
}
func TestDequeueBuilderPendingWithdrawals(t *testing.T) {
t.Run("previous fork returns expected error", func(t *testing.T) {
st := &BeaconState{version: version.Fulu}
err := st.DequeueBuilderPendingWithdrawals(1)
require.ErrorContains(t, "DequeueBuilderPendingWithdrawals", err)
})
t.Run("returns error when dequeueing more than length", func(t *testing.T) {
st := &BeaconState{
version: version.Gloas,
dirtyFields: make(map[types.FieldIndex]bool),
sharedFieldReferences: map[types.FieldIndex]*stateutil.Reference{
types.BuilderPendingWithdrawals: stateutil.NewRef(1),
},
builderPendingWithdrawals: []*ethpb.BuilderPendingWithdrawal{{Amount: 1}},
}
err := st.DequeueBuilderPendingWithdrawals(2)
require.ErrorContains(t, "cannot dequeue more builder withdrawals", err)
require.Equal(t, 1, len(st.builderPendingWithdrawals))
require.Equal(t, false, st.dirtyFields[types.BuilderPendingWithdrawals])
})
t.Run("no-op on zero", func(t *testing.T) {
st := &BeaconState{
version: version.Gloas,
dirtyFields: make(map[types.FieldIndex]bool),
sharedFieldReferences: map[types.FieldIndex]*stateutil.Reference{
types.BuilderPendingWithdrawals: stateutil.NewRef(1),
},
builderPendingWithdrawals: []*ethpb.BuilderPendingWithdrawal{{Amount: 1}},
}
require.NoError(t, st.DequeueBuilderPendingWithdrawals(0))
require.Equal(t, 1, len(st.builderPendingWithdrawals))
require.Equal(t, false, st.dirtyFields[types.BuilderPendingWithdrawals])
require.Equal(t, false, st.rebuildTrie[types.BuilderPendingWithdrawals])
})
t.Run("dequeues and marks dirty", func(t *testing.T) {
st := &BeaconState{
version: version.Gloas,
dirtyFields: make(map[types.FieldIndex]bool),
sharedFieldReferences: map[types.FieldIndex]*stateutil.Reference{
types.BuilderPendingWithdrawals: stateutil.NewRef(1),
},
builderPendingWithdrawals: []*ethpb.BuilderPendingWithdrawal{
{Amount: 1},
{Amount: 2},
{Amount: 3},
},
rebuildTrie: make(map[types.FieldIndex]bool),
}
require.NoError(t, st.DequeueBuilderPendingWithdrawals(2))
require.Equal(t, 1, len(st.builderPendingWithdrawals))
require.Equal(t, primitives.Gwei(3), st.builderPendingWithdrawals[0].Amount)
require.Equal(t, true, st.dirtyFields[types.BuilderPendingWithdrawals])
require.Equal(t, true, st.rebuildTrie[types.BuilderPendingWithdrawals])
})
t.Run("copy-on-write preserves shared state", func(t *testing.T) {
sharedRef := stateutil.NewRef(2)
sharedWithdrawals := []*ethpb.BuilderPendingWithdrawal{
{Amount: 1},
{Amount: 2},
{Amount: 3},
}
st1 := &BeaconState{
version: version.Gloas,
dirtyFields: make(map[types.FieldIndex]bool),
sharedFieldReferences: map[types.FieldIndex]*stateutil.Reference{
types.BuilderPendingWithdrawals: sharedRef,
},
builderPendingWithdrawals: sharedWithdrawals,
rebuildTrie: make(map[types.FieldIndex]bool),
}
st2 := &BeaconState{
sharedFieldReferences: map[types.FieldIndex]*stateutil.Reference{
types.BuilderPendingWithdrawals: sharedRef,
},
builderPendingWithdrawals: sharedWithdrawals,
}
require.NoError(t, st1.DequeueBuilderPendingWithdrawals(2))
require.Equal(t, primitives.Gwei(3), st1.builderPendingWithdrawals[0].Amount)
require.Equal(t, 3, len(st2.builderPendingWithdrawals))
require.Equal(t, primitives.Gwei(1), st2.builderPendingWithdrawals[0].Amount)
require.Equal(t, uint(1), st1.sharedFieldReferences[types.BuilderPendingWithdrawals].Refs())
require.Equal(t, uint(1), st2.sharedFieldReferences[types.BuilderPendingWithdrawals].Refs())
})
}
func TestSetNextWithdrawalBuilderIndex(t *testing.T) {
t.Run("previous fork returns expected error", func(t *testing.T) {
st := &BeaconState{version: version.Fulu}
err := st.SetNextWithdrawalBuilderIndex(1)
require.ErrorContains(t, "SetNextWithdrawalBuilderIndex", err)
})
t.Run("sets and marks dirty", func(t *testing.T) {
st := &BeaconState{
version: version.Gloas,
dirtyFields: make(map[types.FieldIndex]bool),
}
require.NoError(t, st.SetNextWithdrawalBuilderIndex(7))
require.Equal(t, primitives.BuilderIndex(7), st.nextWithdrawalBuilderIndex)
require.Equal(t, true, st.dirtyFields[types.NextWithdrawalBuilderIndex])
})
}
func TestAppendBuilderPendingWithdrawal_CopyOnWrite(t *testing.T) {
wd := &ethpb.BuilderPendingWithdrawal{
FeeRecipient: make([]byte, 20),

View File

@@ -1027,10 +1027,10 @@ func TestGetVerifyingStateEdgeCases(t *testing.T) {
sc: signatureCache,
sr: &mockStateByRooter{sbr: sbrErrorIfCalled(t)}, // Should not be called
hsp: &mockHeadStateProvider{
headRoot: parentRoot[:], // Same as parent
headSlot: 32, // Epoch 1
headState: fuluState.Copy(), // HeadState (not ReadOnly) for ProcessSlots
headStateReadOnly: nil, // Should not use ReadOnly path
headRoot: parentRoot[:], // Same as parent
headSlot: 32, // Epoch 1
headState: fuluState.Copy(), // HeadState (not ReadOnly) for ProcessSlots
headStateReadOnly: nil, // Should not use ReadOnly path
},
fc: &mockForkchoicer{
// Return same root for both to simulate same chain
@@ -1045,8 +1045,8 @@ func TestGetVerifyingStateEdgeCases(t *testing.T) {
// Wrap to detect HeadState call
originalHsp := initializer.shared.hsp.(*mockHeadStateProvider)
wrappedHsp := &mockHeadStateProvider{
headRoot: originalHsp.headRoot,
headSlot: originalHsp.headSlot,
headRoot: originalHsp.headRoot,
headSlot: originalHsp.headSlot,
headState: originalHsp.headState,
}
initializer.shared.hsp = &headStateCallTracker{

View File

@@ -1,3 +0,0 @@
### Added
- Set beacon node options after reading the config file.

View File

@@ -1,3 +0,0 @@
### Fixed
- Fixed a bug where `cmd/beacon-chain/execution` was being ignored by `hack/gen-logs.sh` due to a `.gitignore` rule.

View File

@@ -1,3 +0,0 @@
### Changed
- Fixed the logging issue described in #16314.

View File

@@ -1,7 +0,0 @@
### Changed
- gRPC fallback now matches the REST API implementation and will also check for and connect only to synced nodes.
### Removed
- gRPC resolver for load balancing; the new implementation matches the REST API's, so the resolver is removed and host selection is handled the same way for consistency.

View File

@@ -1,11 +0,0 @@
### Ignored
- Moved the healthy-node discovery logic to the connection provider, plus various naming cleanups.
### Changed
- Improved node fallback logs.
### Fixed
- A potential race condition when switching hosts quickly and reconnecting to the same host on an old connection.

View File

@@ -1,3 +0,0 @@
### Changed
- Improved integrations with ethspecify so specrefs can be used throughout the codebase.

View File

@@ -1,2 +0,0 @@
### Ignored
- Remove unused map in forkchoice.

View File

@@ -1,3 +0,0 @@
### Ignored
- Updated golangci to run lint on tests too.

View File

@@ -1,3 +0,0 @@
### Ignored
- Add handy documentation for SSZ Query package (`encoding/ssz/query`).

View File

@@ -0,0 +1,2 @@
### Added
- Add modified process withdrawals for Gloas.

View File

@@ -1,2 +0,0 @@
### Ignored
- Run go fmt

View File

@@ -1,9 +1,5 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package execution
import "github.com/sirupsen/logrus"
// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "cmd/beacon-chain/execution")
var log = logrus.WithField("prefix", "execution")

View File

@@ -356,11 +356,6 @@ var (
Usage: "A comma-separated list of exponents (of 2) in decreasing order, defining the state diff hierarchy levels. The last exponent must be greater than or equal to 5.",
Value: cli.NewIntSlice(21, 18, 16, 13, 11, 9, 5),
}
// StateDiffValidateOnStartup validates state diff data on startup.
StateDiffValidateOnStartup = &cli.BoolFlag{
Name: "disable-hdiff-validate-on-startup",
Usage: "Disables state-diff validation on startup (enabled by default).",
}
// DisableEphemeralLogFile disables the 24 hour debug log file.
DisableEphemeralLogFile = &cli.BoolFlag{
Name: "disable-ephemeral-log-file",

View File

@@ -9,25 +9,24 @@ import (
"github.com/urfave/cli/v2"
)
const MaxStateDiffExponent = 30
const maxStateDiffExponents = 30
// GlobalFlags specifies all the global flags for the
// beacon node.
type GlobalFlags struct {
StateDiffValidateOnStartup bool
Supernode bool
DisableGetBlobsV2 bool
SemiSupernode bool
SubscribeToAllSubnets bool
BlobBatchLimitBurstFactor int
DataColumnBatchLimit int
BlockBatchLimit int
MaxConcurrentDials int
MinimumPeersPerSubnet int
Supernode bool
SemiSupernode bool
DisableGetBlobsV2 bool
MinimumSyncPeers int
DataColumnBatchLimitBurstFactor int
MinimumPeersPerSubnet int
MaxConcurrentDials int
BlockBatchLimit int
BlockBatchLimitBurstFactor int
BlobBatchLimit int
BlobBatchLimitBurstFactor int
DataColumnBatchLimit int
DataColumnBatchLimitBurstFactor int
StateDiffExponents []int
}
@@ -81,7 +80,6 @@ func ConfigureGlobalFlags(ctx *cli.Context) error {
// State-diff-exponents
cfg.StateDiffExponents = ctx.IntSlice(StateDiffExponents.Name)
cfg.StateDiffValidateOnStartup = !ctx.Bool(StateDiffValidateOnStartup.Name)
if features.Get().EnableStateDiff {
if err := validateStateDiffExponents(cfg.StateDiffExponents); err != nil {
return err
@@ -90,9 +88,6 @@ func ConfigureGlobalFlags(ctx *cli.Context) error {
if ctx.IsSet(StateDiffExponents.Name) {
log.Warn("--state-diff-exponents is set but --enable-state-diff is not; the value will be ignored.")
}
if ctx.IsSet(StateDiffValidateOnStartup.Name) {
log.Warn("--disable-hdiff-validate-on-startup is set but --enable-state-diff is not; the value will be ignored.")
}
}
cfg.BlockBatchLimit = ctx.Int(BlockBatchLimit.Name)
@@ -137,7 +132,7 @@ func validateStateDiffExponents(exponents []int) error {
if exponents[length-1] < 5 {
return errors.New("the last state diff exponent must be at least 5")
}
prev := MaxStateDiffExponent + 1
prev := maxStateDiffExponents + 1
for _, exp := range exponents {
if exp >= prev {
return errors.New("state diff exponents must be in strictly decreasing order, and each exponent must be <= 30")

View File

@@ -161,7 +161,6 @@ var appFlags = []cli.Flag{
dasFlags.BlobRetentionEpochFlag,
flags.BatchVerifierLimit,
flags.StateDiffExponents,
flags.StateDiffValidateOnStartup,
flags.DisableEphemeralLogFile,
}
@@ -189,8 +188,8 @@ func before(ctx *cli.Context) error {
return errors.Wrap(err, "failed to parse log vmodule")
}
// set the global logging level and data
logs.SetLoggingLevelAndData(verbosityLevel, vmodule, maxLevel, ctx.Bool(flags.DisableEphemeralLogFile.Name))
// set the global logging level to allow for the highest verbosity requested
logs.SetLoggingLevel(max(verbosityLevel, maxLevel))
format := ctx.String(cmd.LogFormat.Name)
switch format {
@@ -211,7 +210,6 @@ func before(ctx *cli.Context) error {
Formatter: formatter,
Writer: os.Stderr,
AllowedLevels: logrus.AllLevels[:max(verbosityLevel, maxLevel)+1],
Identifier: logs.LogTargetUser,
})
case "fluentd":
f := joonix.NewFormatter()
@@ -369,8 +367,17 @@ func startNode(ctx *cli.Context, cancel context.CancelFunc) error {
backfill.BeaconNodeOptions,
das.BeaconNodeOptions,
}
for _, of := range optFuncs {
ofo, err := of(ctx)
if err != nil {
return err
}
if ofo != nil {
opts = append(opts, ofo...)
}
}
beacon, err := node.New(ctx, cancel, optFuncs, opts...)
beacon, err := node.New(ctx, cancel, opts...)
if err != nil {
return fmt.Errorf("unable to start beacon node: %w", err)
}

View File

@@ -75,7 +75,6 @@ var appHelpFlagGroups = []flagGroup{
flags.RPCPort,
flags.BatchVerifierLimit,
flags.StateDiffExponents,
flags.StateDiffValidateOnStartup,
},
},
{

View File

@@ -164,8 +164,8 @@ func main() {
return errors.Wrap(err, "failed to parse log vmodule")
}
// set the global logging level and data
logs.SetLoggingLevelAndData(verbosityLevel, vmodule, maxLevel, ctx.Bool(flags.DisableEphemeralLogFile.Name))
// set the global logging level to allow for the highest verbosity requested
logs.SetLoggingLevel(max(maxLevel, verbosityLevel))
logFileName := ctx.String(cmd.LogFileName.Name)
@@ -188,7 +188,6 @@ func main() {
Formatter: formatter,
Writer: os.Stderr,
AllowedLevels: logrus.AllLevels[:max(verbosityLevel, maxLevel)+1],
Identifier: logs.LogTargetUser,
})
case "fluentd":
f := joonix.NewFormatter()

View File

@@ -130,6 +130,7 @@ type BeaconChainConfig struct {
MaxWithdrawalsPerPayload uint64 `yaml:"MAX_WITHDRAWALS_PER_PAYLOAD" spec:"true"` // MaxWithdrawalsPerPayload defines the maximum number of withdrawals in a block.
MaxBlsToExecutionChanges uint64 `yaml:"MAX_BLS_TO_EXECUTION_CHANGES" spec:"true"` // MaxBlsToExecutionChanges defines the maximum number of BLS-to-execution-change objects in a block.
MaxValidatorsPerWithdrawalsSweep uint64 `yaml:"MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP" spec:"true"` // MaxValidatorsPerWithdrawalsSweep bounds the size of the sweep searching for withdrawals per slot.
MaxBuildersPerWithdrawalsSweep uint64 `yaml:"MAX_BUILDERS_PER_WITHDRAWALS_SWEEP" spec:"true"` // MaxBuildersPerWithdrawalsSweep bounds the size of the builder withdrawals sweep per slot.
// BLS domain values.
DomainBeaconProposer [4]byte `yaml:"DOMAIN_BEACON_PROPOSER" spec:"true"` // DomainBeaconProposer defines the BLS signature domain for beacon proposal verification.

View File

@@ -194,6 +194,7 @@ func ConfigToYaml(cfg *BeaconChainConfig) []byte {
fmt.Sprintf("SHARD_COMMITTEE_PERIOD: %d", cfg.ShardCommitteePeriod),
fmt.Sprintf("MIN_VALIDATOR_WITHDRAWABILITY_DELAY: %d", cfg.MinValidatorWithdrawabilityDelay),
fmt.Sprintf("MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP: %d", cfg.MaxValidatorsPerWithdrawalsSweep),
fmt.Sprintf("MAX_BUILDERS_PER_WITHDRAWALS_SWEEP: %d", cfg.MaxBuildersPerWithdrawalsSweep),
fmt.Sprintf("MAX_SEED_LOOKAHEAD: %d", cfg.MaxSeedLookahead),
fmt.Sprintf("EJECTION_BALANCE: %d", cfg.EjectionBalance),
fmt.Sprintf("MIN_PER_EPOCH_CHURN_LIMIT: %d", cfg.MinPerEpochChurnLimit),

View File

@@ -175,6 +175,7 @@ var mainnetBeaconConfig = &BeaconChainConfig{
MaxWithdrawalsPerPayload: 16,
MaxBlsToExecutionChanges: 16,
MaxValidatorsPerWithdrawalsSweep: 16384,
MaxBuildersPerWithdrawalsSweep: 16384,
// BLS domain values.
DomainBeaconProposer: bytesutil.Uint32ToBytes4(0x00000000),

View File

@@ -94,6 +94,7 @@ func compareConfigs(t *testing.T, expected, actual *params.BeaconChainConfig) {
require.DeepEqual(t, expected.MaxDeposits, actual.MaxDeposits)
require.DeepEqual(t, expected.MaxVoluntaryExits, actual.MaxVoluntaryExits)
require.DeepEqual(t, expected.MaxWithdrawalsPerPayload, actual.MaxWithdrawalsPerPayload)
require.DeepEqual(t, expected.MaxBuildersPerWithdrawalsSweep, actual.MaxBuildersPerWithdrawalsSweep)
require.DeepEqual(t, expected.DomainBeaconProposer, actual.DomainBeaconProposer)
require.DeepEqual(t, expected.DomainRandao, actual.DomainRandao)
require.DeepEqual(t, expected.DomainBeaconAttester, actual.DomainBeaconAttester)

View File

@@ -13,6 +13,16 @@ var _ fssz.Unmarshaler = (*BuilderIndex)(nil)
// BuilderIndex is an index into the builder registry.
type BuilderIndex uint64
// ToValidatorIndex sets the builder flag on a builder index.
//
// Spec v1.6.1 (pseudocode):
// def convert_builder_index_to_validator_index(builder_index: BuilderIndex) -> ValidatorIndex:
//
// return ValidatorIndex(builder_index | BUILDER_INDEX_FLAG)
func (b BuilderIndex) ToValidatorIndex() ValidatorIndex {
return ValidatorIndex(uint64(b) | BuilderIndexFlag)
}
// HashTreeRoot returns the SSZ hash tree root of the index.
func (b BuilderIndex) HashTreeRoot() ([32]byte, error) {
return fssz.HashWithDefaultHasher(b)

View File

@@ -10,9 +10,34 @@ var _ fssz.HashRoot = (ValidatorIndex)(0)
var _ fssz.Marshaler = (*ValidatorIndex)(nil)
var _ fssz.Unmarshaler = (*ValidatorIndex)(nil)
// BuilderIndexFlag marks a ValidatorIndex as a BuilderIndex when the bit is set.
//
// Spec v1.6.1: BUILDER_INDEX_FLAG.
const BuilderIndexFlag uint64 = 1 << 40
// ValidatorIndex in eth2.
type ValidatorIndex uint64
// IsBuilderIndex returns true when the BuilderIndex flag is set on the validator index.
//
// Spec v1.6.1 (pseudocode):
// def is_builder_index(validator_index: ValidatorIndex) -> bool:
//
// return (validator_index & BUILDER_INDEX_FLAG) != 0
func (v ValidatorIndex) IsBuilderIndex() bool {
return uint64(v)&BuilderIndexFlag != 0
}
// ToBuilderIndex strips the builder flag from a validator index.
//
// Spec v1.6.1 (pseudocode):
// def convert_validator_index_to_builder_index(validator_index: ValidatorIndex) -> BuilderIndex:
//
// return BuilderIndex(validator_index & ~BUILDER_INDEX_FLAG)
func (v ValidatorIndex) ToBuilderIndex() BuilderIndex {
return BuilderIndex(uint64(v) & ^BuilderIndexFlag)
}
// Div divides validator index by x.
// This method panics if dividing by zero!
func (v ValidatorIndex) Div(x uint64) ValidatorIndex {

View File

@@ -33,3 +33,32 @@ func TestValidatorIndex_Casting(t *testing.T) {
}
})
}
func TestValidatorIndex_BuilderIndexFlagConversions(t *testing.T) {
base := uint64(42)
unflagged := ValidatorIndex(base)
if unflagged.IsBuilderIndex() {
t.Fatalf("expected unflagged validator index to not be a builder index")
}
if got, want := unflagged.ToBuilderIndex(), BuilderIndex(base); got != want {
t.Fatalf("unexpected builder index: got %d want %d", got, want)
}
flagged := ValidatorIndex(base | BuilderIndexFlag)
if !flagged.IsBuilderIndex() {
t.Fatalf("expected flagged validator index to be a builder index")
}
if got, want := flagged.ToBuilderIndex(), BuilderIndex(base); got != want {
t.Fatalf("unexpected builder index: got %d want %d", got, want)
}
builder := BuilderIndex(base)
roundTrip := builder.ToValidatorIndex()
if !roundTrip.IsBuilderIndex() {
t.Fatalf("expected round-tripped validator index to be a builder index")
}
if got, want := roundTrip.ToBuilderIndex(), builder; got != want {
t.Fatalf("unexpected round-trip builder index: got %d want %d", got, want)
}
}

View File

@@ -1,190 +0,0 @@
# SSZ Query Package
The `encoding/ssz/query` package provides a system for analyzing and querying SSZ ([Simple Serialize](https://github.com/ethereum/consensus-specs/blob/master/ssz/simple-serialize.md)) data structures, as well as generating Merkle proofs from them. It enables runtime analysis of SSZ-serialized Go objects with reflection, path-based queries through nested structures, generalized index calculation, and Merkle proof generation.
This package is designed to be generic. It operates on arbitrary SSZ-serialized Go values at runtime, so the same query/proof machinery applies equally to any SSZ type, including the BeaconState/BeaconBlock.
## Usage Example
```go
// 1. Analyze an SSZ object
block := &ethpb.BeaconBlock{...}
info, err := query.AnalyzeObject(block)
// 2. Parse a path
path, err := query.ParsePath(".body.attestations[0].data.slot")
// 3. Get the generalized index
gindex, err := query.GetGeneralizedIndexFromPath(info, path)
// 4. Generate a Merkle proof
proof, err := info.Prove(gindex)
// 5. Get offset and length to slice the SSZ-encoded bytes
sszBytes, _ := block.MarshalSSZ()
_, offset, length, err := query.CalculateOffsetAndLength(info, path)
// slotBytes contains the SSZ-encoded value at the queried path
slotBytes := sszBytes[offset : offset+length]
```
## Exported API
The main exported API consists of:
```go
// AnalyzeObject analyzes an SSZ object and returns its structural information
func AnalyzeObject(obj SSZObject) (*SszInfo, error)
// ParsePath parses a path string like ".field1.field2[0].field3"
func ParsePath(rawPath string) (Path, error)
// CalculateOffsetAndLength computes byte offset and length for a path within an SSZ object
func CalculateOffsetAndLength(sszInfo *SszInfo, path Path) (*SszInfo, uint64, uint64, error)
// GetGeneralizedIndexFromPath calculates the generalized index for a given path
func GetGeneralizedIndexFromPath(info *SszInfo, path Path) (uint64, error)
// Prove generates a Merkle proof for a target generalized index
func (s *SszInfo) Prove(gindex uint64) (*fastssz.Proof, error)
```
## Type System
### SSZ Types
The package now supports [all standard SSZ types](https://github.com/ethereum/consensus-specs/blob/master/ssz/simple-serialize.md#typing) except `ProgressiveList`, `ProgressiveContainer`, `ProgressiveBitlist`, `Union`, and `CompatibleUnion`.
### Core Data Structures
#### `SszInfo`
The `SszInfo` structure contains complete structural metadata for an SSZ type:
```go
type SszInfo struct {
sszType SszType // SSZ Type classification
typ reflect.Type // Go reflect.Type
source SSZObject // Original SSZObject reference. Mostly used for reusing SSZ methods like `HashTreeRoot`.
isVariable bool // True if contains variable-size fields
// Composite types have corresponding metadata. Other fields would be nil except for the current type.
containerInfo *containerInfo
listInfo *listInfo
vectorInfo *vectorInfo
bitlistInfo *bitlistInfo
bitvectorInfo *bitvectorInfo
}
```
#### `Path`
The `Path` structure represents navigation paths through SSZ structures. It supports accessing a field by name, accessing an element by index (for list/vector types), and querying the length of homogeneous collection types. The `ParsePath` function parses a raw string into a `Path` instance, which is commonly used in other APIs like `CalculateOffsetAndLength` and `GetGeneralizedIndexFromPath`. A short length-query example follows the struct definitions below.
```go
type Path struct {
Length bool // Flag for length queries (e.g., len(.field))
Elements []PathElement // Sequence of field accesses and indices
}
type PathElement struct {
Name string // Field name
Index *uint64 // list/vector index (nil if not an index access)
}
```
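For length queries specifically, the `Length` flag on the parsed `Path` is what distinguishes `len(...)` from ordinary field access. A minimal, hypothetical sketch: the import path is assumed from the package location named above, and `.validators` is only an illustrative field name.
```go
package main

import (
	"fmt"

	// Import path assumed from the package location `encoding/ssz/query`.
	"github.com/OffchainLabs/prysm/v7/encoding/ssz/query"
)

func main() {
	// len(...) queries set Path.Length on the parsed result.
	path, err := query.ParsePath("len(.validators)")
	if err != nil {
		panic(err)
	}
	fmt.Println(path.Length) // expected: true
}
```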
## Implementation Details
### Type Analysis (`analyzer.go`)
The `AnalyzeObject` function performs recursive type introspection using Go reflection:
1. **Type Inspection** - Examines Go `reflect.Value` to determine SSZ type
- Basic types (`uint8`, `uint16`, `uint32`, `uint64`, `bool`): `SSZType` constants
- Slices: Determined from struct tags (`ssz-size` for vectors, `ssz-max` for lists). There is a related [write-up](https://hackmd.io/@junsong/H101DKnwxl) regarding struct tags; a short tag example follows this list.
- Structs: Analyzed as Containers with field ordering from JSON tags
- Pointers: Dereferenced automatically
2. **Variable-Length Population** - Determines actual sizes at runtime
- For lists: Iterates elements, caches sizes for variable-element lists
- For containers: Recursively populates variable fields, adjusts offsets
- For bitlists: Decodes bit length from bitvector
3. **Offset Calculation** - Computes byte positions within serialized data
- Fixed-size fields: Offset = sum of preceding field sizes
- Variable-size fields: Offset stored as 4-byte pointer entries
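To make the tag-driven classification concrete, here is a hypothetical container (not an actual Prysm type): fields carrying `ssz-size` are treated as fixed-size vectors, fields carrying `ssz-max` as bounded lists, and plain Go integers map to SSZ basic types.
```go
package example

// exampleContainer is a hypothetical type used only to illustrate how the
// analyzer reads struct tags; field names and bounds are made up.
type exampleContainer struct {
	Slot   uint64   `json:"slot"`                // SSZ basic type: uint64
	Root   []byte   `json:"root" ssz-size:"32"`  // fixed size -> Vector[byte, 32]
	Values []uint64 `json:"values" ssz-max:"16"` // bounded -> List[uint64, 16]
}
```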
### Path Parsing (`path.go`)
The `ParsePath` function parses path strings with the following rules:
- **Dot notation**: `.field1.field2` for field access
- **Array indexing**: `[0]`, `[42]` for element access
- **Length queries**: `len(.field)` for list/vector lengths
- **Character set**: Only `[A-Za-z0-9._\[\]\(\)]` allowed
Example:
```go
path, _ := ParsePath(".nested.array_field[5].inner_field")
// Returns: Path{
// Elements: [
// PathElement{Name: "nested"},
// PathElement{Name: "array_field", Index: <Pointer to uint64(5)>},
// PathElement{Name: "inner_field"}
// ]
// }
```
### Generalized Index Calculation (`generalized_index.go`)
The generalized index is a tree position identifier. This package follows the [Ethereum consensus-specs](https://github.com/ethereum/consensus-specs/blob/master/ssz/merkle-proofs.md#generalized-merkle-tree-index) to calculate the generalized index.
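The tree arithmetic behind generalized indices is simple enough to show directly. This is a minimal sketch of the relations defined in the consensus specs (the root is gindex 1, node `g` has children `2g` and `2g+1`), not the package's internal code.
```go
package gindex

import "math/bits"

// Helpers mirroring the generalized-index relations from the consensus specs:
// the root is gindex 1, node g has children 2g and 2g+1, parent g/2, and
// sibling g^1 (flip the lowest bit).

func parent(g uint64) uint64     { return g / 2 }
func sibling(g uint64) uint64    { return g ^ 1 }
func leftChild(g uint64) uint64  { return 2 * g }
func rightChild(g uint64) uint64 { return 2*g + 1 }

// depth is the number of edges from the root down to g (the root has depth 0).
func depth(g uint64) int { return bits.Len64(g) - 1 }
```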
### Merkle Proof Generation (`merkle_proof.go`, `proof_collector.go`)
The `Prove` method generates Merkle proofs using a single-sweep merkleization algorithm:
#### Algorithm Overview
**Key Terms:**
- **Target gindex** (generalized index): The position of the SSZ element you want to prove, expressed as a generalized Merkle tree index. Stored in `Proof.Index`.
- Note: The generalized index for root is 1.
- **Registered gindices**: The set of tree positions whose node hashes must be captured during merkleization in order to later assemble the proof.
- **Sibling node**: The node that shares the same parent as another node.
- **Leaf value**: The 32-byte hash of the target node (the node being proven). Stored in `Proof.Leaf`.
**Phases:**
1. **Registration Phase** (`addTarget`)
> Goal: determine exactly which sibling hashes are needed for the proof.
- Record the target gindex as the proof target.
- Starting from the target node, walk the Merkle tree from the leaf (target gindex) to the root (gindex = 1).
- At each step:
- Compute and register the sibling gindex (`i XOR 1`) as “must collect”.
- Move to the parent (`i = i/2`).
- This produces the full set of registered gindices (the sibling nodes on the target-to-root path); a sketch of this walk appears after the list.
2. **Merkleization Phase** (`merkleize`)
> Goal: recursively merkleize the tree and capture the needed hashes.
- Recursively traverse the SSZ structure and compute Merkle tree node hashes from leaves to root.
- Whenever the traversal computes a node whose gindex is in the registered gindices, store that node's hash for later proof construction.
3. **Proof Assembly Phase** (`toProof`)
> Goal: create the final `fastssz.Proof` object in the correct format and order.
```go
// Proof represents a merkle proof against a general index.
type Proof struct {
Index int
Leaf []byte
Hashes [][]byte
}
```
- Set `Proof.Index` to the target gindex.
- Set `Proof.Leaf` to the 32-byte hash of the target node.
- Build `Proof.Hashes` by walking from the target node up to (but not including) the root:
- At node `i`, append the stored hash for the sibling (`i XOR 1`).
- Move to the parent (`i = i/2`).
- The resulting `Proof.Hashes` is ordered from the target level upward, containing one sibling hash per tree level on the path to the root.
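A minimal sketch of the two leaf-to-root walks described above. It assumes the merkleization phase has already captured the registered node hashes in a map keyed by gindex; the function names are illustrative, not the package's internals.
```go
package proofsketch

// registeredSiblings returns the gindices whose hashes must be captured while
// merkleizing, i.e. the sibling of every node on the target-to-root path.
func registeredSiblings(target uint64) []uint64 {
	var need []uint64
	for g := target; g > 1; g /= 2 { // stop before the root (gindex 1)
		need = append(need, g^1) // sibling shares the same parent
	}
	return need
}

// assembleHashes orders the captured sibling hashes from the target's level
// upward, matching the layout described for Proof.Hashes.
func assembleHashes(target uint64, captured map[uint64][]byte) [][]byte {
	var hashes [][]byte
	for g := target; g > 1; g /= 2 {
		hashes = append(hashes, captured[g^1])
	}
	return hashes
}
```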

View File

@@ -31,11 +31,6 @@ EXCLUDED_PATH_PREFIXES=(
".vscode"
)
# Gitignore overrides: paths that should still be scanned even if ignored by VCS.
GITIGNORE_OVERRIDES=(
"cmd/beacon-chain/execution"
)
# The logrus import path
LOGRUS_IMPORT="github.com/sirupsen/logrus"
# ----------------------------
@@ -75,14 +70,6 @@ rg_args=(
-0 # NUL-delimited output
)
if [[ ${#GITIGNORE_OVERRIDES[@]} -gt 0 ]]; then
# Disable VCS ignores so overrides are honored.
rg_args+=( --no-ignore-vcs )
for ov in "${GITIGNORE_OVERRIDES[@]}"; do
rg_args+=( --glob "$ov/**" )
done
fi
for ex in "${EXCLUDED_PATH_PREFIXES[@]}"; do
rg_args+=( --glob "!$ex/**" )
done

View File

@@ -6,28 +6,20 @@ import (
"github.com/sirupsen/logrus"
)
type HookIdentifier string
type WriterHook struct {
AllowedLevels []logrus.Level
Writer io.Writer
Formatter logrus.Formatter
Identifier HookIdentifier
}
func (hook *WriterHook) Levels() []logrus.Level {
if len(hook.AllowedLevels) == 0 {
if hook.AllowedLevels == nil || len(hook.AllowedLevels) == 0 {
return logrus.AllLevels
}
return hook.AllowedLevels
}
func (hook *WriterHook) Fire(entry *logrus.Entry) error {
val, ok := entry.Data[LogTargetField]
if ok && val != hook.Identifier {
return nil
}
line, err := hook.Formatter.Format(entry)
if err != nil {
return err

View File

@@ -17,43 +17,11 @@ import (
"gopkg.in/natefinch/lumberjack.v2"
)
var (
userVerbosity = logrus.InfoLevel
vmodule = make(map[string]logrus.Level)
)
var ephemeralLogFileVerbosity = logrus.DebugLevel
const (
ephemeralLogFileVerbosity = logrus.DebugLevel
LogTargetField = "log_target"
LogTargetEphemeral HookIdentifier = "ephemeral"
LogTargetUser HookIdentifier = "user"
)
// SetLoggingLevelAndData sets the base logging level for logrus.
func SetLoggingLevelAndData(baseVerbosity logrus.Level, vmoduleMap map[string]logrus.Level, maxVmoduleLevel logrus.Level, disableEphemeral bool) {
userVerbosity = baseVerbosity
vmodule = vmoduleMap
globalLevel := max(baseVerbosity, maxVmoduleLevel)
if !disableEphemeral {
globalLevel = max(globalLevel, ephemeralLogFileVerbosity)
}
logrus.SetLevel(globalLevel)
}
// PackageVerbosity returns the verbosity of a given package.
func PackageVerbosity(packagePath string) logrus.Level {
bestLen := 0
bestLevel := userVerbosity
for k, v := range vmodule {
if k == packagePath || strings.HasPrefix(packagePath, k+"/") {
if len(k) > bestLen {
bestLen = len(k)
bestLevel = v
}
}
}
return bestLevel
// SetLoggingLevel sets the base logging level for logrus.
func SetLoggingLevel(lvl logrus.Level) {
logrus.SetLevel(max(lvl, ephemeralLogFileVerbosity))
}
func addLogWriter(w io.Writer) {
@@ -100,7 +68,6 @@ func ConfigurePersistentLogging(logFileName string, format string, lvl logrus.Le
Formatter: formatter,
Writer: f,
AllowedLevels: logrus.AllLevels[:max(lvl, maxVmoduleLevel)+1],
Identifier: LogTargetUser,
})
logrus.Debug("File logging initialized")
@@ -134,7 +101,6 @@ func ConfigureEphemeralLogFile(datadirPath string, app string) error {
Formatter: formatter,
Writer: debugWriter,
AllowedLevels: logrus.AllLevels[:ephemeralLogFileVerbosity+1],
Identifier: LogTargetEphemeral,
})
logrus.WithField("path", logFilePath).Debug("Ephemeral log file initialized")

View File

@@ -26,21 +26,21 @@ func TestLifecycle(t *testing.T) {
port := 1000 + rand.Intn(1000)
prometheusService := NewService(t.Context(), fmt.Sprintf(":%d", port), nil)
prometheusService.Start()
// Actively wait until the service responds on /metrics (faster and less flaky than a fixed sleep)
deadline := time.Now().Add(3 * time.Second)
for {
if time.Now().After(deadline) {
t.Fatalf("metrics endpoint not ready within timeout")
}
resp, err := http.Get(fmt.Sprintf("http://localhost:%d/metrics", port))
if err == nil {
_ = resp.Body.Close()
if resp.StatusCode == http.StatusOK {
break
}
}
time.Sleep(50 * time.Millisecond)
}
// Actively wait until the service responds on /metrics (faster and less flaky than a fixed sleep)
deadline := time.Now().Add(3 * time.Second)
for {
if time.Now().After(deadline) {
t.Fatalf("metrics endpoint not ready within timeout")
}
resp, err := http.Get(fmt.Sprintf("http://localhost:%d/metrics", port))
if err == nil {
_ = resp.Body.Close()
if resp.StatusCode == http.StatusOK {
break
}
}
time.Sleep(50 * time.Millisecond)
}
// Query the service to ensure it really started.
resp, err := http.Get(fmt.Sprintf("http://localhost:%d/metrics", port))
@@ -49,18 +49,18 @@ func TestLifecycle(t *testing.T) {
err = prometheusService.Stop()
require.NoError(t, err)
// Actively wait until the service stops responding on /metrics
deadline = time.Now().Add(3 * time.Second)
for {
if time.Now().After(deadline) {
t.Fatalf("metrics endpoint still reachable after timeout")
}
_, err = http.Get(fmt.Sprintf("http://localhost:%d/metrics", port))
if err != nil {
break
}
time.Sleep(50 * time.Millisecond)
}
// Actively wait until the service stops responding on /metrics
deadline = time.Now().Add(3 * time.Second)
for {
if time.Now().After(deadline) {
t.Fatalf("metrics endpoint still reachable after timeout")
}
_, err = http.Get(fmt.Sprintf("http://localhost:%d/metrics", port))
if err != nil {
break
}
time.Sleep(50 * time.Millisecond)
}
// Query the service to ensure it really stopped.
_, err = http.Get(fmt.Sprintf("http://localhost:%d/metrics", port))

View File

@@ -334,7 +334,7 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *logrus.Entry, keys
_, err = fmt.Fprintf(b, "%s %s%s "+messageFormat, colorScheme.TimestampColor(timestamp), level, prefix, message)
}
for _, k := range keys {
if k != "package" && k != "log_target" {
if k != "package" {
v := entry.Data[k]
format := "%+v"

View File

@@ -2,38 +2,24 @@ version: v1.7.0-alpha.1
style: full
specrefs:
search_root: .
auto_standardize_names: true
auto_add_missing_entries: true
require_exceptions_have_fork: true
search_root: ..
files:
- specrefs/configs.yml
- specrefs/constants.yml
- specrefs/containers.yml
- specrefs/dataclasses.yml
- specrefs/functions.yml
- specrefs/presets.yml
- configs.yml
- constants.yml
- containers.yml
- dataclasses.yml
- functions.yml
- presets.yml
exceptions:
presets:
# gloas
# Not implemented: gloas (future fork)
- BUILDER_PENDING_WITHDRAWALS_LIMIT#gloas
- MAX_PAYLOAD_ATTESTATIONS#gloas
- PTC_SIZE#gloas
constants:
# phase0
- BASIS_POINTS#phase0
- ENDIANNESS#phase0
- MAX_CONCURRENT_REQUESTS#phase0
- UINT64_MAX#phase0
- UINT64_MAX_SQRT#phase0
# altair
- PARTICIPATION_FLAG_WEIGHTS#altair
# bellatrix
- SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY#bellatrix
# deneb
# Constants in the KZG library
- BLS_MODULUS#deneb
- BYTES_PER_COMMITMENT#deneb
- BYTES_PER_FIELD_ELEMENT#deneb
@@ -47,9 +33,18 @@ exceptions:
- PRIMITIVE_ROOT_OF_UNITY#deneb
- RANDOM_CHALLENGE_KZG_BATCH_DOMAIN#deneb
- RANDOM_CHALLENGE_KZG_CELL_BATCH_DOMAIN#fulu
# fulu
# Not implemented
- BASIS_POINTS#phase0
- ENDIANNESS#phase0
- MAX_CONCURRENT_REQUESTS#phase0
- PARTICIPATION_FLAG_WEIGHTS#altair
- SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY#bellatrix
- UINT256_MAX#fulu
# gloas
- UINT64_MAX#phase0
- UINT64_MAX_SQRT#phase0
# Not implemented: gloas (future fork)
- BUILDER_PAYMENT_THRESHOLD_DENOMINATOR#gloas
- BUILDER_PAYMENT_THRESHOLD_NUMERATOR#gloas
- BUILDER_WITHDRAWAL_PREFIX#gloas
@@ -66,62 +61,61 @@ exceptions:
- PTC_TIMELINESS_INDEX#gloas
configs:
# gloas
# Not implemented: gloas (future fork)
- AGGREGATE_DUE_BPS_GLOAS#gloas
- ATTESTATION_DUE_BPS_GLOAS#gloas
- CONTRIBUTION_DUE_BPS_GLOAS#gloas
- GLOAS_FORK_EPOCH#gloas
- GLOAS_FORK_VERSION#gloas
- MAX_REQUEST_PAYLOADS#gloas
- MIN_BUILDER_WITHDRAWABILITY_DELAY#gloas
- PAYLOAD_ATTESTATION_DUE_BPS#gloas
- SYNC_MESSAGE_DUE_BPS_GLOAS#gloas
- MIN_BUILDER_WITHDRAWABILITY_DELAY#gloas
ssz_objects:
# phase0
# Not implemented
- Eth1Block#phase0
# capella
- MatrixEntry#fulu
# Not implemented: capella
- LightClientBootstrap#capella
- LightClientFinalityUpdate#capella
- LightClientOptimisticUpdate#capella
- LightClientUpdate#capella
# fulu
- MatrixEntry#fulu
# gloas
# Not implemented: gloas (future fork)
- BeaconBlockBody#gloas
- BeaconState#gloas
- Builder#gloas
- BuilderPendingPayment#gloas
- BuilderPendingWithdrawal#gloas
- DataColumnSidecar#gloas
- ExecutionPayloadBid#gloas
- ExecutionPayloadEnvelope#gloas
- ExecutionPayloadBid#gloas
- ForkChoiceNode#gloas
- IndexedPayloadAttestation#gloas
- PayloadAttestation#gloas
- PayloadAttestationData#gloas
- PayloadAttestationMessage#gloas
- ProposerPreferences#gloas
- SignedExecutionPayloadBid#gloas
- SignedExecutionPayloadEnvelope#gloas
- SignedExecutionPayloadBid#gloas
- Builder#gloas
- ProposerPreferences#gloas
- SignedProposerPreferences#gloas
dataclasses:
# phase0
- LatestMessage#phase0
- Store#phase0
# altair
- LightClientStore#altair
# bellatrix
- OptimisticStore#bellatrix
# capella
- ExpectedWithdrawals#capella
- LightClientStore#capella
# electra
- ExpectedWithdrawals#electra
# fulu
# Not implemented
- BlobParameters#fulu
# gloas
- ExpectedWithdrawals#capella
- ExpectedWithdrawals#electra
- LatestMessage#phase0
- LightClientStore#altair
- OptimisticStore#bellatrix
- Store#phase0
# Not implemented: capella
- LightClientStore#capella
# Not implemented: gloas (future fork)
- ExpectedWithdrawals#gloas
- LatestMessage#gloas
- Store#gloas
@@ -181,12 +175,7 @@ exceptions:
- verify_cell_kzg_proof_batch#fulu
- verify_cell_kzg_proof_batch_impl#fulu
# phase0
- update_proposer_boost_root#phase0
- is_proposer_equivocation#phase0
- record_block_timeliness#phase0
- compute_proposer_score#phase0
- get_attestation_score#phase0
# Not implemented: phase0
- calculate_committee_fraction#phase0
- compute_fork_version#phase0
- compute_pulled_up_tip#phase0
@@ -232,7 +221,8 @@ exceptions:
- validate_on_attestation#phase0
- validate_target_epoch_against_current_time#phase0
- xor#phase0
# altair
# Not implemented: altair
- compute_merkle_proof#altair
- compute_sync_committee_period_at_slot#altair
- get_contribution_and_proof#altair
@@ -254,29 +244,27 @@ exceptions:
- process_sync_committee_contributions#altair
- set_or_append_list#altair
- validate_light_client_update#altair
# bellatrix
# Not implemented: bellatrix
- get_execution_payload#bellatrix
- is_merge_transition_block#bellatrix
- is_optimistic_candidate_block#bellatrix
- latest_verified_ancestor#bellatrix
- prepare_execution_payload#bellatrix
# capella
- apply_withdrawals#capella
- get_balance_after_withdrawals#capella
# Not implemented: capella
- get_lc_execution_root#capella
- get_validators_sweep_withdrawals#capella
- is_valid_light_client_header#capella
- prepare_execution_payload#capella
- process_epoch#capella
- update_next_withdrawal_index#capella
- update_next_withdrawal_validator_index#capella
- upgrade_lc_bootstrap_to_capella#capella
- upgrade_lc_finality_update_to_capella#capella
- upgrade_lc_header_to_capella#capella
- upgrade_lc_optimistic_update_to_capella#capella
- upgrade_lc_store_to_capella#capella
- upgrade_lc_update_to_capella#capella
# deneb
# Not implemented: deneb
- get_lc_execution_root#deneb
- is_valid_light_client_header#deneb
- prepare_execution_payload#deneb
@@ -286,34 +274,33 @@ exceptions:
- upgrade_lc_optimistic_update_to_deneb#deneb
- upgrade_lc_store_to_deneb#deneb
- upgrade_lc_update_to_deneb#deneb
# electra
# Not implemented: electra
- compute_weak_subjectivity_period#electra
- current_sync_committee_gindex_at_slot#electra
- finalized_root_gindex_at_slot#electra
- get_eth1_vote#electra
- get_lc_execution_root#electra
- get_pending_partial_withdrawals#electra
- get_validators_sweep_withdrawals#electra
- is_compounding_withdrawal_credential#electra
- is_eligible_for_partial_withdrawals#electra
- is_within_weak_subjectivity_period#electra
- next_sync_committee_gindex_at_slot#electra
- normalize_merkle_branch#electra
- prepare_execution_payload#electra
- update_pending_partial_withdrawals#electra
- upgrade_lc_bootstrap_to_electra#electra
- upgrade_lc_finality_update_to_electra#electra
- upgrade_lc_header_to_electra#electra
- upgrade_lc_optimistic_update_to_electra#electra
- upgrade_lc_store_to_electra#electra
- upgrade_lc_update_to_electra#electra
# fulu
# Not implemented: fulu
- compute_matrix#fulu
- get_blob_parameters#fulu
- get_data_column_sidecars_from_block#fulu
- get_data_column_sidecars_from_column_sidecar#fulu
- recover_matrix#fulu
# gloas
# Not implemented: gloas (future fork)
- compute_balance_weighted_acceptance#gloas
- compute_balance_weighted_selection#gloas
- compute_fork_version#gloas
@@ -381,36 +368,49 @@ exceptions:
- verify_execution_payload_bid_signature#gloas
- add_builder_to_registry#gloas
- apply_deposit_for_builder#gloas
- apply_withdrawals#capella
- apply_withdrawals#gloas
- can_builder_cover_bid#gloas
- compute_proposer_score#phase0
- convert_builder_index_to_validator_index#gloas
- convert_validator_index_to_builder_index#gloas
- get_attestation_score#gloas
- get_attestation_score#phase0
- get_balance_after_withdrawals#capella
- get_builder_from_deposit#gloas
- get_builder_withdrawals#gloas
- get_builders_sweep_withdrawals#gloas
- get_index_for_new_builder#gloas
- get_pending_balance_to_withdraw_for_builder#gloas
- get_pending_partial_withdrawals#electra
- get_proposer_preferences_signature#gloas
- get_upcoming_proposal_slots#gloas
- get_validators_sweep_withdrawals#capella
- get_validators_sweep_withdrawals#electra
- initiate_builder_exit#gloas
- is_active_builder#gloas
- is_builder_index#gloas
- is_eligible_for_partial_withdrawals#electra
- is_head_late#gloas
- is_head_weak#gloas
- is_parent_strong#gloas
- is_proposer_equivocation#phase0
- is_valid_proposal_slot#gloas
- process_deposit_request#gloas
- process_voluntary_exit#gloas
- record_block_timeliness#gloas
- record_block_timeliness#phase0
- should_apply_proposer_boost#gloas
- update_builder_pending_withdrawals#gloas
- update_next_withdrawal_builder_index#gloas
- update_next_withdrawal_index#capella
- update_next_withdrawal_validator_index#capella
- update_payload_expected_withdrawals#gloas
- update_pending_partial_withdrawals#electra
- update_proposer_boost_root#gloas
- update_proposer_boost_root#phase0
presets:
# gloas
- BUILDER_PENDING_WITHDRAWALS_LIMIT#gloas
- BUILDER_REGISTRY_LIMIT#gloas
- MAX_BUILDERS_PER_WITHDRAWALS_SWEEP#gloas

View File

@@ -11,12 +11,17 @@ Install `ethspecify` with the following command:
pipx install ethspecify
```
> [!NOTE]
> You can run `ethspecify <cmd>` in the `specrefs` directory or
> `ethspecify <cmd> --path=specrefs` from the project's root directory.
## Maintenance
When adding support for a new specification version, follow these steps:
0. Change directory into the `specrefs` directory.
1. Update the version in `.ethspecify.yml` configuration.
2. Run `ethspecify` to update/populate specrefs.
2. Run `ethspecify process` to update/populate specrefs.
3. Run `ethspecify check` to check specrefs.
4. If there are errors, use the error message as a guide to fix the issue. If
there are new specrefs with empty sources, implement/locate each item and

View File

@@ -1,4 +1,4 @@
- name: AGGREGATE_DUE_BPS#phase0
- name: AGGREGATE_DUE_BPS
sources:
- file: config/params/config.go
search: AggregateDueBPS\s+primitives.BP
@@ -8,14 +8,7 @@
AGGREGATE_DUE_BPS: uint64 = 6667
</spec>
- name: AGGREGATE_DUE_BPS_GLOAS#gloas
sources: []
spec: |
<spec config_var="AGGREGATE_DUE_BPS_GLOAS" fork="gloas" hash="34aa3164">
AGGREGATE_DUE_BPS_GLOAS: uint64 = 5000
</spec>
- name: ALTAIR_FORK_EPOCH#altair
- name: ALTAIR_FORK_EPOCH
sources:
- file: config/params/config.go
search: AltairForkEpoch\s+primitives.Epoch
@@ -25,7 +18,7 @@
ALTAIR_FORK_EPOCH: Epoch = 74240
</spec>
- name: ALTAIR_FORK_VERSION#altair
- name: ALTAIR_FORK_VERSION
sources:
- file: config/params/config.go
search: AltairForkVersion\s+\[]byte
@@ -35,7 +28,7 @@
ALTAIR_FORK_VERSION: Version = '0x01000000'
</spec>
- name: ATTESTATION_DUE_BPS#phase0
- name: ATTESTATION_DUE_BPS
sources:
- file: config/params/config.go
search: AttestationDueBPS\s+primitives.BP
@@ -45,14 +38,7 @@
ATTESTATION_DUE_BPS: uint64 = 3333
</spec>
- name: ATTESTATION_DUE_BPS_GLOAS#gloas
sources: []
spec: |
<spec config_var="ATTESTATION_DUE_BPS_GLOAS" fork="gloas" hash="3863c1ef">
ATTESTATION_DUE_BPS_GLOAS: uint64 = 2500
</spec>
- name: ATTESTATION_PROPAGATION_SLOT_RANGE#phase0
- name: ATTESTATION_PROPAGATION_SLOT_RANGE
sources:
- file: config/params/config.go
search: AttestationPropagationSlotRange\s+primitives.Slot
@@ -62,7 +48,7 @@
ATTESTATION_PROPAGATION_SLOT_RANGE = 32
</spec>
- name: ATTESTATION_SUBNET_COUNT#phase0
- name: ATTESTATION_SUBNET_COUNT
sources:
- file: config/params/config.go
search: AttestationSubnetCount\s+uint64
@@ -72,7 +58,7 @@
ATTESTATION_SUBNET_COUNT = 64
</spec>
- name: ATTESTATION_SUBNET_EXTRA_BITS#phase0
- name: ATTESTATION_SUBNET_EXTRA_BITS
sources:
- file: config/params/config.go
search: AttestationSubnetExtraBits\s+uint64
@@ -82,7 +68,7 @@
ATTESTATION_SUBNET_EXTRA_BITS = 0
</spec>
- name: ATTESTATION_SUBNET_PREFIX_BITS#phase0
- name: ATTESTATION_SUBNET_PREFIX_BITS
sources:
- file: config/params/config.go
search: AttestationSubnetPrefixBits\s+uint64
@@ -92,7 +78,7 @@
ATTESTATION_SUBNET_PREFIX_BITS: int = 6
</spec>
- name: BALANCE_PER_ADDITIONAL_CUSTODY_GROUP#fulu
- name: BALANCE_PER_ADDITIONAL_CUSTODY_GROUP
sources:
- file: config/params/config.go
search: BalancePerAdditionalCustodyGroup\s+uint64
@@ -102,7 +88,7 @@
BALANCE_PER_ADDITIONAL_CUSTODY_GROUP: Gwei = 32000000000
</spec>
- name: BELLATRIX_FORK_EPOCH#bellatrix
- name: BELLATRIX_FORK_EPOCH
sources:
- file: config/params/config.go
search: BellatrixForkEpoch\s+primitives.Epoch
@@ -112,7 +98,7 @@
BELLATRIX_FORK_EPOCH: Epoch = 144896
</spec>
- name: BELLATRIX_FORK_VERSION#bellatrix
- name: BELLATRIX_FORK_VERSION
sources:
- file: config/params/config.go
search: BellatrixForkVersion\s+\[]byte
@@ -122,7 +108,7 @@
BELLATRIX_FORK_VERSION: Version = '0x02000000'
</spec>
- name: BLOB_SCHEDULE#fulu
- name: BLOB_SCHEDULE
sources:
- file: config/params/config.go
search: BlobSchedule\s+\[]BlobScheduleEntry
@@ -141,7 +127,7 @@
)
</spec>
- name: BLOB_SIDECAR_SUBNET_COUNT#deneb
- name: BLOB_SIDECAR_SUBNET_COUNT
sources:
- file: config/params/config.go
search: BlobsidecarSubnetCount\s+uint64
@@ -151,7 +137,7 @@
BLOB_SIDECAR_SUBNET_COUNT = 6
</spec>
- name: BLOB_SIDECAR_SUBNET_COUNT_ELECTRA#electra
- name: BLOB_SIDECAR_SUBNET_COUNT_ELECTRA
sources:
- file: config/params/config.go
search: BlobsidecarSubnetCountElectra\s+uint64
@@ -161,7 +147,7 @@
BLOB_SIDECAR_SUBNET_COUNT_ELECTRA = 9
</spec>
- name: CAPELLA_FORK_EPOCH#capella
- name: CAPELLA_FORK_EPOCH
sources:
- file: config/params/config.go
search: CapellaForkEpoch\s+primitives.Epoch
@@ -171,7 +157,7 @@
CAPELLA_FORK_EPOCH: Epoch = 194048
</spec>
- name: CAPELLA_FORK_VERSION#capella
- name: CAPELLA_FORK_VERSION
sources:
- file: config/params/config.go
search: CapellaForkVersion\s+\[]byte
@@ -181,7 +167,7 @@
CAPELLA_FORK_VERSION: Version = '0x03000000'
</spec>
- name: CHURN_LIMIT_QUOTIENT#phase0
- name: CHURN_LIMIT_QUOTIENT
sources:
- file: config/params/config.go
search: ChurnLimitQuotient\s+uint64
@@ -191,7 +177,7 @@
CHURN_LIMIT_QUOTIENT: uint64 = 65536
</spec>
- name: CONTRIBUTION_DUE_BPS#altair
- name: CONTRIBUTION_DUE_BPS
sources:
- file: config/params/config.go
search: ContributionDueBPS\s+primitives.BP
@@ -201,14 +187,7 @@
CONTRIBUTION_DUE_BPS: uint64 = 6667
</spec>
- name: CONTRIBUTION_DUE_BPS_GLOAS#gloas
sources: []
spec: |
<spec config_var="CONTRIBUTION_DUE_BPS_GLOAS" fork="gloas" hash="0ead2ac1">
CONTRIBUTION_DUE_BPS_GLOAS: uint64 = 5000
</spec>
- name: CUSTODY_REQUIREMENT#fulu
- name: CUSTODY_REQUIREMENT
sources:
- file: config/params/config.go
search: CustodyRequirement\s+uint64.*yaml:"CUSTODY_REQUIREMENT"
@@ -218,7 +197,7 @@
CUSTODY_REQUIREMENT = 4
</spec>
- name: DATA_COLUMN_SIDECAR_SUBNET_COUNT#fulu
- name: DATA_COLUMN_SIDECAR_SUBNET_COUNT
sources:
- file: config/params/config.go
search: DataColumnSidecarSubnetCount\s+uint64
@@ -228,7 +207,7 @@
DATA_COLUMN_SIDECAR_SUBNET_COUNT = 128
</spec>
- name: DENEB_FORK_EPOCH#deneb
- name: DENEB_FORK_EPOCH
sources:
- file: config/params/config.go
search: DenebForkEpoch\s+primitives.Epoch
@@ -238,7 +217,7 @@
DENEB_FORK_EPOCH: Epoch = 269568
</spec>
- name: DENEB_FORK_VERSION#deneb
- name: DENEB_FORK_VERSION
sources:
- file: config/params/config.go
search: DenebForkVersion\s+\[]byte
@@ -248,7 +227,7 @@
DENEB_FORK_VERSION: Version = '0x04000000'
</spec>
- name: EJECTION_BALANCE#phase0
- name: EJECTION_BALANCE
sources:
- file: config/params/config.go
search: EjectionBalance\s+uint64
@@ -258,7 +237,7 @@
EJECTION_BALANCE: Gwei = 16000000000
</spec>
- name: ELECTRA_FORK_EPOCH#electra
- name: ELECTRA_FORK_EPOCH
sources:
- file: config/params/config.go
search: ElectraForkEpoch\s+primitives.Epoch
@@ -268,7 +247,7 @@
ELECTRA_FORK_EPOCH: Epoch = 364032
</spec>
- name: ELECTRA_FORK_VERSION#electra
- name: ELECTRA_FORK_VERSION
sources:
- file: config/params/config.go
search: ElectraForkVersion\s+\[]byte
@@ -278,7 +257,7 @@
ELECTRA_FORK_VERSION: Version = '0x05000000'
</spec>
- name: EPOCHS_PER_SUBNET_SUBSCRIPTION#phase0
- name: EPOCHS_PER_SUBNET_SUBSCRIPTION
sources:
- file: config/params/config.go
search: EpochsPerSubnetSubscription\s+uint64
@@ -288,7 +267,7 @@
EPOCHS_PER_SUBNET_SUBSCRIPTION = 256
</spec>
- name: ETH1_FOLLOW_DISTANCE#phase0
- name: ETH1_FOLLOW_DISTANCE
sources:
- file: config/params/config.go
search: Eth1FollowDistance\s+uint64
@@ -298,7 +277,7 @@
ETH1_FOLLOW_DISTANCE: uint64 = 2048
</spec>
- name: FULU_FORK_EPOCH#fulu
- name: FULU_FORK_EPOCH
sources:
- file: config/params/config.go
search: FuluForkEpoch\s+primitives.Epoch
@@ -308,7 +287,7 @@
FULU_FORK_EPOCH: Epoch = 411392
</spec>
- name: FULU_FORK_VERSION#fulu
- name: FULU_FORK_VERSION
sources:
- file: config/params/config.go
search: FuluForkVersion\s+\[]byte
@@ -318,7 +297,7 @@
FULU_FORK_VERSION: Version = '0x06000000'
</spec>
- name: GENESIS_DELAY#phase0
- name: GENESIS_DELAY
sources:
- file: config/params/config.go
search: GenesisDelay\s+uint64
@@ -328,7 +307,7 @@
GENESIS_DELAY: uint64 = 604800
</spec>
- name: GENESIS_FORK_VERSION#phase0
- name: GENESIS_FORK_VERSION
sources:
- file: config/params/config.go
search: GenesisForkVersion\s+\[]byte
@@ -338,21 +317,7 @@
GENESIS_FORK_VERSION: Version = '0x00000000'
</spec>
- name: GLOAS_FORK_EPOCH#gloas
sources: []
spec: |
<spec config_var="GLOAS_FORK_EPOCH" fork="gloas" hash="c25152cf">
GLOAS_FORK_EPOCH: Epoch = 18446744073709551615
</spec>
- name: GLOAS_FORK_VERSION#gloas
sources: []
spec: |
<spec config_var="GLOAS_FORK_VERSION" fork="gloas" hash="c1c5c007">
GLOAS_FORK_VERSION: Version = '0x07000000'
</spec>
- name: INACTIVITY_SCORE_BIAS#altair
- name: INACTIVITY_SCORE_BIAS
sources:
- file: config/params/config.go
search: InactivityScoreBias\s+uint64
@@ -362,7 +327,7 @@
INACTIVITY_SCORE_BIAS: uint64 = 4
</spec>
- name: INACTIVITY_SCORE_RECOVERY_RATE#altair
- name: INACTIVITY_SCORE_RECOVERY_RATE
sources:
- file: config/params/config.go
search: InactivityScoreRecoveryRate\s+uint64
@@ -372,7 +337,7 @@
INACTIVITY_SCORE_RECOVERY_RATE: uint64 = 16
</spec>
- name: MAXIMUM_GOSSIP_CLOCK_DISPARITY#phase0
- name: MAXIMUM_GOSSIP_CLOCK_DISPARITY
sources:
- file: config/params/config.go
search: MaximumGossipClockDisparity\s+uint64
@@ -382,7 +347,7 @@
MAXIMUM_GOSSIP_CLOCK_DISPARITY = 500
</spec>
- name: MAX_BLOBS_PER_BLOCK#deneb
- name: MAX_BLOBS_PER_BLOCK
sources:
- file: config/params/config.go
search: DeprecatedMaxBlobsPerBlock\s+int
@@ -392,7 +357,7 @@
MAX_BLOBS_PER_BLOCK: uint64 = 6
</spec>
- name: MAX_BLOBS_PER_BLOCK_ELECTRA#electra
- name: MAX_BLOBS_PER_BLOCK_ELECTRA
sources:
- file: config/params/config.go
search: DeprecatedMaxBlobsPerBlockElectra\s+int
@@ -402,7 +367,7 @@
MAX_BLOBS_PER_BLOCK_ELECTRA: uint64 = 9
</spec>
- name: MAX_PAYLOAD_SIZE#phase0
- name: MAX_PAYLOAD_SIZE
sources:
- file: config/params/config.go
search: MaxPayloadSize\s+uint64
@@ -412,7 +377,7 @@
MAX_PAYLOAD_SIZE = 10485760
</spec>
- name: MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT#deneb
- name: MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT
sources:
- file: config/params/config.go
search: MaxPerEpochActivationChurnLimit\s+uint64
@@ -422,7 +387,7 @@
MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: uint64 = 8
</spec>
- name: MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT#electra
- name: MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT
sources:
- file: config/params/config.go
search: MaxPerEpochActivationExitChurnLimit\s+uint64
@@ -432,7 +397,7 @@
MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT: Gwei = 256000000000
</spec>
- name: MAX_REQUEST_BLOB_SIDECARS#deneb
- name: MAX_REQUEST_BLOB_SIDECARS
sources:
- file: config/params/config.go
search: MaxRequestBlobSidecars\s+uint64
@@ -442,7 +407,7 @@
MAX_REQUEST_BLOB_SIDECARS = 768
</spec>
- name: MAX_REQUEST_BLOB_SIDECARS_ELECTRA#electra
- name: MAX_REQUEST_BLOB_SIDECARS_ELECTRA
sources:
- file: config/params/config.go
search: MaxRequestBlobSidecarsElectra\s+uint64
@@ -452,7 +417,7 @@
MAX_REQUEST_BLOB_SIDECARS_ELECTRA = 1152
</spec>
- name: MAX_REQUEST_BLOCKS#phase0
- name: MAX_REQUEST_BLOCKS
sources:
- file: config/params/config.go
search: MaxRequestBlocks\s+uint64
@@ -462,7 +427,7 @@
MAX_REQUEST_BLOCKS = 1024
</spec>
- name: MAX_REQUEST_BLOCKS_DENEB#deneb
- name: MAX_REQUEST_BLOCKS_DENEB
sources:
- file: config/params/config.go
search: MaxRequestBlocksDeneb\s+uint64
@@ -472,7 +437,7 @@
MAX_REQUEST_BLOCKS_DENEB = 128
</spec>
- name: MAX_REQUEST_DATA_COLUMN_SIDECARS#fulu
- name: MAX_REQUEST_DATA_COLUMN_SIDECARS
sources:
- file: config/params/config.go
search: MaxRequestDataColumnSidecars\s+uint64
@@ -482,14 +447,7 @@
MAX_REQUEST_DATA_COLUMN_SIDECARS = 16384
</spec>
- name: MAX_REQUEST_PAYLOADS#gloas
sources: []
spec: |
<spec config_var="MAX_REQUEST_PAYLOADS" fork="gloas" hash="23399ee5">
MAX_REQUEST_PAYLOADS = 128
</spec>
- name: MESSAGE_DOMAIN_INVALID_SNAPPY#phase0
- name: MESSAGE_DOMAIN_INVALID_SNAPPY
sources:
- file: config/params/config.go
search: MessageDomainInvalidSnappy\s+\[4\]byte
@@ -499,7 +457,7 @@
MESSAGE_DOMAIN_INVALID_SNAPPY: DomainType = '0x00000000'
</spec>
- name: MESSAGE_DOMAIN_VALID_SNAPPY#phase0
- name: MESSAGE_DOMAIN_VALID_SNAPPY
sources:
- file: config/params/config.go
search: MessageDomainValidSnappy\s+\[4\]byte
@@ -509,14 +467,7 @@
MESSAGE_DOMAIN_VALID_SNAPPY: DomainType = '0x01000000'
</spec>
- name: MIN_BUILDER_WITHDRAWABILITY_DELAY#gloas
sources: []
spec: |
<spec config_var="MIN_BUILDER_WITHDRAWABILITY_DELAY" fork="gloas" hash="d378428f">
MIN_BUILDER_WITHDRAWABILITY_DELAY: uint64 = 4096
</spec>
- name: MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS#deneb
- name: MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
sources:
- file: config/params/config.go
search: MinEpochsForBlobsSidecarsRequest\s+primitives.Epoch
@@ -526,7 +477,7 @@
MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS = 4096
</spec>
- name: MIN_EPOCHS_FOR_BLOCK_REQUESTS#phase0
- name: MIN_EPOCHS_FOR_BLOCK_REQUESTS
sources:
- file: config/params/config.go
search: MinEpochsForBlockRequests\s+uint64
@@ -536,7 +487,7 @@
MIN_EPOCHS_FOR_BLOCK_REQUESTS = 33024
</spec>
- name: MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS#fulu
- name: MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS
sources:
- file: config/params/config.go
search: MinEpochsForDataColumnSidecarsRequest\s+primitives.Epoch
@@ -546,7 +497,7 @@
MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS = 4096
</spec>
- name: MIN_GENESIS_ACTIVE_VALIDATOR_COUNT#phase0
- name: MIN_GENESIS_ACTIVE_VALIDATOR_COUNT
sources:
- file: config/params/config.go
search: MinGenesisActiveValidatorCount\s+uint64
@@ -556,7 +507,7 @@
MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: uint64 = 16384
</spec>
- name: MIN_GENESIS_TIME#phase0
- name: MIN_GENESIS_TIME
sources:
- file: config/params/config.go
search: MinGenesisTime\s+uint64
@@ -566,7 +517,7 @@
MIN_GENESIS_TIME: uint64 = 1606824000
</spec>
- name: MIN_PER_EPOCH_CHURN_LIMIT#phase0
- name: MIN_PER_EPOCH_CHURN_LIMIT
sources:
- file: config/params/config.go
search: MinPerEpochChurnLimit\s+uint64
@@ -576,7 +527,7 @@
MIN_PER_EPOCH_CHURN_LIMIT: uint64 = 4
</spec>
- name: MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA#electra
- name: MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA
sources:
- file: config/params/config.go
search: MinPerEpochChurnLimitElectra\s+uint64
@@ -586,7 +537,7 @@
MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA: Gwei = 128000000000
</spec>
- name: MIN_VALIDATOR_WITHDRAWABILITY_DELAY#phase0
- name: MIN_VALIDATOR_WITHDRAWABILITY_DELAY
sources:
- file: config/params/config.go
search: MinValidatorWithdrawabilityDelay\s+primitives.Epoch
@@ -596,7 +547,7 @@
MIN_VALIDATOR_WITHDRAWABILITY_DELAY: uint64 = 256
</spec>
- name: NUMBER_OF_CUSTODY_GROUPS#fulu
- name: NUMBER_OF_CUSTODY_GROUPS
sources:
- file: config/params/config.go
search: NumberOfCustodyGroups\s+uint64
@@ -606,14 +557,7 @@
NUMBER_OF_CUSTODY_GROUPS = 128
</spec>
- name: PAYLOAD_ATTESTATION_DUE_BPS#gloas
sources: []
spec: |
<spec config_var="PAYLOAD_ATTESTATION_DUE_BPS" fork="gloas" hash="17307d0e">
PAYLOAD_ATTESTATION_DUE_BPS: uint64 = 7500
</spec>
- name: PROPOSER_REORG_CUTOFF_BPS#phase0
- name: PROPOSER_REORG_CUTOFF_BPS
sources:
- file: config/params/config.go
search: ProposerReorgCutoffBPS\s+primitives.BP
@@ -623,7 +567,7 @@
PROPOSER_REORG_CUTOFF_BPS: uint64 = 1667
</spec>
- name: PROPOSER_SCORE_BOOST#phase0
- name: PROPOSER_SCORE_BOOST
sources:
- file: config/params/config.go
search: ProposerScoreBoost\s+uint64
@@ -633,7 +577,7 @@
PROPOSER_SCORE_BOOST: uint64 = 40
</spec>
- name: REORG_HEAD_WEIGHT_THRESHOLD#phase0
- name: REORG_HEAD_WEIGHT_THRESHOLD
sources:
- file: config/params/config.go
search: ReorgHeadWeightThreshold\s+uint64
@@ -643,7 +587,7 @@
REORG_HEAD_WEIGHT_THRESHOLD: uint64 = 20
</spec>
- name: REORG_MAX_EPOCHS_SINCE_FINALIZATION#phase0
- name: REORG_MAX_EPOCHS_SINCE_FINALIZATION
sources:
- file: config/params/config.go
search: ReorgMaxEpochsSinceFinalization\s+primitives.Epoch
@@ -653,7 +597,7 @@
REORG_MAX_EPOCHS_SINCE_FINALIZATION: Epoch = 2
</spec>
- name: REORG_PARENT_WEIGHT_THRESHOLD#phase0
- name: REORG_PARENT_WEIGHT_THRESHOLD
sources:
- file: config/params/config.go
search: ReorgParentWeightThreshold\s+uint64
@@ -663,7 +607,7 @@
REORG_PARENT_WEIGHT_THRESHOLD: uint64 = 160
</spec>
- name: SAMPLES_PER_SLOT#fulu
- name: SAMPLES_PER_SLOT
sources:
- file: config/params/config.go
search: SamplesPerSlot\s+uint64
@@ -673,7 +617,7 @@
SAMPLES_PER_SLOT = 8
</spec>
- name: SECONDS_PER_ETH1_BLOCK#phase0
- name: SECONDS_PER_ETH1_BLOCK
sources:
- file: config/params/config.go
search: SecondsPerETH1Block\s+uint64
@@ -683,7 +627,7 @@
SECONDS_PER_ETH1_BLOCK: uint64 = 14
</spec>
- name: SECONDS_PER_SLOT#phase0
- name: SECONDS_PER_SLOT
sources:
- file: config/params/config.go
search: SecondsPerSlot\s+uint64
@@ -693,7 +637,7 @@
SECONDS_PER_SLOT: uint64 = 12
</spec>
- name: SHARD_COMMITTEE_PERIOD#phase0
- name: SHARD_COMMITTEE_PERIOD
sources:
- file: config/params/config.go
search: ShardCommitteePeriod\s+primitives.Epoch
@@ -703,7 +647,7 @@
SHARD_COMMITTEE_PERIOD: uint64 = 256
</spec>
- name: SLOT_DURATION_MS#phase0
- name: SLOT_DURATION_MS
sources:
- file: config/params/config.go
search: SlotDurationMilliseconds\s+uint64
@@ -713,7 +657,7 @@
SLOT_DURATION_MS: uint64 = 12000
</spec>
- name: SUBNETS_PER_NODE#phase0
- name: SUBNETS_PER_NODE
sources:
- file: config/params/config.go
search: SubnetsPerNode\s+uint64
@@ -723,7 +667,7 @@
SUBNETS_PER_NODE = 2
</spec>
- name: SYNC_MESSAGE_DUE_BPS#altair
- name: SYNC_MESSAGE_DUE_BPS
sources:
- file: config/params/config.go
search: SyncMessageDueBPS\s+primitives.BP
@@ -733,14 +677,7 @@
SYNC_MESSAGE_DUE_BPS: uint64 = 3333
</spec>
- name: SYNC_MESSAGE_DUE_BPS_GLOAS#gloas
sources: []
spec: |
<spec config_var="SYNC_MESSAGE_DUE_BPS_GLOAS" fork="gloas" hash="47f14d95">
SYNC_MESSAGE_DUE_BPS_GLOAS: uint64 = 2500
</spec>
- name: TERMINAL_BLOCK_HASH#bellatrix
- name: TERMINAL_BLOCK_HASH
sources:
- file: config/params/config.go
search: TerminalBlockHash\s+common.Hash
@@ -750,7 +687,7 @@
TERMINAL_BLOCK_HASH: Hash32 = '0x0000000000000000000000000000000000000000000000000000000000000000'
</spec>
- name: TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH#bellatrix
- name: TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH
sources:
- file: config/params/config.go
search: TerminalBlockHashActivationEpoch\s+primitives.Epoch
@@ -760,7 +697,7 @@
TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH = 18446744073709551615
</spec>
- name: TERMINAL_TOTAL_DIFFICULTY#bellatrix
- name: TERMINAL_TOTAL_DIFFICULTY
sources:
- file: config/params/config.go
search: TerminalTotalDifficulty\s+string
@@ -770,7 +707,7 @@
TERMINAL_TOTAL_DIFFICULTY = 58750000000000000000000
</spec>
- name: VALIDATOR_CUSTODY_REQUIREMENT#fulu
- name: VALIDATOR_CUSTODY_REQUIREMENT
sources:
- file: config/params/config.go
search: ValidatorCustodyRequirement\s+uint64

File diff suppressed because one or more lines are too long

View File

@@ -50,7 +50,7 @@
committee_bits: Bitvector[MAX_COMMITTEES_PER_SLOT]
</spec>
- name: AttestationData#phase0
- name: AttestationData
sources:
- file: proto/eth/v1/attestation.proto
search: message AttestationData {
@@ -88,7 +88,7 @@
attestation_2: IndexedAttestation
</spec>
- name: BLSToExecutionChange#capella
- name: BLSToExecutionChange
sources:
- file: proto/prysm/v1alpha1/withdrawals.proto
search: message BLSToExecutionChange {
@@ -100,7 +100,7 @@
to_execution_address: ExecutionAddress
</spec>
- name: BeaconBlock#phase0
- name: BeaconBlock
sources:
- file: proto/eth/v1/beacon_block.proto
search: message BeaconBlock {
@@ -239,34 +239,7 @@
execution_requests: ExecutionRequests
</spec>
- name: BeaconBlockBody#gloas
sources: []
spec: |
<spec ssz_object="BeaconBlockBody" fork="gloas" hash="7e472a77">
class BeaconBlockBody(Container):
randao_reveal: BLSSignature
eth1_data: Eth1Data
graffiti: Bytes32
proposer_slashings: List[ProposerSlashing, MAX_PROPOSER_SLASHINGS]
attester_slashings: List[AttesterSlashing, MAX_ATTESTER_SLASHINGS_ELECTRA]
attestations: List[Attestation, MAX_ATTESTATIONS_ELECTRA]
deposits: List[Deposit, MAX_DEPOSITS]
voluntary_exits: List[SignedVoluntaryExit, MAX_VOLUNTARY_EXITS]
sync_aggregate: SyncAggregate
# [Modified in Gloas:EIP7732]
# Removed `execution_payload`
bls_to_execution_changes: List[SignedBLSToExecutionChange, MAX_BLS_TO_EXECUTION_CHANGES]
# [Modified in Gloas:EIP7732]
# Removed `blob_kzg_commitments`
# [Modified in Gloas:EIP7732]
# Removed `execution_requests`
# [New in Gloas:EIP7732]
signed_execution_payload_bid: SignedExecutionPayloadBid
# [New in Gloas:EIP7732]
payload_attestations: List[PayloadAttestation, MAX_PAYLOAD_ATTESTATIONS]
</spec>
- name: BeaconBlockHeader#phase0
- name: BeaconBlockHeader
sources:
- file: proto/eth/v1/beacon_block.proto
search: message BeaconBlockHeader {
@@ -565,69 +538,7 @@
proposer_lookahead: Vector[ValidatorIndex, (MIN_SEED_LOOKAHEAD + 1) * SLOTS_PER_EPOCH]
</spec>
- name: BeaconState#gloas
sources: []
spec: |
<spec ssz_object="BeaconState" fork="gloas" hash="c33b0db2">
class BeaconState(Container):
genesis_time: uint64
genesis_validators_root: Root
slot: Slot
fork: Fork
latest_block_header: BeaconBlockHeader
block_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT]
state_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT]
historical_roots: List[Root, HISTORICAL_ROOTS_LIMIT]
eth1_data: Eth1Data
eth1_data_votes: List[Eth1Data, EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH]
eth1_deposit_index: uint64
validators: List[Validator, VALIDATOR_REGISTRY_LIMIT]
balances: List[Gwei, VALIDATOR_REGISTRY_LIMIT]
randao_mixes: Vector[Bytes32, EPOCHS_PER_HISTORICAL_VECTOR]
slashings: Vector[Gwei, EPOCHS_PER_SLASHINGS_VECTOR]
previous_epoch_participation: List[ParticipationFlags, VALIDATOR_REGISTRY_LIMIT]
current_epoch_participation: List[ParticipationFlags, VALIDATOR_REGISTRY_LIMIT]
justification_bits: Bitvector[JUSTIFICATION_BITS_LENGTH]
previous_justified_checkpoint: Checkpoint
current_justified_checkpoint: Checkpoint
finalized_checkpoint: Checkpoint
inactivity_scores: List[uint64, VALIDATOR_REGISTRY_LIMIT]
current_sync_committee: SyncCommittee
next_sync_committee: SyncCommittee
# [Modified in Gloas:EIP7732]
# Removed `latest_execution_payload_header`
# [New in Gloas:EIP7732]
latest_execution_payload_bid: ExecutionPayloadBid
next_withdrawal_index: WithdrawalIndex
next_withdrawal_validator_index: ValidatorIndex
historical_summaries: List[HistoricalSummary, HISTORICAL_ROOTS_LIMIT]
deposit_requests_start_index: uint64
deposit_balance_to_consume: Gwei
exit_balance_to_consume: Gwei
earliest_exit_epoch: Epoch
consolidation_balance_to_consume: Gwei
earliest_consolidation_epoch: Epoch
pending_deposits: List[PendingDeposit, PENDING_DEPOSITS_LIMIT]
pending_partial_withdrawals: List[PendingPartialWithdrawal, PENDING_PARTIAL_WITHDRAWALS_LIMIT]
pending_consolidations: List[PendingConsolidation, PENDING_CONSOLIDATIONS_LIMIT]
proposer_lookahead: Vector[ValidatorIndex, (MIN_SEED_LOOKAHEAD + 1) * SLOTS_PER_EPOCH]
# [New in Gloas:EIP7732]
builders: List[Builder, BUILDER_REGISTRY_LIMIT]
# [New in Gloas:EIP7732]
next_withdrawal_builder_index: BuilderIndex
# [New in Gloas:EIP7732]
execution_payload_availability: Bitvector[SLOTS_PER_HISTORICAL_ROOT]
# [New in Gloas:EIP7732]
builder_pending_payments: Vector[BuilderPendingPayment, 2 * SLOTS_PER_EPOCH]
# [New in Gloas:EIP7732]
builder_pending_withdrawals: List[BuilderPendingWithdrawal, BUILDER_PENDING_WITHDRAWALS_LIMIT]
# [New in Gloas:EIP7732]
latest_block_hash: Hash32
# [New in Gloas:EIP7732]
payload_expected_withdrawals: List[Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD]
</spec>
- name: BlobIdentifier#deneb
- name: BlobIdentifier
sources:
- file: proto/prysm/v1alpha1/blobs.proto
search: message BlobIdentifier {
@@ -638,7 +549,7 @@
index: BlobIndex
</spec>
- name: BlobSidecar#deneb
- name: BlobSidecar
sources:
- file: proto/prysm/v1alpha1/beacon_block.proto
search: message BlobSidecar {
@@ -653,39 +564,7 @@
kzg_commitment_inclusion_proof: Vector[Bytes32, KZG_COMMITMENT_INCLUSION_PROOF_DEPTH]
</spec>
- name: Builder#gloas
sources: []
spec: |
<spec ssz_object="Builder" fork="gloas" hash="ae177179">
class Builder(Container):
pubkey: BLSPubkey
version: uint8
execution_address: ExecutionAddress
balance: Gwei
deposit_epoch: Epoch
withdrawable_epoch: Epoch
</spec>
- name: BuilderPendingPayment#gloas
sources: []
spec: |
<spec ssz_object="BuilderPendingPayment" fork="gloas" hash="73cf1649">
class BuilderPendingPayment(Container):
weight: Gwei
withdrawal: BuilderPendingWithdrawal
</spec>
- name: BuilderPendingWithdrawal#gloas
sources: []
spec: |
<spec ssz_object="BuilderPendingWithdrawal" fork="gloas" hash="0579f0ac">
class BuilderPendingWithdrawal(Container):
fee_recipient: ExecutionAddress
amount: Gwei
builder_index: BuilderIndex
</spec>
- name: Checkpoint#phase0
- name: Checkpoint
sources:
- file: proto/eth/v1/attestation.proto
search: message Checkpoint {
@@ -696,7 +575,7 @@
root: Root
</spec>
- name: ConsolidationRequest#electra
- name: ConsolidationRequest
sources:
- file: proto/engine/v1/electra.proto
search: message ConsolidationRequest {
@@ -708,7 +587,7 @@
target_pubkey: BLSPubkey
</spec>
- name: ContributionAndProof#altair
- name: ContributionAndProof
sources:
- file: proto/prysm/v1alpha1/sync_committee.proto
search: message ContributionAndProof {
@@ -720,7 +599,7 @@
selection_proof: BLSSignature
</spec>
- name: DataColumnSidecar#fulu
- name: DataColumnSidecar
sources:
- file: proto/prysm/v1alpha1/data_columns.proto
search: message DataColumnSidecar {
@@ -735,26 +614,7 @@
kzg_commitments_inclusion_proof: Vector[Bytes32, KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH]
</spec>
- name: DataColumnSidecar#gloas
sources: []
spec: |
<spec ssz_object="DataColumnSidecar" fork="gloas" hash="8028928b">
class DataColumnSidecar(Container):
index: ColumnIndex
column: List[Cell, MAX_BLOB_COMMITMENTS_PER_BLOCK]
kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK]
kzg_proofs: List[KZGProof, MAX_BLOB_COMMITMENTS_PER_BLOCK]
# [Modified in Gloas:EIP7732]
# Removed `signed_block_header`
# [Modified in Gloas:EIP7732]
# Removed `kzg_commitments_inclusion_proof`
# [New in Gloas:EIP7732]
slot: Slot
# [New in Gloas:EIP7732]
beacon_block_root: Root
</spec>
- name: DataColumnsByRootIdentifier#fulu
- name: DataColumnsByRootIdentifier
sources:
- file: proto/prysm/v1alpha1/data_columns.proto
search: message DataColumnsByRootIdentifier {
@@ -765,7 +625,7 @@
columns: List[ColumnIndex, NUMBER_OF_COLUMNS]
</spec>
- name: Deposit#phase0
- name: Deposit
sources:
- file: proto/eth/v1/beacon_block.proto
search: message Deposit {
@@ -776,7 +636,7 @@
data: DepositData
</spec>
- name: DepositData#phase0
- name: DepositData
sources:
- file: proto/prysm/v1alpha1/beacon_core_types.proto
search: message Data {
@@ -789,7 +649,7 @@
signature: BLSSignature
</spec>
- name: DepositMessage#phase0
- name: DepositMessage
sources:
- file: proto/prysm/v1alpha1/beacon_state.proto
search: message DepositMessage {
@@ -801,7 +661,7 @@
amount: Gwei
</spec>
- name: DepositRequest#electra
- name: DepositRequest
sources:
- file: proto/engine/v1/electra.proto
search: message DepositRequest {
@@ -815,17 +675,7 @@
index: uint64
</spec>
- name: Eth1Block#phase0
sources: []
spec: |
<spec ssz_object="Eth1Block" fork="phase0" hash="0a5c6b45">
class Eth1Block(Container):
timestamp: uint64
deposit_root: Root
deposit_count: uint64
</spec>
- name: Eth1Data#phase0
- name: Eth1Data
sources:
- file: proto/prysm/v1alpha1/beacon_core_types.proto
search: message Eth1Data {
@@ -913,38 +763,6 @@
excess_blob_gas: uint64
</spec>
- name: ExecutionPayloadBid#gloas
sources: []
spec: |
<spec ssz_object="ExecutionPayloadBid" fork="gloas" hash="aa71ba16">
class ExecutionPayloadBid(Container):
parent_block_hash: Hash32
parent_block_root: Root
block_hash: Hash32
prev_randao: Bytes32
fee_recipient: ExecutionAddress
gas_limit: uint64
builder_index: BuilderIndex
slot: Slot
value: Gwei
execution_payment: Gwei
blob_kzg_commitments_root: Root
</spec>
- name: ExecutionPayloadEnvelope#gloas
sources: []
spec: |
<spec ssz_object="ExecutionPayloadEnvelope" fork="gloas" hash="cd522f7f">
class ExecutionPayloadEnvelope(Container):
payload: ExecutionPayload
execution_requests: ExecutionRequests
builder_index: BuilderIndex
beacon_block_root: Root
slot: Slot
blob_kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK]
state_root: Root
</spec>
- name: ExecutionPayloadHeader#bellatrix
sources:
- file: proto/engine/v1/execution_engine.proto
@@ -1021,7 +839,7 @@
excess_blob_gas: uint64
</spec>
- name: ExecutionRequests#electra
- name: ExecutionRequests
sources:
- file: proto/engine/v1/electra.proto
search: message ExecutionRequests {
@@ -1036,7 +854,7 @@
consolidations: List[ConsolidationRequest, MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD]
</spec>
- name: Fork#phase0
- name: Fork
sources:
- file: proto/prysm/v1alpha1/beacon_core_types.proto
search: message Fork {
@@ -1048,16 +866,7 @@
epoch: Epoch
</spec>
- name: ForkChoiceNode#gloas
sources: []
spec: |
<spec ssz_object="ForkChoiceNode" fork="gloas" hash="755a4b34">
class ForkChoiceNode(Container):
root: Root
payload_status: PayloadStatus # One of PAYLOAD_STATUS_* values
</spec>
- name: ForkData#phase0
- name: ForkData
sources:
- file: proto/prysm/v1alpha1/beacon_state.proto
search: message ForkData {
@@ -1068,7 +877,7 @@
genesis_validators_root: Root
</spec>
- name: HistoricalBatch#phase0
- name: HistoricalBatch
sources:
- file: proto/prysm/v1alpha1/beacon_state.proto
search: message HistoricalBatch {
@@ -1079,7 +888,7 @@
state_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT]
</spec>
- name: HistoricalSummary#capella
- name: HistoricalSummary
sources:
- file: proto/prysm/v1alpha1/beacon_core_types.proto
search: message HistoricalSummary {
@@ -1115,17 +924,7 @@
signature: BLSSignature
</spec>
- name: IndexedPayloadAttestation#gloas
sources: []
spec: |
<spec ssz_object="IndexedPayloadAttestation" fork="gloas" hash="fa4832c8">
class IndexedPayloadAttestation(Container):
attesting_indices: List[ValidatorIndex, PTC_SIZE]
data: PayloadAttestationData
signature: BLSSignature
</spec>
- name: LightClientBootstrap#altair
- name: LightClientBootstrap
sources:
- file: proto/prysm/v1alpha1/light_client.proto
search: message LightClientBootstrapAltair {
@@ -1139,18 +938,7 @@
current_sync_committee_branch: CurrentSyncCommitteeBranch
</spec>
- name: LightClientBootstrap#capella
sources: []
spec: |
<spec ssz_object="LightClientBootstrap" fork="capella" hash="85f4f5fe">
class LightClientBootstrap(Container):
# [Modified in Capella]
header: LightClientHeader
current_sync_committee: SyncCommittee
current_sync_committee_branch: CurrentSyncCommitteeBranch
</spec>
- name: LightClientFinalityUpdate#altair
- name: LightClientFinalityUpdate
sources:
- file: proto/prysm/v1alpha1/light_client.proto
search: message LightClientFinalityUpdateAltair {
@@ -1168,20 +956,6 @@
signature_slot: Slot
</spec>
- name: LightClientFinalityUpdate#capella
sources: []
spec: |
<spec ssz_object="LightClientFinalityUpdate" fork="capella" hash="9d2b55dd">
class LightClientFinalityUpdate(Container):
# [Modified in Capella]
attested_header: LightClientHeader
# [Modified in Capella]
finalized_header: LightClientHeader
finality_branch: FinalityBranch
sync_aggregate: SyncAggregate
signature_slot: Slot
</spec>
- name: LightClientHeader#altair
sources:
- file: proto/prysm/v1alpha1/light_client.proto
@@ -1206,7 +980,7 @@
execution_branch: ExecutionBranch
</spec>
- name: LightClientOptimisticUpdate#altair
- name: LightClientOptimisticUpdate
sources:
- file: proto/prysm/v1alpha1/light_client.proto
search: message LightClientOptimisticUpdateAltair {
@@ -1221,18 +995,7 @@
signature_slot: Slot
</spec>
- name: LightClientOptimisticUpdate#capella
sources: []
spec: |
<spec ssz_object="LightClientOptimisticUpdate" fork="capella" hash="bdce7b1d">
class LightClientOptimisticUpdate(Container):
# [Modified in Capella]
attested_header: LightClientHeader
sync_aggregate: SyncAggregate
signature_slot: Slot
</spec>
- name: LightClientUpdate#altair
- name: LightClientUpdate
sources:
- file: proto/prysm/v1alpha1/light_client.proto
search: message LightClientUpdateAltair {
@@ -1253,65 +1016,7 @@
signature_slot: Slot
</spec>
- name: LightClientUpdate#capella
sources: []
spec: |
<spec ssz_object="LightClientUpdate" fork="capella" hash="8d215165">
class LightClientUpdate(Container):
# [Modified in Capella]
attested_header: LightClientHeader
next_sync_committee: SyncCommittee
next_sync_committee_branch: NextSyncCommitteeBranch
# [Modified in Capella]
finalized_header: LightClientHeader
finality_branch: FinalityBranch
sync_aggregate: SyncAggregate
signature_slot: Slot
</spec>
- name: MatrixEntry#fulu
sources: []
spec: |
<spec ssz_object="MatrixEntry" fork="fulu" hash="0da9cc8e">
class MatrixEntry(Container):
cell: Cell
kzg_proof: KZGProof
column_index: ColumnIndex
row_index: RowIndex
</spec>
- name: PayloadAttestation#gloas
sources: []
spec: |
<spec ssz_object="PayloadAttestation" fork="gloas" hash="c769473d">
class PayloadAttestation(Container):
aggregation_bits: Bitvector[PTC_SIZE]
data: PayloadAttestationData
signature: BLSSignature
</spec>
- name: PayloadAttestationData#gloas
sources: []
spec: |
<spec ssz_object="PayloadAttestationData" fork="gloas" hash="9f1b7f92">
class PayloadAttestationData(Container):
beacon_block_root: Root
slot: Slot
payload_present: boolean
blob_data_available: boolean
</spec>
- name: PayloadAttestationMessage#gloas
sources: []
spec: |
<spec ssz_object="PayloadAttestationMessage" fork="gloas" hash="3707678d">
class PayloadAttestationMessage(Container):
validator_index: ValidatorIndex
data: PayloadAttestationData
signature: BLSSignature
</spec>
- name: PendingAttestation#phase0
- name: PendingAttestation
sources:
- file: proto/prysm/v1alpha1/beacon_state.proto
search: message PendingAttestation {
@@ -1324,7 +1029,7 @@
proposer_index: ValidatorIndex
</spec>
- name: PendingConsolidation#electra
- name: PendingConsolidation
sources:
- file: proto/prysm/v1alpha1/eip_7251.proto
search: message PendingConsolidation {
@@ -1335,7 +1040,7 @@
target_index: ValidatorIndex
</spec>
- name: PendingDeposit#electra
- name: PendingDeposit
sources:
- file: proto/prysm/v1alpha1/eip_7251.proto
search: message PendingDeposit {
@@ -1349,7 +1054,7 @@
slot: Slot
</spec>
- name: PendingPartialWithdrawal#electra
- name: PendingPartialWithdrawal
sources:
- file: proto/prysm/v1alpha1/eip_7251.proto
search: message PendingPartialWithdrawal {
@@ -1361,7 +1066,7 @@
withdrawable_epoch: Epoch
</spec>
- name: PowBlock#bellatrix
- name: PowBlock
sources:
- file: proto/prysm/v1alpha1/beacon_state.proto
search: message PowBlock {
@@ -1373,18 +1078,7 @@
total_difficulty: uint256
</spec>
- name: ProposerPreferences#gloas
sources: []
spec: |
<spec ssz_object="ProposerPreferences" fork="gloas" hash="2a38b149">
class ProposerPreferences(Container):
proposal_slot: Slot
validator_index: ValidatorIndex
fee_recipient: ExecutionAddress
gas_limit: uint64
</spec>
- name: ProposerSlashing#phase0
- name: ProposerSlashing
sources:
- file: proto/eth/v1/beacon_block.proto
search: message ProposerSlashing {
@@ -1418,7 +1112,7 @@
signature: BLSSignature
</spec>
- name: SignedBLSToExecutionChange#capella
- name: SignedBLSToExecutionChange
sources:
- file: proto/prysm/v1alpha1/withdrawals.proto
search: message SignedBLSToExecutionChange {
@@ -1429,7 +1123,7 @@
signature: BLSSignature
</spec>
- name: SignedBeaconBlock#phase0
- name: SignedBeaconBlock
sources:
- file: proto/prysm/v1alpha1/beacon_block.proto
search: message SignedBeaconBlock {
@@ -1440,7 +1134,7 @@
signature: BLSSignature
</spec>
- name: SignedBeaconBlockHeader#phase0
- name: SignedBeaconBlockHeader
sources:
- file: proto/prysm/v1alpha1/beacon_core_types.proto
search: message SignedBeaconBlockHeader {
@@ -1451,7 +1145,7 @@
signature: BLSSignature
</spec>
- name: SignedContributionAndProof#altair
- name: SignedContributionAndProof
sources:
- file: proto/prysm/v1alpha1/sync_committee.proto
search: message SignedContributionAndProof {
@@ -1462,34 +1156,7 @@
signature: BLSSignature
</spec>
- name: SignedExecutionPayloadBid#gloas
sources: []
spec: |
<spec ssz_object="SignedExecutionPayloadBid" fork="gloas" hash="6b344341">
class SignedExecutionPayloadBid(Container):
message: ExecutionPayloadBid
signature: BLSSignature
</spec>
- name: SignedExecutionPayloadEnvelope#gloas
sources: []
spec: |
<spec ssz_object="SignedExecutionPayloadEnvelope" fork="gloas" hash="ab8f3404">
class SignedExecutionPayloadEnvelope(Container):
message: ExecutionPayloadEnvelope
signature: BLSSignature
</spec>
- name: SignedProposerPreferences#gloas
sources: []
spec: |
<spec ssz_object="SignedProposerPreferences" fork="gloas" hash="2142774c">
class SignedProposerPreferences(Container):
message: ProposerPreferences
signature: BLSSignature
</spec>
- name: SignedVoluntaryExit#phase0
- name: SignedVoluntaryExit
sources:
- file: proto/prysm/v1alpha1/beacon_core_types.proto
search: message SignedVoluntaryExit {
@@ -1500,7 +1167,7 @@
signature: BLSSignature
</spec>
- name: SigningData#phase0
- name: SigningData
sources:
- file: proto/prysm/v1alpha1/beacon_state.proto
search: message SigningData {
@@ -1511,7 +1178,7 @@
domain: Domain
</spec>
- name: SingleAttestation#electra
- name: SingleAttestation
sources:
- file: proto/prysm/v1alpha1/attestation.proto
search: message SingleAttestation {
@@ -1524,7 +1191,7 @@
signature: BLSSignature
</spec>
- name: SyncAggregate#altair
- name: SyncAggregate
sources:
- file: proto/prysm/v1alpha1/beacon_core_types.proto
search: message SyncAggregate {
@@ -1535,7 +1202,7 @@
sync_committee_signature: BLSSignature
</spec>
- name: SyncAggregatorSelectionData#altair
- name: SyncAggregatorSelectionData
sources:
- file: proto/prysm/v1alpha1/beacon_state.proto
search: message SyncAggregatorSelectionData {
@@ -1546,7 +1213,7 @@
subcommittee_index: uint64
</spec>
- name: SyncCommittee#altair
- name: SyncCommittee
sources:
- file: proto/prysm/v1alpha1/beacon_core_types.proto
search: message SyncCommittee {
@@ -1557,7 +1224,7 @@
aggregate_pubkey: BLSPubkey
</spec>
- name: SyncCommitteeContribution#altair
- name: SyncCommitteeContribution
sources:
- file: proto/prysm/v1alpha1/sync_committee.proto
search: message SyncCommitteeContribution {
@@ -1571,7 +1238,7 @@
signature: BLSSignature
</spec>
- name: SyncCommitteeMessage#altair
- name: SyncCommitteeMessage
sources:
- file: proto/prysm/v1alpha1/sync_committee.proto
search: message SyncCommitteeMessage {
@@ -1584,7 +1251,7 @@
signature: BLSSignature
</spec>
- name: Validator#phase0
- name: Validator
sources:
- file: proto/prysm/v1alpha1/beacon_core_types.proto
search: message Validator {
@@ -1601,7 +1268,7 @@
withdrawable_epoch: Epoch
</spec>
- name: VoluntaryExit#phase0
- name: VoluntaryExit
sources:
- file: proto/eth/v1/beacon_block.proto
search: message VoluntaryExit {
@@ -1612,7 +1279,7 @@
validator_index: ValidatorIndex
</spec>
- name: Withdrawal#capella
- name: Withdrawal
sources:
- file: proto/engine/v1/execution_engine.proto
search: message Withdrawal {
@@ -1625,7 +1292,7 @@
amount: Gwei
</spec>
- name: WithdrawalRequest#electra
- name: WithdrawalRequest
sources:
- file: proto/engine/v1/electra.proto
search: message WithdrawalRequest {

View File

@@ -1,4 +1,4 @@
- name: BlobParameters#fulu
- name: BlobParameters
sources: []
spec: |
<spec dataclass="BlobParameters" fork="fulu" hash="a4575aa8">
@@ -54,20 +54,6 @@
processed_sweep_withdrawals_count: uint64
</spec>
- name: ExpectedWithdrawals#gloas
sources: []
spec: |
<spec dataclass="ExpectedWithdrawals" fork="gloas" hash="b32cc9c9">
class ExpectedWithdrawals(object):
withdrawals: Sequence[Withdrawal]
# [New in Gloas:EIP7732]
processed_builder_withdrawals_count: uint64
processed_partial_withdrawals_count: uint64
# [New in Gloas:EIP7732]
processed_builders_sweep_count: uint64
processed_sweep_withdrawals_count: uint64
</spec>
- name: GetPayloadResponse#bellatrix
sources:
- file: consensus-types/blocks/get_payload.go
@@ -140,7 +126,7 @@
execution_requests: Sequence[bytes]
</spec>
- name: LatestMessage#phase0
- name: LatestMessage
sources: []
spec: |
<spec dataclass="LatestMessage" fork="phase0" hash="44e832d0">
@@ -150,18 +136,7 @@
root: Root
</spec>
- name: LatestMessage#gloas
sources: []
spec: |
<spec dataclass="LatestMessage" fork="gloas" hash="a0030894">
@dataclass(eq=True, frozen=True)
class LatestMessage(object):
slot: Slot
root: Root
payload_present: boolean
</spec>
- name: LightClientStore#altair
- name: LightClientStore
sources: []
spec: |
<spec dataclass="LightClientStore" fork="altair" hash="24725cec">
@@ -180,23 +155,6 @@
current_max_active_participants: uint64
</spec>
- name: LightClientStore#capella
sources: []
spec: |
<spec dataclass="LightClientStore" fork="capella" hash="04b41062">
class LightClientStore(object):
# [Modified in Capella]
finalized_header: LightClientHeader
current_sync_committee: SyncCommittee
next_sync_committee: SyncCommittee
# [Modified in Capella]
best_valid_update: Optional[LightClientUpdate]
# [Modified in Capella]
optimistic_header: LightClientHeader
previous_max_active_participants: uint64
current_max_active_participants: uint64
</spec>
- name: NewPayloadRequest#bellatrix
sources:
- file: beacon-chain/execution/engine_client.go
@@ -233,7 +191,7 @@
execution_requests: ExecutionRequests
</spec>
- name: OptimisticStore#bellatrix
- name: OptimisticStore
sources: []
spec: |
<spec dataclass="OptimisticStore" fork="bellatrix" hash="a2b2182c">
@@ -288,7 +246,7 @@
parent_beacon_block_root: Root
</spec>
- name: Store#phase0
- name: Store
sources: []
spec: |
<spec dataclass="Store" fork="phase0" hash="abe525d6">
@@ -308,30 +266,3 @@
latest_messages: Dict[ValidatorIndex, LatestMessage] = field(default_factory=dict)
unrealized_justifications: Dict[Root, Checkpoint] = field(default_factory=dict)
</spec>
- name: Store#gloas
sources: []
spec: |
<spec dataclass="Store" fork="gloas" hash="4dbfec46">
class Store(object):
time: uint64
genesis_time: uint64
justified_checkpoint: Checkpoint
finalized_checkpoint: Checkpoint
unrealized_justified_checkpoint: Checkpoint
unrealized_finalized_checkpoint: Checkpoint
proposer_boost_root: Root
equivocating_indices: Set[ValidatorIndex]
blocks: Dict[Root, BeaconBlock] = field(default_factory=dict)
block_states: Dict[Root, BeaconState] = field(default_factory=dict)
block_timeliness: Dict[Root, Vector[boolean, NUM_BLOCK_TIMELINESS_DEADLINES]] = field(
default_factory=dict
)
checkpoint_states: Dict[Checkpoint, BeaconState] = field(default_factory=dict)
latest_messages: Dict[ValidatorIndex, LatestMessage] = field(default_factory=dict)
unrealized_justifications: Dict[Root, Checkpoint] = field(default_factory=dict)
# [New in Gloas:EIP7732]
execution_payload_states: Dict[Root, BeaconState] = field(default_factory=dict)
# [New in Gloas:EIP7732]
ptc_vote: Dict[Root, Vector[boolean, PTC_SIZE]] = field(default_factory=dict)
</spec>

File diff suppressed because it is too large

View File

@@ -1,4 +1,4 @@
- name: BASE_REWARD_FACTOR#phase0
- name: BASE_REWARD_FACTOR
sources:
- file: config/params/config.go
search: BaseRewardFactor\s+.*yaml:"BASE_REWARD_FACTOR"
@@ -8,21 +8,7 @@
BASE_REWARD_FACTOR: uint64 = 64
</spec>
- name: BUILDER_PENDING_WITHDRAWALS_LIMIT#gloas
sources: []
spec: |
<spec preset_var="BUILDER_PENDING_WITHDRAWALS_LIMIT" fork="gloas" hash="40b31377">
BUILDER_PENDING_WITHDRAWALS_LIMIT: uint64 = 1048576
</spec>
- name: BUILDER_REGISTRY_LIMIT#gloas
sources: []
spec: |
<spec preset_var="BUILDER_REGISTRY_LIMIT" fork="gloas" hash="e951ff73">
BUILDER_REGISTRY_LIMIT: uint64 = 1099511627776
</spec>
- name: BYTES_PER_LOGS_BLOOM#bellatrix
- name: BYTES_PER_LOGS_BLOOM
sources:
- file: config/params/config.go
search: BytesPerLogsBloom\s+.*yaml:"BYTES_PER_LOGS_BLOOM"
@@ -32,7 +18,7 @@
BYTES_PER_LOGS_BLOOM: uint64 = 256
</spec>
- name: CELLS_PER_EXT_BLOB#fulu
- name: CELLS_PER_EXT_BLOB
sources:
- file: beacon-chain/rpc/eth/config/handlers.go
search: data\["CELLS_PER_EXT_BLOB"\]
@@ -42,7 +28,7 @@
CELLS_PER_EXT_BLOB = 128
</spec>
- name: EFFECTIVE_BALANCE_INCREMENT#phase0
- name: EFFECTIVE_BALANCE_INCREMENT
sources:
- file: config/params/config.go
search: EffectiveBalanceIncrement\s+.*yaml:"EFFECTIVE_BALANCE_INCREMENT"
@@ -52,7 +38,7 @@
EFFECTIVE_BALANCE_INCREMENT: Gwei = 1000000000
</spec>
- name: EPOCHS_PER_ETH1_VOTING_PERIOD#phase0
- name: EPOCHS_PER_ETH1_VOTING_PERIOD
sources:
- file: config/params/config.go
search: EpochsPerEth1VotingPeriod\s+.*yaml:"EPOCHS_PER_ETH1_VOTING_PERIOD"
@@ -62,7 +48,7 @@
EPOCHS_PER_ETH1_VOTING_PERIOD: uint64 = 64
</spec>
- name: EPOCHS_PER_HISTORICAL_VECTOR#phase0
- name: EPOCHS_PER_HISTORICAL_VECTOR
sources:
- file: config/fieldparams/mainnet.go
search: RandaoMixesLength\s*=
@@ -72,7 +58,7 @@
EPOCHS_PER_HISTORICAL_VECTOR: uint64 = 65536
</spec>
- name: EPOCHS_PER_SLASHINGS_VECTOR#phase0
- name: EPOCHS_PER_SLASHINGS_VECTOR
sources:
- file: config/fieldparams/mainnet.go
search: SlashingsLength\s*=
@@ -82,7 +68,7 @@
EPOCHS_PER_SLASHINGS_VECTOR: uint64 = 8192
</spec>
- name: EPOCHS_PER_SYNC_COMMITTEE_PERIOD#altair
- name: EPOCHS_PER_SYNC_COMMITTEE_PERIOD
sources:
- file: config/params/config.go
search: EpochsPerSyncCommitteePeriod\s+.*yaml:"EPOCHS_PER_SYNC_COMMITTEE_PERIOD"
@@ -92,7 +78,7 @@
EPOCHS_PER_SYNC_COMMITTEE_PERIOD: uint64 = 256
</spec>
- name: FIELD_ELEMENTS_PER_BLOB#deneb
- name: FIELD_ELEMENTS_PER_BLOB
sources:
- file: config/params/config.go
search: FieldElementsPerBlob\s+.*yaml:"FIELD_ELEMENTS_PER_BLOB"
@@ -102,7 +88,7 @@
FIELD_ELEMENTS_PER_BLOB: uint64 = 4096
</spec>
- name: FIELD_ELEMENTS_PER_CELL#fulu
- name: FIELD_ELEMENTS_PER_CELL
sources:
- file: config/fieldparams/mainnet.go
search: CellsPerBlob\s*=
@@ -112,7 +98,7 @@
FIELD_ELEMENTS_PER_CELL: uint64 = 64
</spec>
- name: FIELD_ELEMENTS_PER_EXT_BLOB#fulu
- name: FIELD_ELEMENTS_PER_EXT_BLOB
sources:
- file: proto/ssz_proto_library.bzl
search: mainnet\s*=\s*\{[^}]*"field_elements_per_ext_blob\.size".*[^}]*\}
@@ -122,7 +108,7 @@
FIELD_ELEMENTS_PER_EXT_BLOB = 8192
</spec>
- name: HISTORICAL_ROOTS_LIMIT#phase0
- name: HISTORICAL_ROOTS_LIMIT
sources:
- file: config/fieldparams/mainnet.go
search: HistoricalRootsLength\s*=
@@ -132,7 +118,7 @@
HISTORICAL_ROOTS_LIMIT: uint64 = 16777216
</spec>
- name: HYSTERESIS_DOWNWARD_MULTIPLIER#phase0
- name: HYSTERESIS_DOWNWARD_MULTIPLIER
sources:
- file: config/params/config.go
search: HysteresisDownwardMultiplier\s+.*yaml:"HYSTERESIS_DOWNWARD_MULTIPLIER"
@@ -142,7 +128,7 @@
HYSTERESIS_DOWNWARD_MULTIPLIER: uint64 = 1
</spec>
- name: HYSTERESIS_QUOTIENT#phase0
- name: HYSTERESIS_QUOTIENT
sources:
- file: config/params/config.go
search: HysteresisQuotient\s+.*yaml:"HYSTERESIS_QUOTIENT"
@@ -152,7 +138,7 @@
HYSTERESIS_QUOTIENT: uint64 = 4
</spec>
- name: HYSTERESIS_UPWARD_MULTIPLIER#phase0
- name: HYSTERESIS_UPWARD_MULTIPLIER
sources:
- file: config/params/config.go
search: HysteresisUpwardMultiplier\s+.*yaml:"HYSTERESIS_UPWARD_MULTIPLIER"
@@ -162,7 +148,7 @@
HYSTERESIS_UPWARD_MULTIPLIER: uint64 = 5
</spec>
- name: INACTIVITY_PENALTY_QUOTIENT#phase0
- name: INACTIVITY_PENALTY_QUOTIENT
sources:
- file: config/params/config.go
search: InactivityPenaltyQuotient\s+.*yaml:"INACTIVITY_PENALTY_QUOTIENT"
@@ -172,7 +158,7 @@
INACTIVITY_PENALTY_QUOTIENT: uint64 = 67108864
</spec>
- name: INACTIVITY_PENALTY_QUOTIENT_ALTAIR#altair
- name: INACTIVITY_PENALTY_QUOTIENT_ALTAIR
sources:
- file: config/params/config.go
search: InactivityPenaltyQuotientAltair\s+.*yaml:"INACTIVITY_PENALTY_QUOTIENT_ALTAIR"
@@ -182,7 +168,7 @@
INACTIVITY_PENALTY_QUOTIENT_ALTAIR: uint64 = 50331648
</spec>
- name: INACTIVITY_PENALTY_QUOTIENT_BELLATRIX#bellatrix
- name: INACTIVITY_PENALTY_QUOTIENT_BELLATRIX
sources:
- file: config/params/config.go
search: InactivityPenaltyQuotientBellatrix\s+.*yaml:"INACTIVITY_PENALTY_QUOTIENT_BELLATRIX"
@@ -192,7 +178,7 @@
INACTIVITY_PENALTY_QUOTIENT_BELLATRIX: uint64 = 16777216
</spec>
- name: KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH#fulu
- name: KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH
sources:
- file: proto/ssz_proto_library.bzl
search: mainnet\s*=\s*\{[^}]*"kzg_commitments_inclusion_proof_depth\.size":.*[^}]*\}
@@ -202,7 +188,7 @@
KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH: uint64 = 4
</spec>
- name: KZG_COMMITMENT_INCLUSION_PROOF_DEPTH#deneb
- name: KZG_COMMITMENT_INCLUSION_PROOF_DEPTH
sources:
- file: config/fieldparams/mainnet.go
search: KzgCommitmentInclusionProofDepth\s*=
@@ -212,7 +198,7 @@
KZG_COMMITMENT_INCLUSION_PROOF_DEPTH: uint64 = 17
</spec>
- name: MAX_ATTESTATIONS#phase0
- name: MAX_ATTESTATIONS
sources:
- file: config/params/config.go
search: MaxAttestations\s+.*yaml:"MAX_ATTESTATIONS"
@@ -222,7 +208,7 @@
MAX_ATTESTATIONS = 128
</spec>
- name: MAX_ATTESTATIONS_ELECTRA#electra
- name: MAX_ATTESTATIONS_ELECTRA
sources:
- file: config/params/config.go
search: MaxAttestationsElectra\s+.*yaml:"MAX_ATTESTATIONS_ELECTRA"
@@ -232,7 +218,7 @@
MAX_ATTESTATIONS_ELECTRA = 8
</spec>
- name: MAX_ATTESTER_SLASHINGS#phase0
- name: MAX_ATTESTER_SLASHINGS
sources:
- file: config/params/config.go
search: MaxAttesterSlashings\s+.*yaml:"MAX_ATTESTER_SLASHINGS"
@@ -242,7 +228,7 @@
MAX_ATTESTER_SLASHINGS = 2
</spec>
- name: MAX_ATTESTER_SLASHINGS_ELECTRA#electra
- name: MAX_ATTESTER_SLASHINGS_ELECTRA
sources:
- file: config/params/config.go
search: MaxAttesterSlashingsElectra\s+.*yaml:"MAX_ATTESTER_SLASHINGS_ELECTRA"
@@ -252,7 +238,7 @@
MAX_ATTESTER_SLASHINGS_ELECTRA = 1
</spec>
- name: MAX_BLOB_COMMITMENTS_PER_BLOCK#deneb
- name: MAX_BLOB_COMMITMENTS_PER_BLOCK
sources:
- file: config/fieldparams/mainnet.go
search: MaxBlobCommitmentsPerBlock\s*=
@@ -262,7 +248,7 @@
MAX_BLOB_COMMITMENTS_PER_BLOCK: uint64 = 4096
</spec>
- name: MAX_BLS_TO_EXECUTION_CHANGES#capella
- name: MAX_BLS_TO_EXECUTION_CHANGES
sources:
- file: config/params/config.go
search: MaxBlsToExecutionChanges\s+.*yaml:"MAX_BLS_TO_EXECUTION_CHANGES"
@@ -272,14 +258,7 @@
MAX_BLS_TO_EXECUTION_CHANGES = 16
</spec>
- name: MAX_BUILDERS_PER_WITHDRAWALS_SWEEP#gloas
sources: []
spec: |
<spec preset_var="MAX_BUILDERS_PER_WITHDRAWALS_SWEEP" fork="gloas" hash="1556b314">
MAX_BUILDERS_PER_WITHDRAWALS_SWEEP = 16384
</spec>
- name: MAX_BYTES_PER_TRANSACTION#bellatrix
- name: MAX_BYTES_PER_TRANSACTION
sources:
- file: config/params/config.go
search: MaxBytesPerTransaction\s+.*yaml:"MAX_BYTES_PER_TRANSACTION"
@@ -309,7 +288,7 @@
MAX_COMMITTEES_PER_SLOT: uint64 = 64
</spec>
- name: MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD#electra
- name: MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD
sources:
- file: config/params/config.go
search: MaxConsolidationsRequestsPerPayload\s+.*yaml:"MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD"
@@ -319,7 +298,7 @@
MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD: uint64 = 2
</spec>
- name: MAX_DEPOSITS#phase0
- name: MAX_DEPOSITS
sources:
- file: config/params/config.go
search: MaxDeposits\s+.*yaml:"MAX_DEPOSITS"
@@ -329,7 +308,7 @@
MAX_DEPOSITS = 16
</spec>
- name: MAX_DEPOSIT_REQUESTS_PER_PAYLOAD#electra
- name: MAX_DEPOSIT_REQUESTS_PER_PAYLOAD
sources:
- file: config/params/config.go
search: MaxDepositRequestsPerPayload\s+.*yaml:"MAX_DEPOSIT_REQUESTS_PER_PAYLOAD"
@@ -339,7 +318,7 @@
MAX_DEPOSIT_REQUESTS_PER_PAYLOAD: uint64 = 8192
</spec>
- name: MAX_EFFECTIVE_BALANCE#phase0
- name: MAX_EFFECTIVE_BALANCE
sources:
- file: config/params/config.go
search: MaxEffectiveBalance\s+.*yaml:"MAX_EFFECTIVE_BALANCE"
@@ -349,7 +328,7 @@
MAX_EFFECTIVE_BALANCE: Gwei = 32000000000
</spec>
- name: MAX_EFFECTIVE_BALANCE_ELECTRA#electra
- name: MAX_EFFECTIVE_BALANCE_ELECTRA
sources:
- file: config/params/config.go
search: MaxEffectiveBalanceElectra\s+.*yaml:"MAX_EFFECTIVE_BALANCE_ELECTRA"
@@ -359,7 +338,7 @@
MAX_EFFECTIVE_BALANCE_ELECTRA: Gwei = 2048000000000
</spec>
- name: MAX_EXTRA_DATA_BYTES#bellatrix
- name: MAX_EXTRA_DATA_BYTES
sources:
- file: config/params/config.go
search: MaxExtraDataBytes\s+.*yaml:"MAX_EXTRA_DATA_BYTES"
@@ -369,14 +348,7 @@
MAX_EXTRA_DATA_BYTES = 32
</spec>
- name: MAX_PAYLOAD_ATTESTATIONS#gloas
sources: []
spec: |
<spec preset_var="MAX_PAYLOAD_ATTESTATIONS" fork="gloas" hash="fc24e7ea">
MAX_PAYLOAD_ATTESTATIONS = 4
</spec>
- name: MAX_PENDING_DEPOSITS_PER_EPOCH#electra
- name: MAX_PENDING_DEPOSITS_PER_EPOCH
sources:
- file: config/params/config.go
search: MaxPendingDepositsPerEpoch\s+.*yaml:"MAX_PENDING_DEPOSITS_PER_EPOCH"
@@ -386,7 +358,7 @@
MAX_PENDING_DEPOSITS_PER_EPOCH: uint64 = 16
</spec>
- name: MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP#electra
- name: MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP
sources:
- file: config/params/config.go
search: MaxPendingPartialsPerWithdrawalsSweep\s+.*yaml:"MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP"
@@ -396,7 +368,7 @@
MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP: uint64 = 8
</spec>
- name: MAX_PROPOSER_SLASHINGS#phase0
- name: MAX_PROPOSER_SLASHINGS
sources:
- file: config/params/config.go
search: MaxProposerSlashings\s+.*yaml:"MAX_PROPOSER_SLASHINGS"
@@ -406,7 +378,7 @@
MAX_PROPOSER_SLASHINGS = 16
</spec>
- name: MAX_SEED_LOOKAHEAD#phase0
- name: MAX_SEED_LOOKAHEAD
sources:
- file: config/params/config.go
search: MaxSeedLookahead\s+.*yaml:"MAX_SEED_LOOKAHEAD"
@@ -416,7 +388,7 @@
MAX_SEED_LOOKAHEAD: uint64 = 4
</spec>
- name: MAX_TRANSACTIONS_PER_PAYLOAD#bellatrix
- name: MAX_TRANSACTIONS_PER_PAYLOAD
sources:
- file: config/params/config.go
search: MaxTransactionsPerPayload\s+.*yaml:"MAX_TRANSACTIONS_PER_PAYLOAD"
@@ -446,7 +418,7 @@
MAX_VALIDATORS_PER_COMMITTEE: uint64 = 2048
</spec>
- name: MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP#capella
- name: MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP
sources:
- file: config/params/config.go
search: MaxValidatorsPerWithdrawalsSweep\s+.*yaml:"MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP"
@@ -456,7 +428,7 @@
MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP = 16384
</spec>
- name: MAX_VOLUNTARY_EXITS#phase0
- name: MAX_VOLUNTARY_EXITS
sources:
- file: config/params/config.go
search: MaxVoluntaryExits\s+.*yaml:"MAX_VOLUNTARY_EXITS"
@@ -466,7 +438,7 @@
MAX_VOLUNTARY_EXITS = 16
</spec>
- name: MAX_WITHDRAWALS_PER_PAYLOAD#capella
- name: MAX_WITHDRAWALS_PER_PAYLOAD
sources:
- file: config/fieldparams/mainnet.go
search: MaxWithdrawalsPerPayload\s*=
@@ -476,7 +448,7 @@
MAX_WITHDRAWALS_PER_PAYLOAD: uint64 = 16
</spec>
- name: MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD#electra
- name: MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD
sources:
- file: config/params/config.go
search: MaxWithdrawalRequestsPerPayload\s+.*yaml:"MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD"
@@ -486,7 +458,7 @@
MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD: uint64 = 16
</spec>
- name: MIN_ACTIVATION_BALANCE#electra
- name: MIN_ACTIVATION_BALANCE
sources:
- file: config/params/config.go
search: MinActivationBalance\s+.*yaml:"MIN_ACTIVATION_BALANCE"
@@ -496,7 +468,7 @@
MIN_ACTIVATION_BALANCE: Gwei = 32000000000
</spec>
- name: MIN_ATTESTATION_INCLUSION_DELAY#phase0
- name: MIN_ATTESTATION_INCLUSION_DELAY
sources:
- file: config/params/config.go
search: MinAttestationInclusionDelay\s+.*yaml:"MIN_ATTESTATION_INCLUSION_DELAY"
@@ -506,7 +478,7 @@
MIN_ATTESTATION_INCLUSION_DELAY: uint64 = 1
</spec>
- name: MIN_DEPOSIT_AMOUNT#phase0
- name: MIN_DEPOSIT_AMOUNT
sources:
- file: config/params/config.go
search: MinDepositAmount\s+.*yaml:"MIN_DEPOSIT_AMOUNT"
@@ -516,7 +488,7 @@
MIN_DEPOSIT_AMOUNT: Gwei = 1000000000
</spec>
- name: MIN_EPOCHS_TO_INACTIVITY_PENALTY#phase0
- name: MIN_EPOCHS_TO_INACTIVITY_PENALTY
sources:
- file: config/params/config.go
search: MinEpochsToInactivityPenalty\s+.*yaml:"MIN_EPOCHS_TO_INACTIVITY_PENALTY"
@@ -526,7 +498,7 @@
MIN_EPOCHS_TO_INACTIVITY_PENALTY: uint64 = 4
</spec>
- name: MIN_SEED_LOOKAHEAD#phase0
- name: MIN_SEED_LOOKAHEAD
sources:
- file: config/params/config.go
search: MinSeedLookahead\s+.*yaml:"MIN_SEED_LOOKAHEAD"
@@ -536,7 +508,7 @@
MIN_SEED_LOOKAHEAD: uint64 = 1
</spec>
- name: MIN_SLASHING_PENALTY_QUOTIENT#phase0
- name: MIN_SLASHING_PENALTY_QUOTIENT
sources:
- file: config/params/config.go
search: MinSlashingPenaltyQuotient\s+.*yaml:"MIN_SLASHING_PENALTY_QUOTIENT"
@@ -546,7 +518,7 @@
MIN_SLASHING_PENALTY_QUOTIENT: uint64 = 128
</spec>
- name: MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR#altair
- name: MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR
sources:
- file: config/params/config.go
search: MinSlashingPenaltyQuotientAltair\s+.*yaml:"MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR"
@@ -556,7 +528,7 @@
MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR: uint64 = 64
</spec>
- name: MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX#bellatrix
- name: MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX
sources:
- file: config/params/config.go
search: MinSlashingPenaltyQuotientBellatrix\s+.*yaml:"MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX"
@@ -566,7 +538,7 @@
MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX: uint64 = 32
</spec>
- name: MIN_SLASHING_PENALTY_QUOTIENT_ELECTRA#electra
- name: MIN_SLASHING_PENALTY_QUOTIENT_ELECTRA
sources:
- file: config/params/config.go
search: MinSlashingPenaltyQuotientElectra\s+.*yaml:"MIN_SLASHING_PENALTY_QUOTIENT_ELECTRA"
@@ -576,7 +548,7 @@
MIN_SLASHING_PENALTY_QUOTIENT_ELECTRA: uint64 = 4096
</spec>
- name: MIN_SYNC_COMMITTEE_PARTICIPANTS#altair
- name: MIN_SYNC_COMMITTEE_PARTICIPANTS
sources:
- file: config/params/config.go
search: MinSyncCommitteeParticipants\s+.*yaml:"MIN_SYNC_COMMITTEE_PARTICIPANTS"
@@ -586,7 +558,7 @@
MIN_SYNC_COMMITTEE_PARTICIPANTS = 1
</spec>
- name: NUMBER_OF_COLUMNS#fulu
- name: NUMBER_OF_COLUMNS
sources:
- file: config/fieldparams/mainnet.go
search: NumberOfColumns\s*=
@@ -596,7 +568,7 @@
NUMBER_OF_COLUMNS: uint64 = 128
</spec>
- name: PENDING_CONSOLIDATIONS_LIMIT#electra
- name: PENDING_CONSOLIDATIONS_LIMIT
sources:
- file: config/fieldparams/mainnet.go
search: PendingConsolidationsLimit\s*=
@@ -606,7 +578,7 @@
PENDING_CONSOLIDATIONS_LIMIT: uint64 = 262144
</spec>
- name: PENDING_DEPOSITS_LIMIT#electra
- name: PENDING_DEPOSITS_LIMIT
sources:
- file: config/fieldparams/mainnet.go
search: PendingDepositsLimit\s*=
@@ -616,7 +588,7 @@
PENDING_DEPOSITS_LIMIT: uint64 = 134217728
</spec>
- name: PENDING_PARTIAL_WITHDRAWALS_LIMIT#electra
- name: PENDING_PARTIAL_WITHDRAWALS_LIMIT
sources:
- file: config/fieldparams/mainnet.go
search: PendingPartialWithdrawalsLimit\s*=
@@ -626,7 +598,7 @@
PENDING_PARTIAL_WITHDRAWALS_LIMIT: uint64 = 134217728
</spec>
- name: PROPORTIONAL_SLASHING_MULTIPLIER#phase0
- name: PROPORTIONAL_SLASHING_MULTIPLIER
sources:
- file: config/params/config.go
search: ProportionalSlashingMultiplier\s+.*yaml:"PROPORTIONAL_SLASHING_MULTIPLIER"
@@ -636,7 +608,7 @@
PROPORTIONAL_SLASHING_MULTIPLIER: uint64 = 1
</spec>
- name: PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR#altair
- name: PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR
sources:
- file: config/params/config.go
search: ProportionalSlashingMultiplierAltair\s+.*yaml:"PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR"
@@ -646,7 +618,7 @@
PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR: uint64 = 2
</spec>
- name: PROPORTIONAL_SLASHING_MULTIPLIER_BELLATRIX#bellatrix
- name: PROPORTIONAL_SLASHING_MULTIPLIER_BELLATRIX
sources:
- file: config/params/config.go
search: ProportionalSlashingMultiplierBellatrix\s+.*yaml:"PROPORTIONAL_SLASHING_MULTIPLIER_BELLATRIX"
@@ -656,7 +628,7 @@
PROPORTIONAL_SLASHING_MULTIPLIER_BELLATRIX: uint64 = 3
</spec>
- name: PROPOSER_REWARD_QUOTIENT#phase0
- name: PROPOSER_REWARD_QUOTIENT
sources:
- file: config/params/config.go
search: ProposerRewardQuotient\s+.*yaml:"PROPOSER_REWARD_QUOTIENT"
@@ -666,14 +638,7 @@
PROPOSER_REWARD_QUOTIENT: uint64 = 8
</spec>
- name: PTC_SIZE#gloas
sources: []
spec: |
<spec preset_var="PTC_SIZE" fork="gloas" hash="d61c5930">
PTC_SIZE: uint64 = 512
</spec>
- name: SHUFFLE_ROUND_COUNT#phase0
- name: SHUFFLE_ROUND_COUNT
sources:
- file: config/params/config.go
search: ShuffleRoundCount\s+.*yaml:"SHUFFLE_ROUND_COUNT"
@@ -683,7 +648,7 @@
SHUFFLE_ROUND_COUNT: uint64 = 90
</spec>
- name: SLOTS_PER_EPOCH#phase0
- name: SLOTS_PER_EPOCH
sources:
- file: config/fieldparams/mainnet.go
search: SlotsPerEpoch\s*=
@@ -693,7 +658,7 @@
SLOTS_PER_EPOCH: uint64 = 32
</spec>
- name: SLOTS_PER_HISTORICAL_ROOT#phase0
- name: SLOTS_PER_HISTORICAL_ROOT
sources:
- file: config/fieldparams/mainnet.go
search: BlockRootsLength\s*=
@@ -703,7 +668,7 @@
SLOTS_PER_HISTORICAL_ROOT: uint64 = 8192
</spec>
- name: SYNC_COMMITTEE_SIZE#altair
- name: SYNC_COMMITTEE_SIZE
sources:
- file: config/fieldparams/mainnet.go
search: SyncCommitteeLength\s*=
@@ -713,7 +678,7 @@
SYNC_COMMITTEE_SIZE: uint64 = 512
</spec>
- name: TARGET_COMMITTEE_SIZE#phase0
- name: TARGET_COMMITTEE_SIZE
sources:
- file: config/params/config.go
search: TargetCommitteeSize\s+.*yaml:"TARGET_COMMITTEE_SIZE"
@@ -723,7 +688,7 @@
TARGET_COMMITTEE_SIZE: uint64 = 128
</spec>
- name: UPDATE_TIMEOUT#altair
- name: UPDATE_TIMEOUT
sources:
- file: beacon-chain/rpc/eth/config/handlers.go
search: data\["UPDATE_TIMEOUT"\]
@@ -733,7 +698,7 @@
UPDATE_TIMEOUT = 8192
</spec>
- name: VALIDATOR_REGISTRY_LIMIT#phase0
- name: VALIDATOR_REGISTRY_LIMIT
sources:
- file: config/fieldparams/mainnet.go
search: ValidatorRegistryLimit\s*=
@@ -743,7 +708,7 @@
VALIDATOR_REGISTRY_LIMIT: uint64 = 1099511627776
</spec>
- name: WHISTLEBLOWER_REWARD_QUOTIENT#phase0
- name: WHISTLEBLOWER_REWARD_QUOTIENT
sources:
- file: config/params/config.go
search: WhistleBlowerRewardQuotient\s+.*yaml:"WHISTLEBLOWER_REWARD_QUOTIENT"
@@ -753,7 +718,7 @@
WHISTLEBLOWER_REWARD_QUOTIENT: uint64 = 512
</spec>
- name: WHISTLEBLOWER_REWARD_QUOTIENT_ELECTRA#electra
- name: WHISTLEBLOWER_REWARD_QUOTIENT_ELECTRA
sources:
- file: config/params/config.go
search: WhistleBlowerRewardQuotientElectra\s+.*yaml:"WHISTLEBLOWER_REWARD_QUOTIENT_ELECTRA"

View File

@@ -225,9 +225,9 @@ func (r *testRunner) testDepositsAndTx(ctx context.Context, g *errgroup.Group,
if err := helpers.ComponentsStarted(ctx, []e2etypes.ComponentRunner{r.depositor}); err != nil {
return errors.Wrap(err, "testDepositsAndTx unable to run, depositor did not Start")
}
go func() {
if r.config.TestDeposits {
log.Info("Running deposit tests")
go func() {
if r.config.TestDeposits {
log.Info("Running deposit tests")
// The validators with an index < minGenesisActiveCount all have deposits already from the chain start.
// Skip all of those chain start validators by seeking to minGenesisActiveCount in the validator list
// for further deposit testing.
@@ -238,12 +238,12 @@ func (r *testRunner) testDepositsAndTx(ctx context.Context, g *errgroup.Group,
r.t.Error(errors.Wrap(err, "depositor.SendAndMine failed"))
}
}
}
// Only generate background transactions when relevant for the test.
if r.config.TestDeposits || r.config.TestFeature || r.config.UseBuilder {
r.testTxGeneration(ctx, g, keystorePath, []e2etypes.ComponentRunner{})
}
}()
}
// Only generate background transactions when relevant for the test.
if r.config.TestDeposits || r.config.TestFeature || r.config.UseBuilder {
r.testTxGeneration(ctx, g, keystorePath, []e2etypes.ComponentRunner{})
}
}()
if r.config.TestDeposits {
return depositCheckValidator.Start(ctx)
}

View File

@@ -38,8 +38,8 @@ func TestEndToEnd_MinimalConfig(t *testing.T) {
r := e2eMinimal(t, cfg,
types.WithCheckpointSync(),
types.WithEpochs(10),
types.WithExitEpoch(4), // Minimum due to ShardCommitteePeriod=4
types.WithLargeBlobs(), // Use large blob transactions for BPO testing
types.WithExitEpoch(4), // Minimum due to ShardCommitteePeriod=4
types.WithLargeBlobs(), // Use large blob transactions for BPO testing
)
r.run()
}
}

View File

@@ -204,6 +204,7 @@ go_test(
"gloas__operations__execution_payload_header_test.go",
"gloas__operations__payload_attestation_test.go",
"gloas__operations__proposer_slashing_test.go",
"gloas__operations__withdrawals_test.go",
"gloas__sanity__slots_test.go",
"gloas__ssz_static__ssz_static_test.go",
"phase0__epoch_processing__effective_balance_updates_test.go",

View File

@@ -0,0 +1,11 @@
package mainnet

import (
	"testing"

	"github.com/OffchainLabs/prysm/v7/testing/spectest/shared/gloas/operations"
)

func TestMainnet_Gloas_Operations_Withdrawals(t *testing.T) {
	operations.RunWithdrawalsTest(t, "mainnet")
}

View File

@@ -210,6 +210,7 @@ go_test(
"gloas__operations__execution_payload_bid_test.go",
"gloas__operations__payload_attestation_test.go",
"gloas__operations__proposer_slashing_test.go",
"gloas__operations__withdrawals_test.go",
"gloas__sanity__slots_test.go",
"gloas__ssz_static__ssz_static_test.go",
"phase0__epoch_processing__effective_balance_updates_test.go",

View File

@@ -0,0 +1,11 @@
package minimal

import (
	"testing"

	"github.com/OffchainLabs/prysm/v7/testing/spectest/shared/gloas/operations"
)

func TestMinimal_Gloas_Operations_Withdrawals(t *testing.T) {
	operations.RunWithdrawalsTest(t, "minimal")
}
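
Both per-config entry points delegate to a shared RunWithdrawalsTest runner added later in this diff, which in turn hands a ProcessWithdrawals closure to common.RunBlockOperationTest. For orientation, the general shape of such an operation spec test — decode the snappy-compressed SSZ pre-state, apply the operation, and compare against the expected post-state (whose absence marks an invalid case) — is sketched below. This is an illustrative sketch only, not the shared helper itself; the applyFn type, function names, and byte-level comparison are assumptions.

// Illustrative sketch of the fixture flow driven by the shared operation
// runner. Fixture names follow the consensus spec test layout
// (pre.ssz_snappy / post.ssz_snappy); everything else is assumed.
package operations

import (
	"bytes"
	"os"
	"path"
	"testing"

	"github.com/golang/snappy"
)

// applyFn applies a single block operation to an SSZ-encoded pre-state and
// returns the SSZ-encoded post-state, or an error for invalid cases.
type applyFn func(pre []byte) ([]byte, error)

func runOperationCase(t *testing.T, folder string, apply applyFn) {
	preSnappy, err := os.ReadFile(path.Join(folder, "pre.ssz_snappy"))
	if err != nil {
		t.Fatal(err)
	}
	pre, err := snappy.Decode(nil, preSnappy)
	if err != nil {
		t.Fatal(err)
	}

	got, applyErr := apply(pre)

	postSnappy, err := os.ReadFile(path.Join(folder, "post.ssz_snappy"))
	if os.IsNotExist(err) {
		// A missing post-state fixture means the operation must be rejected.
		if applyErr == nil {
			t.Fatal("expected the operation to fail, but it succeeded")
		}
		return
	}
	if err != nil {
		t.Fatal(err)
	}
	post, err := snappy.Decode(nil, postSnappy)
	if err != nil {
		t.Fatal(err)
	}
	if applyErr != nil {
		t.Fatalf("operation failed unexpectedly: %v", applyErr)
	}
	if !bytes.Equal(got, post) {
		t.Fatal("post-state does not match the expected fixture")
	}
}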

View File

@@ -8,16 +8,20 @@ go_library(
"helpers.go",
"payload_attestation.go",
"proposer_slashing.go",
"withdrawals.go",
],
importpath = "github.com/OffchainLabs/prysm/v7/testing/spectest/shared/gloas/operations",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/core/gloas:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//testing/require:go_default_library",
"//testing/spectest/shared/common/operations:go_default_library",
"//testing/spectest/utils:go_default_library",
],
)

View File

@@ -0,0 +1,48 @@
package operations

import (
	"context"
	"path"
	"testing"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v7/runtime/version"
	"github.com/OffchainLabs/prysm/v7/testing/require"
	common "github.com/OffchainLabs/prysm/v7/testing/spectest/shared/common/operations"
	"github.com/OffchainLabs/prysm/v7/testing/spectest/utils"
)

// emptyBlockGloas returns a signed Gloas block with an empty body; the common
// operation runner only needs a block wrapper of the right fork version.
func emptyBlockGloas() (interfaces.SignedBeaconBlock, error) {
	b := &ethpb.SignedBeaconBlockGloas{
		Block: &ethpb.BeaconBlockGloas{
			Body: &ethpb.BeaconBlockBodyGloas{},
		},
	}
	return blocks.NewSignedBeaconBlock(b)
}

// RunWithdrawalsTest executes the consensus-spec "operations/withdrawals"
// vectors for the Gloas fork against gloas.ProcessWithdrawals.
func RunWithdrawalsTest(t *testing.T, config string) {
	require.NoError(t, utils.SetConfig(t, config))
	testFolders, testsFolderPath := utils.TestFolders(t, config, version.String(version.Gloas), "operations/withdrawals/pyspec_tests")
	if len(testFolders) == 0 {
		t.Fatalf("No test folders found for %s/%s/%s", config, version.String(version.Gloas), "operations/withdrawals/pyspec_tests")
	}
	for _, folder := range testFolders {
		t.Run(folder.Name(), func(t *testing.T) {
			folderPath := path.Join(testsFolderPath, folder.Name())
			blk, err := emptyBlockGloas()
			require.NoError(t, err)
			common.RunBlockOperationTest(t, folderPath, blk, sszToState, func(_ context.Context, s state.BeaconState, _ interfaces.ReadOnlySignedBeaconBlock) (state.BeaconState, error) {
				if err := gloas.ProcessWithdrawals(s); err != nil {
					return nil, err
				}
				return s, nil
			})
		})
	}
}
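
The runner passes a sszToState helper that lives in this package's helpers.go and is not shown in this diff. A minimal sketch of what such a helper typically looks like follows; the proto type and constructor names (ethpb.BeaconStateGloas, state_native.InitializeFromProtoGloas) are assumptions based on the naming pattern of earlier forks, not confirmed by this diff.

package operations

import (
	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
	state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)

// sszToState decodes an SSZ-encoded Gloas beacon state into a state-native
// object so the common operation runner can mutate and hash it.
// The proto type and constructor names below are assumed from the pattern
// used by earlier forks; the real helper lives in helpers.go.
func sszToState(b []byte) (state.BeaconState, error) {
	base := &ethpb.BeaconStateGloas{}
	if err := base.UnmarshalSSZ(b); err != nil {
		return nil, err
	}
	return state_native.InitializeFromProtoGloas(base)
}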

View File

@@ -283,18 +283,16 @@ func (mr *MockValidatorClientMockRecorder) ProposeExit(ctx, in any) *gomock.Call
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ProposeExit", reflect.TypeOf((*MockValidatorClient)(nil).ProposeExit), ctx, in)
}
// EnsureReady mocks base method.
func (m *MockValidatorClient) EnsureReady(ctx context.Context) bool {
// SetHost mocks base method.
func (m *MockValidatorClient) SetHost(host string) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "EnsureReady", ctx)
ret0, _ := ret[0].(bool)
return ret0
m.ctrl.Call(m, "SetHost", host)
}
// EnsureReady indicates an expected call of EnsureReady.
func (mr *MockValidatorClientMockRecorder) EnsureReady(ctx any) *gomock.Call {
// SetHost indicates an expected call of SetHost.
func (mr *MockValidatorClientMockRecorder) SetHost(host any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnsureReady", reflect.TypeOf((*MockValidatorClient)(nil).EnsureReady), ctx)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHost", reflect.TypeOf((*MockValidatorClient)(nil).SetHost), host)
}
// StartEventStream mocks base method.

View File

@@ -128,18 +128,18 @@ func (mr *MockValidatorMockRecorder) EventsChan() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EventsChan", reflect.TypeOf((*MockValidator)(nil).EventsChan))
}
// EnsureReady mocks base method.
func (m *MockValidator) EnsureReady(arg0 context.Context) bool {
// FindHealthyHost mocks base method.
func (m *MockValidator) FindHealthyHost(arg0 context.Context) bool {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "EnsureReady", arg0)
ret := m.ctrl.Call(m, "FindHealthyHost", arg0)
ret0, _ := ret[0].(bool)
return ret0
}
// EnsureReady indicates an expected call of EnsureReady.
func (mr *MockValidatorMockRecorder) EnsureReady(arg0 any) *gomock.Call {
// FindHealthyHost indicates an expected call of FindHealthyHost.
func (mr *MockValidatorMockRecorder) FindHealthyHost(arg0 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnsureReady", reflect.TypeOf((*MockValidator)(nil).EnsureReady), arg0)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindHealthyHost", reflect.TypeOf((*MockValidator)(nil).FindHealthyHost), arg0)
}
// GenesisTime mocks base method.
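
The regenerated mocks are consumed through the usual gomock pattern. A sketch of how a test might stub FindHealthyHost; the validatormock import path, the NewMockValidator constructor name, and the gomock module path follow standard gomock codegen conventions and are assumptions here:

package validator_test

import (
    "context"
    "testing"

    validatormock "github.com/OffchainLabs/prysm/v7/testing/validator-mock" // path assumed
    "go.uber.org/mock/gomock"                                               // or github.com/golang/mock/gomock, depending on the module's gomock fork
)

func TestFindHealthyHostStub(t *testing.T) {
    ctrl := gomock.NewController(t)
    defer ctrl.Finish()

    v := validatormock.NewMockValidator(ctrl) // constructor name per gomock convention (assumed)
    // Expect a single readiness probe and report that a healthy host was found.
    v.EXPECT().FindHealthyHost(gomock.Any()).Return(true)

    if !v.FindHealthyHost(context.Background()) {
        t.Fatal("expected FindHealthyHost to report a healthy host")
    }
}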

View File

@@ -25,7 +25,6 @@ go_library(
],
deps = [
"//api/grpc:go_default_library",
"//api/rest:go_default_library",
"//beacon-chain/core/blocks:go_default_library",
"//cmd/validator/flags:go_default_library",
"//config/fieldparams:go_default_library",

View File

@@ -3,13 +3,14 @@ package accounts
import (
"context"
"io"
"net/http"
"os"
"time"
grpcutil "github.com/OffchainLabs/prysm/v7/api/grpc"
"github.com/OffchainLabs/prysm/v7/api/rest"
"github.com/OffchainLabs/prysm/v7/crypto/bls"
"github.com/OffchainLabs/prysm/v7/validator/accounts/wallet"
beaconApi "github.com/OffchainLabs/prysm/v7/validator/client/beacon-api"
iface "github.com/OffchainLabs/prysm/v7/validator/client/iface"
nodeClientFactory "github.com/OffchainLabs/prysm/v7/validator/client/node-client-factory"
validatorClientFactory "github.com/OffchainLabs/prysm/v7/validator/client/validator-client-factory"
@@ -76,17 +77,22 @@ func (acm *CLIManager) prepareBeaconClients(ctx context.Context) (*iface.Validat
}
ctx = grpcutil.AppendHeaders(ctx, acm.grpcHeaders)
conn, err := validatorHelpers.NewNodeConnection(
validatorHelpers.WithGRPC(ctx, acm.beaconRPCProvider, acm.dialOpts),
validatorHelpers.WithREST(acm.beaconApiEndpoint, rest.WithHttpTimeout(acm.beaconApiTimeout)),
)
grpcConn, err := grpc.DialContext(ctx, acm.beaconRPCProvider, acm.dialOpts...)
if err != nil {
return nil, nil, err
return nil, nil, errors.Wrapf(err, "could not dial endpoint %s", acm.beaconRPCProvider)
}
conn := validatorHelpers.NewNodeConnection(
grpcConn,
acm.beaconApiEndpoint,
validatorHelpers.WithBeaconApiTimeout(acm.beaconApiTimeout),
)
validatorClient := validatorClientFactory.NewValidatorClient(conn)
nodeClient := nodeClientFactory.NewNodeClient(conn)
restHandler := beaconApi.NewBeaconApiRestHandler(
http.Client{Timeout: acm.beaconApiTimeout},
acm.beaconApiEndpoint,
)
validatorClient := validatorClientFactory.NewValidatorClient(conn, restHandler)
nodeClient := nodeClientFactory.NewNodeClient(conn, restHandler)
return &validatorClient, &nodeClient, nil
}
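
Because the hunk above interleaves the removed gRPC/REST plumbing with its replacement, here is the added construction path in isolation: a sketch assembled only from the new lines, with the surrounding function signature and imports omitted for brevity:

grpcConn, err := grpc.DialContext(ctx, acm.beaconRPCProvider, acm.dialOpts...)
if err != nil {
    return nil, nil, errors.Wrapf(err, "could not dial endpoint %s", acm.beaconRPCProvider)
}
// Wrap the gRPC connection together with the beacon API endpoint and timeout.
conn := validatorHelpers.NewNodeConnection(
    grpcConn,
    acm.beaconApiEndpoint,
    validatorHelpers.WithBeaconApiTimeout(acm.beaconApiTimeout),
)
// A dedicated REST handler is now passed explicitly to both client factories.
restHandler := beaconApi.NewBeaconApiRestHandler(
    http.Client{Timeout: acm.beaconApiTimeout},
    acm.beaconApiEndpoint,
)
validatorClient := validatorClientFactory.NewValidatorClient(conn, restHandler)
nodeClient := nodeClientFactory.NewNodeClient(conn, restHandler)
return &validatorClient, &nodeClient, nil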

Some files were not shown because too many files have changed in this diff.