Compare commits

..

3 Commits

Author         SHA1        Message                                         Date
terence        8011396a48  James feedback 2                                2026-02-04 17:17:15 -08:00
terence        d9dae87c74  James feedback 1                                2026-02-04 08:10:03 -08:00
terence tsao   f75c3082d5  gloas: implement modified process withdrawals   2026-02-01 19:41:59 -08:00
216 changed files with 4744 additions and 8419 deletions

View File

@@ -12,11 +12,11 @@ jobs:
- name: Check version consistency
run: |
WORKSPACE_VERSION=$(grep 'consensus_spec_version = ' WORKSPACE | sed 's/.*"\(.*\)"/\1/')
ETHSPECIFY_VERSION=$(grep '^version:' .ethspecify.yml | sed 's/version: //')
ETHSPECIFY_VERSION=$(grep '^version:' specrefs/.ethspecify.yml | sed 's/version: //')
if [ "$WORKSPACE_VERSION" != "$ETHSPECIFY_VERSION" ]; then
echo "Version mismatch between WORKSPACE and ethspecify"
echo " WORKSPACE: $WORKSPACE_VERSION"
echo " .ethspecify.yml: $ETHSPECIFY_VERSION"
echo " specrefs/.ethspecify.yml: $ETHSPECIFY_VERSION"
exit 1
else
echo "Versions match: $WORKSPACE_VERSION"
@@ -26,7 +26,7 @@ jobs:
run: python3 -mpip install ethspecify
- name: Update spec references
run: ethspecify
run: ethspecify process --path=specrefs
- name: Check for differences
run: |
@@ -40,4 +40,4 @@ jobs:
fi
- name: Check spec references
run: ethspecify check
run: ethspecify check --path=specrefs

View File

@@ -2,7 +2,7 @@ name: Go
on:
push:
branches: [ master, develop ]
branches: [ master ]
pull_request:
branches: [ '*' ]
merge_group:

View File

@@ -33,8 +33,9 @@ formatters:
generated: lax
paths:
- validator/web/site_data.go
- .*_test.go
- proto
- tools/analyzers
- third_party$
- builtin$
- examples$
- examples$

View File

@@ -273,16 +273,16 @@ filegroup(
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)
consensus_spec_version = "v1.7.0-alpha.2"
consensus_spec_version = "v1.7.0-alpha.1"
load("@prysm//tools:download_spectests.bzl", "consensus_spec_tests")
consensus_spec_tests(
name = "consensus_spec_tests",
flavors = {
"general": "sha256-iGQsGZ1cHah+2CSod9jC3kN8Ku4n6KO0hIwfINrn/po=",
"minimal": "sha256-TgcYt8N8sXSttdHTGvOa+exUZ1zn1UzlAMz0V7i37xc=",
"mainnet": "sha256-LnXyiLoJtrvEvbqLDSAAqpLMdN/lXv92SAgYG8fNjCs=",
"general": "sha256-j5R3jA7Oo4OSDMTvpMuD+8RomaCByeFSwtfkq6fL0Zg=",
"minimal": "sha256-tdTqByoyswOS4r6OxFmo70y2BP7w1TgEok+gf4cbxB0=",
"mainnet": "sha256-5gB4dt6SnSDKzdBc06VedId3NkgvSYyv9n9FRxWKwYI=",
},
version = consensus_spec_version,
)
@@ -298,7 +298,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-Y/67Dg393PksZj5rTFNLntiJ6hNdB7Rxbu5gZE2gebY=",
integrity = "sha256-J+43DrK1pF658kTXTwMS6zGf4KDjvas++m8w2a8swpg=",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)

View File

@@ -1,19 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"fallback.go",
"log.go",
],
importpath = "github.com/OffchainLabs/prysm/v7/api/fallback",
visibility = ["//visibility:public"],
deps = ["@com_github_sirupsen_logrus//:go_default_library"],
)
go_test(
name = "go_default_test",
srcs = ["fallback_test.go"],
embed = [":go_default_library"],
deps = ["//testing/assert:go_default_library"],
)

View File

@@ -1,66 +0,0 @@
package fallback
import (
"context"
"github.com/sirupsen/logrus"
)
// HostProvider is the subset of connection-provider methods that EnsureReady
// needs. Both grpc.GrpcConnectionProvider and rest.RestConnectionProvider
// satisfy this interface.
type HostProvider interface {
Hosts() []string
CurrentHost() string
SwitchHost(index int) error
}
// ReadyChecker can report whether the current endpoint is ready.
// iface.NodeClient satisfies this implicitly.
type ReadyChecker interface {
IsReady(ctx context.Context) bool
}
// EnsureReady iterates through the configured hosts and returns true as soon as
// one responds as ready. It starts from the provider's current host and wraps
// around using modular arithmetic, performing failover when a host is not ready.
func EnsureReady(ctx context.Context, provider HostProvider, checker ReadyChecker) bool {
hosts := provider.Hosts()
numHosts := len(hosts)
startingHost := provider.CurrentHost()
var attemptedHosts []string
// Find current index
currentIdx := 0
for i, h := range hosts {
if h == startingHost {
currentIdx = i
break
}
}
for i := range numHosts {
if checker.IsReady(ctx) {
if len(attemptedHosts) > 0 {
log.WithFields(logrus.Fields{
"previous": startingHost,
"current": provider.CurrentHost(),
"tried": attemptedHosts,
}).Info("Switched to responsive beacon node")
}
return true
}
attemptedHosts = append(attemptedHosts, provider.CurrentHost())
// Try next host if not the last iteration
if i < numHosts-1 {
nextIdx := (currentIdx + i + 1) % numHosts
if err := provider.SwitchHost(nextIdx); err != nil {
log.WithError(err).Error("Failed to switch host")
}
}
}
log.WithField("tried", attemptedHosts).Warn("No responsive beacon node found")
return false
}
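
For orientation, a minimal usage sketch of the failover loop documented above, assuming the api/fallback and api/rest packages shown in this diff; staticChecker is a hypothetical stand-in for the node client that normally acts as the ReadyChecker.

package main

import (
    "context"
    "fmt"

    "github.com/OffchainLabs/prysm/v7/api/fallback"
    "github.com/OffchainLabs/prysm/v7/api/rest"
)

// staticChecker reports a fixed readiness value; real callers pass the node client.
type staticChecker struct{ ready bool }

func (c staticChecker) IsReady(_ context.Context) bool { return c.ready }

func main() {
    // The REST provider satisfies fallback.HostProvider (Hosts, CurrentHost, SwitchHost).
    provider, err := rest.NewRestConnectionProvider("http://host1:3500,http://host2:3500")
    if err != nil {
        panic(err)
    }
    // EnsureReady starts at the current host, fails over through the list once,
    // and reports whether any host answered as ready.
    if fallback.EnsureReady(context.Background(), provider, staticChecker{ready: true}) {
        fmt.Println("using", provider.CurrentHost())
    }
}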

View File

@@ -1,94 +0,0 @@
package fallback
import (
"context"
"testing"
"github.com/OffchainLabs/prysm/v7/testing/assert"
)
// mockHostProvider is a minimal HostProvider for unit tests.
type mockHostProvider struct {
hosts []string
hostIndex int
}
func (m *mockHostProvider) Hosts() []string { return m.hosts }
func (m *mockHostProvider) CurrentHost() string {
return m.hosts[m.hostIndex%len(m.hosts)]
}
func (m *mockHostProvider) SwitchHost(index int) error { m.hostIndex = index; return nil }
// mockReadyChecker records per-call IsReady results in sequence.
type mockReadyChecker struct {
results []bool
idx int
}
func (m *mockReadyChecker) IsReady(_ context.Context) bool {
if m.idx >= len(m.results) {
return false
}
r := m.results[m.idx]
m.idx++
return r
}
func TestEnsureReady_SingleHostReady(t *testing.T) {
provider := &mockHostProvider{hosts: []string{"http://host1:3500"}, hostIndex: 0}
checker := &mockReadyChecker{results: []bool{true}}
assert.Equal(t, true, EnsureReady(t.Context(), provider, checker))
assert.Equal(t, 0, provider.hostIndex)
}
func TestEnsureReady_SingleHostNotReady(t *testing.T) {
provider := &mockHostProvider{hosts: []string{"http://host1:3500"}, hostIndex: 0}
checker := &mockReadyChecker{results: []bool{false}}
assert.Equal(t, false, EnsureReady(t.Context(), provider, checker))
}
func TestEnsureReady_SingleHostError(t *testing.T) {
provider := &mockHostProvider{hosts: []string{"http://host1:3500"}, hostIndex: 0}
checker := &mockReadyChecker{results: []bool{false}}
assert.Equal(t, false, EnsureReady(t.Context(), provider, checker))
}
func TestEnsureReady_MultipleHostsFirstReady(t *testing.T) {
provider := &mockHostProvider{
hosts: []string{"http://host1:3500", "http://host2:3500"},
hostIndex: 0,
}
checker := &mockReadyChecker{results: []bool{true}}
assert.Equal(t, true, EnsureReady(t.Context(), provider, checker))
assert.Equal(t, 0, provider.hostIndex)
}
func TestEnsureReady_MultipleHostsFailoverToSecond(t *testing.T) {
provider := &mockHostProvider{
hosts: []string{"http://host1:3500", "http://host2:3500"},
hostIndex: 0,
}
checker := &mockReadyChecker{results: []bool{false, true}}
assert.Equal(t, true, EnsureReady(t.Context(), provider, checker))
assert.Equal(t, 1, provider.hostIndex)
}
func TestEnsureReady_MultipleHostsNoneReady(t *testing.T) {
provider := &mockHostProvider{
hosts: []string{"http://host1:3500", "http://host2:3500", "http://host3:3500"},
hostIndex: 0,
}
checker := &mockReadyChecker{results: []bool{false, false, false}}
assert.Equal(t, false, EnsureReady(t.Context(), provider, checker))
}
func TestEnsureReady_WrapAroundFromNonZeroIndex(t *testing.T) {
provider := &mockHostProvider{
hosts: []string{"http://host0:3500", "http://host1:3500", "http://host2:3500"},
hostIndex: 1,
}
// host1 (start) fails, host2 fails, host0 succeeds
checker := &mockReadyChecker{results: []bool{false, false, true}}
assert.Equal(t, true, EnsureReady(t.Context(), provider, checker))
assert.Equal(t, 0, provider.hostIndex)
}

View File

@@ -1,9 +0,0 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package fallback
import "github.com/sirupsen/logrus"
// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "api/fallback")

View File

@@ -3,16 +3,13 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"grpc_connection_provider.go",
"grpcutils.go",
"log.go",
"mock_grpc_provider.go",
"parameters.go",
],
importpath = "github.com/OffchainLabs/prysm/v7/api/grpc",
visibility = ["//visibility:public"],
deps = [
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@org_golang_google_grpc//:go_default_library",
"@org_golang_google_grpc//metadata:go_default_library",
@@ -21,17 +18,12 @@ go_library(
go_test(
name = "go_default_test",
srcs = [
"grpc_connection_provider_test.go",
"grpcutils_test.go",
],
srcs = ["grpcutils_test.go"],
embed = [":go_default_library"],
deps = [
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
"@org_golang_google_grpc//:go_default_library",
"@org_golang_google_grpc//credentials/insecure:go_default_library",
"@org_golang_google_grpc//metadata:go_default_library",
],
)

View File

@@ -1,186 +0,0 @@
package grpc
import (
"context"
"strings"
"sync"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"google.golang.org/grpc"
)
// GrpcConnectionProvider manages gRPC connections for failover support.
// It allows switching between different beacon node endpoints when the current one becomes unavailable.
// Only one connection is maintained at a time - when switching hosts, the old connection is closed.
type GrpcConnectionProvider interface {
// CurrentConn returns the currently active gRPC connection.
// The connection is created lazily on first call.
// Returns nil if the provider has been closed.
CurrentConn() *grpc.ClientConn
// CurrentHost returns the address of the currently active endpoint.
CurrentHost() string
// Hosts returns all configured endpoint addresses.
Hosts() []string
// SwitchHost switches to the endpoint at the given index.
// The new connection is created lazily on next CurrentConn() call.
SwitchHost(index int) error
// ConnectionCounter returns a monotonically increasing counter that increments
// each time SwitchHost changes the active endpoint. This allows consumers to
// detect connection changes even when the host string returns to a previous value
// (e.g., host0 → host1 → host0).
ConnectionCounter() uint64
// Close closes the current connection.
Close()
}
type grpcConnectionProvider struct {
// Immutable after construction - no lock needed for reads
endpoints []string
ctx context.Context
dialOpts []grpc.DialOption
// Current connection state (protected by mutex)
currentIndex uint64
conn *grpc.ClientConn
connCounter uint64
mu sync.Mutex
closed bool
}
// NewGrpcConnectionProvider creates a new connection provider that manages gRPC connections.
// The endpoint parameter can be a comma-separated list of addresses (e.g., "host1:4000,host2:4000").
// Only one connection is maintained at a time, created lazily on first use.
func NewGrpcConnectionProvider(
ctx context.Context,
endpoint string,
dialOpts []grpc.DialOption,
) (GrpcConnectionProvider, error) {
endpoints := parseEndpoints(endpoint)
if len(endpoints) == 0 {
return nil, errors.New("no gRPC endpoints provided")
}
log.WithFields(logrus.Fields{
"endpoints": endpoints,
"count": len(endpoints),
}).Info("Initialized gRPC connection provider")
return &grpcConnectionProvider{
endpoints: endpoints,
ctx: ctx,
dialOpts: dialOpts,
}, nil
}
// parseEndpoints splits a comma-separated endpoint string into individual endpoints.
func parseEndpoints(endpoint string) []string {
if endpoint == "" {
return nil
}
endpoints := make([]string, 0, 1)
for p := range strings.SplitSeq(endpoint, ",") {
if p = strings.TrimSpace(p); p != "" {
endpoints = append(endpoints, p)
}
}
return endpoints
}
func (p *grpcConnectionProvider) CurrentConn() *grpc.ClientConn {
p.mu.Lock()
defer p.mu.Unlock()
if p.closed {
return nil
}
// Return existing connection if available
if p.conn != nil {
return p.conn
}
// Create connection lazily
ep := p.endpoints[p.currentIndex]
conn, err := grpc.DialContext(p.ctx, ep, p.dialOpts...)
if err != nil {
log.WithError(err).WithField("endpoint", ep).Error("Failed to create gRPC connection")
return nil
}
p.conn = conn
log.WithField("endpoint", ep).Debug("Created gRPC connection")
return conn
}
func (p *grpcConnectionProvider) CurrentHost() string {
p.mu.Lock()
defer p.mu.Unlock()
return p.endpoints[p.currentIndex]
}
func (p *grpcConnectionProvider) Hosts() []string {
// Return a copy to maintain immutability
hosts := make([]string, len(p.endpoints))
copy(hosts, p.endpoints)
return hosts
}
func (p *grpcConnectionProvider) SwitchHost(index int) error {
if index < 0 || index >= len(p.endpoints) {
return errors.Errorf("invalid host index %d, must be between 0 and %d", index, len(p.endpoints)-1)
}
p.mu.Lock()
defer p.mu.Unlock()
if uint64(index) == p.currentIndex {
return nil // Already on this host
}
oldHost := p.endpoints[p.currentIndex]
oldConn := p.conn
p.conn = nil // Clear immediately - new connection created lazily
p.currentIndex = uint64(index)
p.connCounter++
// Close old connection asynchronously to avoid blocking the caller
if oldConn != nil {
go func() {
if err := oldConn.Close(); err != nil {
log.WithError(err).WithField("endpoint", oldHost).Debug("Failed to close previous connection")
}
}()
}
log.WithFields(logrus.Fields{
"previousHost": oldHost,
"newHost": p.endpoints[index],
}).Debug("Switched gRPC endpoint")
return nil
}
func (p *grpcConnectionProvider) ConnectionCounter() uint64 {
p.mu.Lock()
defer p.mu.Unlock()
return p.connCounter
}
func (p *grpcConnectionProvider) Close() {
p.mu.Lock()
defer p.mu.Unlock()
if p.closed {
return
}
p.closed = true
if p.conn != nil {
if err := p.conn.Close(); err != nil {
log.WithError(err).WithField("endpoint", p.endpoints[p.currentIndex]).Debug("Failed to close gRPC connection")
}
p.conn = nil
}
}
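
As a reference point, a small sketch of driving this provider through its interface, assuming the api/grpc package shown above; the endpoints and dial options are illustrative.

package main

import (
    "context"
    "fmt"

    prysmgrpc "github.com/OffchainLabs/prysm/v7/api/grpc"
    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"
)

func main() {
    dialOpts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
    // Comma-separated endpoints; only one connection is held at a time.
    provider, err := prysmgrpc.NewGrpcConnectionProvider(context.Background(), "host1:4000,host2:4000", dialOpts)
    if err != nil {
        panic(err)
    }
    defer provider.Close()

    _ = provider.CurrentConn() // connection is created lazily on first use

    // Fail over to the second endpoint; the old connection is closed in the
    // background and the counter lets callers detect the switch.
    before := provider.ConnectionCounter()
    if err := provider.SwitchHost(1); err != nil {
        panic(err)
    }
    fmt.Println("switched:", provider.ConnectionCounter() > before, "now on", provider.CurrentHost())
}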

View File

@@ -1,207 +0,0 @@
package grpc
import (
"context"
"net"
"reflect"
"strings"
"testing"
"github.com/OffchainLabs/prysm/v7/testing/assert"
"github.com/OffchainLabs/prysm/v7/testing/require"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
)
func TestParseEndpoints(t *testing.T) {
tests := []struct {
name string
input string
expected []string
}{
{"single endpoint", "localhost:4000", []string{"localhost:4000"}},
{"multiple endpoints", "host1:4000,host2:4000,host3:4000", []string{"host1:4000", "host2:4000", "host3:4000"}},
{"endpoints with spaces", "host1:4000, host2:4000 , host3:4000", []string{"host1:4000", "host2:4000", "host3:4000"}},
{"empty string", "", nil},
{"only commas", ",,,", []string{}},
{"trailing comma", "host1:4000,host2:4000,", []string{"host1:4000", "host2:4000"}},
{"leading comma", ",host1:4000,host2:4000", []string{"host1:4000", "host2:4000"}},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := parseEndpoints(tt.input)
if !reflect.DeepEqual(tt.expected, got) {
t.Errorf("parseEndpoints(%q) = %v, want %v", tt.input, got, tt.expected)
}
})
}
}
func TestNewGrpcConnectionProvider_Errors(t *testing.T) {
t.Run("no endpoints", func(t *testing.T) {
dialOpts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
_, err := NewGrpcConnectionProvider(context.Background(), "", dialOpts)
require.ErrorContains(t, "no gRPC endpoints provided", err)
})
}
func TestGrpcConnectionProvider_LazyConnection(t *testing.T) {
// Start only one server but configure provider with two endpoints
lis, err := net.Listen("tcp", "127.0.0.1:0")
require.NoError(t, err)
server := grpc.NewServer()
go func() { _ = server.Serve(lis) }()
defer server.Stop()
validAddr := lis.Addr().String()
invalidAddr := "127.0.0.1:1" // Port 1 is unlikely to be listening
// Provider should succeed even though second endpoint is invalid (lazy connections)
endpoint := validAddr + "," + invalidAddr
ctx := context.Background()
dialOpts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
provider, err := NewGrpcConnectionProvider(ctx, endpoint, dialOpts)
require.NoError(t, err, "Provider creation should succeed with lazy connections")
defer func() { provider.Close() }()
// First endpoint should work
conn := provider.CurrentConn()
assert.NotNil(t, conn, "First connection should be created lazily")
}
func TestGrpcConnectionProvider_SingleConnectionModel(t *testing.T) {
// Create provider with 3 endpoints
var addrs []string
var servers []*grpc.Server
for range 3 {
lis, err := net.Listen("tcp", "127.0.0.1:0")
require.NoError(t, err)
server := grpc.NewServer()
go func() { _ = server.Serve(lis) }()
addrs = append(addrs, lis.Addr().String())
servers = append(servers, server)
}
defer func() {
for _, s := range servers {
s.Stop()
}
}()
endpoint := strings.Join(addrs, ",")
ctx := context.Background()
dialOpts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
provider, err := NewGrpcConnectionProvider(ctx, endpoint, dialOpts)
require.NoError(t, err)
defer func() { provider.Close() }()
// Access the internal state to verify single connection behavior
p := provider.(*grpcConnectionProvider)
// Initially no connection
p.mu.Lock()
assert.Equal(t, (*grpc.ClientConn)(nil), p.conn, "Connection should be nil before access")
p.mu.Unlock()
// Access connection - should create one
conn0 := provider.CurrentConn()
assert.NotNil(t, conn0)
p.mu.Lock()
assert.NotNil(t, p.conn, "Connection should be created after CurrentConn()")
firstConn := p.conn
p.mu.Unlock()
// Call CurrentConn again - should return same connection
conn0Again := provider.CurrentConn()
assert.Equal(t, conn0, conn0Again, "Should return same connection")
// Switch to different host - old connection should be closed, new one created lazily
require.NoError(t, provider.SwitchHost(1))
p.mu.Lock()
assert.Equal(t, (*grpc.ClientConn)(nil), p.conn, "Connection should be nil after SwitchHost (lazy)")
p.mu.Unlock()
// Get new connection
conn1 := provider.CurrentConn()
assert.NotNil(t, conn1)
assert.NotEqual(t, firstConn, conn1, "Should be a different connection after switching hosts")
}
// testProvider creates a provider with n test servers and returns cleanup function.
func testProvider(t *testing.T, n int) (GrpcConnectionProvider, []string, func()) {
var addrs []string
var cleanups []func()
for range n {
lis, err := net.Listen("tcp", "127.0.0.1:0")
require.NoError(t, err)
server := grpc.NewServer()
go func() { _ = server.Serve(lis) }()
addrs = append(addrs, lis.Addr().String())
cleanups = append(cleanups, server.Stop)
}
endpoint := strings.Join(addrs, ",")
ctx := context.Background()
dialOpts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
provider, err := NewGrpcConnectionProvider(ctx, endpoint, dialOpts)
require.NoError(t, err)
cleanup := func() {
provider.Close()
for _, c := range cleanups {
c()
}
}
return provider, addrs, cleanup
}
func TestGrpcConnectionProvider(t *testing.T) {
provider, addrs, cleanup := testProvider(t, 3)
defer cleanup()
t.Run("initial state", func(t *testing.T) {
assert.Equal(t, 3, len(provider.Hosts()))
assert.Equal(t, addrs[0], provider.CurrentHost())
assert.NotNil(t, provider.CurrentConn())
})
t.Run("SwitchHost", func(t *testing.T) {
require.NoError(t, provider.SwitchHost(1))
assert.Equal(t, addrs[1], provider.CurrentHost())
assert.NotNil(t, provider.CurrentConn()) // New connection created lazily
require.NoError(t, provider.SwitchHost(0))
assert.Equal(t, addrs[0], provider.CurrentHost())
require.ErrorContains(t, "invalid host index", provider.SwitchHost(-1))
require.ErrorContains(t, "invalid host index", provider.SwitchHost(3))
})
t.Run("SwitchHost circular", func(t *testing.T) {
// Test round-robin style switching using SwitchHost with manual index
indices := []int{1, 2, 0, 1} // Simulate circular switching
for i, idx := range indices {
require.NoError(t, provider.SwitchHost(idx))
assert.Equal(t, addrs[idx], provider.CurrentHost(), "iteration %d", i)
}
})
t.Run("Hosts returns copy", func(t *testing.T) {
hosts := provider.Hosts()
original := hosts[0]
hosts[0] = "modified"
assert.Equal(t, original, provider.Hosts()[0])
})
}
func TestGrpcConnectionProvider_Close(t *testing.T) {
provider, _, cleanup := testProvider(t, 1)
defer cleanup()
assert.NotNil(t, provider.CurrentConn())
provider.Close()
assert.Equal(t, (*grpc.ClientConn)(nil), provider.CurrentConn())
provider.Close() // Double close is safe
}

View File

@@ -1,27 +0,0 @@
package grpc
import "google.golang.org/grpc"
// MockGrpcProvider implements GrpcConnectionProvider for testing.
type MockGrpcProvider struct {
MockConn *grpc.ClientConn
MockHosts []string
CurrentIndex int
ConnCounter uint64
}
func (m *MockGrpcProvider) CurrentConn() *grpc.ClientConn { return m.MockConn }
func (m *MockGrpcProvider) CurrentHost() string {
if len(m.MockHosts) > 0 {
return m.MockHosts[m.CurrentIndex]
}
return ""
}
func (m *MockGrpcProvider) Hosts() []string { return m.MockHosts }
func (m *MockGrpcProvider) SwitchHost(idx int) error {
m.CurrentIndex = idx
m.ConnCounter++
return nil
}
func (m *MockGrpcProvider) ConnectionCounter() uint64 { return m.ConnCounter }
func (m *MockGrpcProvider) Close() {}

View File

@@ -1,34 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"log.go",
"mock_rest_provider.go",
"rest_connection_provider.go",
"rest_handler.go",
],
importpath = "github.com/OffchainLabs/prysm/v7/api/rest",
visibility = ["//visibility:public"],
deps = [
"//api:go_default_library",
"//api/apiutil:go_default_library",
"//api/client:go_default_library",
"//config/params:go_default_library",
"//network/httputil:go_default_library",
"//runtime/version:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opentelemetry_go_contrib_instrumentation_net_http_otelhttp//:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["rest_connection_provider_test.go"],
embed = [":go_default_library"],
deps = [
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
],
)

View File

@@ -1,9 +0,0 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package rest
import "github.com/sirupsen/logrus"
// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "api/rest")

View File

@@ -1,46 +0,0 @@
package rest
import (
"bytes"
"context"
"net/http"
)
// MockRestProvider implements RestConnectionProvider for testing.
type MockRestProvider struct {
MockClient *http.Client
MockHandler Handler
MockHosts []string
HostIndex int
}
func (m *MockRestProvider) HttpClient() *http.Client { return m.MockClient }
func (m *MockRestProvider) Handler() Handler { return m.MockHandler }
func (m *MockRestProvider) CurrentHost() string {
if len(m.MockHosts) > 0 {
return m.MockHosts[m.HostIndex%len(m.MockHosts)]
}
return ""
}
func (m *MockRestProvider) Hosts() []string { return m.MockHosts }
func (m *MockRestProvider) SwitchHost(index int) error { m.HostIndex = index; return nil }
// MockHandler implements Handler for testing.
type MockHandler struct {
MockHost string
}
func (m *MockHandler) Get(_ context.Context, _ string, _ any) error { return nil }
func (m *MockHandler) GetStatusCode(_ context.Context, _ string) (int, error) {
return http.StatusOK, nil
}
func (m *MockHandler) GetSSZ(_ context.Context, _ string) ([]byte, http.Header, error) {
return nil, nil, nil
}
func (m *MockHandler) Post(_ context.Context, _ string, _ map[string]string, _ *bytes.Buffer, _ any) error {
return nil
}
func (m *MockHandler) PostSSZ(_ context.Context, _ string, _ map[string]string, _ *bytes.Buffer) ([]byte, http.Header, error) {
return nil, nil, nil
}
func (m *MockHandler) Host() string { return m.MockHost }

View File

@@ -1,158 +0,0 @@
package rest
import (
"net/http"
"strings"
"sync/atomic"
"time"
"github.com/OffchainLabs/prysm/v7/api/client"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)
// RestConnectionProvider manages HTTP client configuration for REST API with failover support.
// It allows switching between different beacon node REST endpoints when the current one becomes unavailable.
type RestConnectionProvider interface {
// HttpClient returns the configured HTTP client with headers, timeout, and optional tracing.
HttpClient() *http.Client
// Handler returns the REST handler for making API requests.
Handler() Handler
// CurrentHost returns the current REST API endpoint URL.
CurrentHost() string
// Hosts returns all configured REST API endpoint URLs.
Hosts() []string
// SwitchHost switches to the endpoint at the given index.
SwitchHost(index int) error
}
// RestConnectionProviderOption is a functional option for configuring the REST connection provider.
type RestConnectionProviderOption func(*restConnectionProvider)
// WithHttpTimeout sets the HTTP client timeout.
func WithHttpTimeout(timeout time.Duration) RestConnectionProviderOption {
return func(p *restConnectionProvider) {
p.timeout = timeout
}
}
// WithHttpHeaders sets custom HTTP headers to include in all requests.
func WithHttpHeaders(headers map[string][]string) RestConnectionProviderOption {
return func(p *restConnectionProvider) {
p.headers = headers
}
}
// WithTracing enables OpenTelemetry tracing for HTTP requests.
func WithTracing() RestConnectionProviderOption {
return func(p *restConnectionProvider) {
p.enableTracing = true
}
}
type restConnectionProvider struct {
endpoints []string
httpClient *http.Client
restHandler *handler
currentIndex atomic.Uint64
timeout time.Duration
headers map[string][]string
enableTracing bool
}
// NewRestConnectionProvider creates a new REST connection provider that manages HTTP client configuration.
// The endpoint parameter can be a comma-separated list of URLs (e.g., "http://host1:3500,http://host2:3500").
func NewRestConnectionProvider(endpoint string, opts ...RestConnectionProviderOption) (RestConnectionProvider, error) {
endpoints := parseEndpoints(endpoint)
if len(endpoints) == 0 {
return nil, errors.New("no REST API endpoints provided")
}
p := &restConnectionProvider{
endpoints: endpoints,
}
for _, opt := range opts {
opt(p)
}
// Build the HTTP transport chain
var transport http.RoundTripper = http.DefaultTransport
// Add custom headers if configured
if len(p.headers) > 0 {
transport = client.NewCustomHeadersTransport(transport, p.headers)
}
// Add tracing if enabled
if p.enableTracing {
transport = otelhttp.NewTransport(transport)
}
p.httpClient = &http.Client{
Timeout: p.timeout,
Transport: transport,
}
// Create the REST handler with the HTTP client and initial host
p.restHandler = newHandler(*p.httpClient, endpoints[0])
log.WithFields(logrus.Fields{
"endpoints": endpoints,
"count": len(endpoints),
}).Info("Initialized REST connection provider")
return p, nil
}
// parseEndpoints splits a comma-separated endpoint string into individual endpoints.
func parseEndpoints(endpoint string) []string {
if endpoint == "" {
return nil
}
endpoints := make([]string, 0, 1)
for p := range strings.SplitSeq(endpoint, ",") {
if p = strings.TrimSpace(p); p != "" {
endpoints = append(endpoints, p)
}
}
return endpoints
}
func (p *restConnectionProvider) HttpClient() *http.Client {
return p.httpClient
}
func (p *restConnectionProvider) Handler() Handler {
return p.restHandler
}
func (p *restConnectionProvider) CurrentHost() string {
return p.endpoints[p.currentIndex.Load()]
}
func (p *restConnectionProvider) Hosts() []string {
// Return a copy to maintain immutability
hosts := make([]string, len(p.endpoints))
copy(hosts, p.endpoints)
return hosts
}
func (p *restConnectionProvider) SwitchHost(index int) error {
if index < 0 || index >= len(p.endpoints) {
return errors.Errorf("invalid host index %d, must be between 0 and %d", index, len(p.endpoints)-1)
}
oldIdx := p.currentIndex.Load()
p.currentIndex.Store(uint64(index))
// Update the rest handler's host
p.restHandler.SwitchHost(p.endpoints[index])
log.WithFields(logrus.Fields{
"previousHost": p.endpoints[oldIdx],
"newHost": p.endpoints[index],
}).Debug("Switched REST endpoint")
return nil
}
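
A brief sketch of constructing the REST provider with the functional options defined above, assuming the api/rest package from this diff; the header, timeout, and endpoint values are illustrative.

package main

import (
    "fmt"
    "time"

    "github.com/OffchainLabs/prysm/v7/api/rest"
)

func main() {
    provider, err := rest.NewRestConnectionProvider(
        "http://host1:3500,http://host2:3500",
        rest.WithHttpHeaders(map[string][]string{"Authorization": {"Bearer token"}}),
        rest.WithHttpTimeout(30*time.Second),
        rest.WithTracing(),
    )
    if err != nil {
        panic(err)
    }
    fmt.Println("starting on", provider.CurrentHost())
    // SwitchHost repoints both CurrentHost and the underlying REST handler.
    if err := provider.SwitchHost(1); err != nil {
        panic(err)
    }
    fmt.Println("failed over to", provider.CurrentHost())
}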

View File

@@ -1,80 +0,0 @@
package rest
import (
"reflect"
"testing"
"github.com/OffchainLabs/prysm/v7/testing/assert"
"github.com/OffchainLabs/prysm/v7/testing/require"
)
func TestParseEndpoints(t *testing.T) {
tests := []struct {
name string
input string
expected []string
}{
{"single endpoint", "http://localhost:3500", []string{"http://localhost:3500"}},
{"multiple endpoints", "http://host1:3500,http://host2:3500,http://host3:3500", []string{"http://host1:3500", "http://host2:3500", "http://host3:3500"}},
{"endpoints with spaces", "http://host1:3500, http://host2:3500 , http://host3:3500", []string{"http://host1:3500", "http://host2:3500", "http://host3:3500"}},
{"empty string", "", nil},
{"only commas", ",,,", []string{}},
{"trailing comma", "http://host1:3500,http://host2:3500,", []string{"http://host1:3500", "http://host2:3500"}},
{"leading comma", ",http://host1:3500,http://host2:3500", []string{"http://host1:3500", "http://host2:3500"}},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := parseEndpoints(tt.input)
if !reflect.DeepEqual(tt.expected, got) {
t.Errorf("parseEndpoints(%q) = %v, want %v", tt.input, got, tt.expected)
}
})
}
}
func TestNewRestConnectionProvider_Errors(t *testing.T) {
t.Run("no endpoints", func(t *testing.T) {
_, err := NewRestConnectionProvider("")
require.ErrorContains(t, "no REST API endpoints provided", err)
})
}
func TestRestConnectionProvider(t *testing.T) {
provider, err := NewRestConnectionProvider("http://host1:3500,http://host2:3500,http://host3:3500")
require.NoError(t, err)
t.Run("initial state", func(t *testing.T) {
assert.Equal(t, 3, len(provider.Hosts()))
assert.Equal(t, "http://host1:3500", provider.CurrentHost())
assert.NotNil(t, provider.HttpClient())
})
t.Run("SwitchHost", func(t *testing.T) {
require.NoError(t, provider.SwitchHost(1))
assert.Equal(t, "http://host2:3500", provider.CurrentHost())
require.NoError(t, provider.SwitchHost(0))
assert.Equal(t, "http://host1:3500", provider.CurrentHost())
require.ErrorContains(t, "invalid host index", provider.SwitchHost(-1))
require.ErrorContains(t, "invalid host index", provider.SwitchHost(3))
})
t.Run("Hosts returns copy", func(t *testing.T) {
hosts := provider.Hosts()
original := hosts[0]
hosts[0] = "modified"
assert.Equal(t, original, provider.Hosts()[0])
})
}
func TestRestConnectionProvider_WithOptions(t *testing.T) {
headers := map[string][]string{"Authorization": {"Bearer token"}}
provider, err := NewRestConnectionProvider(
"http://localhost:3500",
WithHttpHeaders(headers),
WithHttpTimeout(30000000000), // 30 seconds in nanoseconds
WithTracing(),
)
require.NoError(t, err)
assert.NotNil(t, provider.HttpClient())
assert.Equal(t, "http://localhost:3500", provider.CurrentHost())
}

View File

@@ -509,17 +509,17 @@ func (s *SignedBlindedBeaconBlockFulu) SigString() string {
// ----------------------------------------------------------------------------
type ExecutionPayloadBid struct {
ParentBlockHash string `json:"parent_block_hash"`
ParentBlockRoot string `json:"parent_block_root"`
BlockHash string `json:"block_hash"`
PrevRandao string `json:"prev_randao"`
FeeRecipient string `json:"fee_recipient"`
GasLimit string `json:"gas_limit"`
BuilderIndex string `json:"builder_index"`
Slot string `json:"slot"`
Value string `json:"value"`
ExecutionPayment string `json:"execution_payment"`
BlobKzgCommitments []string `json:"blob_kzg_commitments"`
ParentBlockHash string `json:"parent_block_hash"`
ParentBlockRoot string `json:"parent_block_root"`
BlockHash string `json:"block_hash"`
PrevRandao string `json:"prev_randao"`
FeeRecipient string `json:"fee_recipient"`
GasLimit string `json:"gas_limit"`
BuilderIndex string `json:"builder_index"`
Slot string `json:"slot"`
Value string `json:"value"`
ExecutionPayment string `json:"execution_payment"`
BlobKzgCommitmentsRoot string `json:"blob_kzg_commitments_root"`
}
type SignedExecutionPayloadBid struct {

View File

@@ -2939,22 +2939,18 @@ func SignedExecutionPayloadBidFromConsensus(b *eth.SignedExecutionPayloadBid) *S
}
func ExecutionPayloadBidFromConsensus(b *eth.ExecutionPayloadBid) *ExecutionPayloadBid {
blobKzgCommitments := make([]string, len(b.BlobKzgCommitments))
for i := range b.BlobKzgCommitments {
blobKzgCommitments[i] = hexutil.Encode(b.BlobKzgCommitments[i])
}
return &ExecutionPayloadBid{
ParentBlockHash: hexutil.Encode(b.ParentBlockHash),
ParentBlockRoot: hexutil.Encode(b.ParentBlockRoot),
BlockHash: hexutil.Encode(b.BlockHash),
PrevRandao: hexutil.Encode(b.PrevRandao),
FeeRecipient: hexutil.Encode(b.FeeRecipient),
GasLimit: fmt.Sprintf("%d", b.GasLimit),
BuilderIndex: fmt.Sprintf("%d", b.BuilderIndex),
Slot: fmt.Sprintf("%d", b.Slot),
Value: fmt.Sprintf("%d", b.Value),
ExecutionPayment: fmt.Sprintf("%d", b.ExecutionPayment),
BlobKzgCommitments: blobKzgCommitments,
ParentBlockHash: hexutil.Encode(b.ParentBlockHash),
ParentBlockRoot: hexutil.Encode(b.ParentBlockRoot),
BlockHash: hexutil.Encode(b.BlockHash),
PrevRandao: hexutil.Encode(b.PrevRandao),
FeeRecipient: hexutil.Encode(b.FeeRecipient),
GasLimit: fmt.Sprintf("%d", b.GasLimit),
BuilderIndex: fmt.Sprintf("%d", b.BuilderIndex),
Slot: fmt.Sprintf("%d", b.Slot),
Value: fmt.Sprintf("%d", b.Value),
ExecutionPayment: fmt.Sprintf("%d", b.ExecutionPayment),
BlobKzgCommitmentsRoot: hexutil.Encode(b.BlobKzgCommitmentsRoot),
}
}
@@ -3191,30 +3187,22 @@ func (b *ExecutionPayloadBid) ToConsensus() (*eth.ExecutionPayloadBid, error) {
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayment")
}
err = slice.VerifyMaxLength(b.BlobKzgCommitments, fieldparams.MaxBlobCommitmentsPerBlock)
blobKzgCommitmentsRoot, err := bytesutil.DecodeHexWithLength(b.BlobKzgCommitmentsRoot, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "BlobKzgCommitments")
}
blobKzgCommitments := make([][]byte, len(b.BlobKzgCommitments))
for i, commitment := range b.BlobKzgCommitments {
kzg, err := bytesutil.DecodeHexWithLength(commitment, fieldparams.BLSPubkeyLength)
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("BlobKzgCommitments[%d]", i))
}
blobKzgCommitments[i] = kzg
return nil, server.NewDecodeError(err, "BlobKzgCommitmentsRoot")
}
return &eth.ExecutionPayloadBid{
ParentBlockHash: parentBlockHash,
ParentBlockRoot: parentBlockRoot,
BlockHash: blockHash,
PrevRandao: prevRandao,
FeeRecipient: feeRecipient,
GasLimit: gasLimit,
BuilderIndex: primitives.BuilderIndex(builderIndex),
Slot: primitives.Slot(slot),
Value: primitives.Gwei(value),
ExecutionPayment: primitives.Gwei(executionPayment),
BlobKzgCommitments: blobKzgCommitments,
ParentBlockHash: parentBlockHash,
ParentBlockRoot: parentBlockRoot,
BlockHash: blockHash,
PrevRandao: prevRandao,
FeeRecipient: feeRecipient,
GasLimit: gasLimit,
BuilderIndex: primitives.BuilderIndex(builderIndex),
Slot: primitives.Slot(slot),
Value: primitives.Gwei(value),
ExecutionPayment: primitives.Gwei(executionPayment),
BlobKzgCommitmentsRoot: blobKzgCommitmentsRoot,
}, nil
}

View File

@@ -85,7 +85,6 @@ go_library(
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//io/logs:go_default_library",
"//math:go_default_library",
"//monitoring/tracing:go_default_library",
"//monitoring/tracing/trace:go_default_library",

View File

@@ -10,7 +10,6 @@ import (
consensus_types "github.com/OffchainLabs/prysm/v7/consensus-types"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v7/io/logs"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
prysmTime "github.com/OffchainLabs/prysm/v7/time"
@@ -88,45 +87,36 @@ func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte, justified, finalized *ethpb.Checkpoint, receivedTime time.Time, genesis time.Time, daWaitedTime time.Duration) error {
startTime, err := slots.StartTime(genesis, block.Slot())
if err != nil {
return errors.Wrap(err, "failed to get slot start time")
return err
}
parentRoot := block.ParentRoot()
blkRoot := fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8])
finalizedRoot := fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8])
sinceSlotStartTime := prysmTime.Now().Sub(startTime)
lessFields := logrus.Fields{
"slot": block.Slot(),
"block": blkRoot,
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": finalizedRoot,
"epoch": slots.ToEpoch(block.Slot()),
"sinceSlotStartTime": sinceSlotStartTime,
}
moreFields := logrus.Fields{
"slot": block.Slot(),
"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
"block": blkRoot,
"epoch": slots.ToEpoch(block.Slot()),
"justifiedEpoch": justified.Epoch,
"justifiedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": finalizedRoot,
"parentRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(parentRoot[:])[:8]),
"version": version.String(block.Version()),
"sinceSlotStartTime": sinceSlotStartTime,
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime) - daWaitedTime,
"dataAvailabilityWaitedTime": daWaitedTime,
}
level := logs.PackageVerbosity("beacon-chain/blockchain")
level := log.Logger.GetLevel()
if level >= logrus.DebugLevel {
log.WithFields(moreFields).Info("Synced new block")
return nil
parentRoot := block.ParentRoot()
lf := logrus.Fields{
"slot": block.Slot(),
"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"epoch": slots.ToEpoch(block.Slot()),
"justifiedEpoch": justified.Epoch,
"justifiedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
"parentRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(parentRoot[:])[:8]),
"version": version.String(block.Version()),
"sinceSlotStartTime": prysmTime.Now().Sub(startTime),
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime) - daWaitedTime,
"dataAvailabilityWaitedTime": daWaitedTime,
}
log.WithFields(lf).Debug("Synced new block")
} else {
log.WithFields(logrus.Fields{
"slot": block.Slot(),
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
"epoch": slots.ToEpoch(block.Slot()),
}).Info("Synced new block")
}
log.WithFields(lessFields).WithField(logs.LogTargetField, logs.LogTargetUser).Info("Synced new block")
log.WithFields(moreFields).WithField(logs.LogTargetField, logs.LogTargetEphemeral).Info("Synced new block")
return nil
}

View File

@@ -7,6 +7,7 @@ go_library(
"payload_attestation.go",
"pending_payment.go",
"proposer_slashing.go",
"withdrawal.go",
],
importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas",
visibility = ["//visibility:public"],
@@ -38,6 +39,7 @@ go_test(
"payload_attestation_test.go",
"pending_payment_test.go",
"proposer_slashing_test.go",
"withdrawal_test.go",
],
embed = [":go_default_library"],
deps = [

View File

@@ -17,56 +17,27 @@ import (
)
// ProcessExecutionPayloadBid processes a signed execution payload bid in the Gloas fork.
// Spec v1.7.0-alpha.0 (pseudocode):
// process_execution_payload_bid(state: BeaconState, block: BeaconBlock):
//
// <spec fn="process_execution_payload_bid" fork="gloas" hash="823c9f3a">
// def process_execution_payload_bid(state: BeaconState, block: BeaconBlock) -> None:
// signed_bid = block.body.signed_execution_payload_bid
// bid = signed_bid.message
// builder_index = bid.builder_index
// amount = bid.value
//
// # For self-builds, amount must be zero regardless of withdrawal credential prefix
// if builder_index == BUILDER_INDEX_SELF_BUILD:
// assert amount == 0
// assert signed_bid.signature == bls.G2_POINT_AT_INFINITY
// else:
// # Verify that the builder is active
// assert is_active_builder(state, builder_index)
// # Verify that the builder has funds to cover the bid
// assert can_builder_cover_bid(state, builder_index, amount)
// # Verify that the bid signature is valid
// assert verify_execution_payload_bid_signature(state, signed_bid)
//
// # Verify commitments are under limit
// assert (
// len(bid.blob_kzg_commitments)
// <= get_blob_parameters(get_current_epoch(state)).max_blobs_per_block
// )
//
// # Verify that the bid is for the current slot
// assert bid.slot == block.slot
// # Verify that the bid is for the right parent block
// assert bid.parent_block_hash == state.latest_block_hash
// assert bid.parent_block_root == block.parent_root
// assert bid.prev_randao == get_randao_mix(state, get_current_epoch(state))
//
// # Record the pending payment if there is some payment
// if amount > 0:
// pending_payment = BuilderPendingPayment(
// weight=0,
// withdrawal=BuilderPendingWithdrawal(
// fee_recipient=bid.fee_recipient,
// amount=amount,
// builder_index=builder_index,
// ),
// )
// state.builder_pending_payments[SLOTS_PER_EPOCH + bid.slot % SLOTS_PER_EPOCH] = (
// pending_payment
// )
//
// # Cache the signed execution payload bid
// state.latest_execution_payload_bid = bid
// </spec>
// signed_bid = block.body.signed_execution_payload_bid
// bid = signed_bid.message
// builder_index = bid.builder_index
// amount = bid.value
// if builder_index == BUILDER_INDEX_SELF_BUILD:
// assert amount == 0
// assert signed_bid.signature == G2_POINT_AT_INFINITY
// else:
// assert is_active_builder(state, builder_index)
// assert can_builder_cover_bid(state, builder_index, amount)
// assert verify_execution_payload_bid_signature(state, signed_bid)
// assert bid.slot == block.slot
// assert bid.parent_block_hash == state.latest_block_hash
// assert bid.parent_block_root == block.parent_root
// assert bid.prev_randao == get_randao_mix(state, get_current_epoch(state))
// if amount > 0:
// state.builder_pending_payments[...] = BuilderPendingPayment(weight=0, withdrawal=BuilderPendingWithdrawal(fee_recipient=bid.fee_recipient, amount=amount, builder_index=builder_index))
// state.latest_execution_payload_bid = bid
func ProcessExecutionPayloadBid(st state.BeaconState, block interfaces.ReadOnlyBeaconBlock) error {
signedBid, err := block.Body().SignedExecutionPayloadBid()
if err != nil {
@@ -115,12 +86,6 @@ func ProcessExecutionPayloadBid(st state.BeaconState, block interfaces.ReadOnlyB
}
}
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlockAtEpoch(slots.ToEpoch(block.Slot()))
commitmentCount := bid.BlobKzgCommitmentCount()
if commitmentCount > uint64(maxBlobsPerBlock) {
return fmt.Errorf("bid has %d blob KZG commitments over max %d", commitmentCount, maxBlobsPerBlock)
}
if err := validateBidConsistency(st, bid, block); err != nil {
return errors.Wrap(err, "bid consistency validation failed")
}
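
For the pending-payment bookkeeping in the spec block above, the bid is recorded in the upper half of a two-epoch buffer; a minimal arithmetic sketch of that index, with illustrative local names and the mainnet SLOTS_PER_EPOCH of 32:

package main

import "fmt"

func main() {
    const slotsPerEpoch = 32 // mainnet SLOTS_PER_EPOCH
    bidSlot := uint64(12)
    // builder_pending_payments holds two epochs of entries; the current epoch's
    // payments occupy indices [SLOTS_PER_EPOCH, 2*SLOTS_PER_EPOCH).
    index := slotsPerEpoch + bidSlot%slotsPerEpoch
    fmt.Println(index) // 44 for a bid at slot 12
}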

View File

@@ -184,28 +184,6 @@ func signBid(t *testing.T, sk common.SecretKey, bid *ethpb.ExecutionPayloadBid,
return out
}
func blobCommitmentsForSlot(slot primitives.Slot, count int) [][]byte {
max := int(params.BeaconConfig().MaxBlobsPerBlockAtEpoch(slots.ToEpoch(slot)))
if count > max {
count = max
}
commitments := make([][]byte, count)
for i := range commitments {
commitments[i] = bytes.Repeat([]byte{0xEE}, 48)
}
return commitments
}
func tooManyBlobCommitmentsForSlot(slot primitives.Slot) [][]byte {
max := int(params.BeaconConfig().MaxBlobsPerBlockAtEpoch(slots.ToEpoch(slot)))
count := max + 1
commitments := make([][]byte, count)
for i := range commitments {
commitments[i] = bytes.Repeat([]byte{0xEE}, 48)
}
return commitments
}
func TestProcessExecutionPayloadBid_SelfBuildSuccess(t *testing.T) {
slot := primitives.Slot(12)
proposerIdx := primitives.ValidatorIndex(0)
@@ -216,17 +194,17 @@ func TestProcessExecutionPayloadBid_SelfBuildSuccess(t *testing.T) {
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinActivationBalance+1000, randao, latestHash, pubKey)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 0,
ExecutionPayment: 0,
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 0,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xEE}, 32),
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
}
signed := &ethpb.SignedExecutionPayloadBid{
Message: bid,
@@ -258,16 +236,16 @@ func TestProcessExecutionPayloadBid_SelfBuildNonZeroAmountFails(t *testing.T) {
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinActivationBalance+1000, randao, latestHash, [48]byte{})
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xAA}, 32),
BlockHash: bytes.Repeat([]byte{0xBB}, 32),
PrevRandao: randao[:],
BuilderIndex: builderIdx,
Slot: slot,
Value: 10,
ExecutionPayment: 0,
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
FeeRecipient: bytes.Repeat([]byte{0xDD}, 20),
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xAA}, 32),
BlockHash: bytes.Repeat([]byte{0xBB}, 32),
PrevRandao: randao[:],
BuilderIndex: builderIdx,
Slot: slot,
Value: 10,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xCC}, 32),
FeeRecipient: bytes.Repeat([]byte{0xDD}, 20),
}
signed := &ethpb.SignedExecutionPayloadBid{
Message: bid,
@@ -302,17 +280,17 @@ func TestProcessExecutionPayloadBid_PendingPaymentAndCacheBid(t *testing.T) {
state := buildGloasState(t, slot, proposerIdx, builderIdx, balance, randao, latestHash, pubKey)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 500_000,
ExecutionPayment: 1,
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 500_000,
ExecutionPayment: 1,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xEE}, 32),
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
}
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
@@ -363,17 +341,17 @@ func TestProcessExecutionPayloadBid_BuilderNotActive(t *testing.T) {
state = stateIface.(*state_native.BeaconState)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0x03}, 32),
BlockHash: bytes.Repeat([]byte{0x04}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 10,
ExecutionPayment: 0,
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
FeeRecipient: bytes.Repeat([]byte{0x06}, 20),
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0x03}, 32),
BlockHash: bytes.Repeat([]byte{0x04}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 10,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x05}, 32),
FeeRecipient: bytes.Repeat([]byte{0x06}, 20),
}
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
sig := signBid(t, sk, bid, state.Fork(), genesis)
@@ -416,17 +394,17 @@ func TestProcessExecutionPayloadBid_CannotCoverBid(t *testing.T) {
state = stateIface.(*state_native.BeaconState)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 25,
ExecutionPayment: 0,
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 25,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xEE}, 32),
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
}
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
sig := signBid(t, sk, bid, state.Fork(), genesis)
@@ -458,17 +436,17 @@ func TestProcessExecutionPayloadBid_InvalidSignature(t *testing.T) {
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 10,
ExecutionPayment: 0,
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 10,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xEE}, 32),
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
}
// Use an invalid signature.
invalidSig := [96]byte{1}
@@ -485,42 +463,6 @@ func TestProcessExecutionPayloadBid_InvalidSignature(t *testing.T) {
require.ErrorContains(t, "bid signature validation failed", err)
}
func TestProcessExecutionPayloadBid_TooManyBlobCommitments(t *testing.T) {
slot := primitives.Slot(9)
proposerIdx := primitives.ValidatorIndex(0)
builderIdx := params.BeaconConfig().BuilderIndexSelfBuild
randao := [32]byte(bytes.Repeat([]byte{0xAA}, 32))
latestHash := [32]byte(bytes.Repeat([]byte{0xBB}, 32))
pubKey := [48]byte{}
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinActivationBalance+1000, randao, latestHash, pubKey)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
PrevRandao: randao[:],
BuilderIndex: builderIdx,
Slot: slot,
BlobKzgCommitments: tooManyBlobCommitmentsForSlot(slot),
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
}
signed := &ethpb.SignedExecutionPayloadBid{
Message: bid,
Signature: common.InfiniteSignature[:],
}
block := stubBlock{
slot: slot,
proposer: proposerIdx,
parentRoot: bytesutil.ToBytes32(bid.ParentBlockRoot),
body: stubBlockBody{signedBid: signed},
v: version.Gloas,
}
err := ProcessExecutionPayloadBid(state, block)
require.ErrorContains(t, "blob KZG commitments over max", err)
}
func TestProcessExecutionPayloadBid_SlotMismatch(t *testing.T) {
slot := primitives.Slot(10)
builderIdx := primitives.BuilderIndex(1)
@@ -536,17 +478,17 @@ func TestProcessExecutionPayloadBid_SlotMismatch(t *testing.T) {
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xAA}, 32),
BlockHash: bytes.Repeat([]byte{0xBB}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot + 1, // mismatch
Value: 1,
ExecutionPayment: 0,
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
FeeRecipient: bytes.Repeat([]byte{0xDD}, 20),
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xAA}, 32),
BlockHash: bytes.Repeat([]byte{0xBB}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot + 1, // mismatch
Value: 1,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xCC}, 32),
FeeRecipient: bytes.Repeat([]byte{0xDD}, 20),
}
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
sig := signBid(t, sk, bid, state.Fork(), genesis)
@@ -578,17 +520,17 @@ func TestProcessExecutionPayloadBid_ParentHashMismatch(t *testing.T) {
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: bytes.Repeat([]byte{0x11}, 32), // mismatch
ParentBlockRoot: bytes.Repeat([]byte{0x22}, 32),
BlockHash: bytes.Repeat([]byte{0x33}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 1,
ExecutionPayment: 0,
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
FeeRecipient: bytes.Repeat([]byte{0x55}, 20),
ParentBlockHash: bytes.Repeat([]byte{0x11}, 32), // mismatch
ParentBlockRoot: bytes.Repeat([]byte{0x22}, 32),
BlockHash: bytes.Repeat([]byte{0x33}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 1,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x44}, 32),
FeeRecipient: bytes.Repeat([]byte{0x55}, 20),
}
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
sig := signBid(t, sk, bid, state.Fork(), genesis)
@@ -621,17 +563,17 @@ func TestProcessExecutionPayloadBid_ParentRootMismatch(t *testing.T) {
parentRoot := bytes.Repeat([]byte{0x22}, 32)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: parentRoot,
BlockHash: bytes.Repeat([]byte{0x33}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 1,
ExecutionPayment: 0,
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
FeeRecipient: bytes.Repeat([]byte{0x55}, 20),
ParentBlockHash: latestHash[:],
ParentBlockRoot: parentRoot,
BlockHash: bytes.Repeat([]byte{0x33}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 1,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x44}, 32),
FeeRecipient: bytes.Repeat([]byte{0x55}, 20),
}
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
sig := signBid(t, sk, bid, state.Fork(), genesis)
@@ -663,17 +605,17 @@ func TestProcessExecutionPayloadBid_PrevRandaoMismatch(t *testing.T) {
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0x22}, 32),
BlockHash: bytes.Repeat([]byte{0x33}, 32),
PrevRandao: bytes.Repeat([]byte{0x01}, 32), // mismatch
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 1,
ExecutionPayment: 0,
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
FeeRecipient: bytes.Repeat([]byte{0x55}, 20),
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0x22}, 32),
BlockHash: bytes.Repeat([]byte{0x33}, 32),
PrevRandao: bytes.Repeat([]byte{0x01}, 32), // mismatch
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 1,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x44}, 32),
FeeRecipient: bytes.Repeat([]byte{0x55}, 20),
}
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
sig := signBid(t, sk, bid, state.Fork(), genesis)

View File

@@ -24,21 +24,14 @@ import (
)
// ProcessPayloadAttestations validates payload attestations in a block body.
// Spec v1.7.0-alpha.0 (pseudocode):
// process_payload_attestation(state: BeaconState, payload_attestation: PayloadAttestation):
//
// <spec fn="process_payload_attestation" fork="gloas" hash="f46bf0b0">
// def process_payload_attestation(
// state: BeaconState, payload_attestation: PayloadAttestation
// ) -> None:
// data = payload_attestation.data
//
// # Check that the attestation is for the parent beacon block
// assert data.beacon_block_root == state.latest_block_header.parent_root
// # Check that the attestation is for the previous slot
// assert data.slot + 1 == state.slot
// # Verify signature
// indexed_payload_attestation = get_indexed_payload_attestation(state, payload_attestation)
// assert is_valid_indexed_payload_attestation(state, indexed_payload_attestation)
// </spec>
// data = payload_attestation.data
// assert data.beacon_block_root == state.latest_block_header.parent_root
// assert data.slot + 1 == state.slot
// indexed = get_indexed_payload_attestation(state, data.slot, payload_attestation)
// assert is_valid_indexed_payload_attestation(state, indexed)
func ProcessPayloadAttestations(ctx context.Context, st state.BeaconState, body interfaces.ReadOnlyBeaconBlockBody) error {
atts, err := body.PayloadAttestations()
if err != nil {
@@ -97,24 +90,17 @@ func indexedPayloadAttestation(ctx context.Context, st state.ReadOnlyBeaconState
}
// payloadCommittee returns the payload timeliness committee for a given slot for the state.
// Spec v1.7.0-alpha.0 (pseudocode):
// get_ptc(state: BeaconState, slot: Slot) -> Vector[ValidatorIndex, PTC_SIZE]:
//
// <spec fn="get_ptc" fork="gloas" hash="ae15f761">
// def get_ptc(state: BeaconState, slot: Slot) -> Vector[ValidatorIndex, PTC_SIZE]:
// """
// Get the payload timeliness committee for the given ``slot``.
// """
// epoch = compute_epoch_at_slot(slot)
// seed = hash(get_seed(state, epoch, DOMAIN_PTC_ATTESTER) + uint_to_bytes(slot))
// indices: List[ValidatorIndex] = []
// # Concatenate all committees for this slot in order
// committees_per_slot = get_committee_count_per_slot(state, epoch)
// for i in range(committees_per_slot):
// committee = get_beacon_committee(state, slot, CommitteeIndex(i))
// indices.extend(committee)
// return compute_balance_weighted_selection(
// state, indices, seed, size=PTC_SIZE, shuffle_indices=False
// )
// </spec>
// epoch = compute_epoch_at_slot(slot)
// seed = hash(get_seed(state, epoch, DOMAIN_PTC_ATTESTER) + uint_to_bytes(slot))
// indices = []
// committees_per_slot = get_committee_count_per_slot(state, epoch)
// for i in range(committees_per_slot):
// committee = get_beacon_committee(state, slot, CommitteeIndex(i))
// indices.extend(committee)
// return compute_balance_weighted_selection(state, indices, seed, size=PTC_SIZE, shuffle_indices=False)
func payloadCommittee(ctx context.Context, st state.ReadOnlyBeaconState, slot primitives.Slot) ([]primitives.ValidatorIndex, error) {
epoch := slots.ToEpoch(slot)
seed, err := ptcSeed(st, epoch, slot)
@@ -166,35 +152,17 @@ func ptcSeed(st state.ReadOnlyBeaconState, epoch primitives.Epoch, slot primitiv
}
// selectByBalance selects a balance-weighted subset of input candidates.
// Spec v1.7.0-alpha.0 (pseudocode):
// compute_balance_weighted_selection(state, indices, seed, size, shuffle_indices):
// Note: shuffle_indices is false for PTC.
//
// <spec fn="compute_balance_weighted_selection" fork="gloas" hash="2c9f1c23">
// def compute_balance_weighted_selection(
// state: BeaconState,
// indices: Sequence[ValidatorIndex],
// seed: Bytes32,
// size: uint64,
// shuffle_indices: bool,
// ) -> Sequence[ValidatorIndex]:
// """
// Return ``size`` indices sampled by effective balance, using ``indices``
// as candidates. If ``shuffle_indices`` is ``True``, candidate indices
// are themselves sampled from ``indices`` by shuffling it, otherwise
// ``indices`` is traversed in order.
// """
// total = uint64(len(indices))
// assert total > 0
// selected: List[ValidatorIndex] = []
// i = uint64(0)
// while len(selected) < size:
// next_index = i % total
// if shuffle_indices:
// next_index = compute_shuffled_index(next_index, total, seed)
// candidate_index = indices[next_index]
// if compute_balance_weighted_acceptance(state, candidate_index, seed, i):
// selected.append(candidate_index)
// i += 1
// return selected
// </spec>
// total = len(indices); selected = []; i = 0
// while len(selected) < size:
// next = i % total
// if shuffle_indices: next = compute_shuffled_index(next, total, seed)
// if compute_balance_weighted_acceptance(state, indices[next], seed, i):
// selected.append(indices[next])
// i += 1
func selectByBalanceFill(
ctx context.Context,
st state.ReadOnlyBeaconState,
@@ -231,22 +199,15 @@ func selectByBalanceFill(
}
// acceptByBalance determines if a validator is accepted based on its effective balance.
// Spec v1.7.0-alpha.0 (pseudocode):
// compute_balance_weighted_acceptance(state, index, seed, i):
//
// <spec fn="compute_balance_weighted_acceptance" fork="gloas" hash="9954dcd0">
// def compute_balance_weighted_acceptance(
// state: BeaconState, index: ValidatorIndex, seed: Bytes32, i: uint64
// ) -> bool:
// """
// Return whether to accept the selection of the validator ``index``, with probability
// proportional to its ``effective_balance``, and randomness given by ``seed`` and ``i``.
// """
// MAX_RANDOM_VALUE = 2**16 - 1
// random_bytes = hash(seed + uint_to_bytes(i // 16))
// offset = i % 16 * 2
// random_value = bytes_to_uint64(random_bytes[offset : offset + 2])
// effective_balance = state.validators[index].effective_balance
// return effective_balance * MAX_RANDOM_VALUE >= MAX_EFFECTIVE_BALANCE_ELECTRA * random_value
// </spec>
// MAX_RANDOM_VALUE = 2**16 - 1
// random_bytes = hash(seed + uint_to_bytes(i // 16))
// offset = i % 16 * 2
// random_value = bytes_to_uint64(random_bytes[offset:offset+2])
// effective_balance = state.validators[index].effective_balance
// return effective_balance * MAX_RANDOM_VALUE >= MAX_EFFECTIVE_BALANCE_ELECTRA * random_value
func acceptByBalance(st state.ReadOnlyBeaconState, idx primitives.ValidatorIndex, seedBuf []byte, hashFunc func([]byte) [32]byte, maxBalance uint64, round uint64) (bool, error) {
// Reuse the seed buffer by overwriting the last 8 bytes with the round counter.
binary.LittleEndian.PutUint64(seedBuf[len(seedBuf)-8:], round/16)
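The acceptance check boils down to a 16-bit randomness draw weighed against the candidate's effective balance. A minimal self-contained sketch of that test, assuming the spec's SHA-256 hash and little-endian decoding; the function and parameter names here are illustrative, not part of the codebase:
import (
    "crypto/sha256"
    "encoding/binary"
)

// acceptWithBalanceWeight mirrors compute_balance_weighted_acceptance with plain
// values: two bytes of randomness are drawn from hash(seed ++ round/16) and the
// candidate is accepted with probability effective_balance / max_effective_balance.
func acceptWithBalanceWeight(seed [32]byte, round, effectiveBalance, maxEffectiveBalance uint64) bool {
    const maxRandomValue = 1<<16 - 1
    buf := make([]byte, 40)
    copy(buf, seed[:])
    binary.LittleEndian.PutUint64(buf[32:], round/16)
    digest := sha256.Sum256(buf)
    offset := (round % 16) * 2
    randomValue := uint64(binary.LittleEndian.Uint16(digest[offset : offset+2]))
    return effectiveBalance*maxRandomValue >= maxEffectiveBalance*randomValue
}
A candidate at the maximum effective balance is accepted on every draw, while one at half that balance is accepted roughly half the time.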
@@ -263,26 +224,16 @@ func acceptByBalance(st state.ReadOnlyBeaconState, idx primitives.ValidatorIndex
}
// validIndexedPayloadAttestation verifies the signature of an indexed payload attestation.
// Spec v1.7.0-alpha.0 (pseudocode):
// is_valid_indexed_payload_attestation(state: BeaconState, indexed_payload_attestation: IndexedPayloadAttestation) -> bool:
//
// <spec fn="is_valid_indexed_payload_attestation" fork="gloas" hash="d76e0f89">
// def is_valid_indexed_payload_attestation(
// state: BeaconState, attestation: IndexedPayloadAttestation
// ) -> bool:
// """
// Check if ``attestation`` is non-empty, has sorted indices, and has
// a valid aggregate signature.
// """
// # Verify indices are non-empty and sorted
// indices = attestation.attesting_indices
// if len(indices) == 0 or not indices == sorted(indices):
// return False
//
// # Verify aggregate signature
// pubkeys = [state.validators[i].pubkey for i in indices]
// domain = get_domain(state, DOMAIN_PTC_ATTESTER, compute_epoch_at_slot(attestation.data.slot))
// signing_root = compute_signing_root(attestation.data, domain)
// return bls.FastAggregateVerify(pubkeys, signing_root, attestation.signature)
// </spec>
// indices = indexed_payload_attestation.attesting_indices
// return len(indices) > 0 and indices == sorted(indices) and
// bls.FastAggregateVerify(
// [state.validators[i].pubkey for i in indices],
// compute_signing_root(indexed_payload_attestation.data, get_domain(state, DOMAIN_PTC_ATTESTER, compute_epoch_at_slot(attestation.data.slot)),
// indexed_payload_attestation.signature,
// )
func validIndexedPayloadAttestation(st state.ReadOnlyBeaconState, att *consensus_types.IndexedPayloadAttestation) error {
indices := att.AttestingIndices
if len(indices) == 0 || !slices.IsSorted(indices) {

View File

@@ -10,21 +10,17 @@ import (
)
// ProcessBuilderPendingPayments processes the builder pending payments from the previous epoch.
// Spec v1.7.0-alpha.0 (pseudocode):
// def process_builder_pending_payments(state: BeaconState) -> None:
//
// <spec fn="process_builder_pending_payments" fork="gloas" hash="10da48dd">
// def process_builder_pending_payments(state: BeaconState) -> None:
// """
// Processes the builder pending payments from the previous epoch.
// """
// quorum = get_builder_payment_quorum_threshold(state)
// for payment in state.builder_pending_payments[:SLOTS_PER_EPOCH]:
// if payment.weight >= quorum:
// state.builder_pending_withdrawals.append(payment.withdrawal)
// quorum = get_builder_payment_quorum_threshold(state)
// for payment in state.builder_pending_payments[:SLOTS_PER_EPOCH]:
// if payment.weight >= quorum:
// state.builder_pending_withdrawals.append(payment.withdrawal)
//
// old_payments = state.builder_pending_payments[SLOTS_PER_EPOCH:]
// new_payments = [BuilderPendingPayment() for _ in range(SLOTS_PER_EPOCH)]
// state.builder_pending_payments = old_payments + new_payments
// </spec>
// old_payments = state.builder_pending_payments[SLOTS_PER_EPOCH:]
// new_payments = [BuilderPendingPayment() for _ in range(SLOTS_PER_EPOCH)]
// state.builder_pending_payments = old_payments + new_payments
func ProcessBuilderPendingPayments(state state.BeaconState) error {
quorum, err := builderQuorumThreshold(state)
if err != nil {
@@ -57,16 +53,12 @@ func ProcessBuilderPendingPayments(state state.BeaconState) error {
}
// builderQuorumThreshold calculates the quorum threshold for builder payments.
// Spec v1.7.0-alpha.0 (pseudocode):
// def get_builder_payment_quorum_threshold(state: BeaconState) -> uint64:
//
// <spec fn="get_builder_payment_quorum_threshold" fork="gloas" hash="a64b7ffb">
// def get_builder_payment_quorum_threshold(state: BeaconState) -> uint64:
// """
// Calculate the quorum threshold for builder payments.
// """
// per_slot_balance = get_total_active_balance(state) // SLOTS_PER_EPOCH
// quorum = per_slot_balance * BUILDER_PAYMENT_THRESHOLD_NUMERATOR
// return uint64(quorum // BUILDER_PAYMENT_THRESHOLD_DENOMINATOR)
// </spec>
// per_slot_balance = get_total_active_balance(state) // SLOTS_PER_EPOCH
// quorum = per_slot_balance * BUILDER_PAYMENT_THRESHOLD_NUMERATOR
// return uint64(quorum // BUILDER_PAYMENT_THRESHOLD_DENOMINATOR)
func builderQuorumThreshold(state state.ReadOnlyBeaconState) (primitives.Gwei, error) {
activeBalance, err := helpers.TotalActiveBalance(state)
if err != nil {
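The threshold is a fixed fraction of the per-slot share of the total active balance. A sketch of the arithmetic with plain integers (names illustrative):
// builderPaymentQuorum mirrors get_builder_payment_quorum_threshold: divide the
// total active balance across the slots of an epoch, then scale by the builder
// payment threshold fraction.
func builderPaymentQuorum(totalActiveBalance, slotsPerEpoch, thresholdNumerator, thresholdDenominator uint64) uint64 {
    perSlotBalance := totalActiveBalance / slotsPerEpoch
    return perSlotBalance * thresholdNumerator / thresholdDenominator
}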

View File

@@ -11,20 +11,16 @@ import (
)
// RemoveBuilderPendingPayment removes the pending builder payment for the proposal slot.
// Spec v1.7.0 (pseudocode):
//
// <spec fn="process_proposer_slashing" fork="gloas" lines="22-32" hash="4da721ef">
// # [New in Gloas:EIP7732]
// # Remove the BuilderPendingPayment corresponding to
// # this proposal if it is still in the 2-epoch window.
// slot = header_1.slot
// proposal_epoch = compute_epoch_at_slot(slot)
// if proposal_epoch == get_current_epoch(state):
// payment_index = SLOTS_PER_EPOCH + slot % SLOTS_PER_EPOCH
// state.builder_pending_payments[payment_index] = BuilderPendingPayment()
// payment_index = SLOTS_PER_EPOCH + slot % SLOTS_PER_EPOCH
// state.builder_pending_payments[payment_index] = BuilderPendingPayment()
// elif proposal_epoch == get_previous_epoch(state):
// payment_index = slot % SLOTS_PER_EPOCH
// state.builder_pending_payments[payment_index] = BuilderPendingPayment()
// </spec>
// payment_index = slot % SLOTS_PER_EPOCH
// state.builder_pending_payments[payment_index] = BuilderPendingPayment()
func RemoveBuilderPendingPayment(st state.BeaconState, header *eth.BeaconBlockHeader) error {
proposalEpoch := slots.ToEpoch(header.Slot)
currentEpoch := time.CurrentEpoch(st)
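The index arithmetic relies on builder_pending_payments spanning two epochs: the previous epoch's payments occupy the first SLOTS_PER_EPOCH entries and the current epoch's the second. A small sketch of that mapping (illustrative name):
// pendingPaymentIndex returns the builder_pending_payments index for a proposal
// slot, given whether the proposal falls in the current epoch (second half of
// the vector) or the previous epoch (first half).
func pendingPaymentIndex(slot, slotsPerEpoch uint64, isCurrentEpoch bool) uint64 {
    idx := slot % slotsPerEpoch
    if isCurrentEpoch {
        idx += slotsPerEpoch
    }
    return idx
}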

View File

@@ -0,0 +1,105 @@
package gloas
import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/pkg/errors"
)
// ProcessWithdrawals applies withdrawals to the state for Gloas.
//
// Spec v1.7.0-alpha.1 (pseudocode):
//
// def process_withdrawals(
//
// state: BeaconState,
// # [Modified in Gloas:EIP7732]
// # Removed `payload`
//
// ) -> None:
//
// # [New in Gloas:EIP7732]
// # Return early if the parent block is empty
// if not is_parent_block_full(state):
// return
//
// # Get expected withdrawals
// expected = get_expected_withdrawals(state)
//
// # Apply expected withdrawals
// apply_withdrawals(state, expected.withdrawals)
//
// # Update withdrawals fields in the state
// update_next_withdrawal_index(state, expected.withdrawals)
// # [New in Gloas:EIP7732]
// update_payload_expected_withdrawals(state, expected.withdrawals)
// # [New in Gloas:EIP7732]
// update_builder_pending_withdrawals(state, expected.processed_builder_withdrawals_count)
// update_pending_partial_withdrawals(state, expected.processed_partial_withdrawals_count)
// # [New in Gloas:EIP7732]
// update_next_withdrawal_builder_index(state, expected.processed_builders_sweep_count)
// update_next_withdrawal_validator_index(state, expected.withdrawals)
func ProcessWithdrawals(st state.BeaconState) error {
full, err := st.IsParentBlockFull()
if err != nil {
return errors.Wrap(err, "could not get parent block full status")
}
if !full {
return nil
}
expected, err := st.ExpectedWithdrawalsGloas()
if err != nil {
return errors.Wrap(err, "could not get expected withdrawals")
}
if err := st.DecreaseWithdrawalBalances(expected.Withdrawals); err != nil {
return errors.Wrap(err, "could not decrease withdrawal balances")
}
if len(expected.Withdrawals) > 0 {
if err := st.SetNextWithdrawalIndex(expected.Withdrawals[len(expected.Withdrawals)-1].Index + 1); err != nil {
return errors.Wrap(err, "could not set next withdrawal index")
}
}
err = st.SetPayloadExpectedWithdrawals(expected.Withdrawals)
if err != nil {
return errors.Wrap(err, "could not set payload expected withdrawals")
}
err = st.DequeueBuilderPendingWithdrawals(expected.ProcessedBuilderWithdrawalsCount)
if err != nil {
return errors.Wrap(err, "unable to dequeue builder pending withdrawals from state")
}
if err := st.DequeuePendingPartialWithdrawals(expected.ProcessedPartialWithdrawalsCount); err != nil {
return errors.Wrap(err, "unable to dequeue partial withdrawals from state")
}
err = st.SetNextWithdrawalBuilderIndex(expected.NextWithdrawalBuilderIndex)
if err != nil {
return errors.Wrap(err, "could not set next withdrawal builder index")
}
var nextValidatorIndex primitives.ValidatorIndex
if uint64(len(expected.Withdrawals)) < params.BeaconConfig().MaxWithdrawalsPerPayload {
nextValidatorIndex, err = st.NextWithdrawalValidatorIndex()
if err != nil {
return errors.Wrap(err, "could not get next withdrawal validator index")
}
nextValidatorIndex += primitives.ValidatorIndex(params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep)
nextValidatorIndex = nextValidatorIndex % primitives.ValidatorIndex(st.NumValidators())
} else {
nextValidatorIndex = expected.Withdrawals[len(expected.Withdrawals)-1].ValidatorIndex + 1
if nextValidatorIndex == primitives.ValidatorIndex(st.NumValidators()) {
nextValidatorIndex = 0
}
}
if err := st.SetNextWithdrawalValidatorIndex(nextValidatorIndex); err != nil {
return errors.Wrap(err, "could not set next withdrawal validator index")
}
return nil
}
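The final block of ProcessWithdrawals mirrors the spec's update of next_withdrawal_validator_index: a partially filled payload jumps the sweep cursor forward by the sweep bound, while a full payload resumes right after the last withdrawn validator. A sketch of that cursor arithmetic with plain integers (illustrative name):
// nextValidatorSweepCursor computes where the validator withdrawal sweep resumes.
func nextValidatorSweepCursor(cursor, lastWithdrawnValidator, withdrawalCount, numValidators, maxPerPayload, sweepBound uint64) uint64 {
    if withdrawalCount < maxPerPayload {
        // Payload was not full: skip ahead by the sweep bound, wrapping around.
        return (cursor + sweepBound) % numValidators
    }
    // Payload was full: continue right after the last withdrawn validator.
    next := lastWithdrawnValidator + 1
    if next == numValidators {
        return 0
    }
    return next
}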

View File

@@ -0,0 +1,34 @@
package gloas
import (
"testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/require"
)
func TestProcessWithdrawals_ParentBlockNotFull(t *testing.T) {
state, err := state_native.InitializeFromProtoUnsafeGloas(&ethpb.BeaconStateGloas{})
require.NoError(t, err)
st := &withdrawalsState{BeaconState: state}
require.NoError(t, ProcessWithdrawals(st))
require.Equal(t, false, st.expectedCalled)
}
type withdrawalsState struct {
state.BeaconState
expectedCalled bool
decreaseCalled bool
}
func (w *withdrawalsState) IsParentBlockFull() (bool, error) {
return false, nil
}
func (w *withdrawalsState) ExpectedWithdrawalsGloas() (state.ExpectedWithdrawalsGloasResult, error) {
w.expectedCalled = true
return state.ExpectedWithdrawalsGloasResult{}, nil
}

View File

@@ -143,11 +143,10 @@ func ProcessSlot(ctx context.Context, state state.BeaconState) (state.BeaconStat
return nil, err
}
// <spec fn="process_slot" fork="gloas" lines="11-13" hash="62b28839">
// Spec v1.6.1 (pseudocode):
// # [New in Gloas:EIP7732]
// # Unset the next payload availability
// state.execution_payload_availability[(state.slot + 1) % SLOTS_PER_HISTORICAL_ROOT] = 0b0
// </spec>
if state.Version() >= version.Gloas {
index := uint64((state.Slot() + 1) % params.BeaconConfig().SlotsPerHistoricalRoot)
if err := state.UpdateExecutionPayloadAvailabilityAtIndex(index, 0x0); err != nil {

View File

@@ -78,7 +78,7 @@ func newGloasState(t *testing.T, slot primitives.Slot, availability []byte) stat
BlockHash: make([]byte, 32),
PrevRandao: make([]byte, 32),
FeeRecipient: make([]byte, 20),
BlobKzgCommitments: [][]byte{make([]byte, 48)},
BlobKzgCommitmentsRoot: make([]byte, 32),
},
Eth1Data: &ethpb.Eth1Data{
DepositRoot: make([]byte, 32),

View File

@@ -67,6 +67,7 @@ func getSubscriptionStatusFromDB(t *testing.T, db *Store) bool {
return subscribed
}
func TestUpdateCustodyInfo(t *testing.T) {
ctx := t.Context()

View File

@@ -6,6 +6,7 @@ go_library(
"doc.go",
"errors.go",
"forkchoice.go",
"last_root.go",
"log.go",
"metrics.go",
"node.go",
@@ -50,6 +51,7 @@ go_test(
srcs = [
"ffg_update_test.go",
"forkchoice_test.go",
"last_root_test.go",
"no_vote_test.go",
"node_test.go",
"on_tick_test.go",

View File

@@ -32,6 +32,7 @@ func New() *ForkChoice {
finalizedCheckpoint: &forkchoicetypes.Checkpoint{},
proposerBoostRoot: [32]byte{},
nodeByRoot: make(map[[fieldparams.RootLength]byte]*Node),
nodeByPayload: make(map[[fieldparams.RootLength]byte]*Node),
slashedIndices: make(map[primitives.ValidatorIndex]bool),
receivedBlocksLastEpoch: [fieldparams.SlotsPerEpoch]primitives.Slot{},
}

View File

@@ -0,0 +1,26 @@
package doublylinkedtree
import (
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/time/slots"
)
// LastRoot returns the last canonical block root in the given epoch
func (f *ForkChoice) LastRoot(epoch primitives.Epoch) [32]byte {
head := f.store.headNode
headEpoch := slots.ToEpoch(head.slot)
epochEnd, err := slots.EpochEnd(epoch)
if err != nil {
return [32]byte{}
}
if headEpoch <= epoch {
return head.root
}
for head != nil && head.slot > epochEnd {
head = head.parent
}
if head == nil {
return [32]byte{}
}
return head.root
}
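A hypothetical caller-side sketch of the new accessor; the interface below exists only to keep the example self-contained and is not part of the change:
// lastRootGetter captures the single accessor this sketch needs; the fork
// choice store satisfies it via the LastRoot method above.
type lastRootGetter interface {
    LastRoot(primitives.Epoch) [32]byte
}

// previousEpochLastRoot returns the last canonical block root of the epoch
// before the given one, or of epoch 0 when there is no previous epoch.
func previousEpochLastRoot(fc lastRootGetter, current primitives.Epoch) [32]byte {
    if current == 0 {
        return fc.LastRoot(0)
    }
    return fc.LastRoot(current - 1)
}
With 32 slots per epoch, epoch 0 ends at slot 31, so from a head at slot 34 whose ancestry runs 34 → 33 → 2 the walk returns the slot-2 root, which is the shape TestLastRoot checks below.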

View File

@@ -0,0 +1,38 @@
package doublylinkedtree
import (
"testing"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/testing/require"
)
func TestLastRoot(t *testing.T) {
f := setup(0, 0)
ctx := t.Context()
st, root, err := prepareForkchoiceState(ctx, 1, [32]byte{'1'}, params.BeaconConfig().ZeroHash, [32]byte{'1'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, root))
st, root, err = prepareForkchoiceState(ctx, 2, [32]byte{'2'}, [32]byte{'1'}, [32]byte{'2'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, root))
st, root, err = prepareForkchoiceState(ctx, 3, [32]byte{'3'}, [32]byte{'1'}, [32]byte{'3'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, root))
st, root, err = prepareForkchoiceState(ctx, 32, [32]byte{'4'}, [32]byte{'3'}, [32]byte{'4'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, root))
st, root, err = prepareForkchoiceState(ctx, 33, [32]byte{'5'}, [32]byte{'2'}, [32]byte{'5'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, root))
st, root, err = prepareForkchoiceState(ctx, 34, [32]byte{'6'}, [32]byte{'5'}, [32]byte{'6'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, root))
headNode := f.store.nodeByRoot[[32]byte{'6'}]
f.store.headNode = headNode
require.Equal(t, [32]byte{'6'}, f.store.headNode.root)
require.Equal(t, [32]byte{'2'}, f.LastRoot(0))
require.Equal(t, [32]byte{'6'}, f.LastRoot(1))
require.Equal(t, [32]byte{'6'}, f.LastRoot(2))
}

View File

@@ -94,5 +94,6 @@ func (s *Store) removeNodeAndChildren(ctx context.Context, node *Node, invalidRo
s.previousProposerBoostScore = 0
}
delete(s.nodeByRoot, node.root)
delete(s.nodeByPayload, node.payloadHash)
return invalidRoots, nil
}

View File

@@ -113,6 +113,7 @@ func (s *Store) insert(ctx context.Context,
}
}
s.nodeByPayload[payloadHash] = n
s.nodeByRoot[root] = n
if parent == nil {
if s.treeRootNode == nil {
@@ -121,6 +122,7 @@ func (s *Store) insert(ctx context.Context,
s.highestReceivedNode = n
} else {
delete(s.nodeByRoot, root)
delete(s.nodeByPayload, payloadHash)
return nil, errInvalidParentRoot
}
} else {
@@ -189,6 +191,7 @@ func (s *Store) pruneFinalizedNodeByRootMap(ctx context.Context, node, finalized
node.children = nil
delete(s.nodeByRoot, node.root)
delete(s.nodeByPayload, node.payloadHash)
return nil
}
@@ -270,6 +273,21 @@ func (f *ForkChoice) HighestReceivedBlockSlot() primitives.Slot {
return f.store.highestReceivedNode.slot
}
// HighestReceivedBlockDelay returns the number of slots by which the highest
// received block was late when it was received. For example, if a block was late by 12 slots,
// this method returns 12.
func (f *ForkChoice) HighestReceivedBlockDelay() primitives.Slot {
n := f.store.highestReceivedNode
if n == nil {
return 0
}
sss, err := slots.SinceSlotStart(n.slot, f.store.genesisTime, n.timestamp)
if err != nil {
return 0
}
return primitives.Slot(uint64(sss/time.Second) / params.BeaconConfig().SecondsPerSlot)
}
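The delay is a whole-slot division of the time elapsed since the block's slot began. A standalone sketch using only the standard library (illustrative name):
import "time"

// blockDelayInSlots returns the number of whole slots between the start of the
// block's slot and the moment the block was received.
func blockDelayInSlots(genesis, received time.Time, slot, secondsPerSlot uint64) uint64 {
    slotStart := genesis.Add(time.Duration(slot*secondsPerSlot) * time.Second)
    if !received.After(slotStart) {
        return 0
    }
    return uint64(received.Sub(slotStart)/time.Second) / secondsPerSlot
}
With 12-second slots, a block received 144 seconds after its slot started reports a delay of 12 slots, which is the case TestStore_HighestReceivedBlockDelay exercises further down.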
// ReceivedBlocksLastEpoch returns the number of blocks received in the last epoch
func (f *ForkChoice) ReceivedBlocksLastEpoch() (uint64, error) {
count := uint64(0)

View File

@@ -128,9 +128,10 @@ func TestStore_Insert(t *testing.T) {
// The new node does not have a parent.
treeRootNode := &Node{slot: 0, root: indexToHash(0)}
nodeByRoot := map[[32]byte]*Node{indexToHash(0): treeRootNode}
nodeByPayload := map[[32]byte]*Node{indexToHash(0): treeRootNode}
jc := &forkchoicetypes.Checkpoint{Epoch: 0}
fc := &forkchoicetypes.Checkpoint{Epoch: 0}
s := &Store{nodeByRoot: nodeByRoot, treeRootNode: treeRootNode, justifiedCheckpoint: jc, finalizedCheckpoint: fc, highestReceivedNode: &Node{}}
s := &Store{nodeByRoot: nodeByRoot, treeRootNode: treeRootNode, nodeByPayload: nodeByPayload, justifiedCheckpoint: jc, finalizedCheckpoint: fc, highestReceivedNode: &Node{}}
payloadHash := [32]byte{'a'}
ctx := t.Context()
_, blk, err := prepareForkchoiceState(ctx, 100, indexToHash(100), indexToHash(0), payloadHash, 1, 1)
@@ -237,6 +238,7 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
s.finalizedCheckpoint.Root = indexToHash(1)
require.NoError(t, s.prune(t.Context()))
require.Equal(t, len(s.nodeByRoot), 1)
require.Equal(t, len(s.nodeByPayload), 1)
}
// This test starts with the following branching diagram
@@ -317,6 +319,8 @@ func TestStore_PruneMapsNodes(t *testing.T) {
s.finalizedCheckpoint.Root = indexToHash(1)
require.NoError(t, s.prune(t.Context()))
require.Equal(t, len(s.nodeByRoot), 1)
require.Equal(t, len(s.nodeByPayload), 1)
}
func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
@@ -335,6 +339,7 @@ func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
require.NoError(t, err)
require.Equal(t, uint64(1), count)
require.Equal(t, primitives.Slot(1), f.HighestReceivedBlockSlot())
require.Equal(t, primitives.Slot(0), f.HighestReceivedBlockDelay())
// 64
// Received block last epoch is 1
@@ -347,6 +352,7 @@ func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
require.NoError(t, err)
require.Equal(t, uint64(1), count)
require.Equal(t, primitives.Slot(64), f.HighestReceivedBlockSlot())
require.Equal(t, primitives.Slot(0), f.HighestReceivedBlockDelay())
// 64 65
// Received block last epoch is 2
@@ -359,6 +365,7 @@ func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
require.NoError(t, err)
require.Equal(t, uint64(2), count)
require.Equal(t, primitives.Slot(65), f.HighestReceivedBlockSlot())
require.Equal(t, primitives.Slot(1), f.HighestReceivedBlockDelay())
// 64 65 66
// Received block last epoch is 3
@@ -710,3 +717,17 @@ func TestStore_CleanupInserting(t *testing.T) {
require.NotNil(t, f.InsertNode(ctx, st, blk))
require.Equal(t, false, f.HasNode(blk.Root()))
}
func TestStore_HighestReceivedBlockDelay(t *testing.T) {
f := ForkChoice{
store: &Store{
genesisTime: time.Unix(0, 0),
highestReceivedNode: &Node{
slot: 10,
timestamp: time.Unix(int64(((10 + 12) * params.BeaconConfig().SecondsPerSlot)), 0), // 12 slots late
},
},
}
require.Equal(t, primitives.Slot(12), f.HighestReceivedBlockDelay())
}

View File

@@ -36,6 +36,7 @@ type Store struct {
treeRootNode *Node // the root node of the store tree.
headNode *Node // last head Node
nodeByRoot map[[fieldparams.RootLength]byte]*Node // nodes indexed by roots.
nodeByPayload map[[fieldparams.RootLength]byte]*Node // nodes indexed by payload Hash
slashedIndices map[primitives.ValidatorIndex]bool // the list of equivocating validator indices
originRoot [fieldparams.RootLength]byte // The genesis block root
genesisTime time.Time

View File

@@ -67,11 +67,13 @@ type FastGetter interface {
HasNode([32]byte) bool
HighestReceivedBlockSlot() primitives.Slot
HighestReceivedBlockRoot() [32]byte
HighestReceivedBlockDelay() primitives.Slot
IsCanonical(root [32]byte) bool
IsOptimistic(root [32]byte) (bool, error)
IsViableForCheckpoint(*forkchoicetypes.Checkpoint) (bool, error)
JustifiedCheckpoint() *forkchoicetypes.Checkpoint
JustifiedPayloadBlockHash() [32]byte
LastRoot(primitives.Epoch) [32]byte
NodeCount() int
PreviousJustifiedCheckpoint() *forkchoicetypes.Checkpoint
ProposerBoost() [fieldparams.RootLength]byte

View File

@@ -121,6 +121,13 @@ func (ro *ROForkChoice) HighestReceivedBlockRoot() [32]byte {
return ro.getter.HighestReceivedBlockRoot()
}
// HighestReceivedBlockDelay delegates to the underlying forkchoice call, under a lock.
func (ro *ROForkChoice) HighestReceivedBlockDelay() primitives.Slot {
ro.l.RLock()
defer ro.l.RUnlock()
return ro.getter.HighestReceivedBlockDelay()
}
// ReceivedBlocksLastEpoch delegates to the underlying forkchoice call, under a lock.
func (ro *ROForkChoice) ReceivedBlocksLastEpoch() (uint64, error) {
ro.l.RLock()
@@ -156,6 +163,13 @@ func (ro *ROForkChoice) Slot(root [32]byte) (primitives.Slot, error) {
return ro.getter.Slot(root)
}
// LastRoot delegates to the underlying forkchoice call, under a lock.
func (ro *ROForkChoice) LastRoot(e primitives.Epoch) [32]byte {
ro.l.RLock()
defer ro.l.RUnlock()
return ro.getter.LastRoot(e)
}
// DependentRoot delegates to the underlying forkchoice call, under a lock.
func (ro *ROForkChoice) DependentRoot(epoch primitives.Epoch) ([32]byte, error) {
ro.l.RLock()

View File

@@ -30,6 +30,7 @@ const (
nodeCountCalled
highestReceivedBlockSlotCalled
highestReceivedBlockRootCalled
highestReceivedBlockDelayCalled
receivedBlocksLastEpochCalled
weightCalled
isOptimisticCalled
@@ -117,6 +118,11 @@ func TestROLocking(t *testing.T) {
call: highestReceivedBlockSlotCalled,
cb: func(g FastGetter) { g.HighestReceivedBlockSlot() },
},
{
name: "highestReceivedBlockDelayCalled",
call: highestReceivedBlockDelayCalled,
cb: func(g FastGetter) { g.HighestReceivedBlockDelay() },
},
{
name: "receivedBlocksLastEpochCalled",
call: receivedBlocksLastEpochCalled,
@@ -142,6 +148,11 @@ func TestROLocking(t *testing.T) {
call: slotCalled,
cb: func(g FastGetter) { _, err := g.Slot([32]byte{}); _discard(t, err) },
},
{
name: "lastRootCalled",
call: lastRootCalled,
cb: func(g FastGetter) { g.LastRoot(0) },
},
{
name: "targetRootForEpochCalled",
call: targetRootForEpochCalled,
@@ -254,6 +265,11 @@ func (ro *mockROForkchoice) HighestReceivedBlockRoot() [32]byte {
return [32]byte{}
}
func (ro *mockROForkchoice) HighestReceivedBlockDelay() primitives.Slot {
ro.calls = append(ro.calls, highestReceivedBlockDelayCalled)
return 0
}
func (ro *mockROForkchoice) ReceivedBlocksLastEpoch() (uint64, error) {
ro.calls = append(ro.calls, receivedBlocksLastEpochCalled)
return 0, nil
@@ -279,6 +295,11 @@ func (ro *mockROForkchoice) Slot(_ [32]byte) (primitives.Slot, error) {
return 0, nil
}
func (ro *mockROForkchoice) LastRoot(_ primitives.Epoch) [32]byte {
ro.calls = append(ro.calls, lastRootCalled)
return [32]byte{}
}
// DependentRoot implements FastGetter.
func (ro *mockROForkchoice) DependentRoot(_ primitives.Epoch) ([32]byte, error) {
ro.calls = append(ro.calls, dependentRootCalled)

View File

@@ -134,20 +134,10 @@ type BeaconNode struct {
// New creates a new node instance, sets up configuration options, and registers
// every required service to the node.
func New(cliCtx *cli.Context, cancel context.CancelFunc, optFuncs []func(*cli.Context) ([]Option, error), opts ...Option) (*BeaconNode, error) {
func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*BeaconNode, error) {
if err := configureBeacon(cliCtx); err != nil {
return nil, errors.Wrap(err, "could not set beacon configuration options")
}
for _, of := range optFuncs {
ofo, err := of(cliCtx)
if err != nil {
return nil, err
}
if ofo != nil {
opts = append(opts, ofo...)
}
}
ctx := cliCtx.Context
beacon := &BeaconNode{

View File

@@ -59,7 +59,7 @@ func TestNodeClose_OK(t *testing.T) {
WithDataColumnStorage(filesystem.NewEphemeralDataColumnStorage(t)),
}
node, err := New(ctx, cancel, nil, options...)
node, err := New(ctx, cancel, options...)
require.NoError(t, err)
node.Close()
@@ -87,7 +87,7 @@ func TestNodeStart_Ok(t *testing.T) {
WithDataColumnStorage(filesystem.NewEphemeralDataColumnStorage(t)),
}
node, err := New(ctx, cancel, nil, options...)
node, err := New(ctx, cancel, options...)
require.NoError(t, err)
require.NotNil(t, node.lcStore)
node.services = &runtime.ServiceRegistry{}
@@ -116,7 +116,7 @@ func TestNodeStart_SyncChecker(t *testing.T) {
WithDataColumnStorage(filesystem.NewEphemeralDataColumnStorage(t)),
}
node, err := New(ctx, cancel, nil, options...)
node, err := New(ctx, cancel, options...)
require.NoError(t, err)
go func() {
node.Start()
@@ -151,7 +151,7 @@ func TestClearDB(t *testing.T) {
WithDataColumnStorage(filesystem.NewEphemeralDataColumnStorage(t)),
}
_, err = New(context, cancel, nil, options...)
_, err = New(context, cancel, options...)
require.NoError(t, err)
require.LogsContain(t, hook, "Removing database")
}

View File

@@ -26,8 +26,8 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/voluntaryexits/mock"
p2pMock "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/core"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
mockSync "github.com/OffchainLabs/prysm/v7/beacon-chain/sync/initial-sync/testing"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/crypto/bls"

View File

@@ -84,6 +84,7 @@ func TestGetSpec(t *testing.T) {
config.FuluForkVersion = []byte("FuluForkVersion")
config.FuluForkEpoch = 109
config.GloasForkEpoch = 110
config.MaxBuildersPerWithdrawalsSweep = 112
config.BLSWithdrawalPrefixByte = byte('b')
config.ETH1AddressWithdrawalPrefixByte = byte('c')
config.GenesisDelay = 24
@@ -220,7 +221,7 @@ func TestGetSpec(t *testing.T) {
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp))
data, ok := resp.Data.(map[string]any)
require.Equal(t, true, ok)
assert.Equal(t, 192, len(data))
assert.Equal(t, 193, len(data))
for k, v := range data {
t.Run(k, func(t *testing.T) {
switch k {
@@ -302,6 +303,8 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, "109", v)
case "GLOAS_FORK_EPOCH":
assert.Equal(t, "110", v)
case "MAX_BUILDERS_PER_WITHDRAWALS_SWEEP":
assert.Equal(t, "112", v)
case "MIN_ANCHOR_POW_BLOCK_DIFFICULTY":
assert.Equal(t, "1000", v)
case "BLS_WITHDRAWAL_PREFIX":

View File

@@ -3,6 +3,7 @@ package state
import (
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)
@@ -13,6 +14,10 @@ type writeOnlyGloasFields interface {
RotateBuilderPendingPayments() error
AppendBuilderPendingWithdrawals([]*ethpb.BuilderPendingWithdrawal) error
UpdateExecutionPayloadAvailabilityAtIndex(idx uint64, val byte) error
SetPayloadExpectedWithdrawals(withdrawals []*enginev1.Withdrawal) error
DecreaseWithdrawalBalances(withdrawals []*enginev1.Withdrawal) error
DequeueBuilderPendingWithdrawals(num uint64) error
SetNextWithdrawalBuilderIndex(idx primitives.BuilderIndex) error
}
type readOnlyGloasFields interface {
@@ -21,4 +26,15 @@ type readOnlyGloasFields interface {
CanBuilderCoverBid(primitives.BuilderIndex, primitives.Gwei) (bool, error)
LatestBlockHash() ([32]byte, error)
BuilderPendingPayments() ([]*ethpb.BuilderPendingPayment, error)
IsParentBlockFull() (bool, error)
ExpectedWithdrawalsGloas() (ExpectedWithdrawalsGloasResult, error)
}
// ExpectedWithdrawalsGloasResult bundles the expected withdrawals and related counters
// for the Gloas fork to avoid positional return mistakes.
type ExpectedWithdrawalsGloasResult struct {
Withdrawals []*enginev1.Withdrawal
ProcessedBuilderWithdrawalsCount uint64
ProcessedPartialWithdrawalsCount uint64
NextWithdrawalBuilderIndex primitives.BuilderIndex
}

View File

@@ -1,13 +1,17 @@
package state_native
import (
"bytes"
"fmt"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/time/slots"
)
// LatestBlockHash returns the hash of the latest execution block.
@@ -46,20 +50,14 @@ func (b *BeaconState) BuilderPubkey(builderIndex primitives.BuilderIndex) ([fiel
}
// IsActiveBuilder returns true if the builder placement is finalized and it has not initiated exit.
// Spec v1.7.0-alpha.0 (pseudocode):
// def is_active_builder(state: BeaconState, builder_index: BuilderIndex) -> bool:
//
// <spec fn="is_active_builder" fork="gloas" hash="1a599fb2">
// def is_active_builder(state: BeaconState, builder_index: BuilderIndex) -> bool:
// """
// Check if the builder at ``builder_index`` is active for the given ``state``.
// """
// builder = state.builders[builder_index]
// return (
// # Placement in builder list is finalized
// builder.deposit_epoch < state.finalized_checkpoint.epoch
// # Has not initiated exit
// and builder.withdrawable_epoch == FAR_FUTURE_EPOCH
// )
// </spec>
// builder = state.builders[builder_index]
// return (
// builder.deposit_epoch < state.finalized_checkpoint.epoch
// and builder.withdrawable_epoch == FAR_FUTURE_EPOCH
// )
func (b *BeaconState) IsActiveBuilder(builderIndex primitives.BuilderIndex) (bool, error) {
if b.version < version.Gloas {
return false, errNotSupported("IsActiveBuilder", b.version)
@@ -78,18 +76,15 @@ func (b *BeaconState) IsActiveBuilder(builderIndex primitives.BuilderIndex) (boo
}
// CanBuilderCoverBid returns true if the builder has enough balance to cover the given bid amount.
// Spec v1.7.0-alpha.0 (pseudocode):
// def can_builder_cover_bid(state: BeaconState, builder_index: BuilderIndex, bid_amount: Gwei) -> bool:
//
// <spec fn="can_builder_cover_bid" fork="gloas" hash="9e3f2d7c">
// def can_builder_cover_bid(
// state: BeaconState, builder_index: BuilderIndex, bid_amount: Gwei
// ) -> bool:
// builder_balance = state.builders[builder_index].balance
// pending_withdrawals_amount = get_pending_balance_to_withdraw_for_builder(state, builder_index)
// min_balance = MIN_DEPOSIT_AMOUNT + pending_withdrawals_amount
// if builder_balance < min_balance:
// return False
// return builder_balance - min_balance >= bid_amount
// </spec>
// builder_balance = state.builders[builder_index].balance
// pending_withdrawals_amount = get_pending_balance_to_withdraw_for_builder(state, builder_index)
// min_balance = MIN_DEPOSIT_AMOUNT + pending_withdrawals_amount
// if builder_balance < min_balance:
// return False
// return builder_balance - min_balance >= bid_amount
func (b *BeaconState) CanBuilderCoverBid(builderIndex primitives.BuilderIndex, bidAmount primitives.Gwei) (bool, error) {
if b.version < version.Gloas {
return false, errNotSupported("CanBuilderCoverBid", b.version)
@@ -156,3 +151,220 @@ func (b *BeaconState) BuilderPendingPayments() ([]*ethpb.BuilderPendingPayment,
return b.builderPendingPaymentsVal(), nil
}
// IsParentBlockFull returns true if the last committed payload bid was fulfilled with a payload,
// which can only happen when both the beacon block and the payload were present.
// This function must be called on a beacon state before processing the execution payload bid in the block.
// Spec v1.7.0-alpha.2 (pseudocode):
// def is_parent_block_full(state: BeaconState) -> bool:
//
// return state.latest_execution_payload_bid.block_hash == state.latest_block_hash
func (b *BeaconState) IsParentBlockFull() (bool, error) {
if b.version < version.Gloas {
return false, errNotSupported("IsParentBlockFull", b.version)
}
b.lock.RLock()
defer b.lock.RUnlock()
if b.latestExecutionPayloadBid == nil {
return false, nil
}
return bytes.Equal(b.latestExecutionPayloadBid.BlockHash, b.latestBlockHash), nil
}
// ExpectedWithdrawalsGloas returns the withdrawals that a proposer will need to pack in the next block
// applied to the current state. It is also used by validators to check that the execution payload carried
// the right number of withdrawals.
//
// Spec v1.7.0-alpha.1:
//
// def get_expected_withdrawals(state: BeaconState) -> ExpectedWithdrawals:
// withdrawal_index = state.next_withdrawal_index
// withdrawals: List[Withdrawal] = []
//
// # [New in Gloas:EIP7732]
// # Get builder withdrawals
// builder_withdrawals, withdrawal_index, processed_builder_withdrawals_count = (
// get_builder_withdrawals(state, withdrawal_index, withdrawals)
// )
// withdrawals.extend(builder_withdrawals)
//
// # Get partial withdrawals
// partial_withdrawals, withdrawal_index, processed_partial_withdrawals_count = (
// get_pending_partial_withdrawals(state, withdrawal_index, withdrawals)
// )
// withdrawals.extend(partial_withdrawals)
//
// # [New in Gloas:EIP7732]
// # Get builders sweep withdrawals
// builders_sweep_withdrawals, withdrawal_index, processed_builders_sweep_count = (
// get_builders_sweep_withdrawals(state, withdrawal_index, withdrawals)
// )
// withdrawals.extend(builders_sweep_withdrawals)
//
// # Get validators sweep withdrawals
// validators_sweep_withdrawals, withdrawal_index, processed_validators_sweep_count = (
// get_validators_sweep_withdrawals(state, withdrawal_index, withdrawals)
// )
// withdrawals.extend(validators_sweep_withdrawals)
//
// return ExpectedWithdrawals(
// withdrawals,
// # [New in Gloas:EIP7732]
// processed_builder_withdrawals_count,
// processed_partial_withdrawals_count,
// # [New in Gloas:EIP7732]
// processed_builders_sweep_count,
// processed_validators_sweep_count,
// )
func (b *BeaconState) ExpectedWithdrawalsGloas() (state.ExpectedWithdrawalsGloasResult, error) {
if b.version < version.Gloas {
return state.ExpectedWithdrawalsGloasResult{}, errNotSupported("ExpectedWithdrawalsGloas", b.version)
}
b.lock.RLock()
defer b.lock.RUnlock()
cfg := params.BeaconConfig()
withdrawals := make([]*enginev1.Withdrawal, 0, cfg.MaxWithdrawalsPerPayload)
withdrawalIndex := b.nextWithdrawalIndex
withdrawalIndex, processedBuilderWithdrawalsCount, err := b.appendBuilderWithdrawals(withdrawalIndex, &withdrawals)
if err != nil {
return state.ExpectedWithdrawalsGloasResult{}, err
}
withdrawalIndex, processedPartialWithdrawalsCount, err := b.appendPendingPartialWithdrawals(withdrawalIndex, &withdrawals)
if err != nil {
return state.ExpectedWithdrawalsGloasResult{}, err
}
withdrawalIndex, nextBuilderIndex, err := b.appendBuildersSweepWithdrawals(withdrawalIndex, &withdrawals)
if err != nil {
return state.ExpectedWithdrawalsGloasResult{}, err
}
err = b.appendValidatorsSweepWithdrawals(withdrawalIndex, &withdrawals)
if err != nil {
return state.ExpectedWithdrawalsGloasResult{}, err
}
return state.ExpectedWithdrawalsGloasResult{
Withdrawals: withdrawals,
ProcessedBuilderWithdrawalsCount: processedBuilderWithdrawalsCount,
ProcessedPartialWithdrawalsCount: processedPartialWithdrawalsCount,
NextWithdrawalBuilderIndex: nextBuilderIndex,
}, nil
}
// appendBuilderWithdrawals returns builder pending withdrawals, the updated withdrawal index,
// and the processed count, following spec v1.7.0-alpha.2:
//
// def get_builder_withdrawals(state, withdrawal_index, prior_withdrawals):
// withdrawals_limit = MAX_WITHDRAWALS_PER_PAYLOAD - 1
// assert len(prior_withdrawals) <= withdrawals_limit
// processed_count = 0
// withdrawals = []
// for withdrawal in state.builder_pending_withdrawals:
// if len(prior_withdrawals + withdrawals) >= withdrawals_limit:
// break
// withdrawals.append(Withdrawal(
// index=withdrawal_index,
// validator_index=convert_builder_index_to_validator_index(withdrawal.builder_index),
// address=withdrawal.fee_recipient,
// amount=withdrawal.amount,
// ))
// withdrawal_index += 1
// processed_count += 1
// return withdrawals, withdrawal_index, processed_count
func (b *BeaconState) appendBuilderWithdrawals(withdrawalIndex uint64, withdrawals *[]*enginev1.Withdrawal) (uint64, uint64, error) {
cfg := params.BeaconConfig()
withdrawalsLimit := int(cfg.MaxWithdrawalsPerPayload - 1)
ws := *withdrawals
if len(ws) > withdrawalsLimit {
return withdrawalIndex, 0, fmt.Errorf("prior withdrawals length %d exceeds limit %d", len(ws), withdrawalsLimit)
}
var processedCount uint64
for _, w := range b.builderPendingWithdrawals {
if len(ws) >= withdrawalsLimit {
break
}
ws = append(ws, &enginev1.Withdrawal{
Index: withdrawalIndex,
ValidatorIndex: w.BuilderIndex.ToValidatorIndex(),
Address: w.FeeRecipient,
Amount: uint64(w.Amount),
})
withdrawalIndex++
processedCount++
}
*withdrawals = ws
return withdrawalIndex, processedCount, nil
}
// appendBuildersSweepWithdrawals returns builder sweep withdrawals, the updated withdrawal index,
// and the processed count, following spec v1.7.0-alpha.2:
//
// def get_builders_sweep_withdrawals(state, withdrawal_index, prior_withdrawals):
// epoch = get_current_epoch(state)
// builders_limit = min(len(state.builders), MAX_BUILDERS_PER_WITHDRAWALS_SWEEP)
// withdrawals_limit = MAX_WITHDRAWALS_PER_PAYLOAD - 1
// assert len(prior_withdrawals) <= withdrawals_limit
// processed_count = 0
// withdrawals = []
// builder_index = state.next_withdrawal_builder_index
// for _ in range(builders_limit):
// if len(prior_withdrawals + withdrawals) >= withdrawals_limit:
// break
// builder = state.builders[builder_index]
// if builder.withdrawable_epoch <= epoch and builder.balance > 0:
// withdrawals.append(Withdrawal(
// index=withdrawal_index,
// validator_index=convert_builder_index_to_validator_index(builder_index),
// address=builder.execution_address,
// amount=builder.balance,
// ))
// withdrawal_index += 1
// builder_index = BuilderIndex((builder_index + 1) % len(state.builders))
// processed_count += 1
// return withdrawals, withdrawal_index, processed_count
func (b *BeaconState) appendBuildersSweepWithdrawals(withdrawalIndex uint64, withdrawals *[]*enginev1.Withdrawal) (uint64, primitives.BuilderIndex, error) {
cfg := params.BeaconConfig()
withdrawalsLimit := int(cfg.MaxWithdrawalsPerPayload - 1)
if len(*withdrawals) > withdrawalsLimit {
return withdrawalIndex, 0, fmt.Errorf("prior withdrawals length %d exceeds limit %d", len(*withdrawals), withdrawalsLimit)
}
ws := *withdrawals
buildersLimit := min(len(b.builders), int(cfg.MaxBuildersPerWithdrawalsSweep))
builderIndex := b.nextWithdrawalBuilderIndex
epoch := slots.ToEpoch(b.slot)
for range buildersLimit {
if len(ws) >= withdrawalsLimit {
break
}
builder := b.builders[builderIndex]
if builder != nil && builder.WithdrawableEpoch <= epoch && builder.Balance > 0 {
ws = append(ws, &enginev1.Withdrawal{
Index: withdrawalIndex,
ValidatorIndex: builderIndex.ToValidatorIndex(),
Address: builder.ExecutionAddress,
Amount: uint64(builder.Balance),
})
withdrawalIndex++
}
builderIndex = primitives.BuilderIndex((uint64(builderIndex) + 1) % uint64(len(b.builders)))
}
*withdrawals = ws
return withdrawalIndex, builderIndex, nil
}
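A compact sketch of the sweep loop above with plain slices, showing its two invariants: eligibility requires the builder to be withdrawable at the current epoch with a non-zero balance, and the cursor always advances with wrap-around regardless of eligibility (names illustrative):
// sweepBuilders visits at most buildersLimit builders starting at cursor,
// returning the indices selected for withdrawal and the cursor to persist.
func sweepBuilders(balances, withdrawableEpochs []uint64, cursor, currentEpoch uint64, buildersLimit, withdrawalsLimit int) ([]uint64, uint64) {
    if len(balances) == 0 {
        return nil, cursor
    }
    picked := make([]uint64, 0, withdrawalsLimit)
    for range buildersLimit {
        if len(picked) >= withdrawalsLimit {
            break
        }
        if withdrawableEpochs[cursor] <= currentEpoch && balances[cursor] > 0 {
            picked = append(picked, cursor)
        }
        cursor = (cursor + 1) % uint64(len(balances))
    }
    return picked, cursor
}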

View File

@@ -1,26 +1,27 @@
package state_native_test
package state_native
import (
"bytes"
"testing"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/testing/util"
"github.com/OffchainLabs/prysm/v7/time/slots"
)
func TestLatestBlockHash(t *testing.T) {
t.Run("returns error before gloas", func(t *testing.T) {
st, _ := util.DeterministicGenesisState(t, 1)
st := &BeaconState{version: version.Fulu}
_, err := st.LatestBlockHash()
require.ErrorContains(t, "is not supported", err)
})
t.Run("returns zero hash when unset", func(t *testing.T) {
st, err := state_native.InitializeFromProtoGloas(&ethpb.BeaconStateGloas{})
st, err := InitializeFromProtoGloas(&ethpb.BeaconStateGloas{})
require.NoError(t, err)
got, err := st.LatestBlockHash()
@@ -33,7 +34,7 @@ func TestLatestBlockHash(t *testing.T) {
var want [32]byte
copy(want[:], hashBytes)
st, err := state_native.InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
st, err := InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
LatestBlockHash: hashBytes,
})
require.NoError(t, err)
@@ -46,17 +47,14 @@ func TestLatestBlockHash(t *testing.T) {
func TestBuilderPubkey(t *testing.T) {
t.Run("returns error before gloas", func(t *testing.T) {
stIface, _ := util.DeterministicGenesisState(t, 1)
native, ok := stIface.(*state_native.BeaconState)
require.Equal(t, true, ok)
_, err := native.BuilderPubkey(0)
st := &BeaconState{version: version.Fulu}
_, err := st.BuilderPubkey(0)
require.ErrorContains(t, "is not supported", err)
})
t.Run("returns pubkey copy", func(t *testing.T) {
pubkey := bytes.Repeat([]byte{0xAA}, 48)
stIface, err := state_native.InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
stIface, err := InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
Builders: []*ethpb.Builder{
{
Pubkey: pubkey,
@@ -80,12 +78,12 @@ func TestBuilderPubkey(t *testing.T) {
})
t.Run("out of range returns error", func(t *testing.T) {
stIface, err := state_native.InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
stIface, err := InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
Builders: []*ethpb.Builder{},
})
require.NoError(t, err)
st := stIface.(*state_native.BeaconState)
st := stIface.(*BeaconState)
_, err = st.BuilderPubkey(1)
require.ErrorContains(t, "out of range", err)
})
@@ -93,7 +91,7 @@ func TestBuilderPubkey(t *testing.T) {
func TestBuilderHelpers(t *testing.T) {
t.Run("is active builder", func(t *testing.T) {
st, err := state_native.InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
st, err := InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
Builders: []*ethpb.Builder{
{
Balance: 10,
@@ -120,7 +118,7 @@ func TestBuilderHelpers(t *testing.T) {
},
FinalizedCheckpoint: &ethpb.Checkpoint{Epoch: 2},
}
stInactive, err := state_native.InitializeFromProtoGloas(stProto)
stInactive, err := InitializeFromProtoGloas(stProto)
require.NoError(t, err)
active, err = stInactive.IsActiveBuilder(0)
@@ -129,7 +127,7 @@ func TestBuilderHelpers(t *testing.T) {
})
t.Run("can builder cover bid", func(t *testing.T) {
stIface, err := state_native.InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
stIface, err := InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
Builders: []*ethpb.Builder{
{
Balance: primitives.Gwei(params.BeaconConfig().MinDepositAmount + 50),
@@ -147,7 +145,7 @@ func TestBuilderHelpers(t *testing.T) {
})
require.NoError(t, err)
st := stIface.(*state_native.BeaconState)
st := stIface.(*BeaconState)
ok, err := st.CanBuilderCoverBid(0, 20)
require.NoError(t, err)
require.Equal(t, true, ok)
@@ -159,10 +157,245 @@ func TestBuilderHelpers(t *testing.T) {
}
func TestBuilderPendingPayments_UnsupportedVersion(t *testing.T) {
stIface, err := state_native.InitializeFromProtoElectra(&ethpb.BeaconStateElectra{})
stIface, err := InitializeFromProtoElectra(&ethpb.BeaconStateElectra{})
require.NoError(t, err)
st := stIface.(*state_native.BeaconState)
st := stIface.(*BeaconState)
_, err = st.BuilderPendingPayments()
require.ErrorContains(t, "BuilderPendingPayments", err)
}
func TestIsParentBlockFull(t *testing.T) {
t.Run("returns error before gloas", func(t *testing.T) {
st := &BeaconState{version: version.Fulu}
_, err := st.IsParentBlockFull()
require.ErrorContains(t, "is not supported", err)
})
t.Run("returns false when bid is nil", func(t *testing.T) {
st := &BeaconState{version: version.Gloas}
got, err := st.IsParentBlockFull()
require.NoError(t, err)
require.Equal(t, false, got)
})
t.Run("returns true when hashes match", func(t *testing.T) {
hash := bytes.Repeat([]byte{0xAB}, 32)
st := &BeaconState{
version: version.Gloas,
latestExecutionPayloadBid: &ethpb.ExecutionPayloadBid{
BlockHash: hash,
},
latestBlockHash: hash,
}
got, err := st.IsParentBlockFull()
require.NoError(t, err)
require.Equal(t, true, got)
})
t.Run("returns false when hashes differ", func(t *testing.T) {
hash := bytes.Repeat([]byte{0xAB}, 32)
other := bytes.Repeat([]byte{0xCD}, 32)
st := &BeaconState{
version: version.Gloas,
latestExecutionPayloadBid: &ethpb.ExecutionPayloadBid{
BlockHash: hash,
},
latestBlockHash: other,
}
got, err := st.IsParentBlockFull()
require.NoError(t, err)
require.Equal(t, false, got)
})
}
func TestAppendBuilderWithdrawals(t *testing.T) {
t.Run("errors when prior withdrawals exceed limit", func(t *testing.T) {
st := &BeaconState{}
limit := params.BeaconConfig().MaxWithdrawalsPerPayload - 1
withdrawals := make([]*enginev1.Withdrawal, limit+1)
nextIndex, processed, err := st.appendBuilderWithdrawals(5, &withdrawals)
require.ErrorContains(t, "exceeds limit", err)
require.Equal(t, uint64(5), nextIndex)
require.Equal(t, uint64(0), processed)
require.Equal(t, int(limit+1), len(withdrawals))
})
t.Run("appends builder withdrawals and increments index", func(t *testing.T) {
st := &BeaconState{
builderPendingWithdrawals: []*ethpb.BuilderPendingWithdrawal{
{BuilderIndex: 1, FeeRecipient: []byte{0x01}, Amount: 11},
{BuilderIndex: 2, FeeRecipient: []byte{0x02}, Amount: 22},
{BuilderIndex: 3, FeeRecipient: []byte{0x03}, Amount: 33},
},
}
withdrawals := []*enginev1.Withdrawal{
{Index: 7, ValidatorIndex: 9, Address: []byte{0xAA}, Amount: 99},
}
nextIndex, processed, err := st.appendBuilderWithdrawals(10, &withdrawals)
require.NoError(t, err)
require.Equal(t, uint64(13), nextIndex)
require.Equal(t, uint64(3), processed)
require.Equal(t, 4, len(withdrawals))
require.DeepEqual(t, &enginev1.Withdrawal{
Index: 10,
ValidatorIndex: primitives.BuilderIndex(1).ToValidatorIndex(),
Address: []byte{0x01},
Amount: 11,
}, withdrawals[1])
require.DeepEqual(t, &enginev1.Withdrawal{
Index: 11,
ValidatorIndex: primitives.BuilderIndex(2).ToValidatorIndex(),
Address: []byte{0x02},
Amount: 22,
}, withdrawals[2])
require.DeepEqual(t, &enginev1.Withdrawal{
Index: 12,
ValidatorIndex: primitives.BuilderIndex(3).ToValidatorIndex(),
Address: []byte{0x03},
Amount: 33,
}, withdrawals[3])
})
t.Run("respects per-payload limit", func(t *testing.T) {
limit := params.BeaconConfig().MaxWithdrawalsPerPayload - 1
st := &BeaconState{
builderPendingWithdrawals: []*ethpb.BuilderPendingWithdrawal{
{BuilderIndex: 4, FeeRecipient: []byte{0x04}, Amount: 44},
{BuilderIndex: 5, FeeRecipient: []byte{0x05}, Amount: 55},
},
}
withdrawals := make([]*enginev1.Withdrawal, limit-1)
nextIndex, processed, err := st.appendBuilderWithdrawals(20, &withdrawals)
require.NoError(t, err)
require.Equal(t, uint64(21), nextIndex)
require.Equal(t, uint64(1), processed)
require.Equal(t, int(limit), len(withdrawals))
require.DeepEqual(t, &enginev1.Withdrawal{
Index: 20,
ValidatorIndex: primitives.BuilderIndex(4).ToValidatorIndex(),
Address: []byte{0x04},
Amount: 44,
}, withdrawals[len(withdrawals)-1])
})
t.Run("does not append when already at limit", func(t *testing.T) {
limit := params.BeaconConfig().MaxWithdrawalsPerPayload - 1
if limit == 0 {
t.Skip("withdrawals limit too small")
}
st := &BeaconState{
builderPendingWithdrawals: []*ethpb.BuilderPendingWithdrawal{
{BuilderIndex: 6, FeeRecipient: []byte{0x06}, Amount: 66},
},
}
withdrawals := make([]*enginev1.Withdrawal, limit)
nextIndex, processed, err := st.appendBuilderWithdrawals(30, &withdrawals)
require.NoError(t, err)
require.Equal(t, uint64(30), nextIndex)
require.Equal(t, uint64(0), processed)
require.Equal(t, int(limit), len(withdrawals))
})
}
func TestAppendBuildersSweepWithdrawals(t *testing.T) {
t.Run("errors when prior withdrawals exceed limit", func(t *testing.T) {
st := &BeaconState{}
limit := params.BeaconConfig().MaxWithdrawalsPerPayload - 1
withdrawals := make([]*enginev1.Withdrawal, limit+1)
nextIndex, nextBuilderIndex, err := st.appendBuildersSweepWithdrawals(5, &withdrawals)
require.ErrorContains(t, "exceeds limit", err)
require.Equal(t, uint64(5), nextIndex)
require.Equal(t, primitives.BuilderIndex(0), nextBuilderIndex)
require.Equal(t, int(limit+1), len(withdrawals))
})
t.Run("appends eligible builders, skips ineligible", func(t *testing.T) {
epoch := primitives.Epoch(3)
st := &BeaconState{
slot: slots.UnsafeEpochStart(epoch),
nextWithdrawalBuilderIndex: 2,
builders: []*ethpb.Builder{
{ExecutionAddress: []byte{0x01}, Balance: 0, WithdrawableEpoch: epoch},
{ExecutionAddress: []byte{0x02}, Balance: 10, WithdrawableEpoch: epoch + 1},
{ExecutionAddress: []byte{0x03}, Balance: 20, WithdrawableEpoch: epoch},
},
}
withdrawals := []*enginev1.Withdrawal{}
nextIndex, nextBuilderIndex, err := st.appendBuildersSweepWithdrawals(100, &withdrawals)
require.NoError(t, err)
require.Equal(t, uint64(101), nextIndex)
require.Equal(t, primitives.BuilderIndex(2), nextBuilderIndex)
require.Equal(t, 1, len(withdrawals))
require.DeepEqual(t, &enginev1.Withdrawal{
Index: 100,
ValidatorIndex: primitives.BuilderIndex(2).ToValidatorIndex(),
Address: []byte{0x03},
Amount: 20,
}, withdrawals[0])
})
t.Run("respects max builders per sweep", func(t *testing.T) {
cfg := params.BeaconConfig()
max := int(cfg.MaxBuildersPerWithdrawalsSweep)
epoch := primitives.Epoch(1)
builders := make([]*ethpb.Builder, max+2)
for i := range builders {
builders[i] = &ethpb.Builder{
ExecutionAddress: []byte{byte(i + 1)},
Balance: 1,
WithdrawableEpoch: epoch,
}
}
start := len(builders) - 1
st := &BeaconState{
slot: slots.UnsafeEpochStart(epoch),
nextWithdrawalBuilderIndex: primitives.BuilderIndex(start),
builders: builders,
}
withdrawals := []*enginev1.Withdrawal{}
nextIndex, nextBuilderIndex, err := st.appendBuildersSweepWithdrawals(7, &withdrawals)
require.NoError(t, err)
limit := int(cfg.MaxWithdrawalsPerPayload - 1)
expectedCount := min(max, limit)
require.Equal(t, uint64(7)+uint64(expectedCount), nextIndex)
require.Equal(t, expectedCount, len(withdrawals))
expectedNext := primitives.BuilderIndex((uint64(start) + uint64(expectedCount)) % uint64(len(builders)))
require.Equal(t, expectedNext, nextBuilderIndex)
})
t.Run("stops when payload limit reached", func(t *testing.T) {
cfg := params.BeaconConfig()
limit := cfg.MaxWithdrawalsPerPayload - 1
if limit < 1 {
t.Skip("withdrawals limit too small")
}
epoch := primitives.Epoch(2)
builders := []*ethpb.Builder{
{ExecutionAddress: []byte{0x0A}, Balance: 3, WithdrawableEpoch: epoch},
{ExecutionAddress: []byte{0x0B}, Balance: 4, WithdrawableEpoch: epoch},
}
st := &BeaconState{
slot: slots.UnsafeEpochStart(epoch),
nextWithdrawalBuilderIndex: 0,
builders: builders,
}
withdrawals := make([]*enginev1.Withdrawal, limit)
nextIndex, nextBuilderIndex, err := st.appendBuildersSweepWithdrawals(20, &withdrawals)
require.NoError(t, err)
require.Equal(t, uint64(20), nextIndex)
require.Equal(t, int(limit), len(withdrawals))
require.Equal(t, primitives.BuilderIndex(0), nextBuilderIndex)
})
}

View File

@@ -133,11 +133,22 @@ func (b *BeaconState) appendPendingPartialWithdrawals(withdrawalIndex uint64, wi
return withdrawalIndex, 0, nil
}
cfg := params.BeaconConfig()
withdrawalsLimit := min(
len(*withdrawals)+int(cfg.MaxPendingPartialsPerWithdrawalsSweep),
int(cfg.MaxWithdrawalsPerPayload-1),
)
if len(*withdrawals) > withdrawalsLimit {
return withdrawalIndex, 0, fmt.Errorf("prior withdrawals length %d exceeds limit %d", len(*withdrawals), withdrawalsLimit)
}
ws := *withdrawals
epoch := slots.ToEpoch(b.slot)
var processedPartialWithdrawalsCount uint64
for _, w := range b.pendingPartialWithdrawals {
if w.WithdrawableEpoch > epoch || len(ws) >= int(params.BeaconConfig().MaxPendingPartialsPerWithdrawalsSweep) {
isWithdrawable := w.WithdrawableEpoch <= epoch
hasReachedLimit := len(ws) >= withdrawalsLimit
if !isWithdrawable || hasReachedLimit {
break
}
@@ -149,7 +160,7 @@ func (b *BeaconState) appendPendingPartialWithdrawals(withdrawalIndex uint64, wi
if err != nil {
return withdrawalIndex, 0, fmt.Errorf("could not retrieve balance at index %d: %w", w.Index, err)
}
hasSufficientEffectiveBalance := v.EffectiveBalance() >= params.BeaconConfig().MinActivationBalance
hasSufficientEffectiveBalance := v.EffectiveBalance() >= cfg.MinActivationBalance
var totalWithdrawn uint64
for _, wi := range ws {
if wi.ValidatorIndex == w.Index {
@@ -160,9 +171,9 @@ func (b *BeaconState) appendPendingPartialWithdrawals(withdrawalIndex uint64, wi
if err != nil {
return withdrawalIndex, 0, errors.Wrapf(err, "failed to subtract balance %d with total withdrawn %d", vBal, totalWithdrawn)
}
hasExcessBalance := balance > params.BeaconConfig().MinActivationBalance
if v.ExitEpoch() == params.BeaconConfig().FarFutureEpoch && hasSufficientEffectiveBalance && hasExcessBalance {
amount := min(balance-params.BeaconConfig().MinActivationBalance, w.Amount)
hasExcessBalance := balance > cfg.MinActivationBalance
if v.ExitEpoch() == cfg.FarFutureEpoch && hasSufficientEffectiveBalance && hasExcessBalance {
amount := min(balance-cfg.MinActivationBalance, w.Amount)
ws = append(ws, &enginev1.Withdrawal{
Index: withdrawalIndex,
ValidatorIndex: w.Index,
@@ -183,7 +194,7 @@ func (b *BeaconState) appendValidatorsSweepWithdrawals(withdrawalIndex uint64, w
validatorIndex := b.nextWithdrawalValidatorIndex
validatorsLen := b.validatorsLen()
epoch := slots.ToEpoch(b.slot)
bound := min(uint64(validatorsLen), params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep)
bound := min(validatorsLen, int(params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep))
for range bound {
val, err := b.validatorAtIndexReadOnly(validatorIndex)
if err != nil {
@@ -222,7 +233,7 @@ func (b *BeaconState) appendValidatorsSweepWithdrawals(withdrawalIndex uint64, w
})
withdrawalIndex++
}
if uint64(len(ws)) == params.BeaconConfig().MaxWithdrawalsPerPayload {
if len(ws) == int(params.BeaconConfig().MaxWithdrawalsPerPayload) {
break
}
validatorIndex += 1

View File

@@ -1,6 +1,7 @@
package state_native
import (
"errors"
"fmt"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native/types"
@@ -8,8 +9,10 @@ import (
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
pkgerrors "github.com/pkg/errors"
)
// RotateBuilderPendingPayments rotates the queue by dropping slots per epoch payments from the
@@ -82,20 +85,20 @@ func (b *BeaconState) SetExecutionPayloadBid(h interfaces.ROExecutionPayloadBid)
parentBlockRoot := h.ParentBlockRoot()
blockHash := h.BlockHash()
randao := h.PrevRandao()
blobKzgCommitments := h.BlobKzgCommitments()
blobKzgCommitmentsRoot := h.BlobKzgCommitmentsRoot()
feeRecipient := h.FeeRecipient()
b.latestExecutionPayloadBid = &ethpb.ExecutionPayloadBid{
ParentBlockHash: parentBlockHash[:],
ParentBlockRoot: parentBlockRoot[:],
BlockHash: blockHash[:],
PrevRandao: randao[:],
GasLimit: h.GasLimit(),
BuilderIndex: h.BuilderIndex(),
Slot: h.Slot(),
Value: h.Value(),
ExecutionPayment: h.ExecutionPayment(),
BlobKzgCommitments: blobKzgCommitments,
FeeRecipient: feeRecipient[:],
ParentBlockHash: parentBlockHash[:],
ParentBlockRoot: parentBlockRoot[:],
BlockHash: blockHash[:],
PrevRandao: randao[:],
GasLimit: h.GasLimit(),
BuilderIndex: h.BuilderIndex(),
Slot: h.Slot(),
Value: h.Value(),
ExecutionPayment: h.ExecutionPayment(),
BlobKzgCommitmentsRoot: blobKzgCommitmentsRoot[:],
FeeRecipient: feeRecipient[:],
}
b.markFieldAsDirty(types.LatestExecutionPayloadBid)
@@ -161,3 +164,154 @@ func (b *BeaconState) UpdateExecutionPayloadAvailabilityAtIndex(idx uint64, val
b.markFieldAsDirty(types.ExecutionPayloadAvailability)
return nil
}
// SetPayloadExpectedWithdrawals stores the expected withdrawals for the next payload.
func (b *BeaconState) SetPayloadExpectedWithdrawals(withdrawals []*enginev1.Withdrawal) error {
if b.version < version.Gloas {
return errNotSupported("SetPayloadExpectedWithdrawals", b.version)
}
b.lock.Lock()
defer b.lock.Unlock()
b.payloadExpectedWithdrawals = withdrawals
b.markFieldAsDirty(types.PayloadExpectedWithdrawals)
return nil
}
// DequeueBuilderPendingWithdrawals removes processed builder withdrawals from the front of the queue.
func (b *BeaconState) DequeueBuilderPendingWithdrawals(n uint64) error {
if b.version < version.Gloas {
return errNotSupported("DequeueBuilderPendingWithdrawals", b.version)
}
if n == 0 {
return nil
}
b.lock.Lock()
defer b.lock.Unlock()
if n > uint64(len(b.builderPendingWithdrawals)) {
return errors.New("cannot dequeue more builder withdrawals than are in the queue")
}
if b.sharedFieldReferences[types.BuilderPendingWithdrawals].Refs() > 1 {
withdrawals := make([]*ethpb.BuilderPendingWithdrawal, len(b.builderPendingWithdrawals))
copy(withdrawals, b.builderPendingWithdrawals)
b.builderPendingWithdrawals = withdrawals
b.sharedFieldReferences[types.BuilderPendingWithdrawals].MinusRef()
b.sharedFieldReferences[types.BuilderPendingWithdrawals] = stateutil.NewRef(1)
}
b.builderPendingWithdrawals = b.builderPendingWithdrawals[n:]
b.markFieldAsDirty(types.BuilderPendingWithdrawals)
b.rebuildTrie[types.BuilderPendingWithdrawals] = true
return nil
}
// SetNextWithdrawalBuilderIndex sets the next builder index for the withdrawals sweep.
func (b *BeaconState) SetNextWithdrawalBuilderIndex(index primitives.BuilderIndex) error {
if b.version < version.Gloas {
return errNotSupported("SetNextWithdrawalBuilderIndex", b.version)
}
b.lock.Lock()
defer b.lock.Unlock()
b.nextWithdrawalBuilderIndex = index
b.markFieldAsDirty(types.NextWithdrawalBuilderIndex)
return nil
}
// DecreaseWithdrawalBalances applies withdrawal balance decreases for validators and builders.
// This method holds the state lock for the full batch to avoid lock churn.
func (b *BeaconState) DecreaseWithdrawalBalances(withdrawals []*enginev1.Withdrawal) error {
if b.version < version.Gloas {
return errNotSupported("DecreaseWithdrawalBalances", b.version)
}
if len(withdrawals) == 0 {
return nil
}
b.lock.Lock()
defer b.lock.Unlock()
var (
balanceIndices []uint64
builderIndices []uint64
)
for _, withdrawal := range withdrawals {
if withdrawal == nil {
return errors.New("withdrawal is nil")
}
if withdrawal.Amount == 0 {
continue
}
if withdrawal.ValidatorIndex.IsBuilderIndex() {
builderIndex := withdrawal.ValidatorIndex.ToBuilderIndex()
if err := b.decreaseBuilderBalanceLockFree(builderIndex, withdrawal.Amount); err != nil {
return err
}
builderIndices = append(builderIndices, uint64(builderIndex))
continue
}
balAtIdx, err := b.balanceAtIndex(withdrawal.ValidatorIndex)
if err != nil {
return err
}
newBal := decreaseBalanceWithVal(balAtIdx, withdrawal.Amount)
if err := b.balancesMultiValue.UpdateAt(b, uint64(withdrawal.ValidatorIndex), newBal); err != nil {
return pkgerrors.Wrap(err, "could not update balances")
}
balanceIndices = append(balanceIndices, uint64(withdrawal.ValidatorIndex))
}
if len(balanceIndices) > 0 {
b.markFieldAsDirty(types.Balances)
b.addDirtyIndices(types.Balances, balanceIndices)
}
if len(builderIndices) > 0 {
b.markFieldAsDirty(types.Builders)
b.addDirtyIndices(types.Builders, builderIndices)
}
return nil
}
func (b *BeaconState) decreaseBuilderBalanceLockFree(builderIndex primitives.BuilderIndex, amount uint64) error {
idx := uint64(builderIndex)
if idx >= uint64(len(b.builders)) {
return fmt.Errorf("builder index %d out of range (len=%d)", builderIndex, len(b.builders))
}
if b.sharedFieldReferences[types.Builders].Refs() > 1 {
builders := make([]*ethpb.Builder, len(b.builders))
copy(builders, b.builders)
b.builders = builders
b.sharedFieldReferences[types.Builders].MinusRef()
b.sharedFieldReferences[types.Builders] = stateutil.NewRef(1)
}
builder := b.builders[idx]
bal := uint64(builder.Balance)
if amount >= bal {
builder.Balance = 0
} else {
builder.Balance = primitives.Gwei(bal - amount)
}
return nil
}
func decreaseBalanceWithVal(currBalance, delta uint64) uint64 {
if delta > currBalance {
return 0
}
return currBalance - delta
}

View File

@@ -8,23 +8,24 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stateutil"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/testing/require"
)
type testExecutionPayloadBid struct {
parentBlockHash [32]byte
parentBlockRoot [32]byte
blockHash [32]byte
prevRandao [32]byte
blobKzgCommitments [][]byte
feeRecipient [20]byte
gasLimit uint64
builderIndex primitives.BuilderIndex
slot primitives.Slot
value primitives.Gwei
executionPayment primitives.Gwei
parentBlockHash [32]byte
parentBlockRoot [32]byte
blockHash [32]byte
prevRandao [32]byte
blobKzgCommitmentsRoot [32]byte
feeRecipient [20]byte
gasLimit uint64
builderIndex primitives.BuilderIndex
slot primitives.Slot
value primitives.Gwei
executionPayment primitives.Gwei
}
func (t testExecutionPayloadBid) ParentBlockHash() [32]byte { return t.parentBlockHash }
@@ -40,12 +41,9 @@ func (t testExecutionPayloadBid) Value() primitives.Gwei { return t.value }
func (t testExecutionPayloadBid) ExecutionPayment() primitives.Gwei {
return t.executionPayment
}
func (t testExecutionPayloadBid) BlobKzgCommitments() [][]byte { return t.blobKzgCommitments }
func (t testExecutionPayloadBid) BlobKzgCommitmentCount() uint64 {
return uint64(len(t.blobKzgCommitments))
}
func (t testExecutionPayloadBid) FeeRecipient() [20]byte { return t.feeRecipient }
func (t testExecutionPayloadBid) IsNil() bool { return false }
func (t testExecutionPayloadBid) BlobKzgCommitmentsRoot() [32]byte { return t.blobKzgCommitmentsRoot }
func (t testExecutionPayloadBid) FeeRecipient() [20]byte { return t.feeRecipient }
func (t testExecutionPayloadBid) IsNil() bool { return false }
func TestSetExecutionPayloadBid(t *testing.T) {
t.Run("previous fork returns expected error", func(t *testing.T) {
@@ -60,7 +58,7 @@ func TestSetExecutionPayloadBid(t *testing.T) {
parentBlockRoot = [32]byte(bytes.Repeat([]byte{0xCD}, 32))
blockHash = [32]byte(bytes.Repeat([]byte{0xEF}, 32))
prevRandao = [32]byte(bytes.Repeat([]byte{0x11}, 32))
blobCommitments = [][]byte{bytes.Repeat([]byte{0x22}, 48)}
blobRoot = [32]byte(bytes.Repeat([]byte{0x22}, 32))
feeRecipient [20]byte
)
copy(feeRecipient[:], bytes.Repeat([]byte{0x33}, len(feeRecipient)))
@@ -69,17 +67,17 @@ func TestSetExecutionPayloadBid(t *testing.T) {
dirtyFields: make(map[types.FieldIndex]bool),
}
bid := testExecutionPayloadBid{
parentBlockHash: parentBlockHash,
parentBlockRoot: parentBlockRoot,
blockHash: blockHash,
prevRandao: prevRandao,
blobKzgCommitments: blobCommitments,
feeRecipient: feeRecipient,
gasLimit: 123,
builderIndex: 7,
slot: 9,
value: 11,
executionPayment: 22,
parentBlockHash: parentBlockHash,
parentBlockRoot: parentBlockRoot,
blockHash: blockHash,
prevRandao: prevRandao,
blobKzgCommitmentsRoot: blobRoot,
feeRecipient: feeRecipient,
gasLimit: 123,
builderIndex: 7,
slot: 9,
value: 11,
executionPayment: 22,
}
require.NoError(t, st.SetExecutionPayloadBid(bid))
@@ -89,7 +87,7 @@ func TestSetExecutionPayloadBid(t *testing.T) {
require.DeepEqual(t, parentBlockRoot[:], st.latestExecutionPayloadBid.ParentBlockRoot)
require.DeepEqual(t, blockHash[:], st.latestExecutionPayloadBid.BlockHash)
require.DeepEqual(t, prevRandao[:], st.latestExecutionPayloadBid.PrevRandao)
require.DeepEqual(t, blobCommitments, st.latestExecutionPayloadBid.BlobKzgCommitments)
require.DeepEqual(t, blobRoot[:], st.latestExecutionPayloadBid.BlobKzgCommitmentsRoot)
require.DeepEqual(t, feeRecipient[:], st.latestExecutionPayloadBid.FeeRecipient)
require.Equal(t, uint64(123), st.latestExecutionPayloadBid.GasLimit)
require.Equal(t, primitives.BuilderIndex(7), st.latestExecutionPayloadBid.BuilderIndex)
@@ -232,6 +230,235 @@ func TestRotateBuilderPendingPayments_UnsupportedVersion(t *testing.T) {
require.ErrorContains(t, "RotateBuilderPendingPayments", err)
}
func TestSetPayloadExpectedWithdrawals(t *testing.T) {
t.Run("previous fork returns expected error", func(t *testing.T) {
st := &BeaconState{version: version.Fulu}
err := st.SetPayloadExpectedWithdrawals([]*enginev1.Withdrawal{})
require.ErrorContains(t, "SetPayloadExpectedWithdrawals", err)
})
t.Run("allows nil input and marks dirty", func(t *testing.T) {
st := &BeaconState{
version: version.Gloas,
dirtyFields: make(map[types.FieldIndex]bool),
}
require.NoError(t, st.SetPayloadExpectedWithdrawals(nil))
require.Equal(t, true, st.payloadExpectedWithdrawals == nil)
require.Equal(t, true, st.dirtyFields[types.PayloadExpectedWithdrawals])
})
t.Run("sets and marks dirty", func(t *testing.T) {
st := &BeaconState{
version: version.Gloas,
dirtyFields: make(map[types.FieldIndex]bool),
payloadExpectedWithdrawals: []*enginev1.Withdrawal{{Index: 1}, {Index: 2}},
}
withdrawals := []*enginev1.Withdrawal{{Index: 3}}
require.NoError(t, st.SetPayloadExpectedWithdrawals(withdrawals))
require.DeepEqual(t, withdrawals, st.payloadExpectedWithdrawals)
require.Equal(t, true, st.dirtyFields[types.PayloadExpectedWithdrawals])
})
}
func TestDecreaseWithdrawalBalances(t *testing.T) {
t.Run("previous fork returns expected error", func(t *testing.T) {
st := &BeaconState{version: version.Fulu}
err := st.DecreaseWithdrawalBalances([]*enginev1.Withdrawal{{}})
require.ErrorContains(t, "DecreaseWithdrawalBalances", err)
})
t.Run("rejects nil withdrawal", func(t *testing.T) {
st := &BeaconState{version: version.Gloas}
err := st.DecreaseWithdrawalBalances([]*enginev1.Withdrawal{nil})
require.ErrorContains(t, "withdrawal is nil", err)
})
t.Run("no-op on empty input", func(t *testing.T) {
st := &BeaconState{
version: version.Gloas,
dirtyFields: make(map[types.FieldIndex]bool),
dirtyIndices: make(map[types.FieldIndex][]uint64),
rebuildTrie: make(map[types.FieldIndex]bool),
}
require.NoError(t, st.DecreaseWithdrawalBalances(nil))
require.Equal(t, 0, len(st.dirtyFields))
require.Equal(t, 0, len(st.dirtyIndices))
})
t.Run("updates validator and builder balances and tracks dirty indices", func(t *testing.T) {
st := &BeaconState{
version: version.Gloas,
dirtyFields: make(map[types.FieldIndex]bool),
dirtyIndices: make(map[types.FieldIndex][]uint64),
rebuildTrie: make(map[types.FieldIndex]bool),
sharedFieldReferences: map[types.FieldIndex]*stateutil.Reference{
types.Builders: stateutil.NewRef(1),
},
balancesMultiValue: NewMultiValueBalances([]uint64{100, 200, 300}),
builders: []*ethpb.Builder{
{Balance: 1000},
{Balance: 50},
},
}
withdrawals := []*enginev1.Withdrawal{
{ValidatorIndex: primitives.ValidatorIndex(1), Amount: 20},
{ValidatorIndex: primitives.BuilderIndex(1).ToValidatorIndex(), Amount: 30},
{ValidatorIndex: primitives.ValidatorIndex(2), Amount: 400},
{ValidatorIndex: primitives.BuilderIndex(0).ToValidatorIndex(), Amount: 2000},
{ValidatorIndex: primitives.ValidatorIndex(0), Amount: 0},
}
require.NoError(t, st.DecreaseWithdrawalBalances(withdrawals))
require.DeepEqual(t, []uint64{100, 180, 0}, st.Balances())
require.Equal(t, primitives.Gwei(0), st.builders[0].Balance)
require.Equal(t, primitives.Gwei(20), st.builders[1].Balance)
require.Equal(t, true, st.dirtyFields[types.Balances])
require.Equal(t, true, st.dirtyFields[types.Builders])
require.DeepEqual(t, []uint64{1, 2}, st.dirtyIndices[types.Balances])
require.DeepEqual(t, []uint64{1, 0}, st.dirtyIndices[types.Builders])
})
t.Run("returns error on builder index out of range", func(t *testing.T) {
st := &BeaconState{
version: version.Gloas,
dirtyFields: make(map[types.FieldIndex]bool),
dirtyIndices: make(map[types.FieldIndex][]uint64),
rebuildTrie: make(map[types.FieldIndex]bool),
sharedFieldReferences: map[types.FieldIndex]*stateutil.Reference{
types.Builders: stateutil.NewRef(1),
},
builders: []*ethpb.Builder{{Balance: 5}},
}
err := st.DecreaseWithdrawalBalances([]*enginev1.Withdrawal{
{ValidatorIndex: primitives.BuilderIndex(2).ToValidatorIndex(), Amount: 1},
})
require.ErrorContains(t, "out of range", err)
require.Equal(t, false, st.dirtyFields[types.Builders])
require.Equal(t, 0, len(st.dirtyIndices[types.Builders]))
})
}
func TestDequeueBuilderPendingWithdrawals(t *testing.T) {
t.Run("previous fork returns expected error", func(t *testing.T) {
st := &BeaconState{version: version.Fulu}
err := st.DequeueBuilderPendingWithdrawals(1)
require.ErrorContains(t, "DequeueBuilderPendingWithdrawals", err)
})
t.Run("returns error when dequeueing more than length", func(t *testing.T) {
st := &BeaconState{
version: version.Gloas,
dirtyFields: make(map[types.FieldIndex]bool),
sharedFieldReferences: map[types.FieldIndex]*stateutil.Reference{
types.BuilderPendingWithdrawals: stateutil.NewRef(1),
},
builderPendingWithdrawals: []*ethpb.BuilderPendingWithdrawal{{Amount: 1}},
}
err := st.DequeueBuilderPendingWithdrawals(2)
require.ErrorContains(t, "cannot dequeue more builder withdrawals", err)
require.Equal(t, 1, len(st.builderPendingWithdrawals))
require.Equal(t, false, st.dirtyFields[types.BuilderPendingWithdrawals])
})
t.Run("no-op on zero", func(t *testing.T) {
st := &BeaconState{
version: version.Gloas,
dirtyFields: make(map[types.FieldIndex]bool),
sharedFieldReferences: map[types.FieldIndex]*stateutil.Reference{
types.BuilderPendingWithdrawals: stateutil.NewRef(1),
},
builderPendingWithdrawals: []*ethpb.BuilderPendingWithdrawal{{Amount: 1}},
}
require.NoError(t, st.DequeueBuilderPendingWithdrawals(0))
require.Equal(t, 1, len(st.builderPendingWithdrawals))
require.Equal(t, false, st.dirtyFields[types.BuilderPendingWithdrawals])
require.Equal(t, false, st.rebuildTrie[types.BuilderPendingWithdrawals])
})
t.Run("dequeues and marks dirty", func(t *testing.T) {
st := &BeaconState{
version: version.Gloas,
dirtyFields: make(map[types.FieldIndex]bool),
sharedFieldReferences: map[types.FieldIndex]*stateutil.Reference{
types.BuilderPendingWithdrawals: stateutil.NewRef(1),
},
builderPendingWithdrawals: []*ethpb.BuilderPendingWithdrawal{
{Amount: 1},
{Amount: 2},
{Amount: 3},
},
rebuildTrie: make(map[types.FieldIndex]bool),
}
require.NoError(t, st.DequeueBuilderPendingWithdrawals(2))
require.Equal(t, 1, len(st.builderPendingWithdrawals))
require.Equal(t, primitives.Gwei(3), st.builderPendingWithdrawals[0].Amount)
require.Equal(t, true, st.dirtyFields[types.BuilderPendingWithdrawals])
require.Equal(t, true, st.rebuildTrie[types.BuilderPendingWithdrawals])
})
t.Run("copy-on-write preserves shared state", func(t *testing.T) {
sharedRef := stateutil.NewRef(2)
sharedWithdrawals := []*ethpb.BuilderPendingWithdrawal{
{Amount: 1},
{Amount: 2},
{Amount: 3},
}
st1 := &BeaconState{
version: version.Gloas,
dirtyFields: make(map[types.FieldIndex]bool),
sharedFieldReferences: map[types.FieldIndex]*stateutil.Reference{
types.BuilderPendingWithdrawals: sharedRef,
},
builderPendingWithdrawals: sharedWithdrawals,
rebuildTrie: make(map[types.FieldIndex]bool),
}
st2 := &BeaconState{
sharedFieldReferences: map[types.FieldIndex]*stateutil.Reference{
types.BuilderPendingWithdrawals: sharedRef,
},
builderPendingWithdrawals: sharedWithdrawals,
}
require.NoError(t, st1.DequeueBuilderPendingWithdrawals(2))
require.Equal(t, primitives.Gwei(3), st1.builderPendingWithdrawals[0].Amount)
require.Equal(t, 3, len(st2.builderPendingWithdrawals))
require.Equal(t, primitives.Gwei(1), st2.builderPendingWithdrawals[0].Amount)
require.Equal(t, uint(1), st1.sharedFieldReferences[types.BuilderPendingWithdrawals].Refs())
require.Equal(t, uint(1), st2.sharedFieldReferences[types.BuilderPendingWithdrawals].Refs())
})
}
func TestSetNextWithdrawalBuilderIndex(t *testing.T) {
t.Run("previous fork returns expected error", func(t *testing.T) {
st := &BeaconState{version: version.Fulu}
err := st.SetNextWithdrawalBuilderIndex(1)
require.ErrorContains(t, "SetNextWithdrawalBuilderIndex", err)
})
t.Run("sets and marks dirty", func(t *testing.T) {
st := &BeaconState{
version: version.Gloas,
dirtyFields: make(map[types.FieldIndex]bool),
}
require.NoError(t, st.SetNextWithdrawalBuilderIndex(7))
require.Equal(t, primitives.BuilderIndex(7), st.nextWithdrawalBuilderIndex)
require.Equal(t, true, st.dirtyFields[types.NextWithdrawalBuilderIndex])
})
}
func TestAppendBuilderPendingWithdrawal_CopyOnWrite(t *testing.T) {
wd := &ethpb.BuilderPendingWithdrawal{
FeeRecipient: make([]byte, 20),

View File

@@ -1027,10 +1027,10 @@ func TestGetVerifyingStateEdgeCases(t *testing.T) {
sc: signatureCache,
sr: &mockStateByRooter{sbr: sbrErrorIfCalled(t)}, // Should not be called
hsp: &mockHeadStateProvider{
headRoot: parentRoot[:], // Same as parent
headSlot: 32, // Epoch 1
headState: fuluState.Copy(), // HeadState (not ReadOnly) for ProcessSlots
headStateReadOnly: nil, // Should not use ReadOnly path
},
fc: &mockForkchoicer{
// Return same root for both to simulate same chain
@@ -1045,8 +1045,8 @@ func TestGetVerifyingStateEdgeCases(t *testing.T) {
// Wrap to detect HeadState call
originalHsp := initializer.shared.hsp.(*mockHeadStateProvider)
wrappedHsp := &mockHeadStateProvider{
headRoot: originalHsp.headRoot,
headSlot: originalHsp.headSlot,
headState: originalHsp.headState,
}
initializer.shared.hsp = &headStateCallTracker{

View File

@@ -1,3 +0,0 @@
### Added
- Set beacon node options after reading the config file.

View File

@@ -1,3 +0,0 @@
### Fixed
- Fixed a bug where `cmd/beacon-chain/execution` was being ignored by `hack/gen-logs.sh` due to a `.gitignore` rule.

View File

@@ -1,3 +0,0 @@
### Changed
- Fixed the logging issue described in #16314.

View File

@@ -1,7 +0,0 @@
### Changed
- gRPC fallback now matches rest api implementation and will also check and connect to only synced nodes.
### Removed
- gRPC resolver for load balancing, the new implementation matches rest api's so we should remove the resolver so it's handled the same way for consistency.

View File

@@ -1,11 +0,0 @@
### Ignored
- moved finding healthy node logic to connection provider and other various cleanup on naming.
### Changed
- Improved node fallback logs.
### Fixed
- a potential race condition when switching hosts quickly and reconnecting to same host on an old connection.

View File

@@ -1,3 +0,0 @@
### Changed
- Improved integrations with ethspecify so specrefs can be used throughout the codebase.

View File

@@ -1,2 +0,0 @@
### Ignored
- Remove unused `HighestBlockDelay` method in forkchoice.

View File

@@ -1,2 +0,0 @@
### Ignored
- Remove unused method in forkchoice.

View File

@@ -1,2 +0,0 @@
### Ignored
- Remove unused map in forkchoice.

View File

@@ -1,3 +0,0 @@
### Ignored
- Updated golangci to run lint on tests too.

View File

@@ -1,3 +0,0 @@
### Ignored
- Add handy documentation for SSZ Query package (`encoding/ssz/query`).

View File

@@ -1,3 +0,0 @@
### Changed
- Moved blob KZG commitments into `ExecutionPayloadBid` and removed them from `ExecutionPayloadEnvelope` for Gloas.

View File

@@ -0,0 +1,2 @@
### Added
- Add modified process withdrawals for Gloas.

View File

@@ -1,3 +0,0 @@
### Added
- Add read only wrapper for execution payload envelope for gloas

View File

@@ -1,2 +0,0 @@
### Ignored
- Run go fmt

View File

@@ -1,9 +1,5 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package execution
import "github.com/sirupsen/logrus"
// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "cmd/beacon-chain/execution")
var log = logrus.WithField("prefix", "execution")

View File

@@ -188,8 +188,8 @@ func before(ctx *cli.Context) error {
return errors.Wrap(err, "failed to parse log vmodule")
}
// set the global logging level and data
logs.SetLoggingLevelAndData(verbosityLevel, vmodule, maxLevel, ctx.Bool(flags.DisableEphemeralLogFile.Name))
// set the global logging level to allow for the highest verbosity requested
logs.SetLoggingLevel(max(verbosityLevel, maxLevel))
format := ctx.String(cmd.LogFormat.Name)
switch format {
@@ -210,7 +210,6 @@ func before(ctx *cli.Context) error {
Formatter: formatter,
Writer: os.Stderr,
AllowedLevels: logrus.AllLevels[:max(verbosityLevel, maxLevel)+1],
Identifier: logs.LogTargetUser,
})
case "fluentd":
f := joonix.NewFormatter()
@@ -368,8 +367,17 @@ func startNode(ctx *cli.Context, cancel context.CancelFunc) error {
backfill.BeaconNodeOptions,
das.BeaconNodeOptions,
}
for _, of := range optFuncs {
ofo, err := of(ctx)
if err != nil {
return err
}
if ofo != nil {
opts = append(opts, ofo...)
}
}
beacon, err := node.New(ctx, cancel, optFuncs, opts...)
beacon, err := node.New(ctx, cancel, opts...)
if err != nil {
return fmt.Errorf("unable to start beacon node: %w", err)
}

View File

@@ -164,8 +164,8 @@ func main() {
return errors.Wrap(err, "failed to parse log vmodule")
}
// set the global logging level and data
logs.SetLoggingLevelAndData(verbosityLevel, vmodule, maxLevel, ctx.Bool(flags.DisableEphemeralLogFile.Name))
// set the global logging level to allow for the highest verbosity requested
logs.SetLoggingLevel(max(maxLevel, verbosityLevel))
logFileName := ctx.String(cmd.LogFileName.Name)
@@ -188,7 +188,6 @@ func main() {
Formatter: formatter,
Writer: os.Stderr,
AllowedLevels: logrus.AllLevels[:max(verbosityLevel, maxLevel)+1],
Identifier: logs.LogTargetUser,
})
case "fluentd":
f := joonix.NewFormatter()

View File

@@ -130,6 +130,7 @@ type BeaconChainConfig struct {
MaxWithdrawalsPerPayload uint64 `yaml:"MAX_WITHDRAWALS_PER_PAYLOAD" spec:"true"` // MaxWithdrawalsPerPayload defines the maximum number of withdrawals in a block.
MaxBlsToExecutionChanges uint64 `yaml:"MAX_BLS_TO_EXECUTION_CHANGES" spec:"true"` // MaxBlsToExecutionChanges defines the maximum number of BLS-to-execution-change objects in a block.
MaxValidatorsPerWithdrawalsSweep uint64 `yaml:"MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP" spec:"true"` // MaxValidatorsPerWithdrawalsSweep bounds the size of the sweep searching for withdrawals per slot.
MaxBuildersPerWithdrawalsSweep uint64 `yaml:"MAX_BUILDERS_PER_WITHDRAWALS_SWEEP" spec:"true"` // MaxBuildersPerWithdrawalsSweep bounds the size of the builder withdrawals sweep per slot.
// BLS domain values.
DomainBeaconProposer [4]byte `yaml:"DOMAIN_BEACON_PROPOSER" spec:"true"` // DomainBeaconProposer defines the BLS signature domain for beacon proposal verification.

View File

@@ -194,6 +194,7 @@ func ConfigToYaml(cfg *BeaconChainConfig) []byte {
fmt.Sprintf("SHARD_COMMITTEE_PERIOD: %d", cfg.ShardCommitteePeriod),
fmt.Sprintf("MIN_VALIDATOR_WITHDRAWABILITY_DELAY: %d", cfg.MinValidatorWithdrawabilityDelay),
fmt.Sprintf("MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP: %d", cfg.MaxValidatorsPerWithdrawalsSweep),
fmt.Sprintf("MAX_BUILDERS_PER_WITHDRAWALS_SWEEP: %d", cfg.MaxBuildersPerWithdrawalsSweep),
fmt.Sprintf("MAX_SEED_LOOKAHEAD: %d", cfg.MaxSeedLookahead),
fmt.Sprintf("EJECTION_BALANCE: %d", cfg.EjectionBalance),
fmt.Sprintf("MIN_PER_EPOCH_CHURN_LIMIT: %d", cfg.MinPerEpochChurnLimit),

View File

@@ -175,6 +175,7 @@ var mainnetBeaconConfig = &BeaconChainConfig{
MaxWithdrawalsPerPayload: 16,
MaxBlsToExecutionChanges: 16,
MaxValidatorsPerWithdrawalsSweep: 16384,
MaxBuildersPerWithdrawalsSweep: 16384,
// BLS domain values.
DomainBeaconProposer: bytesutil.Uint32ToBytes4(0x00000000),

View File

@@ -94,6 +94,7 @@ func compareConfigs(t *testing.T, expected, actual *params.BeaconChainConfig) {
require.DeepEqual(t, expected.MaxDeposits, actual.MaxDeposits)
require.DeepEqual(t, expected.MaxVoluntaryExits, actual.MaxVoluntaryExits)
require.DeepEqual(t, expected.MaxWithdrawalsPerPayload, actual.MaxWithdrawalsPerPayload)
require.DeepEqual(t, expected.MaxBuildersPerWithdrawalsSweep, actual.MaxBuildersPerWithdrawalsSweep)
require.DeepEqual(t, expected.DomainBeaconProposer, actual.DomainBeaconProposer)
require.DeepEqual(t, expected.DomainRandao, actual.DomainRandao)
require.DeepEqual(t, expected.DomainBeaconAttester, actual.DomainBeaconAttester)

View File

@@ -4,7 +4,6 @@ go_library(
name = "go_default_library",
srcs = [
"execution.go",
"execution_payload_envelope.go",
"factory.go",
"get_payload.go",
"getters.go",
@@ -46,7 +45,6 @@ go_library(
go_test(
name = "go_default_test",
srcs = [
"execution_payload_envelope_test.go",
"execution_test.go",
"factory_test.go",
"getters_test.go",

View File

@@ -1,119 +0,0 @@
package blocks
import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
field_params "github.com/OffchainLabs/prysm/v7/config/fieldparams"
consensus_types "github.com/OffchainLabs/prysm/v7/consensus-types"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"google.golang.org/protobuf/proto"
)
type signedExecutionPayloadEnvelope struct {
s *ethpb.SignedExecutionPayloadEnvelope
}
type executionPayloadEnvelope struct {
p *ethpb.ExecutionPayloadEnvelope
}
// WrappedROSignedExecutionPayloadEnvelope wraps a signed execution payload envelope proto in a read-only interface.
func WrappedROSignedExecutionPayloadEnvelope(s *ethpb.SignedExecutionPayloadEnvelope) (interfaces.ROSignedExecutionPayloadEnvelope, error) {
w := signedExecutionPayloadEnvelope{s: s}
if w.IsNil() {
return nil, consensus_types.ErrNilObjectWrapped
}
return w, nil
}
// WrappedROExecutionPayloadEnvelope wraps an execution payload envelope proto in a read-only interface.
func WrappedROExecutionPayloadEnvelope(p *ethpb.ExecutionPayloadEnvelope) (interfaces.ROExecutionPayloadEnvelope, error) {
w := &executionPayloadEnvelope{p: p}
if w.IsNil() {
return nil, consensus_types.ErrNilObjectWrapped
}
return w, nil
}
// Envelope returns the execution payload envelope as a read-only interface.
func (s signedExecutionPayloadEnvelope) Envelope() (interfaces.ROExecutionPayloadEnvelope, error) {
return WrappedROExecutionPayloadEnvelope(s.s.Message)
}
// Signature returns the BLS signature as a 96-byte array.
func (s signedExecutionPayloadEnvelope) Signature() [field_params.BLSSignatureLength]byte {
return [field_params.BLSSignatureLength]byte(s.s.Signature)
}
// IsNil reports whether the signed envelope or its contents are invalid.
func (s signedExecutionPayloadEnvelope) IsNil() bool {
if s.s == nil {
return true
}
if len(s.s.Signature) != field_params.BLSSignatureLength {
return true
}
w := executionPayloadEnvelope{p: s.s.Message}
return w.IsNil()
}
// SigningRoot computes the signing root for the signed envelope with the provided domain.
func (s signedExecutionPayloadEnvelope) SigningRoot(domain []byte) (root [32]byte, err error) {
return signing.ComputeSigningRoot(s.s.Message, domain)
}
// Proto returns the underlying protobuf message.
func (s signedExecutionPayloadEnvelope) Proto() proto.Message {
return s.s
}
// IsNil reports whether the envelope or its required fields are invalid.
func (p *executionPayloadEnvelope) IsNil() bool {
if p.p == nil {
return true
}
if p.p.Payload == nil {
return true
}
if len(p.p.BeaconBlockRoot) != field_params.RootLength {
return true
}
return false
}
// IsBlinded reports whether the envelope contains a blinded payload.
func (p *executionPayloadEnvelope) IsBlinded() bool {
return !p.IsNil() && p.p.Payload == nil
}
// Execution returns the execution payload as a read-only interface.
func (p *executionPayloadEnvelope) Execution() (interfaces.ExecutionData, error) {
return WrappedExecutionPayloadDeneb(p.p.Payload)
}
// ExecutionRequests returns the execution requests attached to the envelope.
func (p *executionPayloadEnvelope) ExecutionRequests() *enginev1.ExecutionRequests {
return p.p.ExecutionRequests
}
// BuilderIndex returns the proposer/builder index for the envelope.
func (p *executionPayloadEnvelope) BuilderIndex() primitives.BuilderIndex {
return p.p.BuilderIndex
}
// BeaconBlockRoot returns the beacon block root referenced by the envelope.
func (p *executionPayloadEnvelope) BeaconBlockRoot() [field_params.RootLength]byte {
return [field_params.RootLength]byte(p.p.BeaconBlockRoot)
}
// Slot returns the slot of the envelope.
func (p *executionPayloadEnvelope) Slot() primitives.Slot {
return p.p.Slot
}
// StateRoot returns the state root carried by the envelope.
func (p *executionPayloadEnvelope) StateRoot() [field_params.RootLength]byte {
return [field_params.RootLength]byte(p.p.StateRoot)
}

View File

@@ -1,124 +0,0 @@
package blocks_test
import (
"bytes"
"testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
consensus_types "github.com/OffchainLabs/prysm/v7/consensus-types"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/assert"
"github.com/OffchainLabs/prysm/v7/testing/require"
)
func validExecutionPayloadEnvelope() *ethpb.ExecutionPayloadEnvelope {
payload := &enginev1.ExecutionPayloadDeneb{
ParentHash: bytes.Repeat([]byte{0x01}, 32),
FeeRecipient: bytes.Repeat([]byte{0x02}, 20),
StateRoot: bytes.Repeat([]byte{0x03}, 32),
ReceiptsRoot: bytes.Repeat([]byte{0x04}, 32),
LogsBloom: bytes.Repeat([]byte{0x05}, 256),
PrevRandao: bytes.Repeat([]byte{0x06}, 32),
BlockNumber: 1,
GasLimit: 2,
GasUsed: 3,
Timestamp: 4,
BaseFeePerGas: bytes.Repeat([]byte{0x07}, 32),
BlockHash: bytes.Repeat([]byte{0x08}, 32),
Transactions: [][]byte{},
Withdrawals: []*enginev1.Withdrawal{},
BlobGasUsed: 0,
ExcessBlobGas: 0,
}
return &ethpb.ExecutionPayloadEnvelope{
Payload: payload,
ExecutionRequests: &enginev1.ExecutionRequests{},
BuilderIndex: 10,
BeaconBlockRoot: bytes.Repeat([]byte{0xAA}, 32),
Slot: 9,
StateRoot: bytes.Repeat([]byte{0xBB}, 32),
}
}
func TestWrappedROExecutionPayloadEnvelope(t *testing.T) {
t.Run("returns error on nil payload", func(t *testing.T) {
invalid := validExecutionPayloadEnvelope()
invalid.Payload = nil
_, err := blocks.WrappedROExecutionPayloadEnvelope(invalid)
require.Equal(t, consensus_types.ErrNilObjectWrapped, err)
})
t.Run("returns error on invalid beacon root length", func(t *testing.T) {
invalid := validExecutionPayloadEnvelope()
invalid.BeaconBlockRoot = []byte{0x01}
_, err := blocks.WrappedROExecutionPayloadEnvelope(invalid)
require.Equal(t, consensus_types.ErrNilObjectWrapped, err)
})
t.Run("wraps and exposes fields", func(t *testing.T) {
env := validExecutionPayloadEnvelope()
wrapped, err := blocks.WrappedROExecutionPayloadEnvelope(env)
require.NoError(t, err)
require.Equal(t, primitives.BuilderIndex(10), wrapped.BuilderIndex())
require.Equal(t, primitives.Slot(9), wrapped.Slot())
assert.DeepEqual(t, [32]byte(bytes.Repeat([]byte{0xAA}, 32)), wrapped.BeaconBlockRoot())
assert.DeepEqual(t, [32]byte(bytes.Repeat([]byte{0xBB}, 32)), wrapped.StateRoot())
reqs := wrapped.ExecutionRequests()
require.NotNil(t, reqs)
exec, err := wrapped.Execution()
require.NoError(t, err)
assert.DeepEqual(t, env.Payload.ParentHash, exec.ParentHash())
require.Equal(t, false, wrapped.IsBlinded())
})
}
func TestWrappedROSignedExecutionPayloadEnvelope(t *testing.T) {
t.Run("returns error for invalid signature length", func(t *testing.T) {
signed := &ethpb.SignedExecutionPayloadEnvelope{
Message: validExecutionPayloadEnvelope(),
Signature: bytes.Repeat([]byte{0xAA}, 95),
}
_, err := blocks.WrappedROSignedExecutionPayloadEnvelope(signed)
require.Equal(t, consensus_types.ErrNilObjectWrapped, err)
})
t.Run("returns error on nil envelope", func(t *testing.T) {
_, err := blocks.WrappedROSignedExecutionPayloadEnvelope(nil)
require.Equal(t, consensus_types.ErrNilObjectWrapped, err)
})
t.Run("wraps and provides envelope/signing data", func(t *testing.T) {
sig := bytes.Repeat([]byte{0xAB}, 96)
signed := &ethpb.SignedExecutionPayloadEnvelope{
Message: validExecutionPayloadEnvelope(),
Signature: sig,
}
wrapped, err := blocks.WrappedROSignedExecutionPayloadEnvelope(signed)
require.NoError(t, err)
gotSig := wrapped.Signature()
assert.DeepEqual(t, [96]byte(sig), gotSig)
env, err := wrapped.Envelope()
require.NoError(t, err)
assert.DeepEqual(t, [32]byte(bytes.Repeat([]byte{0xAA}, 32)), env.BeaconBlockRoot())
domain := bytes.Repeat([]byte{0xCC}, 32)
wantRoot, err := signing.ComputeSigningRoot(signed.Message, domain)
require.NoError(t, err)
gotRoot, err := wrapped.SigningRoot(domain)
require.NoError(t, err)
require.Equal(t, wantRoot, gotRoot)
require.Equal(t, signed, wrapped.Proto())
})
}

View File

@@ -671,7 +671,7 @@ func hydrateBeaconBlockBodyGloas() *eth.BeaconBlockBodyGloas {
BlockHash: make([]byte, fieldparams.RootLength),
PrevRandao: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, 20),
BlobKzgCommitments: [][]byte{make([]byte, fieldparams.BLSPubkeyLength)},
BlobKzgCommitmentsRoot: make([]byte, fieldparams.RootLength),
},
Signature: make([]byte, fieldparams.BLSSignatureLength),
},

View File

@@ -5,7 +5,6 @@ import (
consensus_types "github.com/OffchainLabs/prysm/v7/consensus-types"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)
@@ -44,16 +43,11 @@ func (h executionPayloadBidGloas) IsNil() bool {
len(h.payload.ParentBlockRoot) != 32 ||
len(h.payload.BlockHash) != 32 ||
len(h.payload.PrevRandao) != 32 ||
len(h.payload.BlobKzgCommitmentsRoot) != 32 ||
len(h.payload.FeeRecipient) != 20 {
return true
}
for _, commitment := range h.payload.BlobKzgCommitments {
if len(commitment) != 48 {
return true
}
}
return false
}
@@ -137,14 +131,9 @@ func (h executionPayloadBidGloas) ExecutionPayment() primitives.Gwei {
return primitives.Gwei(h.payload.ExecutionPayment)
}
// BlobKzgCommitments returns the KZG commitments for blobs.
func (h executionPayloadBidGloas) BlobKzgCommitments() [][]byte {
return bytesutil.SafeCopy2dBytes(h.payload.BlobKzgCommitments)
}
// BlobKzgCommitmentCount returns the number of blob KZG commitments.
func (h executionPayloadBidGloas) BlobKzgCommitmentCount() uint64 {
return uint64(len(h.payload.BlobKzgCommitments))
// BlobKzgCommitmentsRoot returns the root of the KZG commitments for blobs.
func (h executionPayloadBidGloas) BlobKzgCommitmentsRoot() [32]byte {
return [32]byte(h.payload.BlobKzgCommitmentsRoot)
}
// FeeRecipient returns the execution address that will receive the builder payment.

View File

@@ -15,17 +15,17 @@ import (
func validExecutionPayloadBid() *ethpb.ExecutionPayloadBid {
return &ethpb.ExecutionPayloadBid{
ParentBlockHash: bytes.Repeat([]byte{0x01}, 32),
ParentBlockRoot: bytes.Repeat([]byte{0x02}, 32),
BlockHash: bytes.Repeat([]byte{0x03}, 32),
PrevRandao: bytes.Repeat([]byte{0x04}, 32),
GasLimit: 123,
BuilderIndex: 5,
Slot: 6,
Value: 7,
ExecutionPayment: 8,
BlobKzgCommitments: [][]byte{bytes.Repeat([]byte{0x05}, 48)},
FeeRecipient: bytes.Repeat([]byte{0x06}, 20),
ParentBlockHash: bytes.Repeat([]byte{0x01}, 32),
ParentBlockRoot: bytes.Repeat([]byte{0x02}, 32),
BlockHash: bytes.Repeat([]byte{0x03}, 32),
PrevRandao: bytes.Repeat([]byte{0x04}, 32),
GasLimit: 123,
BuilderIndex: 5,
Slot: 6,
Value: 7,
ExecutionPayment: 8,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x05}, 32),
FeeRecipient: bytes.Repeat([]byte{0x06}, 20),
}
}
@@ -52,8 +52,8 @@ func TestWrappedROExecutionPayloadBid(t *testing.T) {
mutate: func(b *ethpb.ExecutionPayloadBid) { b.PrevRandao = []byte{0x04} },
},
{
name: "blob kzg commitments length",
mutate: func(b *ethpb.ExecutionPayloadBid) { b.BlobKzgCommitments = [][]byte{[]byte{0x05}} },
name: "blob kzg commitments root",
mutate: func(b *ethpb.ExecutionPayloadBid) { b.BlobKzgCommitmentsRoot = []byte{0x05} },
},
{
name: "fee recipient",
@@ -85,8 +85,7 @@ func TestWrappedROExecutionPayloadBid(t *testing.T) {
assert.DeepEqual(t, [32]byte(bytes.Repeat([]byte{0x02}, 32)), wrapped.ParentBlockRoot())
assert.DeepEqual(t, [32]byte(bytes.Repeat([]byte{0x03}, 32)), wrapped.BlockHash())
assert.DeepEqual(t, [32]byte(bytes.Repeat([]byte{0x04}, 32)), wrapped.PrevRandao())
assert.DeepEqual(t, [][]byte{bytes.Repeat([]byte{0x05}, 48)}, wrapped.BlobKzgCommitments())
require.Equal(t, uint64(1), wrapped.BlobKzgCommitmentCount())
assert.DeepEqual(t, [32]byte(bytes.Repeat([]byte{0x05}, 32)), wrapped.BlobKzgCommitmentsRoot())
assert.DeepEqual(t, [20]byte(bytes.Repeat([]byte{0x06}, 20)), wrapped.FeeRecipient())
})
}

View File

@@ -5,7 +5,6 @@ go_library(
srcs = [
"beacon_block.go",
"error.go",
"execution_payload_envelope.go",
"light_client.go",
"signed_execution_payload_bid.go",
"utils.go",

View File

@@ -1,27 +0,0 @@
package interfaces
import (
field_params "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
"google.golang.org/protobuf/proto"
)
type ROSignedExecutionPayloadEnvelope interface {
Envelope() (ROExecutionPayloadEnvelope, error)
Signature() [field_params.BLSSignatureLength]byte
SigningRoot([]byte) ([32]byte, error)
IsNil() bool
Proto() proto.Message
}
type ROExecutionPayloadEnvelope interface {
Execution() (ExecutionData, error)
ExecutionRequests() *enginev1.ExecutionRequests
BuilderIndex() primitives.BuilderIndex
BeaconBlockRoot() [field_params.RootLength]byte
Slot() primitives.Slot
StateRoot() [field_params.RootLength]byte
IsBlinded() bool
IsNil() bool
}

View File

@@ -22,8 +22,7 @@ type ROExecutionPayloadBid interface {
Slot() primitives.Slot
Value() primitives.Gwei
ExecutionPayment() primitives.Gwei
BlobKzgCommitments() [][]byte
BlobKzgCommitmentCount() uint64
BlobKzgCommitmentsRoot() [32]byte
FeeRecipient() [20]byte
IsNil() bool
}

View File

@@ -13,6 +13,16 @@ var _ fssz.Unmarshaler = (*BuilderIndex)(nil)
// BuilderIndex is an index into the builder registry.
type BuilderIndex uint64
// ToValidatorIndex sets the builder flag on a builder index.
//
// Spec v1.6.1 (pseudocode):
// def convert_builder_index_to_validator_index(builder_index: BuilderIndex) -> ValidatorIndex:
//
// return ValidatorIndex(builder_index | BUILDER_INDEX_FLAG)
func (b BuilderIndex) ToValidatorIndex() ValidatorIndex {
return ValidatorIndex(uint64(b) | BuilderIndexFlag)
}
// HashTreeRoot returns the SSZ hash tree root of the index.
func (b BuilderIndex) HashTreeRoot() ([32]byte, error) {
return fssz.HashWithDefaultHasher(b)

View File

@@ -10,9 +10,34 @@ var _ fssz.HashRoot = (ValidatorIndex)(0)
var _ fssz.Marshaler = (*ValidatorIndex)(nil)
var _ fssz.Unmarshaler = (*ValidatorIndex)(nil)
// BuilderIndexFlag marks a ValidatorIndex as a BuilderIndex when the bit is set.
//
// Spec v1.6.1: BUILDER_INDEX_FLAG.
const BuilderIndexFlag uint64 = 1 << 40
// ValidatorIndex in eth2.
type ValidatorIndex uint64
// IsBuilderIndex returns true when the BuilderIndex flag is set on the validator index.
//
// Spec v1.6.1 (pseudocode):
// def is_builder_index(validator_index: ValidatorIndex) -> bool:
//
// return (validator_index & BUILDER_INDEX_FLAG) != 0
func (v ValidatorIndex) IsBuilderIndex() bool {
return uint64(v)&BuilderIndexFlag != 0
}
// ToBuilderIndex strips the builder flag from a validator index.
//
// Spec v1.6.1 (pseudocode):
// def convert_validator_index_to_builder_index(validator_index: ValidatorIndex) -> BuilderIndex:
//
// return BuilderIndex(validator_index & ~BUILDER_INDEX_FLAG)
func (v ValidatorIndex) ToBuilderIndex() BuilderIndex {
return BuilderIndex(uint64(v) & ^BuilderIndexFlag)
}
// Div divides validator index by x.
// This method panics if dividing by zero!
func (v ValidatorIndex) Div(x uint64) ValidatorIndex {

View File

@@ -33,3 +33,32 @@ func TestValidatorIndex_Casting(t *testing.T) {
}
})
}
func TestValidatorIndex_BuilderIndexFlagConversions(t *testing.T) {
base := uint64(42)
unflagged := ValidatorIndex(base)
if unflagged.IsBuilderIndex() {
t.Fatalf("expected unflagged validator index to not be a builder index")
}
if got, want := unflagged.ToBuilderIndex(), BuilderIndex(base); got != want {
t.Fatalf("unexpected builder index: got %d want %d", got, want)
}
flagged := ValidatorIndex(base | BuilderIndexFlag)
if !flagged.IsBuilderIndex() {
t.Fatalf("expected flagged validator index to be a builder index")
}
if got, want := flagged.ToBuilderIndex(), BuilderIndex(base); got != want {
t.Fatalf("unexpected builder index: got %d want %d", got, want)
}
builder := BuilderIndex(base)
roundTrip := builder.ToValidatorIndex()
if !roundTrip.IsBuilderIndex() {
t.Fatalf("expected round-tripped validator index to be a builder index")
}
if got, want := roundTrip.ToBuilderIndex(), builder; got != want {
t.Fatalf("unexpected round-trip builder index: got %d want %d", got, want)
}
}

View File

@@ -1,190 +0,0 @@
# SSZ Query Package
The `encoding/ssz/query` package provides a system for analyzing and querying SSZ ([Simple Serialize](https://github.com/ethereum/consensus-specs/blob/master/ssz/simple-serialize.md)) data structures, as well as generating Merkle proofs from them. It enables runtime analysis of SSZ-serialized Go objects with reflection, path-based queries through nested structures, generalized index calculation, and Merkle proof generation.
This package is designed to be generic. It operates on arbitrary SSZ-serialized Go values at runtime, so the same query/proof machinery applies equally to any SSZ type, including the BeaconState/BeaconBlock.
## Usage Example
```go
// 1. Analyze an SSZ object
block := &ethpb.BeaconBlock{...}
info, err := query.AnalyzeObject(block)
// 2. Parse a path
path, err := query.ParsePath(".body.attestations[0].data.slot")
// 3. Get the generalized index
gindex, err := query.GetGeneralizedIndexFromPath(info, path)
// 4. Generate a Merkle proof
proof, err := info.Prove(gindex)
// 5. Get offset and length to slice the SSZ-encoded bytes
sszBytes, _ := block.MarshalSSZ()
_, offset, length, err := query.CalculateOffsetAndLength(info, path)
// slotBytes contains the SSZ-encoded value at the queried path
slotBytes := sszBytes[offset : offset+length]
```
## Exported API
The main exported API consists of:
```go
// AnalyzeObject analyzes an SSZ object and returns its structural information
func AnalyzeObject(obj SSZObject) (*SszInfo, error)
// ParsePath parses a path string like ".field1.field2[0].field3"
func ParsePath(rawPath string) (Path, error)
// CalculateOffsetAndLength computes byte offset and length for a path within an SSZ object
func CalculateOffsetAndLength(sszInfo *SszInfo, path Path) (*SszInfo, uint64, uint64, error)
// GetGeneralizedIndexFromPath calculates the generalized index for a given path
func GetGeneralizedIndexFromPath(info *SszInfo, path Path) (uint64, error)
// Prove generates a Merkle proof for a target generalized index
func (s *SszInfo) Prove(gindex uint64) (*fastssz.Proof, error)
```
## Type System
### SSZ Types
The package now supports [all standard SSZ types](https://github.com/ethereum/consensus-specs/blob/master/ssz/simple-serialize.md#typing) except `ProgressiveList`, `ProgressiveContainer`, `ProgressiveBitlist`, `Union`, and `CompatibleUnion`.
### Core Data Structures
#### `SszInfo`
The `SszInfo` structure contains complete structural metadata for an SSZ type:
```go
type SszInfo struct {
sszType SszType // SSZ Type classification
typ reflect.Type // Go reflect.Type
source SSZObject // Original SSZObject reference. Mostly used for reusing SSZ methods like `HashTreeRoot`.
isVariable bool // True if contains variable-size fields
// Composite types have corresponding metadata. Other fields would be nil except for the current type.
containerInfo *containerInfo
listInfo *listInfo
vectorInfo *vectorInfo
bitlistInfo *bitlistInfo
bitvectorInfo *bitvectorInfo
}
```
#### `Path`
The `Path` structure represents navigation paths through SSZ structures. It supports accessing a field by field name, accessing an element by index (list/vector type), and finding the length of homogeneous collection types. The `ParsePath` function parses a raw string into a `Path` instance, which is commonly used in other APIs like `CalculateOffsetAndLength` and `GetGeneralizedIndexFromPath`. A small length-query sketch follows the struct definitions below.
```go
type Path struct {
Length bool // Flag for length queries (e.g., len(.field))
Elements []PathElement // Sequence of field accesses and indices
}
type PathElement struct {
Name string // Field name
Index *uint64 // list/vector index (nil if not an index access)
}
```
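For example, a length query presumably parses into a `Path` with the `Length` flag set and a single field element; the field name below is hypothetical and only illustrates the shape:
```go
// Hypothetical length query; "validators" is an example field name.
path, _ := ParsePath("len(.validators)")
// Expected shape (illustrative):
// Path{
//     Length:   true,
//     Elements: []PathElement{{Name: "validators"}},
// }
```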
## Implementation Details
### Type Analysis (`analyzer.go`)
The `AnalyzeObject` function performs recursive type introspection using Go reflection:
1. **Type Inspection** - Examines Go `reflect.Value` to determine SSZ type
- Basic types (`uint8`, `uint16`, `uint32`, `uint64`, `bool`): `SSZType` constants
- Slices: Determined from struct tags (`ssz-size` for vectors, `ssz-max` for lists); a small tag sketch follows this list. There is a related [write-up](https://hackmd.io/@junsong/H101DKnwxl) regarding struct tags.
- Structs: Analyzed as Containers with field ordering from JSON tags
- Pointers: Dereferenced automatically
2. **Variable-Length Population** - Determines actual sizes at runtime
- For lists: Iterates elements, caches sizes for variable-element lists
- For containers: Recursively populates variable fields, adjusts offsets
- For bitlists: Decodes bit length from bitvector
3. **Offset Calculation** - Computes byte positions within serialized data
- Fixed-size fields: Offset = sum of preceding field sizes
- Variable-size fields: Offset stored as 4-byte pointer entries
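As a rough illustration of the slice classification in step 1 (the container and field names here are made up, not types from the codebase), a fixed `ssz-size` tag marks a vector while `ssz-max` marks a list:
```go
// Illustrative container: struct tags drive the SSZ classification of slice fields.
type exampleContainer struct {
	Root    []byte   `json:"root" ssz-size:"32"`     // analyzed as Vector[byte, 32]
	Indices []uint64 `json:"indices" ssz-max:"2048"`  // analyzed as List[uint64, 2048]
}
```
Real objects passed to `AnalyzeObject` also implement the usual SSZ marshalling methods; this snippet only shows the tag convention.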
### Path Parsing (`path.go`)
The `ParsePath` function parses path strings with the following rules:
- **Dot notation**: `.field1.field2` for field access
- **Array indexing**: `[0]`, `[42]` for element access
- **Length queries**: `len(.field)` for list/vector lengths
- **Character set**: Only `[A-Za-z0-9._\[\]\(\)]` allowed
Example:
```go
path, _ := ParsePath(".nested.array_field[5].inner_field")
// Returns: Path{
// Elements: [
// PathElement{Name: "nested"},
// PathElement{Name: "array_field", Index: <Pointer to uint64(5)>},
// PathElement{Name: "inner_field"}
// ]
// }
```
### Generalized Index Calculation (`generalized_index.go`)
The generalized index is a tree position identifier. This package follows the [Ethereum consensus-specs](https://github.com/ethereum/consensus-specs/blob/master/ssz/merkle-proofs.md#generalized-merkle-tree-index) to calculate the generalized index.
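As a sketch of the composition rule from that document (these helpers are illustrative, not the package's internal API), per-level generalized indices for a nested path can be folded into a single index like so:
```go
// floorPow2 returns the largest power of two that is <= x (x must be > 0).
func floorPow2(x uint64) uint64 {
	p := uint64(1)
	for p*2 <= x {
		p *= 2
	}
	return p
}

// concatGindices folds per-level generalized indices into one, mirroring
// concat_generalized_indices from the consensus-specs document above.
func concatGindices(indices ...uint64) uint64 {
	o := uint64(1)
	for _, i := range indices {
		f := floorPow2(i)
		o = o*f + (i - f)
	}
	return o
}
```
For example, a field at gindex 5 within its container that contains an element at local gindex 9 yields `concatGindices(5, 9) = 5*8 + 1 = 41`.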
### Merkle Proof Generation (`merkle_proof.go`, `proof_collector.go`)
The `Prove` method generates Merkle proofs using a single-sweep merkleization algorithm:
#### Algorithm Overview
**Key Terms:**
- **Target gindex** (generalized index): The position of the SSZ element you want to prove, expressed as a generalized Merkle tree index. Stored in `Proof.Index`.
- Note: The generalized index for root is 1.
- **Registered gindices**: The set of tree positions whose node hashes must be captured during merkleization in order to later assemble the proof.
- **Sibling node**: The node that shares the same parent as another node.
- **Leaf value**: The 32-byte hash of the target node (the node being proven). Stored in `Proof.Leaf`.
**Phases:**
1. **Registration Phase** (`addTarget`)
> Goal: determine exactly which sibling hashes are needed for the proof.
- Record the target gindex as the proof target.
- Starting from the target node, walk the Merkle tree from the leaf (target gindex) to the root (gindex = 1).
- At each step:
- Compute and register the sibling gindex (`i XOR 1`) as “must collect”.
- Move to the parent (`i = i/2`).
- This produces the full set of registered gindices (the sibling nodes on the target-to-root path); a short sketch of this walk follows the phase descriptions.
2. **Merkleization Phase** (`merkleize`)
> Goal: recursively merkleize the tree and capture the needed hashes.
- Recursively traverse the SSZ structure and compute Merkle tree node hashes from leaves to root.
- Whenever the traversal computes a node whose gindex is in registered gindices, store that node's hash for later proof construction.
3. **Proof Assembly Phase** (`toProof`)
> Goal: create the final `fastssz.Proof` object in the correct format and order.
```go
// Proof represents a merkle proof against a general index.
type Proof struct {
Index int
Leaf []byte
Hashes [][]byte
}
```
- Set `Proof.Index` to the target gindex.
- Set `Proof.Leaf` to the 32-byte hash of the target node.
- Build `Proof.Hashes` by walking from the target node up to (but not including) the root:
- At node `i`, append the stored hash for the sibling (`i XOR 1`).
- Move to the parent (`i = i/2`).
- The resulting `Proof.Hashes` is ordered from the target level upward, containing one sibling hash per tree level on the path to the root.
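A minimal sketch of the registration walk from phase 1 (the function name is illustrative, not the package's internal API):
```go
// siblingGindices returns the positions whose hashes must be captured during
// merkleization to prove `target`: one sibling per level, walking from the
// target up to (but not including) the root at gindex 1.
func siblingGindices(target uint64) []uint64 {
	var siblings []uint64
	for i := target; i > 1; i /= 2 {
		siblings = append(siblings, i^1) // sibling shares the same parent
	}
	return siblings
}
```
The hashes collected at these positions, ordered from the target's level upward, are what phase 3 assembles into `Proof.Hashes`.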

View File

@@ -31,11 +31,6 @@ EXCLUDED_PATH_PREFIXES=(
".vscode"
)
# Gitignore overrides: paths that should still be scanned even if ignored by VCS.
GITIGNORE_OVERRIDES=(
"cmd/beacon-chain/execution"
)
# The logrus import path
LOGRUS_IMPORT="github.com/sirupsen/logrus"
# ----------------------------
@@ -75,14 +70,6 @@ rg_args=(
-0 # NUL-delimited output
)
if [[ ${#GITIGNORE_OVERRIDES[@]} -gt 0 ]]; then
# Disable VCS ignores so overrides are honored.
rg_args+=( --no-ignore-vcs )
for ov in "${GITIGNORE_OVERRIDES[@]}"; do
rg_args+=( --glob "$ov/**" )
done
fi
for ex in "${EXCLUDED_PATH_PREFIXES[@]}"; do
rg_args+=( --glob "!$ex/**" )
done

View File

@@ -6,28 +6,20 @@ import (
"github.com/sirupsen/logrus"
)
type HookIdentifier string
type WriterHook struct {
AllowedLevels []logrus.Level
Writer io.Writer
Formatter logrus.Formatter
Identifier HookIdentifier
}
func (hook *WriterHook) Levels() []logrus.Level {
if len(hook.AllowedLevels) == 0 {
if hook.AllowedLevels == nil || len(hook.AllowedLevels) == 0 {
return logrus.AllLevels
}
return hook.AllowedLevels
}
func (hook *WriterHook) Fire(entry *logrus.Entry) error {
val, ok := entry.Data[LogTargetField]
if ok && val != hook.Identifier {
return nil
}
line, err := hook.Formatter.Format(entry)
if err != nil {
return err

View File

@@ -17,43 +17,11 @@ import (
"gopkg.in/natefinch/lumberjack.v2"
)
var (
userVerbosity = logrus.InfoLevel
vmodule = make(map[string]logrus.Level)
)
var ephemeralLogFileVerbosity = logrus.DebugLevel
const (
ephemeralLogFileVerbosity = logrus.DebugLevel
LogTargetField = "log_target"
LogTargetEphemeral HookIdentifier = "ephemeral"
LogTargetUser HookIdentifier = "user"
)
// SetLoggingLevelAndData sets the base logging level for logrus.
func SetLoggingLevelAndData(baseVerbosity logrus.Level, vmoduleMap map[string]logrus.Level, maxVmoduleLevel logrus.Level, disableEphemeral bool) {
userVerbosity = baseVerbosity
vmodule = vmoduleMap
globalLevel := max(baseVerbosity, maxVmoduleLevel)
if !disableEphemeral {
globalLevel = max(globalLevel, ephemeralLogFileVerbosity)
}
logrus.SetLevel(globalLevel)
}
// PackageVerbosity returns the verbosity of a given package.
func PackageVerbosity(packagePath string) logrus.Level {
bestLen := 0
bestLevel := userVerbosity
for k, v := range vmodule {
if k == packagePath || strings.HasPrefix(packagePath, k+"/") {
if len(k) > bestLen {
bestLen = len(k)
bestLevel = v
}
}
}
return bestLevel
// SetLoggingLevel sets the base logging level for logrus.
func SetLoggingLevel(lvl logrus.Level) {
logrus.SetLevel(max(lvl, ephemeralLogFileVerbosity))
}
func addLogWriter(w io.Writer) {
@@ -100,7 +68,6 @@ func ConfigurePersistentLogging(logFileName string, format string, lvl logrus.Le
Formatter: formatter,
Writer: f,
AllowedLevels: logrus.AllLevels[:max(lvl, maxVmoduleLevel)+1],
Identifier: LogTargetUser,
})
logrus.Debug("File logging initialized")
@@ -134,7 +101,6 @@ func ConfigureEphemeralLogFile(datadirPath string, app string) error {
Formatter: formatter,
Writer: debugWriter,
AllowedLevels: logrus.AllLevels[:ephemeralLogFileVerbosity+1],
Identifier: LogTargetEphemeral,
})
logrus.WithField("path", logFilePath).Debug("Ephemeral log file initialized")

View File

@@ -26,21 +26,21 @@ func TestLifecycle(t *testing.T) {
port := 1000 + rand.Intn(1000)
prometheusService := NewService(t.Context(), fmt.Sprintf(":%d", port), nil)
prometheusService.Start()
// Actively wait until the service responds on /metrics (faster and less flaky than a fixed sleep)
deadline := time.Now().Add(3 * time.Second)
for {
if time.Now().After(deadline) {
t.Fatalf("metrics endpoint not ready within timeout")
}
resp, err := http.Get(fmt.Sprintf("http://localhost:%d/metrics", port))
if err == nil {
_ = resp.Body.Close()
if resp.StatusCode == http.StatusOK {
break
}
}
time.Sleep(50 * time.Millisecond)
}
// Actively wait until the service responds on /metrics (faster and less flaky than a fixed sleep)
deadline := time.Now().Add(3 * time.Second)
for {
if time.Now().After(deadline) {
t.Fatalf("metrics endpoint not ready within timeout")
}
resp, err := http.Get(fmt.Sprintf("http://localhost:%d/metrics", port))
if err == nil {
_ = resp.Body.Close()
if resp.StatusCode == http.StatusOK {
break
}
}
time.Sleep(50 * time.Millisecond)
}
// Query the service to ensure it really started.
resp, err := http.Get(fmt.Sprintf("http://localhost:%d/metrics", port))
@@ -49,18 +49,18 @@ func TestLifecycle(t *testing.T) {
err = prometheusService.Stop()
require.NoError(t, err)
// Actively wait until the service stops responding on /metrics
deadline = time.Now().Add(3 * time.Second)
for {
if time.Now().After(deadline) {
t.Fatalf("metrics endpoint still reachable after timeout")
}
_, err = http.Get(fmt.Sprintf("http://localhost:%d/metrics", port))
if err != nil {
break
}
time.Sleep(50 * time.Millisecond)
}
// Actively wait until the service stops responding on /metrics
deadline = time.Now().Add(3 * time.Second)
for {
if time.Now().After(deadline) {
t.Fatalf("metrics endpoint still reachable after timeout")
}
_, err = http.Get(fmt.Sprintf("http://localhost:%d/metrics", port))
if err != nil {
break
}
time.Sleep(50 * time.Millisecond)
}
// Query the service to ensure it really stopped.
_, err = http.Get(fmt.Sprintf("http://localhost:%d/metrics", port))

proto/engine/v1/gloas.pb.go (generated, executable file, 288 lines)
View File

@@ -0,0 +1,288 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.3
// protoc v3.21.7
// source: proto/engine/v1/gloas.proto
package enginev1
import (
reflect "reflect"
sync "sync"
github_com_OffchainLabs_prysm_v6_consensus_types_primitives "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
_ "github.com/OffchainLabs/prysm/v7/proto/eth/ext"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type ExecutionPayloadEnvelope struct {
state protoimpl.MessageState `protogen:"open.v1"`
Payload *ExecutionPayloadDeneb `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"`
ExecutionRequests *ExecutionRequests `protobuf:"bytes,2,opt,name=execution_requests,json=executionRequests,proto3" json:"execution_requests,omitempty"`
BuilderIndex github_com_OffchainLabs_prysm_v6_consensus_types_primitives.ValidatorIndex `protobuf:"varint,3,opt,name=builder_index,json=builderIndex,proto3" json:"builder_index,omitempty" cast-type:"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.ValidatorIndex"`
BeaconBlockRoot []byte `protobuf:"bytes,4,opt,name=beacon_block_root,json=beaconBlockRoot,proto3" json:"beacon_block_root,omitempty" ssz-size:"32"`
Slot github_com_OffchainLabs_prysm_v6_consensus_types_primitives.Slot `protobuf:"varint,5,opt,name=slot,proto3" json:"slot,omitempty" cast-type:"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.Slot"`
BlobKzgCommitments [][]byte `protobuf:"bytes,6,rep,name=blob_kzg_commitments,json=blobKzgCommitments,proto3" json:"blob_kzg_commitments,omitempty" ssz-max:"4096" ssz-size:"?,48"`
StateRoot []byte `protobuf:"bytes,7,opt,name=state_root,json=stateRoot,proto3" json:"state_root,omitempty" ssz-size:"32"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ExecutionPayloadEnvelope) Reset() {
*x = ExecutionPayloadEnvelope{}
mi := &file_proto_engine_v1_gloas_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ExecutionPayloadEnvelope) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ExecutionPayloadEnvelope) ProtoMessage() {}
func (x *ExecutionPayloadEnvelope) ProtoReflect() protoreflect.Message {
mi := &file_proto_engine_v1_gloas_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ExecutionPayloadEnvelope.ProtoReflect.Descriptor instead.
func (*ExecutionPayloadEnvelope) Descriptor() ([]byte, []int) {
return file_proto_engine_v1_gloas_proto_rawDescGZIP(), []int{0}
}
func (x *ExecutionPayloadEnvelope) GetPayload() *ExecutionPayloadDeneb {
if x != nil {
return x.Payload
}
return nil
}
func (x *ExecutionPayloadEnvelope) GetExecutionRequests() *ExecutionRequests {
if x != nil {
return x.ExecutionRequests
}
return nil
}
func (x *ExecutionPayloadEnvelope) GetBuilderIndex() github_com_OffchainLabs_prysm_v6_consensus_types_primitives.ValidatorIndex {
if x != nil {
return x.BuilderIndex
}
return github_com_OffchainLabs_prysm_v6_consensus_types_primitives.ValidatorIndex(0)
}
func (x *ExecutionPayloadEnvelope) GetBeaconBlockRoot() []byte {
if x != nil {
return x.BeaconBlockRoot
}
return nil
}
func (x *ExecutionPayloadEnvelope) GetSlot() github_com_OffchainLabs_prysm_v6_consensus_types_primitives.Slot {
if x != nil {
return x.Slot
}
return github_com_OffchainLabs_prysm_v6_consensus_types_primitives.Slot(0)
}
func (x *ExecutionPayloadEnvelope) GetBlobKzgCommitments() [][]byte {
if x != nil {
return x.BlobKzgCommitments
}
return nil
}
func (x *ExecutionPayloadEnvelope) GetStateRoot() []byte {
if x != nil {
return x.StateRoot
}
return nil
}
type SignedExecutionPayloadEnvelope struct {
state protoimpl.MessageState `protogen:"open.v1"`
Message *ExecutionPayloadEnvelope `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"`
Signature []byte `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty" ssz-size:"96"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *SignedExecutionPayloadEnvelope) Reset() {
*x = SignedExecutionPayloadEnvelope{}
mi := &file_proto_engine_v1_gloas_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *SignedExecutionPayloadEnvelope) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SignedExecutionPayloadEnvelope) ProtoMessage() {}
func (x *SignedExecutionPayloadEnvelope) ProtoReflect() protoreflect.Message {
mi := &file_proto_engine_v1_gloas_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SignedExecutionPayloadEnvelope.ProtoReflect.Descriptor instead.
func (*SignedExecutionPayloadEnvelope) Descriptor() ([]byte, []int) {
return file_proto_engine_v1_gloas_proto_rawDescGZIP(), []int{1}
}
func (x *SignedExecutionPayloadEnvelope) GetMessage() *ExecutionPayloadEnvelope {
if x != nil {
return x.Message
}
return nil
}
func (x *SignedExecutionPayloadEnvelope) GetSignature() []byte {
if x != nil {
return x.Signature
}
return nil
}
var File_proto_engine_v1_gloas_proto protoreflect.FileDescriptor
var file_proto_engine_v1_gloas_proto_rawDesc = []byte{
0x0a, 0x1b, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x2f, 0x76,
0x31, 0x2f, 0x67, 0x6c, 0x6f, 0x61, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x65,
0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x2e, 0x76,
0x31, 0x1a, 0x26, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x2f,
0x76, 0x31, 0x2f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x67,
0x69, 0x6e, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x2f, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x65, 0x6c, 0x65, 0x63, 0x74,
0x72, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f,
0x65, 0x74, 0x68, 0x2f, 0x65, 0x78, 0x74, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa3, 0x04, 0x0a, 0x18, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74,
0x69, 0x6f, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x45, 0x6e, 0x76, 0x65, 0x6c, 0x6f,
0x70, 0x65, 0x12, 0x43, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x01, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65,
0x6e, 0x67, 0x69, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69,
0x6f, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x44, 0x65, 0x6e, 0x65, 0x62, 0x52, 0x07,
0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x54, 0x0a, 0x12, 0x65, 0x78, 0x65, 0x63, 0x75,
0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x02, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65,
0x6e, 0x67, 0x69, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69,
0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x52, 0x11, 0x65, 0x78, 0x65, 0x63,
0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x73, 0x0a,
0x0d, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x03,
0x20, 0x01, 0x28, 0x04, 0x42, 0x4e, 0x82, 0xb5, 0x18, 0x4a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62,
0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x36, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x65,
0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d, 0x69,
0x74, 0x69, 0x76, 0x65, 0x73, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x49,
0x6e, 0x64, 0x65, 0x78, 0x52, 0x0c, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x49, 0x6e, 0x64,
0x65, 0x78, 0x12, 0x32, 0x0a, 0x11, 0x62, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x5f, 0x62, 0x6c, 0x6f,
0x63, 0x6b, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a,
0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, 0x0f, 0x62, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x42, 0x6c, 0x6f,
0x63, 0x6b, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x58, 0x0a, 0x04, 0x73, 0x6c, 0x6f, 0x74, 0x18, 0x05,
0x20, 0x01, 0x28, 0x04, 0x42, 0x44, 0x82, 0xb5, 0x18, 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62,
0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x36, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x65,
0x6e, 0x73, 0x75, 0x73, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x69, 0x6d, 0x69,
0x74, 0x69, 0x76, 0x65, 0x73, 0x2e, 0x53, 0x6c, 0x6f, 0x74, 0x52, 0x04, 0x73, 0x6c, 0x6f, 0x74,
0x12, 0x42, 0x0a, 0x14, 0x62, 0x6c, 0x6f, 0x62, 0x5f, 0x6b, 0x7a, 0x67, 0x5f, 0x63, 0x6f, 0x6d,
0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0c, 0x42, 0x10,
0x8a, 0xb5, 0x18, 0x04, 0x3f, 0x2c, 0x34, 0x38, 0x92, 0xb5, 0x18, 0x04, 0x34, 0x30, 0x39, 0x36,
0x52, 0x12, 0x62, 0x6c, 0x6f, 0x62, 0x4b, 0x7a, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d,
0x65, 0x6e, 0x74, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x72, 0x6f,
0x6f, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32,
0x52, 0x09, 0x73, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x22, 0x8e, 0x01, 0x0a, 0x1e,
0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50,
0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x45, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x12, 0x46,
0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
0x2c, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x6e, 0x67, 0x69, 0x6e,
0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61,
0x79, 0x6c, 0x6f, 0x61, 0x64, 0x45, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x52, 0x07, 0x6d,
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x24, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74,
0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x39,
0x36, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x42, 0x3b, 0x5a, 0x39,
0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68,
0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x36,
0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x2f, 0x76, 0x31,
0x3b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x33,
}
var (
file_proto_engine_v1_gloas_proto_rawDescOnce sync.Once
file_proto_engine_v1_gloas_proto_rawDescData = file_proto_engine_v1_gloas_proto_rawDesc
)
func file_proto_engine_v1_gloas_proto_rawDescGZIP() []byte {
file_proto_engine_v1_gloas_proto_rawDescOnce.Do(func() {
file_proto_engine_v1_gloas_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_engine_v1_gloas_proto_rawDescData)
})
return file_proto_engine_v1_gloas_proto_rawDescData
}
var file_proto_engine_v1_gloas_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_proto_engine_v1_gloas_proto_goTypes = []any{
(*ExecutionPayloadEnvelope)(nil), // 0: ethereum.engine.v1.ExecutionPayloadEnvelope
(*SignedExecutionPayloadEnvelope)(nil), // 1: ethereum.engine.v1.SignedExecutionPayloadEnvelope
(*ExecutionPayloadDeneb)(nil), // 2: ethereum.engine.v1.ExecutionPayloadDeneb
(*ExecutionRequests)(nil), // 3: ethereum.engine.v1.ExecutionRequests
}
var file_proto_engine_v1_gloas_proto_depIdxs = []int32{
2, // 0: ethereum.engine.v1.ExecutionPayloadEnvelope.payload:type_name -> ethereum.engine.v1.ExecutionPayloadDeneb
3, // 1: ethereum.engine.v1.ExecutionPayloadEnvelope.execution_requests:type_name -> ethereum.engine.v1.ExecutionRequests
0, // 2: ethereum.engine.v1.SignedExecutionPayloadEnvelope.message:type_name -> ethereum.engine.v1.ExecutionPayloadEnvelope
3, // [3:3] is the sub-list for method output_type
3, // [3:3] is the sub-list for method input_type
3, // [3:3] is the sub-list for extension type_name
3, // [3:3] is the sub-list for extension extendee
0, // [0:3] is the sub-list for field type_name
}
func init() { file_proto_engine_v1_gloas_proto_init() }
func file_proto_engine_v1_gloas_proto_init() {
if File_proto_engine_v1_gloas_proto != nil {
return
}
file_proto_engine_v1_execution_engine_proto_init()
file_proto_engine_v1_electra_proto_init()
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_proto_engine_v1_gloas_proto_rawDesc,
NumEnums: 0,
NumMessages: 2,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_proto_engine_v1_gloas_proto_goTypes,
DependencyIndexes: file_proto_engine_v1_gloas_proto_depIdxs,
MessageInfos: file_proto_engine_v1_gloas_proto_msgTypes,
}.Build()
File_proto_engine_v1_gloas_proto = out.File
file_proto_engine_v1_gloas_proto_rawDesc = nil
file_proto_engine_v1_gloas_proto_goTypes = nil
file_proto_engine_v1_gloas_proto_depIdxs = nil
}
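
A minimal usage sketch of the two new messages, not part of the generated file; the import paths and placeholder values below are assumptions based on the field definitions above.

package main

import (
	"fmt"

	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
)

func main() {
	// Placeholder envelope; a real caller would fill Payload and
	// ExecutionRequests from the execution engine response.
	env := &enginev1.ExecutionPayloadEnvelope{
		BuilderIndex:       primitives.ValidatorIndex(7),
		BeaconBlockRoot:    make([]byte, 32),
		Slot:               primitives.Slot(123),
		BlobKzgCommitments: [][]byte{make([]byte, 48)},
		StateRoot:          make([]byte, 32),
	}
	signed := &enginev1.SignedExecutionPayloadEnvelope{
		Message:   env,
		Signature: make([]byte, 96),
	}
	fmt.Println(signed.GetMessage().GetSlot(), len(signed.GetSignature()))
}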

View File

@@ -144,17 +144,15 @@ func copySignedExecutionPayloadBid(header *SignedExecutionPayloadBid) *SignedExe
}
if header.Message != nil {
copied.Message = &ExecutionPayloadBid{
ParentBlockHash: bytesutil.SafeCopyBytes(header.Message.ParentBlockHash),
ParentBlockRoot: bytesutil.SafeCopyBytes(header.Message.ParentBlockRoot),
BlockHash: bytesutil.SafeCopyBytes(header.Message.BlockHash),
PrevRandao: bytesutil.SafeCopyBytes(header.Message.PrevRandao),
FeeRecipient: bytesutil.SafeCopyBytes(header.Message.FeeRecipient),
GasLimit: header.Message.GasLimit,
BuilderIndex: header.Message.BuilderIndex,
Slot: header.Message.Slot,
Value: header.Message.Value,
ExecutionPayment: header.Message.ExecutionPayment,
BlobKzgCommitments: bytesutil.SafeCopy2dBytes(header.Message.BlobKzgCommitments),
ParentBlockHash: bytesutil.SafeCopyBytes(header.Message.ParentBlockHash),
ParentBlockRoot: bytesutil.SafeCopyBytes(header.Message.ParentBlockRoot),
BlockHash: bytesutil.SafeCopyBytes(header.Message.BlockHash),
FeeRecipient: bytesutil.SafeCopyBytes(header.Message.FeeRecipient),
GasLimit: header.Message.GasLimit,
BuilderIndex: header.Message.BuilderIndex,
Slot: header.Message.Slot,
Value: header.Message.Value,
BlobKzgCommitmentsRoot: bytesutil.SafeCopyBytes(header.Message.BlobKzgCommitmentsRoot),
}
}
return copied

View File

@@ -1215,16 +1215,15 @@ func genSignedExecutionPayloadBidGloas() *v1alpha1.SignedExecutionPayloadBid {
func genExecutionPayloadBidGloas() *v1alpha1.ExecutionPayloadBid {
return &v1alpha1.ExecutionPayloadBid{
ParentBlockHash: bytes(32),
ParentBlockRoot: bytes(32),
BlockHash: bytes(32),
FeeRecipient: bytes(20),
GasLimit: rand.Uint64(),
BuilderIndex: primitives.BuilderIndex(rand.Uint64()),
Slot: primitives.Slot(rand.Uint64()),
Value: primitives.Gwei(rand.Uint64()),
ExecutionPayment: primitives.Gwei(rand.Uint64()),
BlobKzgCommitments: [][]byte{bytes(48)},
ParentBlockHash: bytes(32),
ParentBlockRoot: bytes(32),
BlockHash: bytes(32),
FeeRecipient: bytes(20),
GasLimit: rand.Uint64(),
BuilderIndex: primitives.BuilderIndex(rand.Uint64()),
Slot: primitives.Slot(rand.Uint64()),
Value: primitives.Gwei(rand.Uint64()),
BlobKzgCommitmentsRoot: bytes(32),
}
}

View File

@@ -10,17 +10,17 @@ func (header *ExecutionPayloadBid) Copy() *ExecutionPayloadBid {
return nil
}
return &ExecutionPayloadBid{
ParentBlockHash: bytesutil.SafeCopyBytes(header.ParentBlockHash),
ParentBlockRoot: bytesutil.SafeCopyBytes(header.ParentBlockRoot),
BlockHash: bytesutil.SafeCopyBytes(header.BlockHash),
PrevRandao: bytesutil.SafeCopyBytes(header.PrevRandao),
FeeRecipient: bytesutil.SafeCopyBytes(header.FeeRecipient),
GasLimit: header.GasLimit,
BuilderIndex: header.BuilderIndex,
Slot: header.Slot,
Value: header.Value,
ExecutionPayment: header.ExecutionPayment,
BlobKzgCommitments: bytesutil.SafeCopy2dBytes(header.BlobKzgCommitments),
ParentBlockHash: bytesutil.SafeCopyBytes(header.ParentBlockHash),
ParentBlockRoot: bytesutil.SafeCopyBytes(header.ParentBlockRoot),
BlockHash: bytesutil.SafeCopyBytes(header.BlockHash),
PrevRandao: bytesutil.SafeCopyBytes(header.PrevRandao),
FeeRecipient: bytesutil.SafeCopyBytes(header.FeeRecipient),
GasLimit: header.GasLimit,
BuilderIndex: header.BuilderIndex,
Slot: header.Slot,
Value: header.Value,
ExecutionPayment: header.ExecutionPayment,
BlobKzgCommitmentsRoot: bytesutil.SafeCopyBytes(header.BlobKzgCommitmentsRoot),
}
}
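
A small sketch, not from the diff, of the aliasing guarantee SafeCopyBytes gives the Copy method; the package path and Example name are illustrative.

package eth_test

import (
	"fmt"

	eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)

func ExampleExecutionPayloadBid_Copy() {
	original := &eth.ExecutionPayloadBid{BlockHash: []byte{0xaa, 0xbb}}
	copied := original.Copy()
	original.BlockHash[0] = 0x00 // mutating the source must not leak into the copy
	fmt.Println(copied.BlockHash[0])
	// Output: 170
}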

File diff suppressed because it is too large

View File

@@ -33,7 +33,7 @@ option go_package = "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1;eth";
// slot: Slot
// value: Gwei
// execution_payment: Gwei
// blob_kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK]
// blob_kzg_commitments_root: Root
message ExecutionPayloadBid {
bytes parent_block_hash = 1 [ (ethereum.eth.ext.ssz_size) = "32" ];
bytes parent_block_root = 2 [ (ethereum.eth.ext.ssz_size) = "32" ];
@@ -56,10 +56,7 @@ message ExecutionPayloadBid {
(ethereum.eth.ext.cast_type) =
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.Gwei"
];
repeated bytes blob_kzg_commitments = 11 [
(ethereum.eth.ext.ssz_size) = "?,48",
(ethereum.eth.ext.ssz_max) = "max_blob_commitments.size"
];
bytes blob_kzg_commitments_root = 11 [ (ethereum.eth.ext.ssz_size) = "32" ];
}
// SignedExecutionPayloadBid wraps an execution payload bid with a signature.
@@ -369,6 +366,7 @@ message BuilderPendingWithdrawal {
// class DataColumnSidecar(Container):
// index: ColumnIndex
// column: List[Cell, MAX_BLOB_COMMITMENTS_PER_BLOCK]
// kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK]
// kzg_proofs: List[KZGProof, MAX_BLOB_COMMITMENTS_PER_BLOCK]
// slot: Slot
// beacon_block_root: Root
@@ -378,6 +376,10 @@ message DataColumnSidecarGloas {
(ethereum.eth.ext.ssz_size) = "?,bytes_per_cell.size",
(ethereum.eth.ext.ssz_max) = "max_blob_commitments.size"
];
repeated bytes kzg_commitments = 3 [
(ethereum.eth.ext.ssz_size) = "?,48",
(ethereum.eth.ext.ssz_max) = "max_blob_commitments.size"
];
repeated bytes kzg_proofs = 4 [
(ethereum.eth.ext.ssz_size) = "?,48",
(ethereum.eth.ext.ssz_max) = "max_blob_commitments.size"
@@ -400,6 +402,7 @@ message DataColumnSidecarGloas {
// builder_index: BuilderIndex
// beacon_block_root: Root
// slot: Slot
// blob_kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK]
// state_root: Root
message ExecutionPayloadEnvelope {
ethereum.engine.v1.ExecutionPayloadDeneb payload = 1;
@@ -412,7 +415,11 @@ message ExecutionPayloadEnvelope {
(ethereum.eth.ext.cast_type) =
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives.Slot"
];
bytes state_root = 6 [ (ethereum.eth.ext.ssz_size) = "32" ];
repeated bytes blob_kzg_commitments = 6 [
(ethereum.eth.ext.ssz_size) = "?,48",
(ethereum.eth.ext.ssz_max) = "max_blob_commitments.size"
];
bytes state_root = 7 [ (ethereum.eth.ext.ssz_size) = "32" ];
}
// SignedExecutionPayloadEnvelope wraps an execution payload envelope with a signature.

View File

@@ -15,7 +15,6 @@ func (e *ExecutionPayloadBid) MarshalSSZ() ([]byte, error) {
// MarshalSSZTo ssz marshals the ExecutionPayloadBid object to a target array
func (e *ExecutionPayloadBid) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
offset := int(192)
// Field (0) 'ParentBlockHash'
if size := len(e.ParentBlockHash); size != 32 {
@@ -67,22 +66,12 @@ func (e *ExecutionPayloadBid) MarshalSSZTo(buf []byte) (dst []byte, err error) {
// Field (9) 'ExecutionPayment'
dst = ssz.MarshalUint64(dst, uint64(e.ExecutionPayment))
// Offset (10) 'BlobKzgCommitments'
dst = ssz.WriteOffset(dst, offset)
offset += len(e.BlobKzgCommitments) * 48
// Field (10) 'BlobKzgCommitments'
if size := len(e.BlobKzgCommitments); size > 4096 {
err = ssz.ErrListTooBigFn("--.BlobKzgCommitments", size, 4096)
// Field (10) 'BlobKzgCommitmentsRoot'
if size := len(e.BlobKzgCommitmentsRoot); size != 32 {
err = ssz.ErrBytesLengthFn("--.BlobKzgCommitmentsRoot", size, 32)
return
}
for ii := 0; ii < len(e.BlobKzgCommitments); ii++ {
if size := len(e.BlobKzgCommitments[ii]); size != 48 {
err = ssz.ErrBytesLengthFn("--.BlobKzgCommitments[ii]", size, 48)
return
}
dst = append(dst, e.BlobKzgCommitments[ii]...)
}
dst = append(dst, e.BlobKzgCommitmentsRoot...)
return
}
@@ -91,13 +80,10 @@ func (e *ExecutionPayloadBid) MarshalSSZTo(buf []byte) (dst []byte, err error) {
func (e *ExecutionPayloadBid) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size < 192 {
if size != 220 {
return ssz.ErrSize
}
tail := buf
var o10 uint64
// Field (0) 'ParentBlockHash'
if cap(e.ParentBlockHash) == 0 {
e.ParentBlockHash = make([]byte, 0, len(buf[0:32]))
@@ -143,40 +129,18 @@ func (e *ExecutionPayloadBid) UnmarshalSSZ(buf []byte) error {
// Field (9) 'ExecutionPayment'
e.ExecutionPayment = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Gwei(ssz.UnmarshallUint64(buf[180:188]))
// Offset (10) 'BlobKzgCommitments'
if o10 = ssz.ReadOffset(buf[188:192]); o10 > size {
return ssz.ErrOffset
// Field (10) 'BlobKzgCommitmentsRoot'
if cap(e.BlobKzgCommitmentsRoot) == 0 {
e.BlobKzgCommitmentsRoot = make([]byte, 0, len(buf[188:220]))
}
e.BlobKzgCommitmentsRoot = append(e.BlobKzgCommitmentsRoot, buf[188:220]...)
if o10 != 192 {
return ssz.ErrInvalidVariableOffset
}
// Field (10) 'BlobKzgCommitments'
{
buf = tail[o10:]
num, err := ssz.DivideInt2(len(buf), 48, 4096)
if err != nil {
return err
}
e.BlobKzgCommitments = make([][]byte, num)
for ii := 0; ii < num; ii++ {
if cap(e.BlobKzgCommitments[ii]) == 0 {
e.BlobKzgCommitments[ii] = make([]byte, 0, len(buf[ii*48:(ii+1)*48]))
}
e.BlobKzgCommitments[ii] = append(e.BlobKzgCommitments[ii], buf[ii*48:(ii+1)*48]...)
}
}
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the ExecutionPayloadBid object
func (e *ExecutionPayloadBid) SizeSSZ() (size int) {
size = 192
// Field (10) 'BlobKzgCommitments'
size += len(e.BlobKzgCommitments) * 48
size = 220
return
}
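
A quick size check, not part of the generated code, following the field layout in the UnmarshalSSZ body above; the constant name is only illustrative.

// 4 fixed 32-byte fields (parent_block_hash, parent_block_root, block_hash, prev_randao)
// + 20-byte fee_recipient
// + 5 uint64 fields (gas_limit, builder_index, slot, value, execution_payment)
// + 32-byte blob_kzg_commitments_root
const executionPayloadBidFixedSize = 4*32 + 20 + 5*8 + 32 // 220 bytes, matching SizeSSZ above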
@@ -239,24 +203,12 @@ func (e *ExecutionPayloadBid) HashTreeRootWith(hh *ssz.Hasher) (err error) {
// Field (9) 'ExecutionPayment'
hh.PutUint64(uint64(e.ExecutionPayment))
// Field (10) 'BlobKzgCommitments'
{
if size := len(e.BlobKzgCommitments); size > 4096 {
err = ssz.ErrListTooBigFn("--.BlobKzgCommitments", size, 4096)
return
}
subIndx := hh.Index()
for _, i := range e.BlobKzgCommitments {
if len(i) != 48 {
err = ssz.ErrBytesLength
return
}
hh.PutBytes(i)
}
numItems := uint64(len(e.BlobKzgCommitments))
hh.MerkleizeWithMixin(subIndx, numItems, 4096)
// Field (10) 'BlobKzgCommitmentsRoot'
if size := len(e.BlobKzgCommitmentsRoot); size != 32 {
err = ssz.ErrBytesLengthFn("--.BlobKzgCommitmentsRoot", size, 32)
return
}
hh.PutBytes(e.BlobKzgCommitmentsRoot)
hh.Merkleize(indx)
return
@@ -270,14 +222,14 @@ func (s *SignedExecutionPayloadBid) MarshalSSZ() ([]byte, error) {
// MarshalSSZTo ssz marshals the SignedExecutionPayloadBid object to a target array
func (s *SignedExecutionPayloadBid) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
offset := int(100)
// Offset (0) 'Message'
dst = ssz.WriteOffset(dst, offset)
// Field (0) 'Message'
if s.Message == nil {
s.Message = new(ExecutionPayloadBid)
}
offset += s.Message.SizeSSZ()
if dst, err = s.Message.MarshalSSZTo(dst); err != nil {
return
}
// Field (1) 'Signature'
if size := len(s.Signature); size != 96 {
@@ -286,11 +238,6 @@ func (s *SignedExecutionPayloadBid) MarshalSSZTo(buf []byte) (dst []byte, err er
}
dst = append(dst, s.Signature...)
// Field (0) 'Message'
if dst, err = s.Message.MarshalSSZTo(dst); err != nil {
return
}
return
}
@@ -298,51 +245,30 @@ func (s *SignedExecutionPayloadBid) MarshalSSZTo(buf []byte) (dst []byte, err er
func (s *SignedExecutionPayloadBid) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size < 100 {
if size != 316 {
return ssz.ErrSize
}
tail := buf
var o0 uint64
// Offset (0) 'Message'
if o0 = ssz.ReadOffset(buf[0:4]); o0 > size {
return ssz.ErrOffset
}
if o0 != 100 {
return ssz.ErrInvalidVariableOffset
}
// Field (1) 'Signature'
if cap(s.Signature) == 0 {
s.Signature = make([]byte, 0, len(buf[4:100]))
}
s.Signature = append(s.Signature, buf[4:100]...)
// Field (0) 'Message'
{
buf = tail[o0:]
if s.Message == nil {
s.Message = new(ExecutionPayloadBid)
}
if err = s.Message.UnmarshalSSZ(buf); err != nil {
return err
}
}
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the SignedExecutionPayloadBid object
func (s *SignedExecutionPayloadBid) SizeSSZ() (size int) {
size = 100
// Field (0) 'Message'
if s.Message == nil {
s.Message = new(ExecutionPayloadBid)
}
size += s.Message.SizeSSZ()
if err = s.Message.UnmarshalSSZ(buf[0:220]); err != nil {
return err
}
// Field (1) 'Signature'
if cap(s.Signature) == 0 {
s.Signature = make([]byte, 0, len(buf[220:316]))
}
s.Signature = append(s.Signature, buf[220:316]...)
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the SignedExecutionPayloadBid object
func (s *SignedExecutionPayloadBid) SizeSSZ() (size int) {
size = 316
return
}
@@ -816,7 +742,7 @@ func (b *BeaconBlockBodyGloas) MarshalSSZ() ([]byte, error) {
// MarshalSSZTo ssz marshals the BeaconBlockBodyGloas object to a target array
func (b *BeaconBlockBodyGloas) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
offset := int(392)
offset := int(704)
// Field (0) 'RandaoReveal'
if size := len(b.RandaoReveal); size != 96 {
@@ -878,12 +804,13 @@ func (b *BeaconBlockBodyGloas) MarshalSSZTo(buf []byte) (dst []byte, err error)
dst = ssz.WriteOffset(dst, offset)
offset += len(b.BlsToExecutionChanges) * 172
// Offset (10) 'SignedExecutionPayloadBid'
dst = ssz.WriteOffset(dst, offset)
// Field (10) 'SignedExecutionPayloadBid'
if b.SignedExecutionPayloadBid == nil {
b.SignedExecutionPayloadBid = new(SignedExecutionPayloadBid)
}
offset += b.SignedExecutionPayloadBid.SizeSSZ()
if dst, err = b.SignedExecutionPayloadBid.MarshalSSZTo(dst); err != nil {
return
}
// Offset (11) 'PayloadAttestations'
dst = ssz.WriteOffset(dst, offset)
@@ -969,11 +896,6 @@ func (b *BeaconBlockBodyGloas) MarshalSSZTo(buf []byte) (dst []byte, err error)
}
}
// Field (10) 'SignedExecutionPayloadBid'
if dst, err = b.SignedExecutionPayloadBid.MarshalSSZTo(dst); err != nil {
return
}
// Field (11) 'PayloadAttestations'
if size := len(b.PayloadAttestations); size > 4 {
err = ssz.ErrListTooBigFn("--.PayloadAttestations", size, 4)
@@ -992,12 +914,12 @@ func (b *BeaconBlockBodyGloas) MarshalSSZTo(buf []byte) (dst []byte, err error)
func (b *BeaconBlockBodyGloas) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size < 392 {
if size < 704 {
return ssz.ErrSize
}
tail := buf
var o3, o4, o5, o6, o7, o9, o10, o11 uint64
var o3, o4, o5, o6, o7, o9, o11 uint64
// Field (0) 'RandaoReveal'
if cap(b.RandaoReveal) == 0 {
@@ -1024,7 +946,7 @@ func (b *BeaconBlockBodyGloas) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}
if o3 != 392 {
if o3 != 704 {
return ssz.ErrInvalidVariableOffset
}
@@ -1061,13 +983,16 @@ func (b *BeaconBlockBodyGloas) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}
// Offset (10) 'SignedExecutionPayloadBid'
if o10 = ssz.ReadOffset(buf[384:388]); o10 > size || o9 > o10 {
return ssz.ErrOffset
// Field (10) 'SignedExecutionPayloadBid'
if b.SignedExecutionPayloadBid == nil {
b.SignedExecutionPayloadBid = new(SignedExecutionPayloadBid)
}
if err = b.SignedExecutionPayloadBid.UnmarshalSSZ(buf[384:700]); err != nil {
return err
}
// Offset (11) 'PayloadAttestations'
if o11 = ssz.ReadOffset(buf[388:392]); o11 > size || o10 > o11 {
if o11 = ssz.ReadOffset(buf[700:704]); o11 > size || o9 > o11 {
return ssz.ErrOffset
}
@@ -1171,7 +1096,7 @@ func (b *BeaconBlockBodyGloas) UnmarshalSSZ(buf []byte) error {
// Field (9) 'BlsToExecutionChanges'
{
buf = tail[o9:o10]
buf = tail[o9:o11]
num, err := ssz.DivideInt2(len(buf), 172, 16)
if err != nil {
return err
@@ -1187,17 +1112,6 @@ func (b *BeaconBlockBodyGloas) UnmarshalSSZ(buf []byte) error {
}
}
// Field (10) 'SignedExecutionPayloadBid'
{
buf = tail[o10:o11]
if b.SignedExecutionPayloadBid == nil {
b.SignedExecutionPayloadBid = new(SignedExecutionPayloadBid)
}
if err = b.SignedExecutionPayloadBid.UnmarshalSSZ(buf); err != nil {
return err
}
}
// Field (11) 'PayloadAttestations'
{
buf = tail[o11:]
@@ -1220,7 +1134,7 @@ func (b *BeaconBlockBodyGloas) UnmarshalSSZ(buf []byte) error {
// SizeSSZ returns the ssz encoded size in bytes for the BeaconBlockBodyGloas object
func (b *BeaconBlockBodyGloas) SizeSSZ() (size int) {
size = 392
size = 704
// Field (3) 'ProposerSlashings'
size += len(b.ProposerSlashings) * 416
@@ -1246,12 +1160,6 @@ func (b *BeaconBlockBodyGloas) SizeSSZ() (size int) {
// Field (9) 'BlsToExecutionChanges'
size += len(b.BlsToExecutionChanges) * 172
// Field (10) 'SignedExecutionPayloadBid'
if b.SignedExecutionPayloadBid == nil {
b.SignedExecutionPayloadBid = new(SignedExecutionPayloadBid)
}
size += b.SignedExecutionPayloadBid.SizeSSZ()
// Field (11) 'PayloadAttestations'
size += len(b.PayloadAttestations) * 202
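
Not part of the generated code: the jump from 392 to 704 in the fixed portion is the 316-byte SignedExecutionPayloadBid moving inline, minus the 4-byte offset it previously occupied; the constant name is illustrative.

const beaconBlockBodyGloasFixedSize = 392 - 4 + 316 // = 704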
@@ -1529,7 +1437,7 @@ func (b *BeaconStateGloas) MarshalSSZ() ([]byte, error) {
// MarshalSSZTo ssz marshals the BeaconStateGloas object to a target array
func (b *BeaconStateGloas) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
offset := int(2741117)
offset := int(2741333)
// Field (0) 'GenesisTime'
dst = ssz.MarshalUint64(dst, b.GenesisTime)
@@ -1694,12 +1602,13 @@ func (b *BeaconStateGloas) MarshalSSZTo(buf []byte) (dst []byte, err error) {
return
}
// Offset (24) 'LatestExecutionPayloadBid'
dst = ssz.WriteOffset(dst, offset)
// Field (24) 'LatestExecutionPayloadBid'
if b.LatestExecutionPayloadBid == nil {
b.LatestExecutionPayloadBid = new(ExecutionPayloadBid)
}
offset += b.LatestExecutionPayloadBid.SizeSSZ()
if dst, err = b.LatestExecutionPayloadBid.MarshalSSZTo(dst); err != nil {
return
}
// Field (25) 'NextWithdrawalIndex'
dst = ssz.MarshalUint64(dst, b.NextWithdrawalIndex)
@@ -1857,11 +1766,6 @@ func (b *BeaconStateGloas) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = ssz.MarshalUint64(dst, b.InactivityScores[ii])
}
// Field (24) 'LatestExecutionPayloadBid'
if dst, err = b.LatestExecutionPayloadBid.MarshalSSZTo(dst); err != nil {
return
}
// Field (27) 'HistoricalSummaries'
if size := len(b.HistoricalSummaries); size > 16777216 {
err = ssz.ErrListTooBigFn("--.HistoricalSummaries", size, 16777216)
@@ -1946,12 +1850,12 @@ func (b *BeaconStateGloas) MarshalSSZTo(buf []byte) (dst []byte, err error) {
func (b *BeaconStateGloas) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size < 2741117 {
if size < 2741333 {
return ssz.ErrSize
}
tail := buf
var o7, o9, o11, o12, o15, o16, o21, o24, o27, o34, o35, o36, o38, o42, o44 uint64
var o7, o9, o11, o12, o15, o16, o21, o27, o34, o35, o36, o38, o42, o44 uint64
// Field (0) 'GenesisTime'
b.GenesisTime = ssz.UnmarshallUint64(buf[0:8])
@@ -2004,7 +1908,7 @@ func (b *BeaconStateGloas) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}
if o7 != 2741117 {
if o7 != 2741333 {
return ssz.ErrInvalidVariableOffset
}
@@ -2110,74 +2014,77 @@ func (b *BeaconStateGloas) UnmarshalSSZ(buf []byte) error {
return err
}
// Offset (24) 'LatestExecutionPayloadBid'
if o24 = ssz.ReadOffset(buf[2736629:2736633]); o24 > size || o21 > o24 {
return ssz.ErrOffset
// Field (24) 'LatestExecutionPayloadBid'
if b.LatestExecutionPayloadBid == nil {
b.LatestExecutionPayloadBid = new(ExecutionPayloadBid)
}
if err = b.LatestExecutionPayloadBid.UnmarshalSSZ(buf[2736629:2736849]); err != nil {
return err
}
// Field (25) 'NextWithdrawalIndex'
b.NextWithdrawalIndex = ssz.UnmarshallUint64(buf[2736633:2736641])
b.NextWithdrawalIndex = ssz.UnmarshallUint64(buf[2736849:2736857])
// Field (26) 'NextWithdrawalValidatorIndex'
b.NextWithdrawalValidatorIndex = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ValidatorIndex(ssz.UnmarshallUint64(buf[2736641:2736649]))
b.NextWithdrawalValidatorIndex = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.ValidatorIndex(ssz.UnmarshallUint64(buf[2736857:2736865]))
// Offset (27) 'HistoricalSummaries'
if o27 = ssz.ReadOffset(buf[2736649:2736653]); o27 > size || o24 > o27 {
if o27 = ssz.ReadOffset(buf[2736865:2736869]); o27 > size || o21 > o27 {
return ssz.ErrOffset
}
// Field (28) 'DepositRequestsStartIndex'
b.DepositRequestsStartIndex = ssz.UnmarshallUint64(buf[2736653:2736661])
b.DepositRequestsStartIndex = ssz.UnmarshallUint64(buf[2736869:2736877])
// Field (29) 'DepositBalanceToConsume'
b.DepositBalanceToConsume = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Gwei(ssz.UnmarshallUint64(buf[2736661:2736669]))
b.DepositBalanceToConsume = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Gwei(ssz.UnmarshallUint64(buf[2736877:2736885]))
// Field (30) 'ExitBalanceToConsume'
b.ExitBalanceToConsume = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Gwei(ssz.UnmarshallUint64(buf[2736669:2736677]))
b.ExitBalanceToConsume = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Gwei(ssz.UnmarshallUint64(buf[2736885:2736893]))
// Field (31) 'EarliestExitEpoch'
b.EarliestExitEpoch = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Epoch(ssz.UnmarshallUint64(buf[2736677:2736685]))
b.EarliestExitEpoch = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Epoch(ssz.UnmarshallUint64(buf[2736893:2736901]))
// Field (32) 'ConsolidationBalanceToConsume'
b.ConsolidationBalanceToConsume = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Gwei(ssz.UnmarshallUint64(buf[2736685:2736693]))
b.ConsolidationBalanceToConsume = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Gwei(ssz.UnmarshallUint64(buf[2736901:2736909]))
// Field (33) 'EarliestConsolidationEpoch'
b.EarliestConsolidationEpoch = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Epoch(ssz.UnmarshallUint64(buf[2736693:2736701]))
b.EarliestConsolidationEpoch = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Epoch(ssz.UnmarshallUint64(buf[2736909:2736917]))
// Offset (34) 'PendingDeposits'
if o34 = ssz.ReadOffset(buf[2736701:2736705]); o34 > size || o27 > o34 {
if o34 = ssz.ReadOffset(buf[2736917:2736921]); o34 > size || o27 > o34 {
return ssz.ErrOffset
}
// Offset (35) 'PendingPartialWithdrawals'
if o35 = ssz.ReadOffset(buf[2736705:2736709]); o35 > size || o34 > o35 {
if o35 = ssz.ReadOffset(buf[2736921:2736925]); o35 > size || o34 > o35 {
return ssz.ErrOffset
}
// Offset (36) 'PendingConsolidations'
if o36 = ssz.ReadOffset(buf[2736709:2736713]); o36 > size || o35 > o36 {
if o36 = ssz.ReadOffset(buf[2736925:2736929]); o36 > size || o35 > o36 {
return ssz.ErrOffset
}
// Field (37) 'ProposerLookahead'
b.ProposerLookahead = ssz.ExtendUint64(b.ProposerLookahead, 64)
for ii := 0; ii < 64; ii++ {
b.ProposerLookahead[ii] = ssz.UnmarshallUint64(buf[2736713:2737225][ii*8 : (ii+1)*8])
b.ProposerLookahead[ii] = ssz.UnmarshallUint64(buf[2736929:2737441][ii*8 : (ii+1)*8])
}
// Offset (38) 'Builders'
if o38 = ssz.ReadOffset(buf[2737225:2737229]); o38 > size || o36 > o38 {
if o38 = ssz.ReadOffset(buf[2737441:2737445]); o38 > size || o36 > o38 {
return ssz.ErrOffset
}
// Field (39) 'NextWithdrawalBuilderIndex'
b.NextWithdrawalBuilderIndex = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.BuilderIndex(ssz.UnmarshallUint64(buf[2737229:2737237]))
b.NextWithdrawalBuilderIndex = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.BuilderIndex(ssz.UnmarshallUint64(buf[2737445:2737453]))
// Field (40) 'ExecutionPayloadAvailability'
if cap(b.ExecutionPayloadAvailability) == 0 {
b.ExecutionPayloadAvailability = make([]byte, 0, len(buf[2737237:2738261]))
b.ExecutionPayloadAvailability = make([]byte, 0, len(buf[2737453:2738477]))
}
b.ExecutionPayloadAvailability = append(b.ExecutionPayloadAvailability, buf[2737237:2738261]...)
b.ExecutionPayloadAvailability = append(b.ExecutionPayloadAvailability, buf[2737453:2738477]...)
// Field (41) 'BuilderPendingPayments'
b.BuilderPendingPayments = make([]*BuilderPendingPayment, 64)
@@ -2185,24 +2092,24 @@ func (b *BeaconStateGloas) UnmarshalSSZ(buf []byte) error {
if b.BuilderPendingPayments[ii] == nil {
b.BuilderPendingPayments[ii] = new(BuilderPendingPayment)
}
if err = b.BuilderPendingPayments[ii].UnmarshalSSZ(buf[2738261:2741077][ii*44 : (ii+1)*44]); err != nil {
if err = b.BuilderPendingPayments[ii].UnmarshalSSZ(buf[2738477:2741293][ii*44 : (ii+1)*44]); err != nil {
return err
}
}
// Offset (42) 'BuilderPendingWithdrawals'
if o42 = ssz.ReadOffset(buf[2741077:2741081]); o42 > size || o38 > o42 {
if o42 = ssz.ReadOffset(buf[2741293:2741297]); o42 > size || o38 > o42 {
return ssz.ErrOffset
}
// Field (43) 'LatestBlockHash'
if cap(b.LatestBlockHash) == 0 {
b.LatestBlockHash = make([]byte, 0, len(buf[2741081:2741113]))
b.LatestBlockHash = make([]byte, 0, len(buf[2741297:2741329]))
}
b.LatestBlockHash = append(b.LatestBlockHash, buf[2741081:2741113]...)
b.LatestBlockHash = append(b.LatestBlockHash, buf[2741297:2741329]...)
// Offset (44) 'PayloadExpectedWithdrawals'
if o44 = ssz.ReadOffset(buf[2741113:2741117]); o44 > size || o42 > o44 {
if o44 = ssz.ReadOffset(buf[2741329:2741333]); o44 > size || o42 > o44 {
return ssz.ErrOffset
}
@@ -2297,7 +2204,7 @@ func (b *BeaconStateGloas) UnmarshalSSZ(buf []byte) error {
// Field (21) 'InactivityScores'
{
buf = tail[o21:o24]
buf = tail[o21:o27]
num, err := ssz.DivideInt2(len(buf), 8, 1099511627776)
if err != nil {
return err
@@ -2308,17 +2215,6 @@ func (b *BeaconStateGloas) UnmarshalSSZ(buf []byte) error {
}
}
// Field (24) 'LatestExecutionPayloadBid'
{
buf = tail[o24:o27]
if b.LatestExecutionPayloadBid == nil {
b.LatestExecutionPayloadBid = new(ExecutionPayloadBid)
}
if err = b.LatestExecutionPayloadBid.UnmarshalSSZ(buf); err != nil {
return err
}
}
// Field (27) 'HistoricalSummaries'
{
buf = tail[o27:o34]
@@ -2449,7 +2345,7 @@ func (b *BeaconStateGloas) UnmarshalSSZ(buf []byte) error {
// SizeSSZ returns the ssz encoded size in bytes for the BeaconStateGloas object
func (b *BeaconStateGloas) SizeSSZ() (size int) {
size = 2741117
size = 2741333
// Field (7) 'HistoricalRoots'
size += len(b.HistoricalRoots) * 32
@@ -2472,12 +2368,6 @@ func (b *BeaconStateGloas) SizeSSZ() (size int) {
// Field (21) 'InactivityScores'
size += len(b.InactivityScores) * 8
// Field (24) 'LatestExecutionPayloadBid'
if b.LatestExecutionPayloadBid == nil {
b.LatestExecutionPayloadBid = new(ExecutionPayloadBid)
}
size += b.LatestExecutionPayloadBid.SizeSSZ()
// Field (27) 'HistoricalSummaries'
size += len(b.HistoricalSummaries) * 64
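
Not part of the generated code: the same mechanism explains the state's new fixed size — the 220-byte ExecutionPayloadBid is inlined and its 4-byte offset removed; the constant name is illustrative.

const beaconStateGloasFixedSize = 2741117 - 4 + 220 // = 2741333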
@@ -3091,7 +2981,7 @@ func (d *DataColumnSidecarGloas) MarshalSSZ() ([]byte, error) {
// MarshalSSZTo ssz marshals the DataColumnSidecarGloas object to a target array
func (d *DataColumnSidecarGloas) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
offset := int(56)
offset := int(60)
// Field (0) 'Index'
dst = ssz.MarshalUint64(dst, d.Index)
@@ -3100,14 +2990,18 @@ func (d *DataColumnSidecarGloas) MarshalSSZTo(buf []byte) (dst []byte, err error
dst = ssz.WriteOffset(dst, offset)
offset += len(d.Column) * 2048
// Offset (2) 'KzgProofs'
// Offset (2) 'KzgCommitments'
dst = ssz.WriteOffset(dst, offset)
offset += len(d.KzgCommitments) * 48
// Offset (3) 'KzgProofs'
dst = ssz.WriteOffset(dst, offset)
offset += len(d.KzgProofs) * 48
// Field (3) 'Slot'
// Field (4) 'Slot'
dst = ssz.MarshalUint64(dst, uint64(d.Slot))
// Field (4) 'BeaconBlockRoot'
// Field (5) 'BeaconBlockRoot'
if size := len(d.BeaconBlockRoot); size != 32 {
err = ssz.ErrBytesLengthFn("--.BeaconBlockRoot", size, 32)
return
@@ -3127,7 +3021,20 @@ func (d *DataColumnSidecarGloas) MarshalSSZTo(buf []byte) (dst []byte, err error
dst = append(dst, d.Column[ii]...)
}
// Field (2) 'KzgProofs'
// Field (2) 'KzgCommitments'
if size := len(d.KzgCommitments); size > 4096 {
err = ssz.ErrListTooBigFn("--.KzgCommitments", size, 4096)
return
}
for ii := 0; ii < len(d.KzgCommitments); ii++ {
if size := len(d.KzgCommitments[ii]); size != 48 {
err = ssz.ErrBytesLengthFn("--.KzgCommitments[ii]", size, 48)
return
}
dst = append(dst, d.KzgCommitments[ii]...)
}
// Field (3) 'KzgProofs'
if size := len(d.KzgProofs); size > 4096 {
err = ssz.ErrListTooBigFn("--.KzgProofs", size, 4096)
return
@@ -3147,12 +3054,12 @@ func (d *DataColumnSidecarGloas) MarshalSSZTo(buf []byte) (dst []byte, err error
func (d *DataColumnSidecarGloas) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size < 56 {
if size < 60 {
return ssz.ErrSize
}
tail := buf
var o1, o2 uint64
var o1, o2, o3 uint64
// Field (0) 'Index'
d.Index = ssz.UnmarshallUint64(buf[0:8])
@@ -3162,23 +3069,28 @@ func (d *DataColumnSidecarGloas) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}
if o1 != 56 {
if o1 != 60 {
return ssz.ErrInvalidVariableOffset
}
// Offset (2) 'KzgProofs'
// Offset (2) 'KzgCommitments'
if o2 = ssz.ReadOffset(buf[12:16]); o2 > size || o1 > o2 {
return ssz.ErrOffset
}
// Field (3) 'Slot'
d.Slot = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Slot(ssz.UnmarshallUint64(buf[16:24]))
// Field (4) 'BeaconBlockRoot'
if cap(d.BeaconBlockRoot) == 0 {
d.BeaconBlockRoot = make([]byte, 0, len(buf[24:56]))
// Offset (3) 'KzgProofs'
if o3 = ssz.ReadOffset(buf[16:20]); o3 > size || o2 > o3 {
return ssz.ErrOffset
}
d.BeaconBlockRoot = append(d.BeaconBlockRoot, buf[24:56]...)
// Field (4) 'Slot'
d.Slot = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Slot(ssz.UnmarshallUint64(buf[20:28]))
// Field (5) 'BeaconBlockRoot'
if cap(d.BeaconBlockRoot) == 0 {
d.BeaconBlockRoot = make([]byte, 0, len(buf[28:60]))
}
d.BeaconBlockRoot = append(d.BeaconBlockRoot, buf[28:60]...)
// Field (1) 'Column'
{
@@ -3196,9 +3108,25 @@ func (d *DataColumnSidecarGloas) UnmarshalSSZ(buf []byte) error {
}
}
// Field (2) 'KzgProofs'
// Field (2) 'KzgCommitments'
{
buf = tail[o2:]
buf = tail[o2:o3]
num, err := ssz.DivideInt2(len(buf), 48, 4096)
if err != nil {
return err
}
d.KzgCommitments = make([][]byte, num)
for ii := 0; ii < num; ii++ {
if cap(d.KzgCommitments[ii]) == 0 {
d.KzgCommitments[ii] = make([]byte, 0, len(buf[ii*48:(ii+1)*48]))
}
d.KzgCommitments[ii] = append(d.KzgCommitments[ii], buf[ii*48:(ii+1)*48]...)
}
}
// Field (3) 'KzgProofs'
{
buf = tail[o3:]
num, err := ssz.DivideInt2(len(buf), 48, 4096)
if err != nil {
return err
@@ -3216,12 +3144,15 @@ func (d *DataColumnSidecarGloas) UnmarshalSSZ(buf []byte) error {
// SizeSSZ returns the ssz encoded size in bytes for the DataColumnSidecarGloas object
func (d *DataColumnSidecarGloas) SizeSSZ() (size int) {
size = 56
size = 60
// Field (1) 'Column'
size += len(d.Column) * 2048
// Field (2) 'KzgProofs'
// Field (2) 'KzgCommitments'
size += len(d.KzgCommitments) * 48
// Field (3) 'KzgProofs'
size += len(d.KzgProofs) * 48
return
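
Not part of the generated code: the sidecar's fixed portion grows only by the 4-byte offset added for the new kzg_commitments list; the constant name is illustrative.

const dataColumnSidecarGloasFixedSize = 56 + 4 // = 60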
@@ -3258,7 +3189,26 @@ func (d *DataColumnSidecarGloas) HashTreeRootWith(hh *ssz.Hasher) (err error) {
hh.MerkleizeWithMixin(subIndx, numItems, 4096)
}
// Field (2) 'KzgProofs'
// Field (2) 'KzgCommitments'
{
if size := len(d.KzgCommitments); size > 4096 {
err = ssz.ErrListTooBigFn("--.KzgCommitments", size, 4096)
return
}
subIndx := hh.Index()
for _, i := range d.KzgCommitments {
if len(i) != 48 {
err = ssz.ErrBytesLength
return
}
hh.PutBytes(i)
}
numItems := uint64(len(d.KzgCommitments))
hh.MerkleizeWithMixin(subIndx, numItems, 4096)
}
// Field (3) 'KzgProofs'
{
if size := len(d.KzgProofs); size > 4096 {
err = ssz.ErrListTooBigFn("--.KzgProofs", size, 4096)
@@ -3277,10 +3227,10 @@ func (d *DataColumnSidecarGloas) HashTreeRootWith(hh *ssz.Hasher) (err error) {
hh.MerkleizeWithMixin(subIndx, numItems, 4096)
}
// Field (3) 'Slot'
// Field (4) 'Slot'
hh.PutUint64(uint64(d.Slot))
// Field (4) 'BeaconBlockRoot'
// Field (5) 'BeaconBlockRoot'
if size := len(d.BeaconBlockRoot); size != 32 {
err = ssz.ErrBytesLengthFn("--.BeaconBlockRoot", size, 32)
return
@@ -3299,7 +3249,7 @@ func (e *ExecutionPayloadEnvelope) MarshalSSZ() ([]byte, error) {
// MarshalSSZTo ssz marshals the ExecutionPayloadEnvelope object to a target array
func (e *ExecutionPayloadEnvelope) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
offset := int(88)
offset := int(92)
// Offset (0) 'Payload'
dst = ssz.WriteOffset(dst, offset)
@@ -3328,7 +3278,11 @@ func (e *ExecutionPayloadEnvelope) MarshalSSZTo(buf []byte) (dst []byte, err err
// Field (4) 'Slot'
dst = ssz.MarshalUint64(dst, uint64(e.Slot))
// Field (5) 'StateRoot'
// Offset (5) 'BlobKzgCommitments'
dst = ssz.WriteOffset(dst, offset)
offset += len(e.BlobKzgCommitments) * 48
// Field (6) 'StateRoot'
if size := len(e.StateRoot); size != 32 {
err = ssz.ErrBytesLengthFn("--.StateRoot", size, 32)
return
@@ -3345,6 +3299,19 @@ func (e *ExecutionPayloadEnvelope) MarshalSSZTo(buf []byte) (dst []byte, err err
return
}
// Field (5) 'BlobKzgCommitments'
if size := len(e.BlobKzgCommitments); size > 4096 {
err = ssz.ErrListTooBigFn("--.BlobKzgCommitments", size, 4096)
return
}
for ii := 0; ii < len(e.BlobKzgCommitments); ii++ {
if size := len(e.BlobKzgCommitments[ii]); size != 48 {
err = ssz.ErrBytesLengthFn("--.BlobKzgCommitments[ii]", size, 48)
return
}
dst = append(dst, e.BlobKzgCommitments[ii]...)
}
return
}
@@ -3352,19 +3319,19 @@ func (e *ExecutionPayloadEnvelope) MarshalSSZTo(buf []byte) (dst []byte, err err
func (e *ExecutionPayloadEnvelope) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size < 88 {
if size < 92 {
return ssz.ErrSize
}
tail := buf
var o0, o1 uint64
var o0, o1, o5 uint64
// Offset (0) 'Payload'
if o0 = ssz.ReadOffset(buf[0:4]); o0 > size {
return ssz.ErrOffset
}
if o0 != 88 {
if o0 != 92 {
return ssz.ErrInvalidVariableOffset
}
@@ -3385,11 +3352,16 @@ func (e *ExecutionPayloadEnvelope) UnmarshalSSZ(buf []byte) error {
// Field (4) 'Slot'
e.Slot = github_com_OffchainLabs_prysm_v7_consensus_types_primitives.Slot(ssz.UnmarshallUint64(buf[48:56]))
// Field (5) 'StateRoot'
if cap(e.StateRoot) == 0 {
e.StateRoot = make([]byte, 0, len(buf[56:88]))
// Offset (5) 'BlobKzgCommitments'
if o5 = ssz.ReadOffset(buf[56:60]); o5 > size || o1 > o5 {
return ssz.ErrOffset
}
e.StateRoot = append(e.StateRoot, buf[56:88]...)
// Field (6) 'StateRoot'
if cap(e.StateRoot) == 0 {
e.StateRoot = make([]byte, 0, len(buf[60:92]))
}
e.StateRoot = append(e.StateRoot, buf[60:92]...)
// Field (0) 'Payload'
{
@@ -3404,7 +3376,7 @@ func (e *ExecutionPayloadEnvelope) UnmarshalSSZ(buf []byte) error {
// Field (1) 'ExecutionRequests'
{
buf = tail[o1:]
buf = tail[o1:o5]
if e.ExecutionRequests == nil {
e.ExecutionRequests = new(v1.ExecutionRequests)
}
@@ -3412,12 +3384,28 @@ func (e *ExecutionPayloadEnvelope) UnmarshalSSZ(buf []byte) error {
return err
}
}
// Field (5) 'BlobKzgCommitments'
{
buf = tail[o5:]
num, err := ssz.DivideInt2(len(buf), 48, 4096)
if err != nil {
return err
}
e.BlobKzgCommitments = make([][]byte, num)
for ii := 0; ii < num; ii++ {
if cap(e.BlobKzgCommitments[ii]) == 0 {
e.BlobKzgCommitments[ii] = make([]byte, 0, len(buf[ii*48:(ii+1)*48]))
}
e.BlobKzgCommitments[ii] = append(e.BlobKzgCommitments[ii], buf[ii*48:(ii+1)*48]...)
}
}
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the ExecutionPayloadEnvelope object
func (e *ExecutionPayloadEnvelope) SizeSSZ() (size int) {
size = 88
size = 92
// Field (0) 'Payload'
if e.Payload == nil {
@@ -3431,6 +3419,9 @@ func (e *ExecutionPayloadEnvelope) SizeSSZ() (size int) {
}
size += e.ExecutionRequests.SizeSSZ()
// Field (5) 'BlobKzgCommitments'
size += len(e.BlobKzgCommitments) * 48
return
}
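
Not part of the generated code: the envelope likewise gains a single 4-byte offset for blob_kzg_commitments; the constant name is illustrative.

const executionPayloadEnvelopeFixedSize = 88 + 4 // = 92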
@@ -3466,7 +3457,26 @@ func (e *ExecutionPayloadEnvelope) HashTreeRootWith(hh *ssz.Hasher) (err error)
// Field (4) 'Slot'
hh.PutUint64(uint64(e.Slot))
// Field (5) 'StateRoot'
// Field (5) 'BlobKzgCommitments'
{
if size := len(e.BlobKzgCommitments); size > 4096 {
err = ssz.ErrListTooBigFn("--.BlobKzgCommitments", size, 4096)
return
}
subIndx := hh.Index()
for _, i := range e.BlobKzgCommitments {
if len(i) != 48 {
err = ssz.ErrBytesLength
return
}
hh.PutBytes(i)
}
numItems := uint64(len(e.BlobKzgCommitments))
hh.MerkleizeWithMixin(subIndx, numItems, 4096)
}
// Field (6) 'StateRoot'
if size := len(e.StateRoot); size != 32 {
err = ssz.ErrBytesLengthFn("--.StateRoot", size, 32)
return

Some files were not shown because too many files have changed in this diff