Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-02-10 04:55:09 -05:00)

Compare commits: GetVersion...gloas/proc (3 commits)

Commits: 8011396a48, d9dae87c74, f75c3082d5
.github/workflows/check-specrefs.yml (vendored): 8 changed lines
@@ -12,11 +12,11 @@ jobs:
      - name: Check version consistency
        run: |
          WORKSPACE_VERSION=$(grep 'consensus_spec_version = ' WORKSPACE | sed 's/.*"\(.*\)"/\1/')
          ETHSPECIFY_VERSION=$(grep '^version:' .ethspecify.yml | sed 's/version: //')
          ETHSPECIFY_VERSION=$(grep '^version:' specrefs/.ethspecify.yml | sed 's/version: //')
          if [ "$WORKSPACE_VERSION" != "$ETHSPECIFY_VERSION" ]; then
            echo "Version mismatch between WORKSPACE and ethspecify"
            echo "  WORKSPACE: $WORKSPACE_VERSION"
            echo "  .ethspecify.yml: $ETHSPECIFY_VERSION"
            echo "  specrefs/.ethspecify.yml: $ETHSPECIFY_VERSION"
            exit 1
          else
            echo "Versions match: $WORKSPACE_VERSION"
@@ -26,7 +26,7 @@ jobs:
        run: python3 -mpip install ethspecify

      - name: Update spec references
        run: ethspecify
        run: ethspecify process --path=specrefs

      - name: Check for differences
        run: |
@@ -40,4 +40,4 @@ jobs:
          fi

      - name: Check spec references
        run: ethspecify check
        run: ethspecify check --path=specrefs
.github/workflows/go.yml (vendored): 2 changed lines
@@ -2,7 +2,7 @@ name: Go

on:
  push:
    branches: [ master, develop ]
    branches: [ master ]
  pull_request:
    branches: [ '*' ]
  merge_group:

@@ -33,8 +33,9 @@ formatters:
  generated: lax
  paths:
    - validator/web/site_data.go
    - .*_test.go
    - proto
    - tools/analyzers
    - third_party$
    - builtin$
    - examples$
    - examples$
WORKSPACE: 10 changed lines
@@ -273,16 +273,16 @@ filegroup(
    url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)

consensus_spec_version = "v1.7.0-alpha.2"
consensus_spec_version = "v1.7.0-alpha.1"

load("@prysm//tools:download_spectests.bzl", "consensus_spec_tests")

consensus_spec_tests(
    name = "consensus_spec_tests",
    flavors = {
        "general": "sha256-iGQsGZ1cHah+2CSod9jC3kN8Ku4n6KO0hIwfINrn/po=",
        "minimal": "sha256-TgcYt8N8sXSttdHTGvOa+exUZ1zn1UzlAMz0V7i37xc=",
        "mainnet": "sha256-LnXyiLoJtrvEvbqLDSAAqpLMdN/lXv92SAgYG8fNjCs=",
        "general": "sha256-j5R3jA7Oo4OSDMTvpMuD+8RomaCByeFSwtfkq6fL0Zg=",
        "minimal": "sha256-tdTqByoyswOS4r6OxFmo70y2BP7w1TgEok+gf4cbxB0=",
        "mainnet": "sha256-5gB4dt6SnSDKzdBc06VedId3NkgvSYyv9n9FRxWKwYI=",
    },
    version = consensus_spec_version,
)
@@ -298,7 +298,7 @@ filegroup(
    visibility = ["//visibility:public"],
)
""",
    integrity = "sha256-Y/67Dg393PksZj5rTFNLntiJ6hNdB7Rxbu5gZE2gebY=",
    integrity = "sha256-J+43DrK1pF658kTXTwMS6zGf4KDjvas++m8w2a8swpg=",
    strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
    url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)
@@ -1,19 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = [
        "fallback.go",
        "log.go",
    ],
    importpath = "github.com/OffchainLabs/prysm/v7/api/fallback",
    visibility = ["//visibility:public"],
    deps = ["@com_github_sirupsen_logrus//:go_default_library"],
)

go_test(
    name = "go_default_test",
    srcs = ["fallback_test.go"],
    embed = [":go_default_library"],
    deps = ["//testing/assert:go_default_library"],
)
@@ -1,66 +0,0 @@
package fallback

import (
	"context"

	"github.com/sirupsen/logrus"
)

// HostProvider is the subset of connection-provider methods that EnsureReady
// needs. Both grpc.GrpcConnectionProvider and rest.RestConnectionProvider
// satisfy this interface.
type HostProvider interface {
	Hosts() []string
	CurrentHost() string
	SwitchHost(index int) error
}

// ReadyChecker can report whether the current endpoint is ready.
// iface.NodeClient satisfies this implicitly.
type ReadyChecker interface {
	IsReady(ctx context.Context) bool
}

// EnsureReady iterates through the configured hosts and returns true as soon as
// one responds as ready. It starts from the provider's current host and wraps
// around using modular arithmetic, performing failover when a host is not ready.
func EnsureReady(ctx context.Context, provider HostProvider, checker ReadyChecker) bool {
	hosts := provider.Hosts()
	numHosts := len(hosts)
	startingHost := provider.CurrentHost()
	var attemptedHosts []string

	// Find current index
	currentIdx := 0
	for i, h := range hosts {
		if h == startingHost {
			currentIdx = i
			break
		}
	}

	for i := range numHosts {
		if checker.IsReady(ctx) {
			if len(attemptedHosts) > 0 {
				log.WithFields(logrus.Fields{
					"previous": startingHost,
					"current":  provider.CurrentHost(),
					"tried":    attemptedHosts,
				}).Info("Switched to responsive beacon node")
			}
			return true
		}
		attemptedHosts = append(attemptedHosts, provider.CurrentHost())

		// Try next host if not the last iteration
		if i < numHosts-1 {
			nextIdx := (currentIdx + i + 1) % numHosts
			if err := provider.SwitchHost(nextIdx); err != nil {
				log.WithError(err).Error("Failed to switch host")
			}
		}
	}

	log.WithField("tried", attemptedHosts).Warn("No responsive beacon node found")
	return false
}
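The wrap-around behaviour described in the EnsureReady comment above (and exercised by the wrap-around test in fallback_test.go below) can be hard to picture from the index arithmetic alone. A minimal standalone sketch, not part of the repository, of the probe order it produces, assuming three hosts and a starting index of 1:

```go
package main

import "fmt"

func main() {
	hosts := []string{"http://host0:3500", "http://host1:3500", "http://host2:3500"}
	currentIdx := 1 // start on host1, as in TestEnsureReady_WrapAroundFromNonZeroIndex

	// EnsureReady probes the current host first and, after each failed probe,
	// switches to (currentIdx + i + 1) % numHosts, so the visit order here is
	// host1, host2, host0.
	for i := 0; i < len(hosts); i++ {
		fmt.Println(hosts[(currentIdx+i)%len(hosts)])
	}
}
```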
@@ -1,94 +0,0 @@
|
||||
package fallback
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/testing/assert"
|
||||
)
|
||||
|
||||
// mockHostProvider is a minimal HostProvider for unit tests.
|
||||
type mockHostProvider struct {
|
||||
hosts []string
|
||||
hostIndex int
|
||||
}
|
||||
|
||||
func (m *mockHostProvider) Hosts() []string { return m.hosts }
|
||||
func (m *mockHostProvider) CurrentHost() string {
|
||||
return m.hosts[m.hostIndex%len(m.hosts)]
|
||||
}
|
||||
func (m *mockHostProvider) SwitchHost(index int) error { m.hostIndex = index; return nil }
|
||||
|
||||
// mockReadyChecker records per-call IsReady results in sequence.
|
||||
type mockReadyChecker struct {
|
||||
results []bool
|
||||
idx int
|
||||
}
|
||||
|
||||
func (m *mockReadyChecker) IsReady(_ context.Context) bool {
|
||||
if m.idx >= len(m.results) {
|
||||
return false
|
||||
}
|
||||
r := m.results[m.idx]
|
||||
m.idx++
|
||||
return r
|
||||
}
|
||||
|
||||
func TestEnsureReady_SingleHostReady(t *testing.T) {
|
||||
provider := &mockHostProvider{hosts: []string{"http://host1:3500"}, hostIndex: 0}
|
||||
checker := &mockReadyChecker{results: []bool{true}}
|
||||
assert.Equal(t, true, EnsureReady(t.Context(), provider, checker))
|
||||
assert.Equal(t, 0, provider.hostIndex)
|
||||
}
|
||||
|
||||
func TestEnsureReady_SingleHostNotReady(t *testing.T) {
|
||||
provider := &mockHostProvider{hosts: []string{"http://host1:3500"}, hostIndex: 0}
|
||||
checker := &mockReadyChecker{results: []bool{false}}
|
||||
assert.Equal(t, false, EnsureReady(t.Context(), provider, checker))
|
||||
}
|
||||
|
||||
func TestEnsureReady_SingleHostError(t *testing.T) {
|
||||
provider := &mockHostProvider{hosts: []string{"http://host1:3500"}, hostIndex: 0}
|
||||
checker := &mockReadyChecker{results: []bool{false}}
|
||||
assert.Equal(t, false, EnsureReady(t.Context(), provider, checker))
|
||||
}
|
||||
|
||||
func TestEnsureReady_MultipleHostsFirstReady(t *testing.T) {
|
||||
provider := &mockHostProvider{
|
||||
hosts: []string{"http://host1:3500", "http://host2:3500"},
|
||||
hostIndex: 0,
|
||||
}
|
||||
checker := &mockReadyChecker{results: []bool{true}}
|
||||
assert.Equal(t, true, EnsureReady(t.Context(), provider, checker))
|
||||
assert.Equal(t, 0, provider.hostIndex)
|
||||
}
|
||||
|
||||
func TestEnsureReady_MultipleHostsFailoverToSecond(t *testing.T) {
|
||||
provider := &mockHostProvider{
|
||||
hosts: []string{"http://host1:3500", "http://host2:3500"},
|
||||
hostIndex: 0,
|
||||
}
|
||||
checker := &mockReadyChecker{results: []bool{false, true}}
|
||||
assert.Equal(t, true, EnsureReady(t.Context(), provider, checker))
|
||||
assert.Equal(t, 1, provider.hostIndex)
|
||||
}
|
||||
|
||||
func TestEnsureReady_MultipleHostsNoneReady(t *testing.T) {
|
||||
provider := &mockHostProvider{
|
||||
hosts: []string{"http://host1:3500", "http://host2:3500", "http://host3:3500"},
|
||||
hostIndex: 0,
|
||||
}
|
||||
checker := &mockReadyChecker{results: []bool{false, false, false}}
|
||||
assert.Equal(t, false, EnsureReady(t.Context(), provider, checker))
|
||||
}
|
||||
|
||||
func TestEnsureReady_WrapAroundFromNonZeroIndex(t *testing.T) {
|
||||
provider := &mockHostProvider{
|
||||
hosts: []string{"http://host0:3500", "http://host1:3500", "http://host2:3500"},
|
||||
hostIndex: 1,
|
||||
}
|
||||
// host1 (start) fails, host2 fails, host0 succeeds
|
||||
checker := &mockReadyChecker{results: []bool{false, false, true}}
|
||||
assert.Equal(t, true, EnsureReady(t.Context(), provider, checker))
|
||||
assert.Equal(t, 0, provider.hostIndex)
|
||||
}
|
||||
@@ -1,9 +0,0 @@
|
||||
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
|
||||
// This file is created and regenerated automatically. Anything added here might get removed.
|
||||
package fallback
|
||||
|
||||
import "github.com/sirupsen/logrus"
|
||||
|
||||
// The prefix for logs from this package will be the text after the last slash in the package path.
|
||||
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
|
||||
var log = logrus.WithField("package", "api/fallback")
|
||||
@@ -3,16 +3,13 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"grpc_connection_provider.go",
|
||||
"grpcutils.go",
|
||||
"log.go",
|
||||
"mock_grpc_provider.go",
|
||||
"parameters.go",
|
||||
],
|
||||
importpath = "github.com/OffchainLabs/prysm/v7/api/grpc",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@org_golang_google_grpc//:go_default_library",
|
||||
"@org_golang_google_grpc//metadata:go_default_library",
|
||||
@@ -21,17 +18,12 @@ go_library(
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"grpc_connection_provider_test.go",
|
||||
"grpcutils_test.go",
|
||||
],
|
||||
srcs = ["grpcutils_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
"@org_golang_google_grpc//:go_default_library",
|
||||
"@org_golang_google_grpc//credentials/insecure:go_default_library",
|
||||
"@org_golang_google_grpc//metadata:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -1,186 +0,0 @@
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
// GrpcConnectionProvider manages gRPC connections for failover support.
|
||||
// It allows switching between different beacon node endpoints when the current one becomes unavailable.
|
||||
// Only one connection is maintained at a time - when switching hosts, the old connection is closed.
|
||||
type GrpcConnectionProvider interface {
|
||||
// CurrentConn returns the currently active gRPC connection.
|
||||
// The connection is created lazily on first call.
|
||||
// Returns nil if the provider has been closed.
|
||||
CurrentConn() *grpc.ClientConn
|
||||
// CurrentHost returns the address of the currently active endpoint.
|
||||
CurrentHost() string
|
||||
// Hosts returns all configured endpoint addresses.
|
||||
Hosts() []string
|
||||
// SwitchHost switches to the endpoint at the given index.
|
||||
// The new connection is created lazily on next CurrentConn() call.
|
||||
SwitchHost(index int) error
|
||||
// ConnectionCounter returns a monotonically increasing counter that increments
|
||||
// each time SwitchHost changes the active endpoint. This allows consumers to
|
||||
// detect connection changes even when the host string returns to a previous value
|
||||
// (e.g., host0 → host1 → host0).
|
||||
ConnectionCounter() uint64
|
||||
// Close closes the current connection.
|
||||
Close()
|
||||
}
|
||||
|
||||
type grpcConnectionProvider struct {
|
||||
// Immutable after construction - no lock needed for reads
|
||||
endpoints []string
|
||||
ctx context.Context
|
||||
dialOpts []grpc.DialOption
|
||||
|
||||
// Current connection state (protected by mutex)
|
||||
currentIndex uint64
|
||||
conn *grpc.ClientConn
|
||||
connCounter uint64
|
||||
|
||||
mu sync.Mutex
|
||||
closed bool
|
||||
}
|
||||
|
||||
// NewGrpcConnectionProvider creates a new connection provider that manages gRPC connections.
|
||||
// The endpoint parameter can be a comma-separated list of addresses (e.g., "host1:4000,host2:4000").
|
||||
// Only one connection is maintained at a time, created lazily on first use.
|
||||
func NewGrpcConnectionProvider(
|
||||
ctx context.Context,
|
||||
endpoint string,
|
||||
dialOpts []grpc.DialOption,
|
||||
) (GrpcConnectionProvider, error) {
|
||||
endpoints := parseEndpoints(endpoint)
|
||||
if len(endpoints) == 0 {
|
||||
return nil, errors.New("no gRPC endpoints provided")
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"endpoints": endpoints,
|
||||
"count": len(endpoints),
|
||||
}).Info("Initialized gRPC connection provider")
|
||||
|
||||
return &grpcConnectionProvider{
|
||||
endpoints: endpoints,
|
||||
ctx: ctx,
|
||||
dialOpts: dialOpts,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// parseEndpoints splits a comma-separated endpoint string into individual endpoints.
|
||||
func parseEndpoints(endpoint string) []string {
|
||||
if endpoint == "" {
|
||||
return nil
|
||||
}
|
||||
endpoints := make([]string, 0, 1)
|
||||
for p := range strings.SplitSeq(endpoint, ",") {
|
||||
if p = strings.TrimSpace(p); p != "" {
|
||||
endpoints = append(endpoints, p)
|
||||
}
|
||||
}
|
||||
return endpoints
|
||||
}
|
||||
|
||||
func (p *grpcConnectionProvider) CurrentConn() *grpc.ClientConn {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
|
||||
if p.closed {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Return existing connection if available
|
||||
if p.conn != nil {
|
||||
return p.conn
|
||||
}
|
||||
|
||||
// Create connection lazily
|
||||
ep := p.endpoints[p.currentIndex]
|
||||
conn, err := grpc.DialContext(p.ctx, ep, p.dialOpts...)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("endpoint", ep).Error("Failed to create gRPC connection")
|
||||
return nil
|
||||
}
|
||||
|
||||
p.conn = conn
|
||||
log.WithField("endpoint", ep).Debug("Created gRPC connection")
|
||||
return conn
|
||||
}
|
||||
|
||||
func (p *grpcConnectionProvider) CurrentHost() string {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
return p.endpoints[p.currentIndex]
|
||||
}
|
||||
|
||||
func (p *grpcConnectionProvider) Hosts() []string {
|
||||
// Return a copy to maintain immutability
|
||||
hosts := make([]string, len(p.endpoints))
|
||||
copy(hosts, p.endpoints)
|
||||
return hosts
|
||||
}
|
||||
|
||||
func (p *grpcConnectionProvider) SwitchHost(index int) error {
|
||||
if index < 0 || index >= len(p.endpoints) {
|
||||
return errors.Errorf("invalid host index %d, must be between 0 and %d", index, len(p.endpoints)-1)
|
||||
}
|
||||
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
|
||||
if uint64(index) == p.currentIndex {
|
||||
return nil // Already on this host
|
||||
}
|
||||
|
||||
oldHost := p.endpoints[p.currentIndex]
|
||||
oldConn := p.conn
|
||||
|
||||
p.conn = nil // Clear immediately - new connection created lazily
|
||||
p.currentIndex = uint64(index)
|
||||
p.connCounter++
|
||||
|
||||
// Close old connection asynchronously to avoid blocking the caller
|
||||
if oldConn != nil {
|
||||
go func() {
|
||||
if err := oldConn.Close(); err != nil {
|
||||
log.WithError(err).WithField("endpoint", oldHost).Debug("Failed to close previous connection")
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"previousHost": oldHost,
|
||||
"newHost": p.endpoints[index],
|
||||
}).Debug("Switched gRPC endpoint")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *grpcConnectionProvider) ConnectionCounter() uint64 {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
return p.connCounter
|
||||
}
|
||||
|
||||
func (p *grpcConnectionProvider) Close() {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
|
||||
if p.closed {
|
||||
return
|
||||
}
|
||||
p.closed = true
|
||||
|
||||
if p.conn != nil {
|
||||
if err := p.conn.Close(); err != nil {
|
||||
log.WithError(err).WithField("endpoint", p.endpoints[p.currentIndex]).Debug("Failed to close gRPC connection")
|
||||
}
|
||||
p.conn = nil
|
||||
}
|
||||
}
|
||||
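For context, the provider deleted above was constructed from a single comma-separated endpoint string and dialed connections lazily. A short usage sketch of that (removed) API, with placeholder endpoints and an assumed import alias, not a snippet from the repository:

```go
package main

import (
	"context"
	"log"

	apigrpc "github.com/OffchainLabs/prysm/v7/api/grpc" // package removed by this change
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	dialOpts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}

	// Endpoints are passed as one comma-separated string and split by parseEndpoints.
	provider, err := apigrpc.NewGrpcConnectionProvider(context.Background(), "host1:4000,host2:4000", dialOpts)
	if err != nil {
		log.Fatal(err)
	}
	defer provider.Close()

	_ = provider.CurrentConn() // the first connection is dialed lazily here

	// Fail over to the second endpoint; the old connection is closed asynchronously
	// and ConnectionCounter increments so callers can detect the switch.
	if err := provider.SwitchHost(1); err != nil {
		log.Fatal(err)
	}
	log.Printf("now on %s (switches: %d)", provider.CurrentHost(), provider.ConnectionCounter())
}
```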
@@ -1,207 +0,0 @@
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
)
|
||||
|
||||
func TestParseEndpoints(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
expected []string
|
||||
}{
|
||||
{"single endpoint", "localhost:4000", []string{"localhost:4000"}},
|
||||
{"multiple endpoints", "host1:4000,host2:4000,host3:4000", []string{"host1:4000", "host2:4000", "host3:4000"}},
|
||||
{"endpoints with spaces", "host1:4000, host2:4000 , host3:4000", []string{"host1:4000", "host2:4000", "host3:4000"}},
|
||||
{"empty string", "", nil},
|
||||
{"only commas", ",,,", []string{}},
|
||||
{"trailing comma", "host1:4000,host2:4000,", []string{"host1:4000", "host2:4000"}},
|
||||
{"leading comma", ",host1:4000,host2:4000", []string{"host1:4000", "host2:4000"}},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := parseEndpoints(tt.input)
|
||||
if !reflect.DeepEqual(tt.expected, got) {
|
||||
t.Errorf("parseEndpoints(%q) = %v, want %v", tt.input, got, tt.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewGrpcConnectionProvider_Errors(t *testing.T) {
|
||||
t.Run("no endpoints", func(t *testing.T) {
|
||||
dialOpts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
|
||||
_, err := NewGrpcConnectionProvider(context.Background(), "", dialOpts)
|
||||
require.ErrorContains(t, "no gRPC endpoints provided", err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestGrpcConnectionProvider_LazyConnection(t *testing.T) {
|
||||
// Start only one server but configure provider with two endpoints
|
||||
lis, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
require.NoError(t, err)
|
||||
server := grpc.NewServer()
|
||||
go func() { _ = server.Serve(lis) }()
|
||||
defer server.Stop()
|
||||
|
||||
validAddr := lis.Addr().String()
|
||||
invalidAddr := "127.0.0.1:1" // Port 1 is unlikely to be listening
|
||||
|
||||
// Provider should succeed even though second endpoint is invalid (lazy connections)
|
||||
endpoint := validAddr + "," + invalidAddr
|
||||
ctx := context.Background()
|
||||
dialOpts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
|
||||
provider, err := NewGrpcConnectionProvider(ctx, endpoint, dialOpts)
|
||||
require.NoError(t, err, "Provider creation should succeed with lazy connections")
|
||||
defer func() { provider.Close() }()
|
||||
|
||||
// First endpoint should work
|
||||
conn := provider.CurrentConn()
|
||||
assert.NotNil(t, conn, "First connection should be created lazily")
|
||||
}
|
||||
|
||||
func TestGrpcConnectionProvider_SingleConnectionModel(t *testing.T) {
|
||||
// Create provider with 3 endpoints
|
||||
var addrs []string
|
||||
var servers []*grpc.Server
|
||||
|
||||
for range 3 {
|
||||
lis, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
require.NoError(t, err)
|
||||
server := grpc.NewServer()
|
||||
go func() { _ = server.Serve(lis) }()
|
||||
addrs = append(addrs, lis.Addr().String())
|
||||
servers = append(servers, server)
|
||||
}
|
||||
defer func() {
|
||||
for _, s := range servers {
|
||||
s.Stop()
|
||||
}
|
||||
}()
|
||||
|
||||
endpoint := strings.Join(addrs, ",")
|
||||
ctx := context.Background()
|
||||
dialOpts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
|
||||
provider, err := NewGrpcConnectionProvider(ctx, endpoint, dialOpts)
|
||||
require.NoError(t, err)
|
||||
defer func() { provider.Close() }()
|
||||
|
||||
// Access the internal state to verify single connection behavior
|
||||
p := provider.(*grpcConnectionProvider)
|
||||
|
||||
// Initially no connection
|
||||
p.mu.Lock()
|
||||
assert.Equal(t, (*grpc.ClientConn)(nil), p.conn, "Connection should be nil before access")
|
||||
p.mu.Unlock()
|
||||
|
||||
// Access connection - should create one
|
||||
conn0 := provider.CurrentConn()
|
||||
assert.NotNil(t, conn0)
|
||||
|
||||
p.mu.Lock()
|
||||
assert.NotNil(t, p.conn, "Connection should be created after CurrentConn()")
|
||||
firstConn := p.conn
|
||||
p.mu.Unlock()
|
||||
|
||||
// Call CurrentConn again - should return same connection
|
||||
conn0Again := provider.CurrentConn()
|
||||
assert.Equal(t, conn0, conn0Again, "Should return same connection")
|
||||
|
||||
// Switch to different host - old connection should be closed, new one created lazily
|
||||
require.NoError(t, provider.SwitchHost(1))
|
||||
|
||||
p.mu.Lock()
|
||||
assert.Equal(t, (*grpc.ClientConn)(nil), p.conn, "Connection should be nil after SwitchHost (lazy)")
|
||||
p.mu.Unlock()
|
||||
|
||||
// Get new connection
|
||||
conn1 := provider.CurrentConn()
|
||||
assert.NotNil(t, conn1)
|
||||
assert.NotEqual(t, firstConn, conn1, "Should be a different connection after switching hosts")
|
||||
}
|
||||
|
||||
// testProvider creates a provider with n test servers and returns cleanup function.
|
||||
func testProvider(t *testing.T, n int) (GrpcConnectionProvider, []string, func()) {
|
||||
var addrs []string
|
||||
var cleanups []func()
|
||||
|
||||
for range n {
|
||||
lis, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
require.NoError(t, err)
|
||||
server := grpc.NewServer()
|
||||
go func() { _ = server.Serve(lis) }()
|
||||
addrs = append(addrs, lis.Addr().String())
|
||||
cleanups = append(cleanups, server.Stop)
|
||||
}
|
||||
|
||||
endpoint := strings.Join(addrs, ",")
|
||||
|
||||
ctx := context.Background()
|
||||
dialOpts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
|
||||
provider, err := NewGrpcConnectionProvider(ctx, endpoint, dialOpts)
|
||||
require.NoError(t, err)
|
||||
|
||||
cleanup := func() {
|
||||
provider.Close()
|
||||
for _, c := range cleanups {
|
||||
c()
|
||||
}
|
||||
}
|
||||
return provider, addrs, cleanup
|
||||
}
|
||||
|
||||
func TestGrpcConnectionProvider(t *testing.T) {
|
||||
provider, addrs, cleanup := testProvider(t, 3)
|
||||
defer cleanup()
|
||||
|
||||
t.Run("initial state", func(t *testing.T) {
|
||||
assert.Equal(t, 3, len(provider.Hosts()))
|
||||
assert.Equal(t, addrs[0], provider.CurrentHost())
|
||||
assert.NotNil(t, provider.CurrentConn())
|
||||
})
|
||||
|
||||
t.Run("SwitchHost", func(t *testing.T) {
|
||||
require.NoError(t, provider.SwitchHost(1))
|
||||
assert.Equal(t, addrs[1], provider.CurrentHost())
|
||||
assert.NotNil(t, provider.CurrentConn()) // New connection created lazily
|
||||
require.NoError(t, provider.SwitchHost(0))
|
||||
assert.Equal(t, addrs[0], provider.CurrentHost())
|
||||
require.ErrorContains(t, "invalid host index", provider.SwitchHost(-1))
|
||||
require.ErrorContains(t, "invalid host index", provider.SwitchHost(3))
|
||||
})
|
||||
|
||||
t.Run("SwitchHost circular", func(t *testing.T) {
|
||||
// Test round-robin style switching using SwitchHost with manual index
|
||||
indices := []int{1, 2, 0, 1} // Simulate circular switching
|
||||
for i, idx := range indices {
|
||||
require.NoError(t, provider.SwitchHost(idx))
|
||||
assert.Equal(t, addrs[idx], provider.CurrentHost(), "iteration %d", i)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("Hosts returns copy", func(t *testing.T) {
|
||||
hosts := provider.Hosts()
|
||||
original := hosts[0]
|
||||
hosts[0] = "modified"
|
||||
assert.Equal(t, original, provider.Hosts()[0])
|
||||
})
|
||||
}
|
||||
|
||||
func TestGrpcConnectionProvider_Close(t *testing.T) {
|
||||
provider, _, cleanup := testProvider(t, 1)
|
||||
defer cleanup()
|
||||
|
||||
assert.NotNil(t, provider.CurrentConn())
|
||||
provider.Close()
|
||||
assert.Equal(t, (*grpc.ClientConn)(nil), provider.CurrentConn())
|
||||
provider.Close() // Double close is safe
|
||||
}
|
||||
@@ -1,27 +0,0 @@
|
||||
package grpc
|
||||
|
||||
import "google.golang.org/grpc"
|
||||
|
||||
// MockGrpcProvider implements GrpcConnectionProvider for testing.
|
||||
type MockGrpcProvider struct {
|
||||
MockConn *grpc.ClientConn
|
||||
MockHosts []string
|
||||
CurrentIndex int
|
||||
ConnCounter uint64
|
||||
}
|
||||
|
||||
func (m *MockGrpcProvider) CurrentConn() *grpc.ClientConn { return m.MockConn }
|
||||
func (m *MockGrpcProvider) CurrentHost() string {
|
||||
if len(m.MockHosts) > 0 {
|
||||
return m.MockHosts[m.CurrentIndex]
|
||||
}
|
||||
return ""
|
||||
}
|
||||
func (m *MockGrpcProvider) Hosts() []string { return m.MockHosts }
|
||||
func (m *MockGrpcProvider) SwitchHost(idx int) error {
|
||||
m.CurrentIndex = idx
|
||||
m.ConnCounter++
|
||||
return nil
|
||||
}
|
||||
func (m *MockGrpcProvider) ConnectionCounter() uint64 { return m.ConnCounter }
|
||||
func (m *MockGrpcProvider) Close() {}
|
||||
@@ -1,34 +0,0 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"log.go",
|
||||
"mock_rest_provider.go",
|
||||
"rest_connection_provider.go",
|
||||
"rest_handler.go",
|
||||
],
|
||||
importpath = "github.com/OffchainLabs/prysm/v7/api/rest",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//api:go_default_library",
|
||||
"//api/apiutil:go_default_library",
|
||||
"//api/client:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//network/httputil:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@io_opentelemetry_go_contrib_instrumentation_net_http_otelhttp//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["rest_connection_provider_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -1,9 +0,0 @@
|
||||
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
|
||||
// This file is created and regenerated automatically. Anything added here might get removed.
|
||||
package rest
|
||||
|
||||
import "github.com/sirupsen/logrus"
|
||||
|
||||
// The prefix for logs from this package will be the text after the last slash in the package path.
|
||||
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
|
||||
var log = logrus.WithField("package", "api/rest")
|
||||
@@ -1,46 +0,0 @@
|
||||
package rest
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// MockRestProvider implements RestConnectionProvider for testing.
|
||||
type MockRestProvider struct {
|
||||
MockClient *http.Client
|
||||
MockHandler Handler
|
||||
MockHosts []string
|
||||
HostIndex int
|
||||
}
|
||||
|
||||
func (m *MockRestProvider) HttpClient() *http.Client { return m.MockClient }
|
||||
func (m *MockRestProvider) Handler() Handler { return m.MockHandler }
|
||||
func (m *MockRestProvider) CurrentHost() string {
|
||||
if len(m.MockHosts) > 0 {
|
||||
return m.MockHosts[m.HostIndex%len(m.MockHosts)]
|
||||
}
|
||||
return ""
|
||||
}
|
||||
func (m *MockRestProvider) Hosts() []string { return m.MockHosts }
|
||||
func (m *MockRestProvider) SwitchHost(index int) error { m.HostIndex = index; return nil }
|
||||
|
||||
// MockHandler implements Handler for testing.
|
||||
type MockHandler struct {
|
||||
MockHost string
|
||||
}
|
||||
|
||||
func (m *MockHandler) Get(_ context.Context, _ string, _ any) error { return nil }
|
||||
func (m *MockHandler) GetStatusCode(_ context.Context, _ string) (int, error) {
|
||||
return http.StatusOK, nil
|
||||
}
|
||||
func (m *MockHandler) GetSSZ(_ context.Context, _ string) ([]byte, http.Header, error) {
|
||||
return nil, nil, nil
|
||||
}
|
||||
func (m *MockHandler) Post(_ context.Context, _ string, _ map[string]string, _ *bytes.Buffer, _ any) error {
|
||||
return nil
|
||||
}
|
||||
func (m *MockHandler) PostSSZ(_ context.Context, _ string, _ map[string]string, _ *bytes.Buffer) ([]byte, http.Header, error) {
|
||||
return nil, nil, nil
|
||||
}
|
||||
func (m *MockHandler) Host() string { return m.MockHost }
|
||||
@@ -1,158 +0,0 @@
|
||||
package rest
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/api/client"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
|
||||
)
|
||||
|
||||
// RestConnectionProvider manages HTTP client configuration for REST API with failover support.
|
||||
// It allows switching between different beacon node REST endpoints when the current one becomes unavailable.
|
||||
type RestConnectionProvider interface {
|
||||
// HttpClient returns the configured HTTP client with headers, timeout, and optional tracing.
|
||||
HttpClient() *http.Client
|
||||
// Handler returns the REST handler for making API requests.
|
||||
Handler() Handler
|
||||
// CurrentHost returns the current REST API endpoint URL.
|
||||
CurrentHost() string
|
||||
// Hosts returns all configured REST API endpoint URLs.
|
||||
Hosts() []string
|
||||
// SwitchHost switches to the endpoint at the given index.
|
||||
SwitchHost(index int) error
|
||||
}
|
||||
|
||||
// RestConnectionProviderOption is a functional option for configuring the REST connection provider.
|
||||
type RestConnectionProviderOption func(*restConnectionProvider)
|
||||
|
||||
// WithHttpTimeout sets the HTTP client timeout.
|
||||
func WithHttpTimeout(timeout time.Duration) RestConnectionProviderOption {
|
||||
return func(p *restConnectionProvider) {
|
||||
p.timeout = timeout
|
||||
}
|
||||
}
|
||||
|
||||
// WithHttpHeaders sets custom HTTP headers to include in all requests.
|
||||
func WithHttpHeaders(headers map[string][]string) RestConnectionProviderOption {
|
||||
return func(p *restConnectionProvider) {
|
||||
p.headers = headers
|
||||
}
|
||||
}
|
||||
|
||||
// WithTracing enables OpenTelemetry tracing for HTTP requests.
|
||||
func WithTracing() RestConnectionProviderOption {
|
||||
return func(p *restConnectionProvider) {
|
||||
p.enableTracing = true
|
||||
}
|
||||
}
|
||||
|
||||
type restConnectionProvider struct {
|
||||
endpoints []string
|
||||
httpClient *http.Client
|
||||
restHandler *handler
|
||||
currentIndex atomic.Uint64
|
||||
timeout time.Duration
|
||||
headers map[string][]string
|
||||
enableTracing bool
|
||||
}
|
||||
|
||||
// NewRestConnectionProvider creates a new REST connection provider that manages HTTP client configuration.
|
||||
// The endpoint parameter can be a comma-separated list of URLs (e.g., "http://host1:3500,http://host2:3500").
|
||||
func NewRestConnectionProvider(endpoint string, opts ...RestConnectionProviderOption) (RestConnectionProvider, error) {
|
||||
endpoints := parseEndpoints(endpoint)
|
||||
if len(endpoints) == 0 {
|
||||
return nil, errors.New("no REST API endpoints provided")
|
||||
}
|
||||
|
||||
p := &restConnectionProvider{
|
||||
endpoints: endpoints,
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
opt(p)
|
||||
}
|
||||
|
||||
// Build the HTTP transport chain
|
||||
var transport http.RoundTripper = http.DefaultTransport
|
||||
|
||||
// Add custom headers if configured
|
||||
if len(p.headers) > 0 {
|
||||
transport = client.NewCustomHeadersTransport(transport, p.headers)
|
||||
}
|
||||
|
||||
// Add tracing if enabled
|
||||
if p.enableTracing {
|
||||
transport = otelhttp.NewTransport(transport)
|
||||
}
|
||||
|
||||
p.httpClient = &http.Client{
|
||||
Timeout: p.timeout,
|
||||
Transport: transport,
|
||||
}
|
||||
|
||||
// Create the REST handler with the HTTP client and initial host
|
||||
p.restHandler = newHandler(*p.httpClient, endpoints[0])
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"endpoints": endpoints,
|
||||
"count": len(endpoints),
|
||||
}).Info("Initialized REST connection provider")
|
||||
|
||||
return p, nil
|
||||
}
|
||||
|
||||
// parseEndpoints splits a comma-separated endpoint string into individual endpoints.
|
||||
func parseEndpoints(endpoint string) []string {
|
||||
if endpoint == "" {
|
||||
return nil
|
||||
}
|
||||
endpoints := make([]string, 0, 1)
|
||||
for p := range strings.SplitSeq(endpoint, ",") {
|
||||
if p = strings.TrimSpace(p); p != "" {
|
||||
endpoints = append(endpoints, p)
|
||||
}
|
||||
}
|
||||
return endpoints
|
||||
}
|
||||
|
||||
func (p *restConnectionProvider) HttpClient() *http.Client {
|
||||
return p.httpClient
|
||||
}
|
||||
|
||||
func (p *restConnectionProvider) Handler() Handler {
|
||||
return p.restHandler
|
||||
}
|
||||
|
||||
func (p *restConnectionProvider) CurrentHost() string {
|
||||
return p.endpoints[p.currentIndex.Load()]
|
||||
}
|
||||
|
||||
func (p *restConnectionProvider) Hosts() []string {
|
||||
// Return a copy to maintain immutability
|
||||
hosts := make([]string, len(p.endpoints))
|
||||
copy(hosts, p.endpoints)
|
||||
return hosts
|
||||
}
|
||||
|
||||
func (p *restConnectionProvider) SwitchHost(index int) error {
|
||||
if index < 0 || index >= len(p.endpoints) {
|
||||
return errors.Errorf("invalid host index %d, must be between 0 and %d", index, len(p.endpoints)-1)
|
||||
}
|
||||
|
||||
oldIdx := p.currentIndex.Load()
|
||||
p.currentIndex.Store(uint64(index))
|
||||
|
||||
// Update the rest handler's host
|
||||
p.restHandler.SwitchHost(p.endpoints[index])
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"previousHost": p.endpoints[oldIdx],
|
||||
"newHost": p.endpoints[index],
|
||||
}).Debug("Switched REST endpoint")
|
||||
return nil
|
||||
}
|
||||
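Similarly, the REST-side provider deleted above was configured through functional options. A brief sketch of that (removed) API with illustrative endpoint and header values, not code from the repository:

```go
package main

import (
	"log"
	"time"

	"github.com/OffchainLabs/prysm/v7/api/rest" // package removed by this change
)

func main() {
	provider, err := rest.NewRestConnectionProvider(
		"http://host1:3500,http://host2:3500", // comma-separated list, split by parseEndpoints
		rest.WithHttpTimeout(30*time.Second),
		rest.WithHttpHeaders(map[string][]string{"Authorization": {"Bearer token"}}),
		rest.WithTracing(),
	)
	if err != nil {
		log.Fatal(err)
	}

	_ = provider.HttpClient() // client carries the custom-header and tracing transports
	_ = provider.Handler()    // REST handler bound to the current host

	// Fail over to the second endpoint; the handler's host is updated in place.
	if err := provider.SwitchHost(1); err != nil {
		log.Fatal(err)
	}
	log.Println("current host:", provider.CurrentHost())
}
```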
@@ -1,80 +0,0 @@
|
||||
package rest
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
)
|
||||
|
||||
func TestParseEndpoints(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
expected []string
|
||||
}{
|
||||
{"single endpoint", "http://localhost:3500", []string{"http://localhost:3500"}},
|
||||
{"multiple endpoints", "http://host1:3500,http://host2:3500,http://host3:3500", []string{"http://host1:3500", "http://host2:3500", "http://host3:3500"}},
|
||||
{"endpoints with spaces", "http://host1:3500, http://host2:3500 , http://host3:3500", []string{"http://host1:3500", "http://host2:3500", "http://host3:3500"}},
|
||||
{"empty string", "", nil},
|
||||
{"only commas", ",,,", []string{}},
|
||||
{"trailing comma", "http://host1:3500,http://host2:3500,", []string{"http://host1:3500", "http://host2:3500"}},
|
||||
{"leading comma", ",http://host1:3500,http://host2:3500", []string{"http://host1:3500", "http://host2:3500"}},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := parseEndpoints(tt.input)
|
||||
if !reflect.DeepEqual(tt.expected, got) {
|
||||
t.Errorf("parseEndpoints(%q) = %v, want %v", tt.input, got, tt.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewRestConnectionProvider_Errors(t *testing.T) {
|
||||
t.Run("no endpoints", func(t *testing.T) {
|
||||
_, err := NewRestConnectionProvider("")
|
||||
require.ErrorContains(t, "no REST API endpoints provided", err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestRestConnectionProvider(t *testing.T) {
|
||||
provider, err := NewRestConnectionProvider("http://host1:3500,http://host2:3500,http://host3:3500")
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Run("initial state", func(t *testing.T) {
|
||||
assert.Equal(t, 3, len(provider.Hosts()))
|
||||
assert.Equal(t, "http://host1:3500", provider.CurrentHost())
|
||||
assert.NotNil(t, provider.HttpClient())
|
||||
})
|
||||
|
||||
t.Run("SwitchHost", func(t *testing.T) {
|
||||
require.NoError(t, provider.SwitchHost(1))
|
||||
assert.Equal(t, "http://host2:3500", provider.CurrentHost())
|
||||
require.NoError(t, provider.SwitchHost(0))
|
||||
assert.Equal(t, "http://host1:3500", provider.CurrentHost())
|
||||
require.ErrorContains(t, "invalid host index", provider.SwitchHost(-1))
|
||||
require.ErrorContains(t, "invalid host index", provider.SwitchHost(3))
|
||||
})
|
||||
|
||||
t.Run("Hosts returns copy", func(t *testing.T) {
|
||||
hosts := provider.Hosts()
|
||||
original := hosts[0]
|
||||
hosts[0] = "modified"
|
||||
assert.Equal(t, original, provider.Hosts()[0])
|
||||
})
|
||||
}
|
||||
|
||||
func TestRestConnectionProvider_WithOptions(t *testing.T) {
|
||||
headers := map[string][]string{"Authorization": {"Bearer token"}}
|
||||
provider, err := NewRestConnectionProvider(
|
||||
"http://localhost:3500",
|
||||
WithHttpHeaders(headers),
|
||||
WithHttpTimeout(30000000000), // 30 seconds in nanoseconds
|
||||
WithTracing(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, provider.HttpClient())
|
||||
assert.Equal(t, "http://localhost:3500", provider.CurrentHost())
|
||||
}
|
||||
@@ -509,17 +509,17 @@ func (s *SignedBlindedBeaconBlockFulu) SigString() string {
// ----------------------------------------------------------------------------

type ExecutionPayloadBid struct {
	ParentBlockHash    string   `json:"parent_block_hash"`
	ParentBlockRoot    string   `json:"parent_block_root"`
	BlockHash          string   `json:"block_hash"`
	PrevRandao         string   `json:"prev_randao"`
	FeeRecipient       string   `json:"fee_recipient"`
	GasLimit           string   `json:"gas_limit"`
	BuilderIndex       string   `json:"builder_index"`
	Slot               string   `json:"slot"`
	Value              string   `json:"value"`
	ExecutionPayment   string   `json:"execution_payment"`
	BlobKzgCommitments []string `json:"blob_kzg_commitments"`
	ParentBlockHash        string `json:"parent_block_hash"`
	ParentBlockRoot        string `json:"parent_block_root"`
	BlockHash              string `json:"block_hash"`
	PrevRandao             string `json:"prev_randao"`
	FeeRecipient           string `json:"fee_recipient"`
	GasLimit               string `json:"gas_limit"`
	BuilderIndex           string `json:"builder_index"`
	Slot                   string `json:"slot"`
	Value                  string `json:"value"`
	ExecutionPayment       string `json:"execution_payment"`
	BlobKzgCommitmentsRoot string `json:"blob_kzg_commitments_root"`
}

type SignedExecutionPayloadBid struct {
@@ -2939,22 +2939,18 @@ func SignedExecutionPayloadBidFromConsensus(b *eth.SignedExecutionPayloadBid) *S
|
||||
}
|
||||
|
||||
func ExecutionPayloadBidFromConsensus(b *eth.ExecutionPayloadBid) *ExecutionPayloadBid {
|
||||
blobKzgCommitments := make([]string, len(b.BlobKzgCommitments))
|
||||
for i := range b.BlobKzgCommitments {
|
||||
blobKzgCommitments[i] = hexutil.Encode(b.BlobKzgCommitments[i])
|
||||
}
|
||||
return &ExecutionPayloadBid{
|
||||
ParentBlockHash: hexutil.Encode(b.ParentBlockHash),
|
||||
ParentBlockRoot: hexutil.Encode(b.ParentBlockRoot),
|
||||
BlockHash: hexutil.Encode(b.BlockHash),
|
||||
PrevRandao: hexutil.Encode(b.PrevRandao),
|
||||
FeeRecipient: hexutil.Encode(b.FeeRecipient),
|
||||
GasLimit: fmt.Sprintf("%d", b.GasLimit),
|
||||
BuilderIndex: fmt.Sprintf("%d", b.BuilderIndex),
|
||||
Slot: fmt.Sprintf("%d", b.Slot),
|
||||
Value: fmt.Sprintf("%d", b.Value),
|
||||
ExecutionPayment: fmt.Sprintf("%d", b.ExecutionPayment),
|
||||
BlobKzgCommitments: blobKzgCommitments,
|
||||
ParentBlockHash: hexutil.Encode(b.ParentBlockHash),
|
||||
ParentBlockRoot: hexutil.Encode(b.ParentBlockRoot),
|
||||
BlockHash: hexutil.Encode(b.BlockHash),
|
||||
PrevRandao: hexutil.Encode(b.PrevRandao),
|
||||
FeeRecipient: hexutil.Encode(b.FeeRecipient),
|
||||
GasLimit: fmt.Sprintf("%d", b.GasLimit),
|
||||
BuilderIndex: fmt.Sprintf("%d", b.BuilderIndex),
|
||||
Slot: fmt.Sprintf("%d", b.Slot),
|
||||
Value: fmt.Sprintf("%d", b.Value),
|
||||
ExecutionPayment: fmt.Sprintf("%d", b.ExecutionPayment),
|
||||
BlobKzgCommitmentsRoot: hexutil.Encode(b.BlobKzgCommitmentsRoot),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3191,30 +3187,22 @@ func (b *ExecutionPayloadBid) ToConsensus() (*eth.ExecutionPayloadBid, error) {
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayment")
|
||||
}
|
||||
err = slice.VerifyMaxLength(b.BlobKzgCommitments, fieldparams.MaxBlobCommitmentsPerBlock)
|
||||
blobKzgCommitmentsRoot, err := bytesutil.DecodeHexWithLength(b.BlobKzgCommitmentsRoot, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "BlobKzgCommitments")
|
||||
}
|
||||
blobKzgCommitments := make([][]byte, len(b.BlobKzgCommitments))
|
||||
for i, commitment := range b.BlobKzgCommitments {
|
||||
kzg, err := bytesutil.DecodeHexWithLength(commitment, fieldparams.BLSPubkeyLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("BlobKzgCommitments[%d]", i))
|
||||
}
|
||||
blobKzgCommitments[i] = kzg
|
||||
return nil, server.NewDecodeError(err, "BlobKzgCommitmentsRoot")
|
||||
}
|
||||
return ð.ExecutionPayloadBid{
|
||||
ParentBlockHash: parentBlockHash,
|
||||
ParentBlockRoot: parentBlockRoot,
|
||||
BlockHash: blockHash,
|
||||
PrevRandao: prevRandao,
|
||||
FeeRecipient: feeRecipient,
|
||||
GasLimit: gasLimit,
|
||||
BuilderIndex: primitives.BuilderIndex(builderIndex),
|
||||
Slot: primitives.Slot(slot),
|
||||
Value: primitives.Gwei(value),
|
||||
ExecutionPayment: primitives.Gwei(executionPayment),
|
||||
BlobKzgCommitments: blobKzgCommitments,
|
||||
ParentBlockHash: parentBlockHash,
|
||||
ParentBlockRoot: parentBlockRoot,
|
||||
BlockHash: blockHash,
|
||||
PrevRandao: prevRandao,
|
||||
FeeRecipient: feeRecipient,
|
||||
GasLimit: gasLimit,
|
||||
BuilderIndex: primitives.BuilderIndex(builderIndex),
|
||||
Slot: primitives.Slot(slot),
|
||||
Value: primitives.Gwei(value),
|
||||
ExecutionPayment: primitives.Gwei(executionPayment),
|
||||
BlobKzgCommitmentsRoot: blobKzgCommitmentsRoot,
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
||||
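The net effect of the struct and conversion changes above on the JSON API surface is that the per-blob commitment list is replaced by a single 32-byte commitments root. A small self-contained sketch of the new wire shape, with made-up field values and a trimmed local copy of the struct for illustration only:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// Trimmed copy of the API struct above; only the fields needed to show the change.
type executionPayloadBid struct {
	BuilderIndex           string `json:"builder_index"`
	Slot                   string `json:"slot"`
	Value                  string `json:"value"`
	BlobKzgCommitmentsRoot string `json:"blob_kzg_commitments_root"`
}

func main() {
	bid := executionPayloadBid{
		BuilderIndex:           "1",
		Slot:                   "12",
		Value:                  "500000",
		BlobKzgCommitmentsRoot: "0x" + strings.Repeat("ee", 32), // hex of a 32-byte root
	}
	out, _ := json.MarshalIndent(bid, "", "  ")
	fmt.Println(string(out))
	// The pre-change struct carried a "blob_kzg_commitments" string list here instead.
}
```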
@@ -63,19 +63,6 @@ type PeerCount struct {
	Connected     string `json:"connected"`
	Disconnecting string `json:"disconnecting"`
}
type GetVersionV2Response struct {
	Data *VersionV2 `json:"data"`
}
type VersionV2 struct {
	BeaconNode      *ClientVersionV1 `json:"beacon_node"`
	ExecutionClient *ClientVersionV1 `json:"execution_client"`
}
type ClientVersionV1 struct {
	Code    string `json:"code"`
	Name    string `json:"name"`
	Version string `json:"version"`
	Commit  string `json:"commit"`
}

type GetVersionResponse struct {
	Data *Version `json:"data"`
@@ -85,7 +85,6 @@ go_library(
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//crypto/bls:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//io/logs:go_default_library",
|
||||
"//math:go_default_library",
|
||||
"//monitoring/tracing:go_default_library",
|
||||
"//monitoring/tracing/trace:go_default_library",
|
||||
|
||||
@@ -10,7 +10,6 @@ import (
|
||||
consensus_types "github.com/OffchainLabs/prysm/v7/consensus-types"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v7/io/logs"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
prysmTime "github.com/OffchainLabs/prysm/v7/time"
|
||||
@@ -88,45 +87,36 @@ func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
|
||||
func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte, justified, finalized *ethpb.Checkpoint, receivedTime time.Time, genesis time.Time, daWaitedTime time.Duration) error {
|
||||
startTime, err := slots.StartTime(genesis, block.Slot())
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to get slot start time")
|
||||
return err
|
||||
}
|
||||
parentRoot := block.ParentRoot()
|
||||
blkRoot := fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8])
|
||||
finalizedRoot := fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8])
|
||||
sinceSlotStartTime := prysmTime.Now().Sub(startTime)
|
||||
|
||||
lessFields := logrus.Fields{
|
||||
"slot": block.Slot(),
|
||||
"block": blkRoot,
|
||||
"finalizedEpoch": finalized.Epoch,
|
||||
"finalizedRoot": finalizedRoot,
|
||||
"epoch": slots.ToEpoch(block.Slot()),
|
||||
"sinceSlotStartTime": sinceSlotStartTime,
|
||||
}
|
||||
moreFields := logrus.Fields{
|
||||
"slot": block.Slot(),
|
||||
"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
|
||||
"block": blkRoot,
|
||||
"epoch": slots.ToEpoch(block.Slot()),
|
||||
"justifiedEpoch": justified.Epoch,
|
||||
"justifiedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
|
||||
"finalizedEpoch": finalized.Epoch,
|
||||
"finalizedRoot": finalizedRoot,
|
||||
"parentRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(parentRoot[:])[:8]),
|
||||
"version": version.String(block.Version()),
|
||||
"sinceSlotStartTime": sinceSlotStartTime,
|
||||
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime) - daWaitedTime,
|
||||
"dataAvailabilityWaitedTime": daWaitedTime,
|
||||
}
|
||||
|
||||
level := logs.PackageVerbosity("beacon-chain/blockchain")
|
||||
level := log.Logger.GetLevel()
|
||||
if level >= logrus.DebugLevel {
|
||||
log.WithFields(moreFields).Info("Synced new block")
|
||||
return nil
|
||||
parentRoot := block.ParentRoot()
|
||||
lf := logrus.Fields{
|
||||
"slot": block.Slot(),
|
||||
"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
|
||||
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
|
||||
"epoch": slots.ToEpoch(block.Slot()),
|
||||
"justifiedEpoch": justified.Epoch,
|
||||
"justifiedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
|
||||
"finalizedEpoch": finalized.Epoch,
|
||||
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
|
||||
"parentRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(parentRoot[:])[:8]),
|
||||
"version": version.String(block.Version()),
|
||||
"sinceSlotStartTime": prysmTime.Now().Sub(startTime),
|
||||
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime) - daWaitedTime,
|
||||
"dataAvailabilityWaitedTime": daWaitedTime,
|
||||
}
|
||||
log.WithFields(lf).Debug("Synced new block")
|
||||
} else {
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": block.Slot(),
|
||||
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
|
||||
"finalizedEpoch": finalized.Epoch,
|
||||
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
|
||||
"epoch": slots.ToEpoch(block.Slot()),
|
||||
}).Info("Synced new block")
|
||||
}
|
||||
|
||||
log.WithFields(lessFields).WithField(logs.LogTargetField, logs.LogTargetUser).Info("Synced new block")
|
||||
log.WithFields(moreFields).WithField(logs.LogTargetField, logs.LogTargetEphemeral).Info("Synced new block")
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -7,6 +7,7 @@ go_library(
|
||||
"payload_attestation.go",
|
||||
"pending_payment.go",
|
||||
"proposer_slashing.go",
|
||||
"withdrawal.go",
|
||||
],
|
||||
importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas",
|
||||
visibility = ["//visibility:public"],
|
||||
@@ -38,6 +39,7 @@ go_test(
|
||||
"payload_attestation_test.go",
|
||||
"pending_payment_test.go",
|
||||
"proposer_slashing_test.go",
|
||||
"withdrawal_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
|
||||
@@ -17,56 +17,27 @@ import (
|
||||
)
|
||||
|
||||
// ProcessExecutionPayloadBid processes a signed execution payload bid in the Gloas fork.
|
||||
// Spec v1.7.0-alpha.0 (pseudocode):
|
||||
// process_execution_payload_bid(state: BeaconState, block: BeaconBlock):
|
||||
//
|
||||
// <spec fn="process_execution_payload_bid" fork="gloas" hash="823c9f3a">
|
||||
// def process_execution_payload_bid(state: BeaconState, block: BeaconBlock) -> None:
|
||||
// signed_bid = block.body.signed_execution_payload_bid
|
||||
// bid = signed_bid.message
|
||||
// builder_index = bid.builder_index
|
||||
// amount = bid.value
|
||||
//
|
||||
// # For self-builds, amount must be zero regardless of withdrawal credential prefix
|
||||
// if builder_index == BUILDER_INDEX_SELF_BUILD:
|
||||
// assert amount == 0
|
||||
// assert signed_bid.signature == bls.G2_POINT_AT_INFINITY
|
||||
// else:
|
||||
// # Verify that the builder is active
|
||||
// assert is_active_builder(state, builder_index)
|
||||
// # Verify that the builder has funds to cover the bid
|
||||
// assert can_builder_cover_bid(state, builder_index, amount)
|
||||
// # Verify that the bid signature is valid
|
||||
// assert verify_execution_payload_bid_signature(state, signed_bid)
|
||||
//
|
||||
// # Verify commitments are under limit
|
||||
// assert (
|
||||
// len(bid.blob_kzg_commitments)
|
||||
// <= get_blob_parameters(get_current_epoch(state)).max_blobs_per_block
|
||||
// )
|
||||
//
|
||||
// # Verify that the bid is for the current slot
|
||||
// assert bid.slot == block.slot
|
||||
// # Verify that the bid is for the right parent block
|
||||
// assert bid.parent_block_hash == state.latest_block_hash
|
||||
// assert bid.parent_block_root == block.parent_root
|
||||
// assert bid.prev_randao == get_randao_mix(state, get_current_epoch(state))
|
||||
//
|
||||
// # Record the pending payment if there is some payment
|
||||
// if amount > 0:
|
||||
// pending_payment = BuilderPendingPayment(
|
||||
// weight=0,
|
||||
// withdrawal=BuilderPendingWithdrawal(
|
||||
// fee_recipient=bid.fee_recipient,
|
||||
// amount=amount,
|
||||
// builder_index=builder_index,
|
||||
// ),
|
||||
// )
|
||||
// state.builder_pending_payments[SLOTS_PER_EPOCH + bid.slot % SLOTS_PER_EPOCH] = (
|
||||
// pending_payment
|
||||
// )
|
||||
//
|
||||
// # Cache the signed execution payload bid
|
||||
// state.latest_execution_payload_bid = bid
|
||||
// </spec>
|
||||
// signed_bid = block.body.signed_execution_payload_bid
|
||||
// bid = signed_bid.message
|
||||
// builder_index = bid.builder_index
|
||||
// amount = bid.value
|
||||
// if builder_index == BUILDER_INDEX_SELF_BUILD:
|
||||
// assert amount == 0
|
||||
// assert signed_bid.signature == G2_POINT_AT_INFINITY
|
||||
// else:
|
||||
// assert is_active_builder(state, builder_index)
|
||||
// assert can_builder_cover_bid(state, builder_index, amount)
|
||||
// assert verify_execution_payload_bid_signature(state, signed_bid)
|
||||
// assert bid.slot == block.slot
|
||||
// assert bid.parent_block_hash == state.latest_block_hash
|
||||
// assert bid.parent_block_root == block.parent_root
|
||||
// assert bid.prev_randao == get_randao_mix(state, get_current_epoch(state))
|
||||
// if amount > 0:
|
||||
// state.builder_pending_payments[...] = BuilderPendingPayment(weight=0, withdrawal=BuilderPendingWithdrawal(fee_recipient=bid.fee_recipient, amount=amount, builder_index=builder_index))
|
||||
// state.latest_execution_payload_bid = bid
|
||||
func ProcessExecutionPayloadBid(st state.BeaconState, block interfaces.ReadOnlyBeaconBlock) error {
|
||||
signedBid, err := block.Body().SignedExecutionPayloadBid()
|
||||
if err != nil {
|
||||
@@ -115,12 +86,6 @@ func ProcessExecutionPayloadBid(st state.BeaconState, block interfaces.ReadOnlyB
|
||||
}
|
||||
}
|
||||
|
||||
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlockAtEpoch(slots.ToEpoch(block.Slot()))
|
||||
commitmentCount := bid.BlobKzgCommitmentCount()
|
||||
if commitmentCount > uint64(maxBlobsPerBlock) {
|
||||
return fmt.Errorf("bid has %d blob KZG commitments over max %d", commitmentCount, maxBlobsPerBlock)
|
||||
}
|
||||
|
||||
if err := validateBidConsistency(st, bid, block); err != nil {
|
||||
return errors.Wrap(err, "bid consistency validation failed")
|
||||
}
|
||||
|
||||
@@ -184,28 +184,6 @@ func signBid(t *testing.T, sk common.SecretKey, bid *ethpb.ExecutionPayloadBid,
|
||||
return out
|
||||
}
|
||||
|
||||
func blobCommitmentsForSlot(slot primitives.Slot, count int) [][]byte {
|
||||
max := int(params.BeaconConfig().MaxBlobsPerBlockAtEpoch(slots.ToEpoch(slot)))
|
||||
if count > max {
|
||||
count = max
|
||||
}
|
||||
commitments := make([][]byte, count)
|
||||
for i := range commitments {
|
||||
commitments[i] = bytes.Repeat([]byte{0xEE}, 48)
|
||||
}
|
||||
return commitments
|
||||
}
|
||||
|
||||
func tooManyBlobCommitmentsForSlot(slot primitives.Slot) [][]byte {
|
||||
max := int(params.BeaconConfig().MaxBlobsPerBlockAtEpoch(slots.ToEpoch(slot)))
|
||||
count := max + 1
|
||||
commitments := make([][]byte, count)
|
||||
for i := range commitments {
|
||||
commitments[i] = bytes.Repeat([]byte{0xEE}, 48)
|
||||
}
|
||||
return commitments
|
||||
}
|
||||
|
||||
func TestProcessExecutionPayloadBid_SelfBuildSuccess(t *testing.T) {
|
||||
slot := primitives.Slot(12)
|
||||
proposerIdx := primitives.ValidatorIndex(0)
|
||||
@@ -216,17 +194,17 @@ func TestProcessExecutionPayloadBid_SelfBuildSuccess(t *testing.T) {
|
||||
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinActivationBalance+1000, randao, latestHash, pubKey)
|
||||
|
||||
bid := ðpb.ExecutionPayloadBid{
|
||||
ParentBlockHash: latestHash[:],
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
|
||||
PrevRandao: randao[:],
|
||||
GasLimit: 1,
|
||||
BuilderIndex: builderIdx,
|
||||
Slot: slot,
|
||||
Value: 0,
|
||||
ExecutionPayment: 0,
|
||||
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
|
||||
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
|
||||
ParentBlockHash: latestHash[:],
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
|
||||
PrevRandao: randao[:],
|
||||
GasLimit: 1,
|
||||
BuilderIndex: builderIdx,
|
||||
Slot: slot,
|
||||
Value: 0,
|
||||
ExecutionPayment: 0,
|
||||
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xEE}, 32),
|
||||
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
|
||||
}
|
||||
signed := ðpb.SignedExecutionPayloadBid{
|
||||
Message: bid,
|
||||
@@ -258,16 +236,16 @@ func TestProcessExecutionPayloadBid_SelfBuildNonZeroAmountFails(t *testing.T) {
|
||||
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinActivationBalance+1000, randao, latestHash, [48]byte{})
|
||||
|
||||
bid := ðpb.ExecutionPayloadBid{
|
||||
ParentBlockHash: latestHash[:],
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0xAA}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0xBB}, 32),
|
||||
PrevRandao: randao[:],
|
||||
BuilderIndex: builderIdx,
|
||||
Slot: slot,
|
||||
Value: 10,
|
||||
ExecutionPayment: 0,
|
||||
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
|
||||
FeeRecipient: bytes.Repeat([]byte{0xDD}, 20),
|
||||
ParentBlockHash: latestHash[:],
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0xAA}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0xBB}, 32),
|
||||
PrevRandao: randao[:],
|
||||
BuilderIndex: builderIdx,
|
||||
Slot: slot,
|
||||
Value: 10,
|
||||
ExecutionPayment: 0,
|
||||
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xCC}, 32),
|
||||
FeeRecipient: bytes.Repeat([]byte{0xDD}, 20),
|
||||
}
|
||||
signed := ðpb.SignedExecutionPayloadBid{
|
||||
Message: bid,
|
||||
@@ -302,17 +280,17 @@ func TestProcessExecutionPayloadBid_PendingPaymentAndCacheBid(t *testing.T) {
|
||||
state := buildGloasState(t, slot, proposerIdx, builderIdx, balance, randao, latestHash, pubKey)
|
||||
|
||||
bid := ðpb.ExecutionPayloadBid{
|
||||
ParentBlockHash: latestHash[:],
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
|
||||
PrevRandao: randao[:],
|
||||
GasLimit: 1,
|
||||
BuilderIndex: builderIdx,
|
||||
Slot: slot,
|
||||
Value: 500_000,
|
||||
ExecutionPayment: 1,
|
||||
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
|
||||
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
|
||||
ParentBlockHash: latestHash[:],
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
|
||||
PrevRandao: randao[:],
|
||||
GasLimit: 1,
|
||||
BuilderIndex: builderIdx,
|
||||
Slot: slot,
|
||||
Value: 500_000,
|
||||
ExecutionPayment: 1,
|
||||
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xEE}, 32),
|
||||
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
|
||||
}
|
||||
|
||||
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
|
||||
@@ -363,17 +341,17 @@ func TestProcessExecutionPayloadBid_BuilderNotActive(t *testing.T) {
|
||||
state = stateIface.(*state_native.BeaconState)
|
||||
|
||||
bid := ðpb.ExecutionPayloadBid{
|
||||
ParentBlockHash: latestHash[:],
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0x03}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0x04}, 32),
|
||||
PrevRandao: randao[:],
|
||||
GasLimit: 1,
|
||||
BuilderIndex: builderIdx,
|
||||
Slot: slot,
|
||||
Value: 10,
|
||||
ExecutionPayment: 0,
|
||||
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
|
||||
FeeRecipient: bytes.Repeat([]byte{0x06}, 20),
|
||||
ParentBlockHash: latestHash[:],
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0x03}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0x04}, 32),
|
||||
PrevRandao: randao[:],
|
||||
GasLimit: 1,
|
||||
BuilderIndex: builderIdx,
|
||||
Slot: slot,
|
||||
Value: 10,
|
||||
ExecutionPayment: 0,
|
||||
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x05}, 32),
|
||||
FeeRecipient: bytes.Repeat([]byte{0x06}, 20),
|
||||
}
|
||||
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
|
||||
sig := signBid(t, sk, bid, state.Fork(), genesis)
|
||||
@@ -416,17 +394,17 @@ func TestProcessExecutionPayloadBid_CannotCoverBid(t *testing.T) {
|
||||
state = stateIface.(*state_native.BeaconState)
|
||||
|
||||
bid := ðpb.ExecutionPayloadBid{
|
||||
ParentBlockHash: latestHash[:],
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
|
||||
PrevRandao: randao[:],
|
||||
GasLimit: 1,
|
||||
BuilderIndex: builderIdx,
|
||||
Slot: slot,
|
||||
Value: 25,
|
||||
ExecutionPayment: 0,
|
||||
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
|
||||
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
|
||||
ParentBlockHash: latestHash[:],
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
|
||||
PrevRandao: randao[:],
|
||||
GasLimit: 1,
|
||||
BuilderIndex: builderIdx,
|
||||
Slot: slot,
|
||||
Value: 25,
|
||||
ExecutionPayment: 0,
|
||||
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xEE}, 32),
|
||||
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
|
||||
}
|
||||
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
|
||||
sig := signBid(t, sk, bid, state.Fork(), genesis)
|
||||
@@ -458,17 +436,17 @@ func TestProcessExecutionPayloadBid_InvalidSignature(t *testing.T) {
|
||||
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)
|
||||
|
||||
bid := ðpb.ExecutionPayloadBid{
|
||||
ParentBlockHash: latestHash[:],
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
|
||||
PrevRandao: randao[:],
|
||||
GasLimit: 1,
|
||||
BuilderIndex: builderIdx,
|
||||
Slot: slot,
|
||||
Value: 10,
|
||||
ExecutionPayment: 0,
|
||||
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
|
||||
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
|
||||
ParentBlockHash: latestHash[:],
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
|
||||
PrevRandao: randao[:],
|
||||
GasLimit: 1,
|
||||
BuilderIndex: builderIdx,
|
||||
Slot: slot,
|
||||
Value: 10,
|
||||
ExecutionPayment: 0,
|
||||
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xEE}, 32),
|
||||
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
|
||||
}
|
||||
// Use an invalid signature.
|
||||
invalidSig := [96]byte{1}
|
||||
@@ -485,42 +463,6 @@ func TestProcessExecutionPayloadBid_InvalidSignature(t *testing.T) {
|
||||
require.ErrorContains(t, "bid signature validation failed", err)
|
||||
}
|
||||
|
||||
func TestProcessExecutionPayloadBid_TooManyBlobCommitments(t *testing.T) {
|
||||
slot := primitives.Slot(9)
|
||||
proposerIdx := primitives.ValidatorIndex(0)
|
||||
builderIdx := params.BeaconConfig().BuilderIndexSelfBuild
|
||||
randao := [32]byte(bytes.Repeat([]byte{0xAA}, 32))
|
||||
latestHash := [32]byte(bytes.Repeat([]byte{0xBB}, 32))
|
||||
pubKey := [48]byte{}
|
||||
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinActivationBalance+1000, randao, latestHash, pubKey)
|
||||
|
||||
bid := ðpb.ExecutionPayloadBid{
|
||||
ParentBlockHash: latestHash[:],
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
|
||||
PrevRandao: randao[:],
|
||||
BuilderIndex: builderIdx,
|
||||
Slot: slot,
|
||||
BlobKzgCommitments: tooManyBlobCommitmentsForSlot(slot),
|
||||
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
|
||||
}
|
||||
signed := ðpb.SignedExecutionPayloadBid{
|
||||
Message: bid,
|
||||
Signature: common.InfiniteSignature[:],
|
||||
}
|
||||
|
||||
block := stubBlock{
|
||||
slot: slot,
|
||||
proposer: proposerIdx,
|
||||
parentRoot: bytesutil.ToBytes32(bid.ParentBlockRoot),
|
||||
body: stubBlockBody{signedBid: signed},
|
||||
v: version.Gloas,
|
||||
}
|
||||
|
||||
err := ProcessExecutionPayloadBid(state, block)
|
||||
require.ErrorContains(t, "blob KZG commitments over max", err)
|
||||
}
|
||||
|
||||
func TestProcessExecutionPayloadBid_SlotMismatch(t *testing.T) {
|
||||
slot := primitives.Slot(10)
|
||||
builderIdx := primitives.BuilderIndex(1)
|
||||
@@ -536,17 +478,17 @@ func TestProcessExecutionPayloadBid_SlotMismatch(t *testing.T) {
|
||||
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)
|
||||
|
||||
bid := ðpb.ExecutionPayloadBid{
|
||||
ParentBlockHash: latestHash[:],
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0xAA}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0xBB}, 32),
|
||||
PrevRandao: randao[:],
|
||||
GasLimit: 1,
|
||||
BuilderIndex: builderIdx,
|
||||
Slot: slot + 1, // mismatch
|
||||
Value: 1,
|
||||
ExecutionPayment: 0,
|
||||
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
|
||||
FeeRecipient: bytes.Repeat([]byte{0xDD}, 20),
|
||||
ParentBlockHash: latestHash[:],
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0xAA}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0xBB}, 32),
|
||||
PrevRandao: randao[:],
|
||||
GasLimit: 1,
|
||||
BuilderIndex: builderIdx,
|
||||
Slot: slot + 1, // mismatch
|
||||
Value: 1,
|
||||
ExecutionPayment: 0,
|
||||
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xCC}, 32),
|
||||
FeeRecipient: bytes.Repeat([]byte{0xDD}, 20),
|
||||
}
|
||||
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
|
||||
sig := signBid(t, sk, bid, state.Fork(), genesis)
|
||||
@@ -578,17 +520,17 @@ func TestProcessExecutionPayloadBid_ParentHashMismatch(t *testing.T) {
|
||||
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)
|
||||
|
||||
bid := ðpb.ExecutionPayloadBid{
|
||||
ParentBlockHash: bytes.Repeat([]byte{0x11}, 32), // mismatch
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0x22}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0x33}, 32),
|
||||
PrevRandao: randao[:],
|
||||
GasLimit: 1,
|
||||
BuilderIndex: builderIdx,
|
||||
Slot: slot,
|
||||
Value: 1,
|
||||
ExecutionPayment: 0,
|
||||
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
|
||||
FeeRecipient: bytes.Repeat([]byte{0x55}, 20),
|
||||
ParentBlockHash: bytes.Repeat([]byte{0x11}, 32), // mismatch
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0x22}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0x33}, 32),
|
||||
PrevRandao: randao[:],
|
||||
GasLimit: 1,
|
||||
BuilderIndex: builderIdx,
|
||||
Slot: slot,
|
||||
Value: 1,
|
||||
ExecutionPayment: 0,
|
||||
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x44}, 32),
|
||||
FeeRecipient: bytes.Repeat([]byte{0x55}, 20),
|
||||
}
|
||||
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
|
||||
sig := signBid(t, sk, bid, state.Fork(), genesis)
|
||||
@@ -621,17 +563,17 @@ func TestProcessExecutionPayloadBid_ParentRootMismatch(t *testing.T) {
|
||||
|
||||
parentRoot := bytes.Repeat([]byte{0x22}, 32)
|
||||
bid := ðpb.ExecutionPayloadBid{
|
||||
ParentBlockHash: latestHash[:],
|
||||
ParentBlockRoot: parentRoot,
|
||||
BlockHash: bytes.Repeat([]byte{0x33}, 32),
|
||||
PrevRandao: randao[:],
|
||||
GasLimit: 1,
|
||||
BuilderIndex: builderIdx,
|
||||
Slot: slot,
|
||||
Value: 1,
|
||||
ExecutionPayment: 0,
|
||||
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
|
||||
FeeRecipient: bytes.Repeat([]byte{0x55}, 20),
|
||||
ParentBlockHash: latestHash[:],
|
||||
ParentBlockRoot: parentRoot,
|
||||
BlockHash: bytes.Repeat([]byte{0x33}, 32),
|
||||
PrevRandao: randao[:],
|
||||
GasLimit: 1,
|
||||
BuilderIndex: builderIdx,
|
||||
Slot: slot,
|
||||
Value: 1,
|
||||
ExecutionPayment: 0,
|
||||
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x44}, 32),
|
||||
FeeRecipient: bytes.Repeat([]byte{0x55}, 20),
|
||||
}
|
||||
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
|
||||
sig := signBid(t, sk, bid, state.Fork(), genesis)
|
||||
@@ -663,17 +605,17 @@ func TestProcessExecutionPayloadBid_PrevRandaoMismatch(t *testing.T) {
|
||||
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)
|
||||
|
||||
bid := ðpb.ExecutionPayloadBid{
|
||||
ParentBlockHash: latestHash[:],
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0x22}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0x33}, 32),
|
||||
PrevRandao: bytes.Repeat([]byte{0x01}, 32), // mismatch
|
||||
GasLimit: 1,
|
||||
BuilderIndex: builderIdx,
|
||||
Slot: slot,
|
||||
Value: 1,
|
||||
ExecutionPayment: 0,
|
||||
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
|
||||
FeeRecipient: bytes.Repeat([]byte{0x55}, 20),
|
||||
ParentBlockHash: latestHash[:],
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0x22}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0x33}, 32),
|
||||
PrevRandao: bytes.Repeat([]byte{0x01}, 32), // mismatch
|
||||
GasLimit: 1,
|
||||
BuilderIndex: builderIdx,
|
||||
Slot: slot,
|
||||
Value: 1,
|
||||
ExecutionPayment: 0,
|
||||
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x44}, 32),
|
||||
FeeRecipient: bytes.Repeat([]byte{0x55}, 20),
|
||||
}
|
||||
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
|
||||
sig := signBid(t, sk, bid, state.Fork(), genesis)
|
||||
|
||||
@@ -24,21 +24,14 @@ import (
|
||||
)
|
||||
|
||||
// ProcessPayloadAttestations validates payload attestations in a block body.
|
||||
// Spec v1.7.0-alpha.0 (pseudocode):
|
||||
// process_payload_attestation(state: BeaconState, payload_attestation: PayloadAttestation):
|
||||
//
|
||||
// <spec fn="process_payload_attestation" fork="gloas" hash="f46bf0b0">
|
||||
// def process_payload_attestation(
|
||||
// state: BeaconState, payload_attestation: PayloadAttestation
|
||||
// ) -> None:
|
||||
// data = payload_attestation.data
|
||||
//
|
||||
// # Check that the attestation is for the parent beacon block
|
||||
// assert data.beacon_block_root == state.latest_block_header.parent_root
|
||||
// # Check that the attestation is for the previous slot
|
||||
// assert data.slot + 1 == state.slot
|
||||
// # Verify signature
|
||||
// indexed_payload_attestation = get_indexed_payload_attestation(state, payload_attestation)
|
||||
// assert is_valid_indexed_payload_attestation(state, indexed_payload_attestation)
|
||||
// </spec>
|
||||
// data = payload_attestation.data
|
||||
// assert data.beacon_block_root == state.latest_block_header.parent_root
|
||||
// assert data.slot + 1 == state.slot
|
||||
// indexed = get_indexed_payload_attestation(state, data.slot, payload_attestation)
|
||||
// assert is_valid_indexed_payload_attestation(state, indexed)
|
||||
func ProcessPayloadAttestations(ctx context.Context, st state.BeaconState, body interfaces.ReadOnlyBeaconBlockBody) error {
|
||||
atts, err := body.PayloadAttestations()
|
||||
if err != nil {
|
||||
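Before signature verification, process_payload_attestation only needs two cheap equality checks. A self-contained sketch of those checks with simplified types (assumptions, not the real interfaces):

```go
// Sketch of the two pre-signature checks: the attestation must point at the
// parent beacon block and at the previous slot.
package main

import (
	"errors"
	"fmt"
)

func checkPayloadAttestation(beaconBlockRoot, parentRoot [32]byte, attSlot, stateSlot uint64) error {
	if beaconBlockRoot != parentRoot {
		return errors.New("attestation is not for the parent beacon block")
	}
	if attSlot+1 != stateSlot {
		return errors.New("attestation is not for the previous slot")
	}
	return nil
}

func main() {
	root := [32]byte{0xAA}
	fmt.Println(checkPayloadAttestation(root, root, 9, 10)) // <nil>
	fmt.Println(checkPayloadAttestation(root, root, 9, 11)) // error: wrong slot
}
```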
@@ -97,24 +90,17 @@ func indexedPayloadAttestation(ctx context.Context, st state.ReadOnlyBeaconState
|
||||
}
|
||||
|
||||
// payloadCommittee returns the payload timeliness committee for a given slot for the state.
|
||||
// Spec v1.7.0-alpha.0 (pseudocode):
|
||||
// get_ptc(state: BeaconState, slot: Slot) -> Vector[ValidatorIndex, PTC_SIZE]:
|
||||
//
|
||||
// <spec fn="get_ptc" fork="gloas" hash="ae15f761">
|
||||
// def get_ptc(state: BeaconState, slot: Slot) -> Vector[ValidatorIndex, PTC_SIZE]:
|
||||
// """
|
||||
// Get the payload timeliness committee for the given ``slot``.
|
||||
// """
|
||||
// epoch = compute_epoch_at_slot(slot)
|
||||
// seed = hash(get_seed(state, epoch, DOMAIN_PTC_ATTESTER) + uint_to_bytes(slot))
|
||||
// indices: List[ValidatorIndex] = []
|
||||
// # Concatenate all committees for this slot in order
|
||||
// committees_per_slot = get_committee_count_per_slot(state, epoch)
|
||||
// for i in range(committees_per_slot):
|
||||
// committee = get_beacon_committee(state, slot, CommitteeIndex(i))
|
||||
// indices.extend(committee)
|
||||
// return compute_balance_weighted_selection(
|
||||
// state, indices, seed, size=PTC_SIZE, shuffle_indices=False
|
||||
// )
|
||||
// </spec>
|
||||
// epoch = compute_epoch_at_slot(slot)
|
||||
// seed = hash(get_seed(state, epoch, DOMAIN_PTC_ATTESTER) + uint_to_bytes(slot))
|
||||
// indices = []
|
||||
// committees_per_slot = get_committee_count_per_slot(state, epoch)
|
||||
// for i in range(committees_per_slot):
|
||||
// committee = get_beacon_committee(state, slot, CommitteeIndex(i))
|
||||
// indices.extend(committee)
|
||||
// return compute_balance_weighted_selection(state, indices, seed, size=PTC_SIZE, shuffle_indices=False)
|
||||
func payloadCommittee(ctx context.Context, st state.ReadOnlyBeaconState, slot primitives.Slot) ([]primitives.ValidatorIndex, error) {
|
||||
epoch := slots.ToEpoch(slot)
|
||||
seed, err := ptcSeed(st, epoch, slot)
|
||||
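The PTC committee derivation starts from a slot-specific seed. A standalone sketch of just that seed step, assuming SHA-256 and a precomputed per-epoch DOMAIN_PTC_ATTESTER seed (both assumptions for illustration):

```go
// Sketch of seed = hash(get_seed(state, epoch, DOMAIN_PTC_ATTESTER) + uint_to_bytes(slot)).
package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

func ptcSeed(domainSeed [32]byte, slot uint64) [32]byte {
	var buf [40]byte
	copy(buf[:32], domainSeed[:])
	binary.LittleEndian.PutUint64(buf[32:], slot) // uint_to_bytes is little-endian in the spec
	return sha256.Sum256(buf[:])
}

func main() {
	domainSeed := sha256.Sum256([]byte("per-epoch DOMAIN_PTC_ATTESTER seed (stand-in)"))
	fmt.Printf("%x\n", ptcSeed(domainSeed, 42))
}
```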
@@ -166,35 +152,17 @@ func ptcSeed(st state.ReadOnlyBeaconState, epoch primitives.Epoch, slot primitiv
|
||||
}
|
||||
|
||||
// selectByBalance selects a balance-weighted subset of input candidates.
|
||||
// Spec v1.7.0-alpha.0 (pseudocode):
|
||||
// compute_balance_weighted_selection(state, indices, seed, size, shuffle_indices):
|
||||
// Note: shuffle_indices is false for PTC.
|
||||
//
|
||||
// <spec fn="compute_balance_weighted_selection" fork="gloas" hash="2c9f1c23">
|
||||
// def compute_balance_weighted_selection(
|
||||
// state: BeaconState,
|
||||
// indices: Sequence[ValidatorIndex],
|
||||
// seed: Bytes32,
|
||||
// size: uint64,
|
||||
// shuffle_indices: bool,
|
||||
// ) -> Sequence[ValidatorIndex]:
|
||||
// """
|
||||
// Return ``size`` indices sampled by effective balance, using ``indices``
|
||||
// as candidates. If ``shuffle_indices`` is ``True``, candidate indices
|
||||
// are themselves sampled from ``indices`` by shuffling it, otherwise
|
||||
// ``indices`` is traversed in order.
|
||||
// """
|
||||
// total = uint64(len(indices))
|
||||
// assert total > 0
|
||||
// selected: List[ValidatorIndex] = []
|
||||
// i = uint64(0)
|
||||
// while len(selected) < size:
|
||||
// next_index = i % total
|
||||
// if shuffle_indices:
|
||||
// next_index = compute_shuffled_index(next_index, total, seed)
|
||||
// candidate_index = indices[next_index]
|
||||
// if compute_balance_weighted_acceptance(state, candidate_index, seed, i):
|
||||
// selected.append(candidate_index)
|
||||
// i += 1
|
||||
// return selected
|
||||
// </spec>
|
||||
// total = len(indices); selected = []; i = 0
|
||||
// while len(selected) < size:
|
||||
// next = i % total
|
||||
// if shuffle_indices: next = compute_shuffled_index(next, total, seed)
|
||||
// if compute_balance_weighted_acceptance(state, indices[next], seed, i):
|
||||
// selected.append(indices[next])
|
||||
// i += 1
|
||||
func selectByBalanceFill(
|
||||
ctx context.Context,
|
||||
st state.ReadOnlyBeaconState,
|
||||
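A toy version of the selection loop from the spec comment above, with shuffle_indices fixed to False as in the PTC case. The acceptance predicate is injected here; a concrete sketch of it follows the acceptByBalance hunk below.

```go
// Sketch of compute_balance_weighted_selection with shuffle_indices=False:
// traverse candidates in order, keep those the acceptance predicate approves,
// until `size` indices are selected.
package main

import "fmt"

func selectByBalance(indices []uint64, size int, accept func(candidate, round uint64) bool) []uint64 {
	total := uint64(len(indices))
	selected := make([]uint64, 0, size)
	for i := uint64(0); len(selected) < size; i++ {
		candidate := indices[i%total] // in-order traversal, no shuffling
		if accept(candidate, i) {
			selected = append(selected, candidate)
		}
	}
	return selected
}

func main() {
	// Accept every other round just to show the shape of the loop.
	sel := selectByBalance([]uint64{10, 11, 12}, 4, func(_, round uint64) bool { return round%2 == 0 })
	fmt.Println(sel) // [10 12 11 10]
}
```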
@@ -231,22 +199,15 @@ func selectByBalanceFill(
|
||||
}
|
||||
|
||||
// acceptByBalance determines if a validator is accepted based on its effective balance.
|
||||
// Spec v1.7.0-alpha.0 (pseudocode):
|
||||
// compute_balance_weighted_acceptance(state, index, seed, i):
|
||||
//
|
||||
// <spec fn="compute_balance_weighted_acceptance" fork="gloas" hash="9954dcd0">
|
||||
// def compute_balance_weighted_acceptance(
|
||||
// state: BeaconState, index: ValidatorIndex, seed: Bytes32, i: uint64
|
||||
// ) -> bool:
|
||||
// """
|
||||
// Return whether to accept the selection of the validator ``index``, with probability
|
||||
// proportional to its ``effective_balance``, and randomness given by ``seed`` and ``i``.
|
||||
// """
|
||||
// MAX_RANDOM_VALUE = 2**16 - 1
|
||||
// random_bytes = hash(seed + uint_to_bytes(i // 16))
|
||||
// offset = i % 16 * 2
|
||||
// random_value = bytes_to_uint64(random_bytes[offset : offset + 2])
|
||||
// effective_balance = state.validators[index].effective_balance
|
||||
// return effective_balance * MAX_RANDOM_VALUE >= MAX_EFFECTIVE_BALANCE_ELECTRA * random_value
|
||||
// </spec>
|
||||
// MAX_RANDOM_VALUE = 2**16 - 1
|
||||
// random_bytes = hash(seed + uint_to_bytes(i // 16))
|
||||
// offset = i % 16 * 2
|
||||
// random_value = bytes_to_uint64(random_bytes[offset:offset+2])
|
||||
// effective_balance = state.validators[index].effective_balance
|
||||
// return effective_balance * MAX_RANDOM_VALUE >= MAX_EFFECTIVE_BALANCE_ELECTRA * random_value
|
||||
func acceptByBalance(st state.ReadOnlyBeaconState, idx primitives.ValidatorIndex, seedBuf []byte, hashFunc func([]byte) [32]byte, maxBalance uint64, round uint64) (bool, error) {
|
||||
// Reuse the seed buffer by overwriting the last 8 bytes with the round counter.
|
||||
binary.LittleEndian.PutUint64(seedBuf[len(seedBuf)-8:], round/16)
|
||||
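A self-contained sketch of compute_balance_weighted_acceptance, assuming SHA-256 for the hash and the Electra maximum effective balance; the constants and names are illustrative, not Prysm's:

```go
// Accept a candidate with probability proportional to its effective balance,
// using two pseudo-random bytes derived from (seed, round).
package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

const (
	maxRandomValue             = 1<<16 - 1         // 2**16 - 1
	maxEffectiveBalanceElectra = 2_048_000_000_000 // 2048 ETH in Gwei (assumed)
)

func acceptByBalance(effectiveBalance uint64, seed [32]byte, i uint64) bool {
	// random_bytes = hash(seed + uint_to_bytes(i // 16))
	var buf [40]byte
	copy(buf[:32], seed[:])
	binary.LittleEndian.PutUint64(buf[32:], i/16)
	randomBytes := sha256.Sum256(buf[:])

	// random_value = bytes_to_uint64(random_bytes[offset : offset + 2])
	offset := (i % 16) * 2
	randomValue := uint64(binary.LittleEndian.Uint16(randomBytes[offset : offset+2]))

	return effectiveBalance*maxRandomValue >= maxEffectiveBalanceElectra*randomValue
}

func main() {
	seed := sha256.Sum256([]byte("example seed"))
	fmt.Println(acceptByBalance(32_000_000_000, seed, 0))
}
```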
@@ -263,26 +224,16 @@ func acceptByBalance(st state.ReadOnlyBeaconState, idx primitives.ValidatorIndex
|
||||
}
|
||||
|
||||
// validIndexedPayloadAttestation verifies the signature of an indexed payload attestation.
|
||||
// Spec v1.7.0-alpha.0 (pseudocode):
|
||||
// is_valid_indexed_payload_attestation(state: BeaconState, indexed_payload_attestation: IndexedPayloadAttestation) -> bool:
|
||||
//
|
||||
// <spec fn="is_valid_indexed_payload_attestation" fork="gloas" hash="d76e0f89">
|
||||
// def is_valid_indexed_payload_attestation(
|
||||
// state: BeaconState, attestation: IndexedPayloadAttestation
|
||||
// ) -> bool:
|
||||
// """
|
||||
// Check if ``attestation`` is non-empty, has sorted indices, and has
|
||||
// a valid aggregate signature.
|
||||
// """
|
||||
// # Verify indices are non-empty and sorted
|
||||
// indices = attestation.attesting_indices
|
||||
// if len(indices) == 0 or not indices == sorted(indices):
|
||||
// return False
|
||||
//
|
||||
// # Verify aggregate signature
|
||||
// pubkeys = [state.validators[i].pubkey for i in indices]
|
||||
// domain = get_domain(state, DOMAIN_PTC_ATTESTER, compute_epoch_at_slot(attestation.data.slot))
|
||||
// signing_root = compute_signing_root(attestation.data, domain)
|
||||
// return bls.FastAggregateVerify(pubkeys, signing_root, attestation.signature)
|
||||
// </spec>
|
||||
// indices = indexed_payload_attestation.attesting_indices
|
||||
// return len(indices) > 0 and indices == sorted(indices) and
|
||||
// bls.FastAggregateVerify(
|
||||
// [state.validators[i].pubkey for i in indices],
|
||||
// compute_signing_root(indexed_payload_attestation.data, get_domain(state, DOMAIN_PTC_ATTESTER, compute_epoch_at_slot(attestation.data.slot)),
|
||||
// indexed_payload_attestation.signature,
|
||||
// )
|
||||
func validIndexedPayloadAttestation(st state.ReadOnlyBeaconState, att *consensus_types.IndexedPayloadAttestation) error {
|
||||
indices := att.AttestingIndices
|
||||
if len(indices) == 0 || !slices.IsSorted(indices) {
|
||||
|
||||
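The non-signature part of is_valid_indexed_payload_attestation reduces to two slice checks. A small sketch; the BLS aggregate verification is deliberately omitted:

```go
// Indices must be non-empty and sorted before the aggregate signature is checked.
package main

import (
	"fmt"
	"slices"
)

func validIndices(indices []uint64) bool {
	return len(indices) > 0 && slices.IsSorted(indices)
}

func main() {
	fmt.Println(validIndices([]uint64{1, 2, 5})) // true
	fmt.Println(validIndices([]uint64{2, 1}))    // false
	fmt.Println(validIndices(nil))               // false
}
```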
@@ -10,21 +10,17 @@ import (
|
||||
)
|
||||
|
||||
// ProcessBuilderPendingPayments processes the builder pending payments from the previous epoch.
|
||||
// Spec v1.7.0-alpha.0 (pseudocode):
|
||||
// def process_builder_pending_payments(state: BeaconState) -> None:
|
||||
//
|
||||
// <spec fn="process_builder_pending_payments" fork="gloas" hash="10da48dd">
|
||||
// def process_builder_pending_payments(state: BeaconState) -> None:
|
||||
// """
|
||||
// Processes the builder pending payments from the previous epoch.
|
||||
// """
|
||||
// quorum = get_builder_payment_quorum_threshold(state)
|
||||
// for payment in state.builder_pending_payments[:SLOTS_PER_EPOCH]:
|
||||
// if payment.weight >= quorum:
|
||||
// state.builder_pending_withdrawals.append(payment.withdrawal)
|
||||
// quorum = get_builder_payment_quorum_threshold(state)
|
||||
// for payment in state.builder_pending_payments[:SLOTS_PER_EPOCH]:
|
||||
// if payment.weight >= quorum:
|
||||
// state.builder_pending_withdrawals.append(payment.withdrawal)
|
||||
//
|
||||
// old_payments = state.builder_pending_payments[SLOTS_PER_EPOCH:]
|
||||
// new_payments = [BuilderPendingPayment() for _ in range(SLOTS_PER_EPOCH)]
|
||||
// state.builder_pending_payments = old_payments + new_payments
|
||||
// </spec>
|
||||
// old_payments = state.builder_pending_payments[SLOTS_PER_EPOCH:]
|
||||
// new_payments = [BuilderPendingPayment() for _ in range(SLOTS_PER_EPOCH)]
|
||||
// state.builder_pending_payments = old_payments + new_payments
|
||||
func ProcessBuilderPendingPayments(state state.BeaconState) error {
|
||||
quorum, err := builderQuorumThreshold(state)
|
||||
if err != nil {
|
||||
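A toy sketch of the payment-window rotation described in the spec comment above, with a tiny SLOTS_PER_EPOCH and simplified stand-in types (not the spec's BuilderPendingPayment):

```go
// Previous-epoch payments that reached quorum become withdrawals, then the
// two-epoch window shifts: current-epoch payments move down, a fresh empty
// epoch is appended.
package main

import "fmt"

const slotsPerEpoch = 4 // tiny value for illustration

type pendingPayment struct {
	weight uint64
	amount uint64
}

func processBuilderPendingPayments(payments []pendingPayment, quorum uint64) ([]pendingPayment, []uint64) {
	var withdrawals []uint64
	for _, p := range payments[:slotsPerEpoch] {
		if p.weight >= quorum {
			withdrawals = append(withdrawals, p.amount)
		}
	}
	rotated := append(append([]pendingPayment{}, payments[slotsPerEpoch:]...), make([]pendingPayment, slotsPerEpoch)...)
	return rotated, withdrawals
}

func main() {
	payments := make([]pendingPayment, 2*slotsPerEpoch)
	payments[1] = pendingPayment{weight: 10, amount: 5}
	rotated, withdrawals := processBuilderPendingPayments(payments, 8)
	fmt.Println(len(rotated), withdrawals) // 8 [5]
}
```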
@@ -57,16 +53,12 @@ func ProcessBuilderPendingPayments(state state.BeaconState) error {
|
||||
}
|
||||
|
||||
// builderQuorumThreshold calculates the quorum threshold for builder payments.
|
||||
// Spec v1.7.0-alpha.0 (pseudocode):
|
||||
// def get_builder_payment_quorum_threshold(state: BeaconState) -> uint64:
|
||||
//
|
||||
// <spec fn="get_builder_payment_quorum_threshold" fork="gloas" hash="a64b7ffb">
|
||||
// def get_builder_payment_quorum_threshold(state: BeaconState) -> uint64:
|
||||
// """
|
||||
// Calculate the quorum threshold for builder payments.
|
||||
// """
|
||||
// per_slot_balance = get_total_active_balance(state) // SLOTS_PER_EPOCH
|
||||
// quorum = per_slot_balance * BUILDER_PAYMENT_THRESHOLD_NUMERATOR
|
||||
// return uint64(quorum // BUILDER_PAYMENT_THRESHOLD_DENOMINATOR)
|
||||
// </spec>
|
||||
// per_slot_balance = get_total_active_balance(state) // SLOTS_PER_EPOCH
|
||||
// quorum = per_slot_balance * BUILDER_PAYMENT_THRESHOLD_NUMERATOR
|
||||
// return uint64(quorum // BUILDER_PAYMENT_THRESHOLD_DENOMINATOR)
|
||||
func builderQuorumThreshold(state state.ReadOnlyBeaconState) (primitives.Gwei, error) {
|
||||
activeBalance, err := helpers.TotalActiveBalance(state)
|
||||
if err != nil {
|
||||
|
||||
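A worked sketch of the quorum arithmetic; the threshold numerator and denominator here are assumed values chosen only to show the shape of the computation:

```go
// quorum = (total_active_balance / SLOTS_PER_EPOCH) * NUMERATOR / DENOMINATOR
package main

import "fmt"

const (
	slotsPerEpoch                      = 32
	builderPaymentThresholdNumerator   = 6  // assumed
	builderPaymentThresholdDenominator = 10 // assumed
)

func builderQuorumThreshold(totalActiveBalance uint64) uint64 {
	perSlotBalance := totalActiveBalance / slotsPerEpoch
	return perSlotBalance * builderPaymentThresholdNumerator / builderPaymentThresholdDenominator
}

func main() {
	// e.g. 1,024,000 ETH of active stake expressed in Gwei
	fmt.Println(builderQuorumThreshold(1_024_000_000_000_000))
}
```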
@@ -11,20 +11,16 @@ import (
|
||||
)
|
||||
|
||||
// RemoveBuilderPendingPayment removes the pending builder payment for the proposal slot.
|
||||
// Spec v1.7.0 (pseudocode):
|
||||
//
|
||||
// <spec fn="process_proposer_slashing" fork="gloas" lines="22-32" hash="4da721ef">
|
||||
// # [New in Gloas:EIP7732]
|
||||
// # Remove the BuilderPendingPayment corresponding to
|
||||
// # this proposal if it is still in the 2-epoch window.
|
||||
// slot = header_1.slot
|
||||
// proposal_epoch = compute_epoch_at_slot(slot)
|
||||
// if proposal_epoch == get_current_epoch(state):
|
||||
// payment_index = SLOTS_PER_EPOCH + slot % SLOTS_PER_EPOCH
|
||||
// state.builder_pending_payments[payment_index] = BuilderPendingPayment()
|
||||
// payment_index = SLOTS_PER_EPOCH + slot % SLOTS_PER_EPOCH
|
||||
// state.builder_pending_payments[payment_index] = BuilderPendingPayment()
|
||||
// elif proposal_epoch == get_previous_epoch(state):
|
||||
// payment_index = slot % SLOTS_PER_EPOCH
|
||||
// state.builder_pending_payments[payment_index] = BuilderPendingPayment()
|
||||
// </spec>
|
||||
// payment_index = slot % SLOTS_PER_EPOCH
|
||||
// state.builder_pending_payments[payment_index] = BuilderPendingPayment()
|
||||
func RemoveBuilderPendingPayment(st state.BeaconState, header *eth.BeaconBlockHeader) error {
|
||||
proposalEpoch := slots.ToEpoch(header.Slot)
|
||||
currentEpoch := time.CurrentEpoch(st)
|
||||
|
||||
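The removal hinges on mapping the slashed proposal's slot to an index in the two-epoch payment window. An illustrative sketch, assuming 32 slots per epoch:

```go
// Current-epoch proposals land in the upper half of the window
// (SLOTS_PER_EPOCH + slot % SLOTS_PER_EPOCH), previous-epoch proposals in the
// lower half; anything older is already outside the window.
package main

import "fmt"

const slotsPerEpoch = 32

func paymentIndex(headerSlot, currentEpoch, previousEpoch uint64) (uint64, bool) {
	proposalEpoch := headerSlot / slotsPerEpoch
	switch proposalEpoch {
	case currentEpoch:
		return slotsPerEpoch + headerSlot%slotsPerEpoch, true
	case previousEpoch:
		return headerSlot % slotsPerEpoch, true
	default:
		return 0, false // outside the 2-epoch window, nothing to clear
	}
}

func main() {
	idx, ok := paymentIndex(70, 2, 1) // slot 70 is in epoch 2 (current)
	fmt.Println(idx, ok)              // 38 true
}
```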
105
beacon-chain/core/gloas/withdrawal.go
Normal file
@@ -0,0 +1,105 @@
|
||||
package gloas
|
||||
|
||||
import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// ProcessWithdrawals applies withdrawals to the state for Gloas.
|
||||
//
|
||||
// Spec v1.7.0-alpha.1 (pseudocode):
|
||||
//
|
||||
// def process_withdrawals(
|
||||
//
|
||||
// state: BeaconState,
|
||||
// # [Modified in Gloas:EIP7732]
|
||||
// # Removed `payload`
|
||||
//
|
||||
// ) -> None:
|
||||
//
|
||||
// # [New in Gloas:EIP7732]
|
||||
// # Return early if the parent block is empty
|
||||
// if not is_parent_block_full(state):
|
||||
// return
|
||||
//
|
||||
// # Get expected withdrawals
|
||||
// expected = get_expected_withdrawals(state)
|
||||
//
|
||||
// # Apply expected withdrawals
|
||||
// apply_withdrawals(state, expected.withdrawals)
|
||||
//
|
||||
// # Update withdrawals fields in the state
|
||||
// update_next_withdrawal_index(state, expected.withdrawals)
|
||||
// # [New in Gloas:EIP7732]
|
||||
// update_payload_expected_withdrawals(state, expected.withdrawals)
|
||||
// # [New in Gloas:EIP7732]
|
||||
// update_builder_pending_withdrawals(state, expected.processed_builder_withdrawals_count)
|
||||
// update_pending_partial_withdrawals(state, expected.processed_partial_withdrawals_count)
|
||||
// # [New in Gloas:EIP7732]
|
||||
// update_next_withdrawal_builder_index(state, expected.processed_builders_sweep_count)
|
||||
// update_next_withdrawal_validator_index(state, expected.withdrawals)
|
||||
func ProcessWithdrawals(st state.BeaconState) error {
|
||||
full, err := st.IsParentBlockFull()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get parent block full status")
|
||||
}
|
||||
if !full {
|
||||
return nil
|
||||
}
|
||||
|
||||
expected, err := st.ExpectedWithdrawalsGloas()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get expected withdrawals")
|
||||
}
|
||||
|
||||
if err := st.DecreaseWithdrawalBalances(expected.Withdrawals); err != nil {
|
||||
return errors.Wrap(err, "could not decrease withdrawal balances")
|
||||
}
|
||||
|
||||
if len(expected.Withdrawals) > 0 {
|
||||
if err := st.SetNextWithdrawalIndex(expected.Withdrawals[len(expected.Withdrawals)-1].Index + 1); err != nil {
|
||||
return errors.Wrap(err, "could not set next withdrawal index")
|
||||
}
|
||||
}
|
||||
|
||||
err = st.SetPayloadExpectedWithdrawals(expected.Withdrawals)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not set payload expected withdrawals")
|
||||
}
|
||||
|
||||
err = st.DequeueBuilderPendingWithdrawals(expected.ProcessedBuilderWithdrawalsCount)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "unable to dequeue builder pending withdrawals from state")
|
||||
}
|
||||
|
||||
if err := st.DequeuePendingPartialWithdrawals(expected.ProcessedPartialWithdrawalsCount); err != nil {
|
||||
return errors.Wrap(err, "unable to dequeue partial withdrawals from state")
|
||||
}
|
||||
|
||||
err = st.SetNextWithdrawalBuilderIndex(expected.NextWithdrawalBuilderIndex)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not set next withdrawal builder index")
|
||||
}
|
||||
|
||||
var nextValidatorIndex primitives.ValidatorIndex
|
||||
if uint64(len(expected.Withdrawals)) < params.BeaconConfig().MaxWithdrawalsPerPayload {
|
||||
nextValidatorIndex, err = st.NextWithdrawalValidatorIndex()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get next withdrawal validator index")
|
||||
}
|
||||
nextValidatorIndex += primitives.ValidatorIndex(params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep)
|
||||
nextValidatorIndex = nextValidatorIndex % primitives.ValidatorIndex(st.NumValidators())
|
||||
} else {
|
||||
nextValidatorIndex = expected.Withdrawals[len(expected.Withdrawals)-1].ValidatorIndex + 1
|
||||
if nextValidatorIndex == primitives.ValidatorIndex(st.NumValidators()) {
|
||||
nextValidatorIndex = 0
|
||||
}
|
||||
}
|
||||
if err := st.SetNextWithdrawalValidatorIndex(nextValidatorIndex); err != nil {
|
||||
return errors.Wrap(err, "could not set next withdrawal validator index")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
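The trickiest part of the new ProcessWithdrawals is how the next withdrawal validator index advances. A reduced sketch of that rule, with assumed limits rather than Prysm's config values:

```go
// If the sweep did not fill the payload, skip ahead by the sweep bound;
// otherwise continue from the validator after the last withdrawal, wrapping
// at the validator set size.
package main

import "fmt"

const (
	maxWithdrawalsPerPayload         = 16
	maxValidatorsPerWithdrawalsSweep = 16384
)

func nextWithdrawalValidatorIndex(prevIndex, lastWithdrawnIndex, withdrawalCount, numValidators uint64) uint64 {
	if withdrawalCount < maxWithdrawalsPerPayload {
		return (prevIndex + maxValidatorsPerWithdrawalsSweep) % numValidators
	}
	next := lastWithdrawnIndex + 1
	if next == numValidators {
		next = 0
	}
	return next
}

func main() {
	fmt.Println(nextWithdrawalValidatorIndex(100, 0, 3, 1_000_000))     // 16484 (partial sweep)
	fmt.Println(nextWithdrawalValidatorIndex(100, 4195, 16, 1_000_000)) // 4196 (full payload)
}
```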
34
beacon-chain/core/gloas/withdrawal_test.go
Normal file
@@ -0,0 +1,34 @@
|
||||
package gloas
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
)
|
||||
|
||||
func TestProcessWithdrawals_ParentBlockNotFull(t *testing.T) {
|
||||
state, err := state_native.InitializeFromProtoUnsafeGloas(ðpb.BeaconStateGloas{})
|
||||
require.NoError(t, err)
|
||||
|
||||
st := &withdrawalsState{BeaconState: state}
|
||||
require.NoError(t, ProcessWithdrawals(st))
|
||||
require.Equal(t, false, st.expectedCalled)
|
||||
}
|
||||
|
||||
type withdrawalsState struct {
|
||||
state.BeaconState
|
||||
expectedCalled bool
|
||||
decreaseCalled bool
|
||||
}
|
||||
|
||||
func (w *withdrawalsState) IsParentBlockFull() (bool, error) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (w *withdrawalsState) ExpectedWithdrawalsGloas() (state.ExpectedWithdrawalsGloasResult, error) {
|
||||
w.expectedCalled = true
|
||||
return state.ExpectedWithdrawalsGloasResult{}, nil
|
||||
}
|
||||
@@ -143,11 +143,10 @@ func ProcessSlot(ctx context.Context, state state.BeaconState) (state.BeaconStat
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// <spec fn="process_slot" fork="gloas" lines="11-13" hash="62b28839">
|
||||
// Spec v1.6.1 (pseudocode):
|
||||
// # [New in Gloas:EIP7732]
|
||||
// # Unset the next payload availability
|
||||
// state.execution_payload_availability[(state.slot + 1) % SLOTS_PER_HISTORICAL_ROOT] = 0b0
|
||||
// </spec>
|
||||
if state.Version() >= version.Gloas {
|
||||
index := uint64((state.Slot() + 1) % params.BeaconConfig().SlotsPerHistoricalRoot)
|
||||
if err := state.UpdateExecutionPayloadAvailabilityAtIndex(index, 0x0); err != nil {
|
||||
|
||||
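A minimal sketch of the availability-bit reset performed above, assuming the mainnet SLOTS_PER_HISTORICAL_ROOT of 8192:

```go
// Clear the payload-availability bit for the upcoming slot.
package main

import "fmt"

const slotsPerHistoricalRoot = 8192

func unsetNextPayloadAvailability(availability []byte, slot uint64) {
	index := (slot + 1) % slotsPerHistoricalRoot
	availability[index] = 0x0
}

func main() {
	availability := make([]byte, slotsPerHistoricalRoot)
	availability[11] = 0x1
	unsetNextPayloadAvailability(availability, 10)
	fmt.Println(availability[11]) // 0
}
```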
@@ -78,7 +78,7 @@ func newGloasState(t *testing.T, slot primitives.Slot, availability []byte) stat
|
||||
BlockHash: make([]byte, 32),
|
||||
PrevRandao: make([]byte, 32),
|
||||
FeeRecipient: make([]byte, 20),
|
||||
BlobKzgCommitments: [][]byte{make([]byte, 48)},
|
||||
BlobKzgCommitmentsRoot: make([]byte, 32),
|
||||
},
|
||||
Eth1Data: ðpb.Eth1Data{
|
||||
DepositRoot: make([]byte, 32),
|
||||
|
||||
@@ -2,7 +2,6 @@ package kv
|
||||
|
||||
import (
|
||||
"context"
|
||||
"slices"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
@@ -34,9 +33,6 @@ func (s *Store) LastArchivedRoot(ctx context.Context) [32]byte {
|
||||
if err := s.db.View(func(tx *bolt.Tx) error {
|
||||
bkt := tx.Bucket(stateSlotIndicesBucket)
|
||||
_, blockRoot = bkt.Cursor().Last()
|
||||
if len(blockRoot) > 0 {
|
||||
blockRoot = slices.Clone(blockRoot)
|
||||
}
|
||||
return nil
|
||||
}); err != nil { // This view never returns an error, but we'll handle anyway for sanity.
|
||||
panic(err) // lint:nopanic -- View never returns an error.
|
||||
@@ -55,9 +51,6 @@ func (s *Store) ArchivedPointRoot(ctx context.Context, slot primitives.Slot) [32
|
||||
if err := s.db.View(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket(stateSlotIndicesBucket)
|
||||
blockRoot = bucket.Get(bytesutil.SlotToBytesBigEndian(slot))
|
||||
if len(blockRoot) > 0 {
|
||||
blockRoot = slices.Clone(blockRoot)
|
||||
}
|
||||
return nil
|
||||
}); err != nil { // This view never returns an error, but we'll handle anyway for sanity.
|
||||
panic(err) // lint:nopanic -- View never returns an error.
|
||||
|
||||
@@ -812,10 +812,7 @@ func (s *Store) FeeRecipientByValidatorID(ctx context.Context, id primitives.Val
|
||||
var addr []byte
|
||||
err := s.db.View(func(tx *bolt.Tx) error {
|
||||
bkt := tx.Bucket(feeRecipientBucket)
|
||||
stored := bkt.Get(bytesutil.Uint64ToBytesBigEndian(uint64(id)))
|
||||
if len(stored) > 0 {
|
||||
addr = slices.Clone(stored)
|
||||
}
|
||||
addr = bkt.Get(bytesutil.Uint64ToBytesBigEndian(uint64(id)))
|
||||
// If the fee recipient is not found in the standard fee recipient bucket, then
|
||||
// check the registration bucket. The fee recipient may be there.
|
||||
// This is to resolve incompatibility until we fully migrate to the registration bucket.
|
||||
@@ -829,7 +826,7 @@ func (s *Store) FeeRecipientByValidatorID(ctx context.Context, id primitives.Val
|
||||
if err := decode(ctx, enc, reg); err != nil {
|
||||
return err
|
||||
}
|
||||
addr = slices.Clone(reg.FeeRecipient)
|
||||
addr = reg.FeeRecipient
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
@@ -67,6 +67,7 @@ func getSubscriptionStatusFromDB(t *testing.T, db *Store) bool {
|
||||
return subscribed
|
||||
}
|
||||
|
||||
|
||||
func TestUpdateCustodyInfo(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
|
||||
|
||||
@@ -3,7 +3,6 @@ package kv
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"slices"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
@@ -18,10 +17,7 @@ func (s *Store) DepositContractAddress(ctx context.Context) ([]byte, error) {
|
||||
var addr []byte
|
||||
if err := s.db.View(func(tx *bolt.Tx) error {
|
||||
chainInfo := tx.Bucket(chainMetadataBucket)
|
||||
stored := chainInfo.Get(depositContractAddressKey)
|
||||
if len(stored) > 0 {
|
||||
addr = slices.Clone(stored)
|
||||
}
|
||||
addr = chainInfo.Get(depositContractAddressKey)
|
||||
return nil
|
||||
}); err != nil { // This view never returns an error, but we'll handle anyway for sanity.
|
||||
panic(err) // lint:nopanic -- View never returns an error.
|
||||
|
||||
@@ -199,7 +199,7 @@ func performValidatorStateMigration(ctx context.Context, bar *progressbar.Progre
|
||||
func stateBucketKeys(stateBucket *bolt.Bucket) ([][]byte, error) {
|
||||
var keys [][]byte
|
||||
if err := stateBucket.ForEach(func(pubKey, v []byte) error {
|
||||
keys = append(keys, bytes.Clone(pubKey))
|
||||
keys = append(keys, pubKey)
|
||||
return nil
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
|
||||
@@ -2,7 +2,6 @@ package kv
|
||||
|
||||
import (
|
||||
"context"
|
||||
"slices"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
|
||||
@@ -188,23 +187,20 @@ func (s *Store) getDiff(lvl int, slot uint64) (hdiff.HdiffBytes, error) {
|
||||
return bolt.ErrBucketNotFound
|
||||
}
|
||||
buf := append(key, stateSuffix...)
|
||||
rawStateDiff := bucket.Get(buf)
|
||||
if len(rawStateDiff) == 0 {
|
||||
stateDiff = bucket.Get(buf)
|
||||
if stateDiff == nil {
|
||||
return errors.New("state diff not found")
|
||||
}
|
||||
stateDiff = slices.Clone(rawStateDiff)
|
||||
buf = append(key, validatorSuffix...)
|
||||
rawValidatorDiff := bucket.Get(buf)
|
||||
if len(rawValidatorDiff) == 0 {
|
||||
validatorDiff = bucket.Get(buf)
|
||||
if validatorDiff == nil {
|
||||
return errors.New("validator diff not found")
|
||||
}
|
||||
validatorDiff = slices.Clone(rawValidatorDiff)
|
||||
buf = append(key, balancesSuffix...)
|
||||
rawBalancesDiff := bucket.Get(buf)
|
||||
if len(rawBalancesDiff) == 0 {
|
||||
balancesDiff = bucket.Get(buf)
|
||||
if balancesDiff == nil {
|
||||
return errors.New("balances diff not found")
|
||||
}
|
||||
balancesDiff = slices.Clone(rawBalancesDiff)
|
||||
return nil
|
||||
})
|
||||
|
||||
@@ -228,11 +224,10 @@ func (s *Store) getFullSnapshot(slot uint64) (state.BeaconState, error) {
|
||||
if bucket == nil {
|
||||
return bolt.ErrBucketNotFound
|
||||
}
|
||||
rawEnc := bucket.Get(key)
|
||||
if rawEnc == nil {
|
||||
enc = bucket.Get(key)
|
||||
if enc == nil {
|
||||
return errors.New("state not found")
|
||||
}
|
||||
enc = slices.Clone(rawEnc)
|
||||
return nil
|
||||
})
|
||||
|
||||
|
||||
@@ -2,7 +2,6 @@ package kv
|
||||
|
||||
import (
|
||||
"context"
|
||||
"slices"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
|
||||
@@ -48,11 +47,7 @@ func (s *Store) StateSummary(ctx context.Context, blockRoot [32]byte) (*ethpb.St
|
||||
}
|
||||
var enc []byte
|
||||
if err := s.db.View(func(tx *bolt.Tx) error {
|
||||
rawEnc := tx.Bucket(stateSummaryBucket).Get(blockRoot[:])
|
||||
if len(rawEnc) == 0 {
|
||||
return nil
|
||||
}
|
||||
enc = slices.Clone(rawEnc)
|
||||
enc = tx.Bucket(stateSummaryBucket).Get(blockRoot[:])
|
||||
return nil
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
|
||||
@@ -25,7 +25,6 @@ go_library(
|
||||
"//testing/spectest:__subpackages__",
|
||||
],
|
||||
deps = [
|
||||
"//api/server/structs:go_default_library",
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/cache/depositsnapshot:go_default_library",
|
||||
@@ -100,7 +99,6 @@ go_test(
|
||||
data = glob(["testdata/**"]),
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//api/server/structs:go_default_library",
|
||||
"//async/event:go_default_library",
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//beacon-chain/blockchain/testing:go_default_library",
|
||||
|
||||
@@ -7,7 +7,6 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/api/server/structs"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/execution/types"
|
||||
@@ -100,8 +99,6 @@ const (
|
||||
GetBlobsV1 = "engine_getBlobsV1"
|
||||
// GetBlobsV2 request string for JSON-RPC.
|
||||
GetBlobsV2 = "engine_getBlobsV2"
|
||||
// GetClientVersionV1 is the JSON-RPC method that identifies the execution client.
|
||||
GetClientVersionV1 = "engine_getClientVersionV1"
|
||||
// Defines the seconds before timing out engine endpoints with non-block execution semantics.
|
||||
defaultEngineTimeout = time.Second
|
||||
)
|
||||
@@ -138,7 +135,6 @@ type EngineCaller interface {
|
||||
GetPayload(ctx context.Context, payloadId [8]byte, slot primitives.Slot) (*blocks.GetPayloadResponse, error)
|
||||
ExecutionBlockByHash(ctx context.Context, hash common.Hash, withTxs bool) (*pb.ExecutionBlock, error)
|
||||
GetTerminalBlockHash(ctx context.Context, transitionTime uint64) ([]byte, bool, error)
|
||||
GetClientVersionV1(ctx context.Context) ([]*structs.ClientVersionV1, error)
|
||||
}
|
||||
|
||||
var ErrEmptyBlockHash = errors.New("Block hash is empty 0x0000...")
|
||||
@@ -557,30 +553,6 @@ func (s *Service) GetBlobsV2(ctx context.Context, versionedHashes []common.Hash)
|
||||
return result, handleRPCError(err)
|
||||
}
|
||||
|
||||
func (s *Service) GetClientVersionV1(ctx context.Context) ([]*structs.ClientVersionV1, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetClientVersionV1")
|
||||
defer span.End()
|
||||
|
||||
var result []*structs.ClientVersionV1
|
||||
err := s.rpcClient.CallContext(
|
||||
ctx,
|
||||
&result,
|
||||
GetClientVersionV1,
|
||||
structs.ClientVersionV1{
|
||||
Code: "PM",
|
||||
Name: "Prysm",
|
||||
Version: version.SemanticVersion(),
|
||||
Commit: version.GitCommit()[:8],
|
||||
},
|
||||
)
|
||||
|
||||
if len(result) == 0 {
|
||||
return nil, errors.New("execution client returned no result")
|
||||
}
|
||||
|
||||
return result, handleRPCError(err)
|
||||
}
|
||||
|
||||
// ReconstructFullBlock takes in a blinded beacon block and reconstructs
|
||||
// a beacon block with a full execution payload via the engine API.
|
||||
func (s *Service) ReconstructFullBlock(
|
||||
|
||||
@@ -13,7 +13,6 @@ import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/api/server/structs"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/filesystem"
|
||||
@@ -1000,61 +999,6 @@ func TestClient_HTTP(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, want, resp)
|
||||
})
|
||||
t.Run(GetClientVersionV1, func(t *testing.T) {
|
||||
want := []*structs.ClientVersionV1{{
|
||||
Code: "GE",
|
||||
Name: "go-ethereum",
|
||||
Version: "1.15.11-stable",
|
||||
Commit: "36b2371c",
|
||||
}}
|
||||
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
enc, err := io.ReadAll(r.Body)
|
||||
require.NoError(t, err)
|
||||
jsonRequestString := string(enc)
|
||||
|
||||
// We expect the JSON string RPC request contains the right method name.
|
||||
require.Equal(t, true, strings.Contains(
|
||||
jsonRequestString, GetClientVersionV1,
|
||||
))
|
||||
require.Equal(t, true, strings.Contains(
|
||||
jsonRequestString, "\"code\":\"PM\"",
|
||||
))
|
||||
require.Equal(t, true, strings.Contains(
|
||||
jsonRequestString, "\"name\":\"Prysm\"",
|
||||
))
|
||||
require.Equal(t, true, strings.Contains(
|
||||
jsonRequestString, fmt.Sprintf("\"version\":\"%s\"", version.SemanticVersion()),
|
||||
))
|
||||
require.Equal(t, true, strings.Contains(
|
||||
jsonRequestString, fmt.Sprintf("\"commit\":\"%s\"", version.GitCommit()[:8]),
|
||||
))
|
||||
resp := map[string]any{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": want,
|
||||
}
|
||||
err = json.NewEncoder(w).Encode(resp)
|
||||
require.NoError(t, err)
|
||||
}))
|
||||
defer srv.Close()
|
||||
|
||||
rpcClient, err := rpc.DialHTTP(srv.URL)
|
||||
require.NoError(t, err)
|
||||
defer rpcClient.Close()
|
||||
|
||||
service := &Service{}
|
||||
service.rpcClient = rpcClient
|
||||
|
||||
// We call the RPC method via HTTP and expect a proper result.
|
||||
resp, err := service.GetClientVersionV1(ctx)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, want, resp)
|
||||
})
|
||||
}
|
||||
|
||||
func TestReconstructFullBellatrixBlock(t *testing.T) {
|
||||
|
||||
@@ -13,7 +13,6 @@ go_library(
|
||||
"//visibility:public",
|
||||
],
|
||||
deps = [
|
||||
"//api/server/structs:go_default_library",
|
||||
"//async/event:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/execution/types:go_default_library",
|
||||
|
||||
@@ -4,7 +4,6 @@ import (
|
||||
"context"
|
||||
"math/big"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/api/server/structs"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
@@ -43,8 +42,6 @@ type EngineClient struct {
|
||||
ErrorBlobSidecars error
|
||||
DataColumnSidecars []blocks.VerifiedRODataColumn
|
||||
ErrorDataColumnSidecars error
|
||||
ClientVersion []*structs.ClientVersionV1
|
||||
ErrorClientVersion error
|
||||
}
|
||||
|
||||
// NewPayload --
|
||||
@@ -176,8 +173,3 @@ func (e *EngineClient) GetTerminalBlockHash(ctx context.Context, transitionTime
|
||||
blk = parentBlk
|
||||
}
|
||||
}
|
||||
|
||||
// GetClientVersionV1 --
|
||||
func (e *EngineClient) GetClientVersionV1(context.Context) ([]*structs.ClientVersionV1, error) {
|
||||
return e.ClientVersion, e.ErrorClientVersion
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ go_library(
|
||||
"doc.go",
|
||||
"errors.go",
|
||||
"forkchoice.go",
|
||||
"last_root.go",
|
||||
"log.go",
|
||||
"metrics.go",
|
||||
"node.go",
|
||||
@@ -50,6 +51,7 @@ go_test(
|
||||
srcs = [
|
||||
"ffg_update_test.go",
|
||||
"forkchoice_test.go",
|
||||
"last_root_test.go",
|
||||
"no_vote_test.go",
|
||||
"node_test.go",
|
||||
"on_tick_test.go",
|
||||
|
||||
@@ -32,6 +32,7 @@ func New() *ForkChoice {
|
||||
finalizedCheckpoint: &forkchoicetypes.Checkpoint{},
|
||||
proposerBoostRoot: [32]byte{},
|
||||
nodeByRoot: make(map[[fieldparams.RootLength]byte]*Node),
|
||||
nodeByPayload: make(map[[fieldparams.RootLength]byte]*Node),
|
||||
slashedIndices: make(map[primitives.ValidatorIndex]bool),
|
||||
receivedBlocksLastEpoch: [fieldparams.SlotsPerEpoch]primitives.Slot{},
|
||||
}
|
||||
|
||||
26
beacon-chain/forkchoice/doubly-linked-tree/last_root.go
Normal file
@@ -0,0 +1,26 @@
|
||||
package doublylinkedtree
|
||||
|
||||
import (
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
)
|
||||
|
||||
// LastRoot returns the last canonical block root in the given epoch
|
||||
func (f *ForkChoice) LastRoot(epoch primitives.Epoch) [32]byte {
|
||||
head := f.store.headNode
|
||||
headEpoch := slots.ToEpoch(head.slot)
|
||||
epochEnd, err := slots.EpochEnd(epoch)
|
||||
if err != nil {
|
||||
return [32]byte{}
|
||||
}
|
||||
if headEpoch <= epoch {
|
||||
return head.root
|
||||
}
|
||||
for head != nil && head.slot > epochEnd {
|
||||
head = head.parent
|
||||
}
|
||||
if head == nil {
|
||||
return [32]byte{}
|
||||
}
|
||||
return head.root
|
||||
}
|
||||
38
beacon-chain/forkchoice/doubly-linked-tree/last_root_test.go
Normal file
@@ -0,0 +1,38 @@
|
||||
package doublylinkedtree
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
)
|
||||
|
||||
func TestLastRoot(t *testing.T) {
|
||||
f := setup(0, 0)
|
||||
ctx := t.Context()
|
||||
|
||||
st, root, err := prepareForkchoiceState(ctx, 1, [32]byte{'1'}, params.BeaconConfig().ZeroHash, [32]byte{'1'}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
st, root, err = prepareForkchoiceState(ctx, 2, [32]byte{'2'}, [32]byte{'1'}, [32]byte{'2'}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
st, root, err = prepareForkchoiceState(ctx, 3, [32]byte{'3'}, [32]byte{'1'}, [32]byte{'3'}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
st, root, err = prepareForkchoiceState(ctx, 32, [32]byte{'4'}, [32]byte{'3'}, [32]byte{'4'}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
st, root, err = prepareForkchoiceState(ctx, 33, [32]byte{'5'}, [32]byte{'2'}, [32]byte{'5'}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
st, root, err = prepareForkchoiceState(ctx, 34, [32]byte{'6'}, [32]byte{'5'}, [32]byte{'6'}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
headNode := f.store.nodeByRoot[[32]byte{'6'}]
|
||||
f.store.headNode = headNode
|
||||
require.Equal(t, [32]byte{'6'}, f.store.headNode.root)
|
||||
require.Equal(t, [32]byte{'2'}, f.LastRoot(0))
|
||||
require.Equal(t, [32]byte{'6'}, f.LastRoot(1))
|
||||
require.Equal(t, [32]byte{'6'}, f.LastRoot(2))
|
||||
}
|
||||
@@ -94,5 +94,6 @@ func (s *Store) removeNodeAndChildren(ctx context.Context, node *Node, invalidRo
|
||||
s.previousProposerBoostScore = 0
|
||||
}
|
||||
delete(s.nodeByRoot, node.root)
|
||||
delete(s.nodeByPayload, node.payloadHash)
|
||||
return invalidRoots, nil
|
||||
}
|
||||
|
||||
@@ -113,6 +113,7 @@ func (s *Store) insert(ctx context.Context,
|
||||
}
|
||||
}
|
||||
|
||||
s.nodeByPayload[payloadHash] = n
|
||||
s.nodeByRoot[root] = n
|
||||
if parent == nil {
|
||||
if s.treeRootNode == nil {
|
||||
@@ -121,6 +122,7 @@ func (s *Store) insert(ctx context.Context,
|
||||
s.highestReceivedNode = n
|
||||
} else {
|
||||
delete(s.nodeByRoot, root)
|
||||
delete(s.nodeByPayload, payloadHash)
|
||||
return nil, errInvalidParentRoot
|
||||
}
|
||||
} else {
|
||||
@@ -189,6 +191,7 @@ func (s *Store) pruneFinalizedNodeByRootMap(ctx context.Context, node, finalized
|
||||
|
||||
node.children = nil
|
||||
delete(s.nodeByRoot, node.root)
|
||||
delete(s.nodeByPayload, node.payloadHash)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -270,6 +273,21 @@ func (f *ForkChoice) HighestReceivedBlockSlot() primitives.Slot {
|
||||
return f.store.highestReceivedNode.slot
|
||||
}
|
||||
|
||||
// HighestReceivedBlockDelay returns the number of slots that the highest
|
||||
// received block arrived late. For example, if a block was late by 12 slots,
|
||||
// then this method returns 12.
|
||||
func (f *ForkChoice) HighestReceivedBlockDelay() primitives.Slot {
|
||||
n := f.store.highestReceivedNode
|
||||
if n == nil {
|
||||
return 0
|
||||
}
|
||||
sss, err := slots.SinceSlotStart(n.slot, f.store.genesisTime, n.timestamp)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
return primitives.Slot(uint64(sss/time.Second) / params.BeaconConfig().SecondsPerSlot)
|
||||
}
|
||||
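A rough standalone sketch of the delay computation, assuming 12-second slots; it mirrors the division above without Prysm's slot helpers:

```go
// Delay in whole slots between a block's slot start and the time it was received.
package main

import (
	"fmt"
	"time"
)

const secondsPerSlot = 12

func blockDelaySlots(genesis time.Time, slot uint64, received time.Time) uint64 {
	slotStart := genesis.Add(time.Duration(slot*secondsPerSlot) * time.Second)
	if received.Before(slotStart) {
		return 0
	}
	return uint64(received.Sub(slotStart)/time.Second) / secondsPerSlot
}

func main() {
	genesis := time.Unix(0, 0)
	// Block for slot 10 received 144 seconds (12 slots) after its slot started.
	received := genesis.Add(time.Duration((10+12)*secondsPerSlot) * time.Second)
	fmt.Println(blockDelaySlots(genesis, 10, received)) // 12
}
```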
|
||||
// ReceivedBlocksLastEpoch returns the number of blocks received in the last epoch
|
||||
func (f *ForkChoice) ReceivedBlocksLastEpoch() (uint64, error) {
|
||||
count := uint64(0)
|
||||
|
||||
@@ -128,9 +128,10 @@ func TestStore_Insert(t *testing.T) {
|
||||
// The new node does not have a parent.
|
||||
treeRootNode := &Node{slot: 0, root: indexToHash(0)}
|
||||
nodeByRoot := map[[32]byte]*Node{indexToHash(0): treeRootNode}
|
||||
nodeByPayload := map[[32]byte]*Node{indexToHash(0): treeRootNode}
|
||||
jc := &forkchoicetypes.Checkpoint{Epoch: 0}
|
||||
fc := &forkchoicetypes.Checkpoint{Epoch: 0}
|
||||
s := &Store{nodeByRoot: nodeByRoot, treeRootNode: treeRootNode, justifiedCheckpoint: jc, finalizedCheckpoint: fc, highestReceivedNode: &Node{}}
|
||||
s := &Store{nodeByRoot: nodeByRoot, treeRootNode: treeRootNode, nodeByPayload: nodeByPayload, justifiedCheckpoint: jc, finalizedCheckpoint: fc, highestReceivedNode: &Node{}}
|
||||
payloadHash := [32]byte{'a'}
|
||||
ctx := t.Context()
|
||||
_, blk, err := prepareForkchoiceState(ctx, 100, indexToHash(100), indexToHash(0), payloadHash, 1, 1)
|
||||
@@ -237,6 +238,7 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
|
||||
s.finalizedCheckpoint.Root = indexToHash(1)
|
||||
require.NoError(t, s.prune(t.Context()))
|
||||
require.Equal(t, len(s.nodeByRoot), 1)
|
||||
require.Equal(t, len(s.nodeByPayload), 1)
|
||||
}
|
||||
|
||||
// This test starts with the following branching diagram
|
||||
@@ -317,6 +319,8 @@ func TestStore_PruneMapsNodes(t *testing.T) {
|
||||
s.finalizedCheckpoint.Root = indexToHash(1)
|
||||
require.NoError(t, s.prune(t.Context()))
|
||||
require.Equal(t, len(s.nodeByRoot), 1)
|
||||
require.Equal(t, len(s.nodeByPayload), 1)
|
||||
|
||||
}
|
||||
|
||||
func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
|
||||
@@ -335,6 +339,7 @@ func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(1), count)
|
||||
require.Equal(t, primitives.Slot(1), f.HighestReceivedBlockSlot())
|
||||
require.Equal(t, primitives.Slot(0), f.HighestReceivedBlockDelay())
|
||||
|
||||
// 64
|
||||
// Received block last epoch is 1
|
||||
@@ -347,6 +352,7 @@ func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(1), count)
|
||||
require.Equal(t, primitives.Slot(64), f.HighestReceivedBlockSlot())
|
||||
require.Equal(t, primitives.Slot(0), f.HighestReceivedBlockDelay())
|
||||
|
||||
// 64 65
|
||||
// Received block last epoch is 2
|
||||
@@ -359,6 +365,7 @@ func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(2), count)
|
||||
require.Equal(t, primitives.Slot(65), f.HighestReceivedBlockSlot())
|
||||
require.Equal(t, primitives.Slot(1), f.HighestReceivedBlockDelay())
|
||||
|
||||
// 64 65 66
|
||||
// Received block last epoch is 3
|
||||
@@ -710,3 +717,17 @@ func TestStore_CleanupInserting(t *testing.T) {
|
||||
require.NotNil(t, f.InsertNode(ctx, st, blk))
|
||||
require.Equal(t, false, f.HasNode(blk.Root()))
|
||||
}
|
||||
|
||||
func TestStore_HighestReceivedBlockDelay(t *testing.T) {
|
||||
f := ForkChoice{
|
||||
store: &Store{
|
||||
genesisTime: time.Unix(0, 0),
|
||||
highestReceivedNode: &Node{
|
||||
slot: 10,
|
||||
timestamp: time.Unix(int64(((10 + 12) * params.BeaconConfig().SecondsPerSlot)), 0), // 12 slots late
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
require.Equal(t, primitives.Slot(12), f.HighestReceivedBlockDelay())
|
||||
}
|
||||
|
||||
@@ -36,6 +36,7 @@ type Store struct {
|
||||
treeRootNode *Node // the root node of the store tree.
|
||||
headNode *Node // last head Node
|
||||
nodeByRoot map[[fieldparams.RootLength]byte]*Node // nodes indexed by roots.
|
||||
nodeByPayload map[[fieldparams.RootLength]byte]*Node // nodes indexed by payload Hash
|
||||
slashedIndices map[primitives.ValidatorIndex]bool // the list of equivocating validator indices
|
||||
originRoot [fieldparams.RootLength]byte // The genesis block root
|
||||
genesisTime time.Time
|
||||
|
||||
@@ -67,11 +67,13 @@ type FastGetter interface {
|
||||
HasNode([32]byte) bool
|
||||
HighestReceivedBlockSlot() primitives.Slot
|
||||
HighestReceivedBlockRoot() [32]byte
|
||||
HighestReceivedBlockDelay() primitives.Slot
|
||||
IsCanonical(root [32]byte) bool
|
||||
IsOptimistic(root [32]byte) (bool, error)
|
||||
IsViableForCheckpoint(*forkchoicetypes.Checkpoint) (bool, error)
|
||||
JustifiedCheckpoint() *forkchoicetypes.Checkpoint
|
||||
JustifiedPayloadBlockHash() [32]byte
|
||||
LastRoot(primitives.Epoch) [32]byte
|
||||
NodeCount() int
|
||||
PreviousJustifiedCheckpoint() *forkchoicetypes.Checkpoint
|
||||
ProposerBoost() [fieldparams.RootLength]byte
|
||||
|
||||
@@ -121,6 +121,13 @@ func (ro *ROForkChoice) HighestReceivedBlockRoot() [32]byte {
	return ro.getter.HighestReceivedBlockRoot()
}

// HighestReceivedBlockDelay delegates to the underlying forkchoice call, under a lock.
func (ro *ROForkChoice) HighestReceivedBlockDelay() primitives.Slot {
	ro.l.RLock()
	defer ro.l.RUnlock()
	return ro.getter.HighestReceivedBlockDelay()
}

// ReceivedBlocksLastEpoch delegates to the underlying forkchoice call, under a lock.
func (ro *ROForkChoice) ReceivedBlocksLastEpoch() (uint64, error) {
	ro.l.RLock()
@@ -156,6 +163,13 @@ func (ro *ROForkChoice) Slot(root [32]byte) (primitives.Slot, error) {
	return ro.getter.Slot(root)
}

// LastRoot delegates to the underlying forkchoice call, under a lock.
func (ro *ROForkChoice) LastRoot(e primitives.Epoch) [32]byte {
	ro.l.RLock()
	defer ro.l.RUnlock()
	return ro.getter.LastRoot(e)
}

// DependentRoot delegates to the underlying forkchoice call, under a lock.
func (ro *ROForkChoice) DependentRoot(epoch primitives.Epoch) ([32]byte, error) {
	ro.l.RLock()

@@ -30,6 +30,7 @@ const (
|
||||
nodeCountCalled
|
||||
highestReceivedBlockSlotCalled
|
||||
highestReceivedBlockRootCalled
|
||||
highestReceivedBlockDelayCalled
|
||||
receivedBlocksLastEpochCalled
|
||||
weightCalled
|
||||
isOptimisticCalled
|
||||
@@ -117,6 +118,11 @@ func TestROLocking(t *testing.T) {
|
||||
call: highestReceivedBlockSlotCalled,
|
||||
cb: func(g FastGetter) { g.HighestReceivedBlockSlot() },
|
||||
},
|
||||
{
|
||||
name: "highestReceivedBlockDelayCalled",
|
||||
call: highestReceivedBlockDelayCalled,
|
||||
cb: func(g FastGetter) { g.HighestReceivedBlockDelay() },
|
||||
},
|
||||
{
|
||||
name: "receivedBlocksLastEpochCalled",
|
||||
call: receivedBlocksLastEpochCalled,
|
||||
@@ -142,6 +148,11 @@ func TestROLocking(t *testing.T) {
|
||||
call: slotCalled,
|
||||
cb: func(g FastGetter) { _, err := g.Slot([32]byte{}); _discard(t, err) },
|
||||
},
|
||||
{
|
||||
name: "lastRootCalled",
|
||||
call: lastRootCalled,
|
||||
cb: func(g FastGetter) { g.LastRoot(0) },
|
||||
},
|
||||
{
|
||||
name: "targetRootForEpochCalled",
|
||||
call: targetRootForEpochCalled,
|
||||
@@ -254,6 +265,11 @@ func (ro *mockROForkchoice) HighestReceivedBlockRoot() [32]byte {
|
||||
return [32]byte{}
|
||||
}
|
||||
|
||||
func (ro *mockROForkchoice) HighestReceivedBlockDelay() primitives.Slot {
|
||||
ro.calls = append(ro.calls, highestReceivedBlockDelayCalled)
|
||||
return 0
|
||||
}
|
||||
|
||||
func (ro *mockROForkchoice) ReceivedBlocksLastEpoch() (uint64, error) {
|
||||
ro.calls = append(ro.calls, receivedBlocksLastEpochCalled)
|
||||
return 0, nil
|
||||
@@ -279,6 +295,11 @@ func (ro *mockROForkchoice) Slot(_ [32]byte) (primitives.Slot, error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func (ro *mockROForkchoice) LastRoot(_ primitives.Epoch) [32]byte {
	ro.calls = append(ro.calls, lastRootCalled)
	return [32]byte{}
}

// DependentRoot implements FastGetter.
func (ro *mockROForkchoice) DependentRoot(_ primitives.Epoch) ([32]byte, error) {
	ro.calls = append(ro.calls, dependentRootCalled)

@@ -134,20 +134,10 @@ type BeaconNode struct {

// New creates a new node instance, sets up configuration options, and registers
// every required service to the node.
func New(cliCtx *cli.Context, cancel context.CancelFunc, optFuncs []func(*cli.Context) ([]Option, error), opts ...Option) (*BeaconNode, error) {
func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*BeaconNode, error) {
	if err := configureBeacon(cliCtx); err != nil {
		return nil, errors.Wrap(err, "could not set beacon configuration options")
	}

	for _, of := range optFuncs {
		ofo, err := of(cliCtx)
		if err != nil {
			return nil, err
		}
		if ofo != nil {
			opts = append(opts, ofo...)
		}
	}
	ctx := cliCtx.Context

	beacon := &BeaconNode{

@@ -59,7 +59,7 @@ func TestNodeClose_OK(t *testing.T) {
		WithDataColumnStorage(filesystem.NewEphemeralDataColumnStorage(t)),
	}

	node, err := New(ctx, cancel, nil, options...)
	node, err := New(ctx, cancel, options...)
	require.NoError(t, err)

	node.Close()
@@ -87,7 +87,7 @@ func TestNodeStart_Ok(t *testing.T) {
		WithDataColumnStorage(filesystem.NewEphemeralDataColumnStorage(t)),
	}

	node, err := New(ctx, cancel, nil, options...)
	node, err := New(ctx, cancel, options...)
	require.NoError(t, err)
	require.NotNil(t, node.lcStore)
	node.services = &runtime.ServiceRegistry{}
@@ -116,7 +116,7 @@ func TestNodeStart_SyncChecker(t *testing.T) {
		WithDataColumnStorage(filesystem.NewEphemeralDataColumnStorage(t)),
	}

	node, err := New(ctx, cancel, nil, options...)
	node, err := New(ctx, cancel, options...)
	require.NoError(t, err)
	go func() {
		node.Start()
@@ -151,7 +151,7 @@ func TestClearDB(t *testing.T) {
		WithDataColumnStorage(filesystem.NewEphemeralDataColumnStorage(t)),
	}

	_, err = New(context, cancel, nil, options...)
	_, err = New(context, cancel, options...)
	require.NoError(t, err)
	require.LogsContain(t, hook, "Removing database")
}

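With the optFuncs parameter removed, any option that previously depended on the CLI context has to be resolved before calling New. A minimal sketch of an adapted call site, assuming a hypothetical resolveOptions helper returning ([]Option, error); the helper and surrounding control flow are illustrative, not part of this diff:

    opts := []Option{ /* static options as before */ }
    extra, err := resolveOptions(cliCtx) // hypothetical: builds CLI-dependent options up front
    if err != nil {
    	return nil, err
    }
    opts = append(opts, extra...)
    node, err := New(cliCtx, cancel, opts...)
    if err != nil {
    	return nil, err
    }
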
@@ -138,9 +138,6 @@ func connect(a, b host.Host) error {
|
||||
func (p *TestP2P) ReceiveRPC(topic string, msg proto.Message) {
|
||||
h, err := libp2p.New(libp2p.ResourceManager(&network.NullResourceManager{}))
|
||||
require.NoError(p.t, err)
|
||||
p.t.Cleanup(func() {
|
||||
require.NoError(p.t, h.Close())
|
||||
})
|
||||
if err := connect(h, p.BHost); err != nil {
|
||||
p.t.Fatalf("Failed to connect two peers for RPC: %v", err)
|
||||
}
|
||||
@@ -172,9 +169,6 @@ func (p *TestP2P) ReceiveRPC(topic string, msg proto.Message) {
|
||||
func (p *TestP2P) ReceivePubSub(topic string, msg proto.Message) {
|
||||
h, err := libp2p.New(libp2p.ResourceManager(&network.NullResourceManager{}))
|
||||
require.NoError(p.t, err)
|
||||
p.t.Cleanup(func() {
|
||||
require.NoError(p.t, h.Close())
|
||||
})
|
||||
ps, err := pubsub.NewFloodSub(context.Background(), h,
|
||||
pubsub.WithMessageSigning(false),
|
||||
pubsub.WithStrictSignatureVerification(false),
|
||||
|
||||
@@ -405,7 +405,6 @@ func (s *Service) nodeEndpoints() []endpoint {
|
||||
MetadataProvider: s.cfg.MetadataProvider,
|
||||
HeadFetcher: s.cfg.HeadFetcher,
|
||||
ExecutionChainInfoFetcher: s.cfg.ExecutionChainInfoFetcher,
|
||||
ExecutionEngineCaller: s.cfg.ExecutionEngineCaller,
|
||||
}
|
||||
|
||||
const namespace = "node"
|
||||
@@ -470,16 +469,6 @@ func (s *Service) nodeEndpoints() []endpoint {
|
||||
handler: server.GetVersion,
|
||||
methods: []string{http.MethodGet},
|
||||
},
|
||||
{
|
||||
template: "/eth/v2/node/version",
|
||||
name: namespace + ".GetVersionV2",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.GetVersionV2,
|
||||
methods: []string{http.MethodGet},
|
||||
},
|
||||
{
|
||||
template: "/eth/v1/node/health",
|
||||
name: namespace + ".GetHealth",
|
||||
|
||||
@@ -26,8 +26,8 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/voluntaryexits/mock"
|
||||
p2pMock "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/core"
|
||||
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
|
||||
mockSync "github.com/OffchainLabs/prysm/v7/beacon-chain/sync/initial-sync/testing"
|
||||
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/crypto/bls"
|
||||
|
||||
@@ -84,6 +84,7 @@ func TestGetSpec(t *testing.T) {
|
||||
config.FuluForkVersion = []byte("FuluForkVersion")
|
||||
config.FuluForkEpoch = 109
|
||||
config.GloasForkEpoch = 110
|
||||
config.MaxBuildersPerWithdrawalsSweep = 112
|
||||
config.BLSWithdrawalPrefixByte = byte('b')
|
||||
config.ETH1AddressWithdrawalPrefixByte = byte('c')
|
||||
config.GenesisDelay = 24
|
||||
@@ -220,7 +221,7 @@ func TestGetSpec(t *testing.T) {
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp))
|
||||
data, ok := resp.Data.(map[string]any)
|
||||
require.Equal(t, true, ok)
|
||||
assert.Equal(t, 192, len(data))
|
||||
assert.Equal(t, 193, len(data))
|
||||
for k, v := range data {
|
||||
t.Run(k, func(t *testing.T) {
|
||||
switch k {
|
||||
@@ -302,6 +303,8 @@ func TestGetSpec(t *testing.T) {
|
||||
assert.Equal(t, "109", v)
|
||||
case "GLOAS_FORK_EPOCH":
|
||||
assert.Equal(t, "110", v)
|
||||
case "MAX_BUILDERS_PER_WITHDRAWALS_SWEEP":
|
||||
assert.Equal(t, "112", v)
|
||||
case "MIN_ANCHOR_POW_BLOCK_DIFFICULTY":
|
||||
assert.Equal(t, "1000", v)
|
||||
case "BLS_WITHDRAWAL_PREFIX":
|
||||
|
||||
@@ -5,7 +5,6 @@ go_library(
|
||||
srcs = [
|
||||
"handlers.go",
|
||||
"handlers_peers.go",
|
||||
"log.go",
|
||||
"server.go",
|
||||
],
|
||||
importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/eth/node",
|
||||
@@ -31,7 +30,6 @@ go_library(
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@org_golang_google_grpc//:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -46,7 +44,6 @@ go_test(
|
||||
deps = [
|
||||
"//api/server/structs:go_default_library",
|
||||
"//beacon-chain/blockchain/testing:go_default_library",
|
||||
"//beacon-chain/execution/testing:go_default_library",
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
"//beacon-chain/p2p/peers:go_default_library",
|
||||
"//beacon-chain/p2p/testing:go_default_library",
|
||||
|
||||
@@ -116,38 +116,6 @@ func (*Server) GetVersion(w http.ResponseWriter, r *http.Request) {
|
||||
httputil.WriteJson(w, resp)
|
||||
}
|
||||
|
||||
// GetVersionV2 retrieves structured information about the version of the beacon node and its attached
// execution client, in the same format as used on the Engine API.
func (s *Server) GetVersionV2(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, span := trace.StartSpan(r.Context(), "node.GetVersionV2")
|
||||
defer span.End()
|
||||
|
||||
var elData *structs.ClientVersionV1
|
||||
elDataList, err := s.ExecutionEngineCaller.GetClientVersionV1(ctx)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("endpoint", "GetVersionV2").Debug("Could not get execution client version")
|
||||
} else {
|
||||
elData = elDataList[0]
|
||||
}
|
||||
|
||||
commit := version.GitCommit()
|
||||
if len(commit) >= 8 {
|
||||
commit = commit[:8]
|
||||
}
|
||||
resp := &structs.GetVersionV2Response{
|
||||
Data: &structs.VersionV2{
|
||||
BeaconNode: &structs.ClientVersionV1{
|
||||
Code: "PM",
|
||||
Name: "Prysm",
|
||||
Version: version.SemanticVersion(),
|
||||
Commit: commit,
|
||||
},
|
||||
ExecutionClient: elData,
|
||||
},
|
||||
}
|
||||
httputil.WriteJson(w, resp)
|
||||
}
|
||||
|
||||
// GetHealth returns node health status in http status codes. Useful for load balancers.
|
||||
func (s *Server) GetHealth(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, span := trace.StartSpan(r.Context(), "node.GetHealth")
|
||||
|
||||
@@ -12,7 +12,6 @@ import (
|
||||
"github.com/OffchainLabs/go-bitfield"
|
||||
"github.com/OffchainLabs/prysm/v7/api/server/structs"
|
||||
mock "github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/testing"
|
||||
mockengine "github.com/OffchainLabs/prysm/v7/beacon-chain/execution/testing"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
|
||||
mockp2p "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/testutil"
|
||||
@@ -91,40 +90,6 @@ func TestGetVersion(t *testing.T) {
|
||||
assert.StringContains(t, arch, resp.Data.Version)
|
||||
}
|
||||
|
||||
func TestGetVersionV2(t *testing.T) {
|
||||
request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v2/node/version", nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s := &Server{
|
||||
ExecutionEngineCaller: &mockengine.EngineClient{
|
||||
ClientVersion: []*structs.ClientVersionV1{{
|
||||
Code: "EL",
|
||||
Name: "ExecutionClient",
|
||||
Version: "v1.0.0",
|
||||
Commit: "abcdef12",
|
||||
}},
|
||||
},
|
||||
}
|
||||
s.GetVersionV2(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code)
|
||||
|
||||
resp := &structs.GetVersionV2Response{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.NotNil(t, resp)
|
||||
require.NotNil(t, resp.Data)
|
||||
require.NotNil(t, resp.Data.BeaconNode)
|
||||
require.NotNil(t, resp.Data.ExecutionClient)
|
||||
require.Equal(t, "EL", resp.Data.ExecutionClient.Code)
|
||||
require.Equal(t, "ExecutionClient", resp.Data.ExecutionClient.Name)
|
||||
require.Equal(t, "v1.0.0", resp.Data.ExecutionClient.Version)
|
||||
require.Equal(t, "abcdef12", resp.Data.ExecutionClient.Commit)
|
||||
require.Equal(t, "PM", resp.Data.BeaconNode.Code)
|
||||
require.Equal(t, "Prysm", resp.Data.BeaconNode.Name)
|
||||
require.Equal(t, version.SemanticVersion(), resp.Data.BeaconNode.Version)
|
||||
require.Equal(t, true, len(resp.Data.BeaconNode.Commit) <= 8)
|
||||
}
|
||||
|
||||
func TestGetHealth(t *testing.T) {
|
||||
checker := &syncmock.Sync{}
|
||||
optimisticFetcher := &mock.ChainService{Optimistic: false}
|
||||
|
||||
@@ -1,9 +0,0 @@
|
||||
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
|
||||
// This file is created and regenerated automatically. Anything added here might get removed.
|
||||
package node
|
||||
|
||||
import "github.com/sirupsen/logrus"
|
||||
|
||||
// The prefix for logs from this package will be the text after the last slash in the package path.
|
||||
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
|
||||
var log = logrus.WithField("package", "beacon-chain/rpc/eth/node")
|
||||
@@ -26,5 +26,4 @@ type Server struct {
|
||||
GenesisTimeFetcher blockchain.TimeFetcher
|
||||
HeadFetcher blockchain.HeadFetcher
|
||||
ExecutionChainInfoFetcher execution.ChainInfoFetcher
|
||||
ExecutionEngineCaller execution.EngineCaller
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@ package state
|
||||
import (
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
@@ -13,6 +14,10 @@ type writeOnlyGloasFields interface {
|
||||
RotateBuilderPendingPayments() error
|
||||
AppendBuilderPendingWithdrawals([]*ethpb.BuilderPendingWithdrawal) error
|
||||
UpdateExecutionPayloadAvailabilityAtIndex(idx uint64, val byte) error
|
||||
SetPayloadExpectedWithdrawals(withdrawals []*enginev1.Withdrawal) error
|
||||
DecreaseWithdrawalBalances(withdrawals []*enginev1.Withdrawal) error
|
||||
DequeueBuilderPendingWithdrawals(num uint64) error
|
||||
SetNextWithdrawalBuilderIndex(idx primitives.BuilderIndex) error
|
||||
}
|
||||
|
||||
type readOnlyGloasFields interface {
|
||||
@@ -21,4 +26,15 @@ type readOnlyGloasFields interface {
|
||||
CanBuilderCoverBid(primitives.BuilderIndex, primitives.Gwei) (bool, error)
|
||||
LatestBlockHash() ([32]byte, error)
|
||||
BuilderPendingPayments() ([]*ethpb.BuilderPendingPayment, error)
|
||||
IsParentBlockFull() (bool, error)
|
||||
ExpectedWithdrawalsGloas() (ExpectedWithdrawalsGloasResult, error)
|
||||
}
|
||||
|
||||
// ExpectedWithdrawalsGloasResult bundles the expected withdrawals and related counters
|
||||
// for the Gloas fork to avoid positional return mistakes.
|
||||
type ExpectedWithdrawalsGloasResult struct {
|
||||
Withdrawals []*enginev1.Withdrawal
|
||||
ProcessedBuilderWithdrawalsCount uint64
|
||||
ProcessedPartialWithdrawalsCount uint64
|
||||
NextWithdrawalBuilderIndex primitives.BuilderIndex
|
||||
}
|
||||
|
||||
@@ -1,13 +1,17 @@
|
||||
package state_native
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
)
|
||||
|
||||
// LatestBlockHash returns the hash of the latest execution block.
|
||||
@@ -46,20 +50,14 @@ func (b *BeaconState) BuilderPubkey(builderIndex primitives.BuilderIndex) ([fiel
|
||||
}
|
||||
|
||||
// IsActiveBuilder returns true if the builder placement is finalized and it has not initiated exit.
|
||||
// Spec v1.7.0-alpha.0 (pseudocode):
|
||||
// def is_active_builder(state: BeaconState, builder_index: BuilderIndex) -> bool:
|
||||
//
|
||||
// <spec fn="is_active_builder" fork="gloas" hash="1a599fb2">
|
||||
// def is_active_builder(state: BeaconState, builder_index: BuilderIndex) -> bool:
|
||||
// """
|
||||
// Check if the builder at ``builder_index`` is active for the given ``state``.
|
||||
// """
|
||||
// builder = state.builders[builder_index]
|
||||
// return (
|
||||
// # Placement in builder list is finalized
|
||||
// builder.deposit_epoch < state.finalized_checkpoint.epoch
|
||||
// # Has not initiated exit
|
||||
// and builder.withdrawable_epoch == FAR_FUTURE_EPOCH
|
||||
// )
|
||||
// </spec>
|
||||
// builder = state.builders[builder_index]
|
||||
// return (
|
||||
// builder.deposit_epoch < state.finalized_checkpoint.epoch
|
||||
// and builder.withdrawable_epoch == FAR_FUTURE_EPOCH
|
||||
// )
|
||||
func (b *BeaconState) IsActiveBuilder(builderIndex primitives.BuilderIndex) (bool, error) {
|
||||
if b.version < version.Gloas {
|
||||
return false, errNotSupported("IsActiveBuilder", b.version)
|
||||
@@ -78,18 +76,15 @@ func (b *BeaconState) IsActiveBuilder(builderIndex primitives.BuilderIndex) (boo
|
||||
}
|
||||
|
||||
// CanBuilderCoverBid returns true if the builder has enough balance to cover the given bid amount.
|
||||
// Spec v1.7.0-alpha.0 (pseudocode):
|
||||
// def can_builder_cover_bid(state: BeaconState, builder_index: BuilderIndex, bid_amount: Gwei) -> bool:
|
||||
//
|
||||
// <spec fn="can_builder_cover_bid" fork="gloas" hash="9e3f2d7c">
|
||||
// def can_builder_cover_bid(
|
||||
// state: BeaconState, builder_index: BuilderIndex, bid_amount: Gwei
|
||||
// ) -> bool:
|
||||
// builder_balance = state.builders[builder_index].balance
|
||||
// pending_withdrawals_amount = get_pending_balance_to_withdraw_for_builder(state, builder_index)
|
||||
// min_balance = MIN_DEPOSIT_AMOUNT + pending_withdrawals_amount
|
||||
// if builder_balance < min_balance:
|
||||
// return False
|
||||
// return builder_balance - min_balance >= bid_amount
|
||||
// </spec>
|
||||
// builder_balance = state.builders[builder_index].balance
|
||||
// pending_withdrawals_amount = get_pending_balance_to_withdraw_for_builder(state, builder_index)
|
||||
// min_balance = MIN_DEPOSIT_AMOUNT + pending_withdrawals_amount
|
||||
// if builder_balance < min_balance:
|
||||
// return False
|
||||
// return builder_balance - min_balance >= bid_amount
|
||||
func (b *BeaconState) CanBuilderCoverBid(builderIndex primitives.BuilderIndex, bidAmount primitives.Gwei) (bool, error) {
|
||||
if b.version < version.Gloas {
|
||||
return false, errNotSupported("CanBuilderCoverBid", b.version)
|
||||
@@ -156,3 +151,220 @@ func (b *BeaconState) BuilderPendingPayments() ([]*ethpb.BuilderPendingPayment,
|
||||
|
||||
return b.builderPendingPaymentsVal(), nil
|
||||
}
|
||||
|
||||
// IsParentBlockFull returns true if the last committed payload bid was fulfilled with a payload,
// which can only happen when both beacon block and payload were present.
// This function must be called on a beacon state before processing the execution payload bid in the block.
// Spec v1.7.0-alpha.2 (pseudocode):
// def is_parent_block_full(state: BeaconState) -> bool:
//
// return state.latest_execution_payload_bid.block_hash == state.latest_block_hash
func (b *BeaconState) IsParentBlockFull() (bool, error) {
	if b.version < version.Gloas {
		return false, errNotSupported("IsParentBlockFull", b.version)
	}

	b.lock.RLock()
	defer b.lock.RUnlock()

	if b.latestExecutionPayloadBid == nil {
		return false, nil
	}

	return bytes.Equal(b.latestExecutionPayloadBid.BlockHash, b.latestBlockHash), nil
}

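A minimal sketch of how a caller might consult this check before applying a new execution payload bid, assuming st is a Gloas beacon state; the surrounding control flow is illustrative, not taken from this change:

    full, err := st.IsParentBlockFull()
    if err != nil {
    	return err
    }
    if !full {
    	// The parent bid was not fulfilled with a payload; the empty-parent case must be
    	// handled before processing the incoming bid.
    }
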
// ExpectedWithdrawalsGloas returns the withdrawals that a proposer will need to pack in the next block
|
||||
// applied to the current state. It is also used by validators to check that the execution payload carried
|
||||
// the right number of withdrawals.
|
||||
//
|
||||
// Spec v1.7.0-alpha.1:
|
||||
//
|
||||
// def get_expected_withdrawals(state: BeaconState) -> ExpectedWithdrawals:
|
||||
// withdrawal_index = state.next_withdrawal_index
|
||||
// withdrawals: List[Withdrawal] = []
|
||||
//
|
||||
// # [New in Gloas:EIP7732]
|
||||
// # Get builder withdrawals
|
||||
// builder_withdrawals, withdrawal_index, processed_builder_withdrawals_count = (
|
||||
// get_builder_withdrawals(state, withdrawal_index, withdrawals)
|
||||
// )
|
||||
// withdrawals.extend(builder_withdrawals)
|
||||
//
|
||||
// # Get partial withdrawals
|
||||
// partial_withdrawals, withdrawal_index, processed_partial_withdrawals_count = (
|
||||
// get_pending_partial_withdrawals(state, withdrawal_index, withdrawals)
|
||||
// )
|
||||
// withdrawals.extend(partial_withdrawals)
|
||||
//
|
||||
// # [New in Gloas:EIP7732]
|
||||
// # Get builders sweep withdrawals
|
||||
// builders_sweep_withdrawals, withdrawal_index, processed_builders_sweep_count = (
|
||||
// get_builders_sweep_withdrawals(state, withdrawal_index, withdrawals)
|
||||
// )
|
||||
// withdrawals.extend(builders_sweep_withdrawals)
|
||||
//
|
||||
// # Get validators sweep withdrawals
|
||||
// validators_sweep_withdrawals, withdrawal_index, processed_validators_sweep_count = (
|
||||
// get_validators_sweep_withdrawals(state, withdrawal_index, withdrawals)
|
||||
// )
|
||||
// withdrawals.extend(validators_sweep_withdrawals)
|
||||
//
|
||||
// return ExpectedWithdrawals(
|
||||
// withdrawals,
|
||||
// # [New in Gloas:EIP7732]
|
||||
// processed_builder_withdrawals_count,
|
||||
// processed_partial_withdrawals_count,
|
||||
// # [New in Gloas:EIP7732]
|
||||
// processed_builders_sweep_count,
|
||||
// processed_validators_sweep_count,
|
||||
// )
|
||||
func (b *BeaconState) ExpectedWithdrawalsGloas() (state.ExpectedWithdrawalsGloasResult, error) {
	if b.version < version.Gloas {
		return state.ExpectedWithdrawalsGloasResult{}, errNotSupported("ExpectedWithdrawalsGloas", b.version)
	}

	b.lock.RLock()
	defer b.lock.RUnlock()

	cfg := params.BeaconConfig()
	withdrawals := make([]*enginev1.Withdrawal, 0, cfg.MaxWithdrawalsPerPayload)
	withdrawalIndex := b.nextWithdrawalIndex

	withdrawalIndex, processedBuilderWithdrawalsCount, err := b.appendBuilderWithdrawals(withdrawalIndex, &withdrawals)
	if err != nil {
		return state.ExpectedWithdrawalsGloasResult{}, err
	}

	withdrawalIndex, processedPartialWithdrawalsCount, err := b.appendPendingPartialWithdrawals(withdrawalIndex, &withdrawals)
	if err != nil {
		return state.ExpectedWithdrawalsGloasResult{}, err
	}

	withdrawalIndex, nextBuilderIndex, err := b.appendBuildersSweepWithdrawals(withdrawalIndex, &withdrawals)
	if err != nil {
		return state.ExpectedWithdrawalsGloasResult{}, err
	}

	err = b.appendValidatorsSweepWithdrawals(withdrawalIndex, &withdrawals)
	if err != nil {
		return state.ExpectedWithdrawalsGloasResult{}, err
	}

	return state.ExpectedWithdrawalsGloasResult{
		Withdrawals: withdrawals,
		ProcessedBuilderWithdrawalsCount: processedBuilderWithdrawalsCount,
		ProcessedPartialWithdrawalsCount: processedPartialWithdrawalsCount,
		NextWithdrawalBuilderIndex: nextBuilderIndex,
	}, nil
}

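The result struct keeps the counters together with the withdrawals so callers cannot mix up positional returns. A sketch of how a consumer might apply it, using the setters added elsewhere in this change; the ordering shown is illustrative, not a statement of the spec's processing order:

    res, err := st.ExpectedWithdrawalsGloas()
    if err != nil {
    	return err
    }
    if err := st.SetPayloadExpectedWithdrawals(res.Withdrawals); err != nil {
    	return err
    }
    if err := st.DequeueBuilderPendingWithdrawals(res.ProcessedBuilderWithdrawalsCount); err != nil {
    	return err
    }
    if err := st.SetNextWithdrawalBuilderIndex(res.NextWithdrawalBuilderIndex); err != nil {
    	return err
    }
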
// appendBuilderWithdrawals returns builder pending withdrawals, the updated withdrawal index,
|
||||
// and the processed count, following spec v1.7.0-alpha.2:
|
||||
//
|
||||
// def get_builder_withdrawals(state, withdrawal_index, prior_withdrawals):
|
||||
// withdrawals_limit = MAX_WITHDRAWALS_PER_PAYLOAD - 1
|
||||
// assert len(prior_withdrawals) <= withdrawals_limit
|
||||
// processed_count = 0
|
||||
// withdrawals = []
|
||||
// for withdrawal in state.builder_pending_withdrawals:
|
||||
// if len(prior_withdrawals + withdrawals) >= withdrawals_limit:
|
||||
// break
|
||||
// withdrawals.append(Withdrawal(
|
||||
// index=withdrawal_index,
|
||||
// validator_index=convert_builder_index_to_validator_index(withdrawal.builder_index),
|
||||
// address=withdrawal.fee_recipient,
|
||||
// amount=withdrawal.amount,
|
||||
// ))
|
||||
// withdrawal_index += 1
|
||||
// processed_count += 1
|
||||
// return withdrawals, withdrawal_index, processed_count
|
||||
func (b *BeaconState) appendBuilderWithdrawals(withdrawalIndex uint64, withdrawals *[]*enginev1.Withdrawal) (uint64, uint64, error) {
|
||||
cfg := params.BeaconConfig()
|
||||
withdrawalsLimit := int(cfg.MaxWithdrawalsPerPayload - 1)
|
||||
ws := *withdrawals
|
||||
if len(ws) > withdrawalsLimit {
|
||||
return withdrawalIndex, 0, fmt.Errorf("prior withdrawals length %d exceeds limit %d", len(ws), withdrawalsLimit)
|
||||
}
|
||||
|
||||
var processedCount uint64
|
||||
for _, w := range b.builderPendingWithdrawals {
|
||||
if len(ws) >= withdrawalsLimit {
|
||||
break
|
||||
}
|
||||
|
||||
ws = append(ws, &enginev1.Withdrawal{
|
||||
Index: withdrawalIndex,
|
||||
ValidatorIndex: w.BuilderIndex.ToValidatorIndex(),
|
||||
Address: w.FeeRecipient,
|
||||
Amount: uint64(w.Amount),
|
||||
})
|
||||
withdrawalIndex++
|
||||
processedCount++
|
||||
}
|
||||
|
||||
*withdrawals = ws
|
||||
return withdrawalIndex, processedCount, nil
|
||||
}
|
||||
|
||||
// appendBuildersSweepWithdrawals returns builder sweep withdrawals, the updated withdrawal index,
|
||||
// and the processed count, following spec v1.7.0-alpha.2:
|
||||
//
|
||||
// def get_builders_sweep_withdrawals(state, withdrawal_index, prior_withdrawals):
|
||||
// epoch = get_current_epoch(state)
|
||||
// builders_limit = min(len(state.builders), MAX_BUILDERS_PER_WITHDRAWALS_SWEEP)
|
||||
// withdrawals_limit = MAX_WITHDRAWALS_PER_PAYLOAD - 1
|
||||
// assert len(prior_withdrawals) <= withdrawals_limit
|
||||
// processed_count = 0
|
||||
// withdrawals = []
|
||||
// builder_index = state.next_withdrawal_builder_index
|
||||
// for _ in range(builders_limit):
|
||||
// if len(prior_withdrawals + withdrawals) >= withdrawals_limit:
|
||||
// break
|
||||
// builder = state.builders[builder_index]
|
||||
// if builder.withdrawable_epoch <= epoch and builder.balance > 0:
|
||||
// withdrawals.append(Withdrawal(
|
||||
// index=withdrawal_index,
|
||||
// validator_index=convert_builder_index_to_validator_index(builder_index),
|
||||
// address=builder.execution_address,
|
||||
// amount=builder.balance,
|
||||
// ))
|
||||
// withdrawal_index += 1
|
||||
// builder_index = BuilderIndex((builder_index + 1) % len(state.builders))
|
||||
// processed_count += 1
|
||||
// return withdrawals, withdrawal_index, processed_count
|
||||
func (b *BeaconState) appendBuildersSweepWithdrawals(withdrawalIndex uint64, withdrawals *[]*enginev1.Withdrawal) (uint64, primitives.BuilderIndex, error) {
|
||||
cfg := params.BeaconConfig()
|
||||
withdrawalsLimit := int(cfg.MaxWithdrawalsPerPayload - 1)
|
||||
if len(*withdrawals) > withdrawalsLimit {
|
||||
return withdrawalIndex, 0, fmt.Errorf("prior withdrawals length %d exceeds limit %d", len(*withdrawals), withdrawalsLimit)
|
||||
}
|
||||
|
||||
ws := *withdrawals
|
||||
|
||||
buildersLimit := min(len(b.builders), int(cfg.MaxBuildersPerWithdrawalsSweep))
|
||||
|
||||
builderIndex := b.nextWithdrawalBuilderIndex
|
||||
epoch := slots.ToEpoch(b.slot)
|
||||
for range buildersLimit {
|
||||
if len(ws) >= withdrawalsLimit {
|
||||
break
|
||||
}
|
||||
|
||||
builder := b.builders[builderIndex]
|
||||
if builder != nil && builder.WithdrawableEpoch <= epoch && builder.Balance > 0 {
|
||||
ws = append(ws, &enginev1.Withdrawal{
|
||||
Index: withdrawalIndex,
|
||||
ValidatorIndex: builderIndex.ToValidatorIndex(),
|
||||
Address: builder.ExecutionAddress,
|
||||
Amount: uint64(builder.Balance),
|
||||
})
|
||||
withdrawalIndex++
|
||||
}
|
||||
|
||||
builderIndex = primitives.BuilderIndex((uint64(builderIndex) + 1) % uint64(len(b.builders)))
|
||||
}
|
||||
|
||||
*withdrawals = ws
|
||||
return withdrawalIndex, builderIndex, nil
|
||||
}
|
||||
|
||||
@@ -1,26 +1,27 @@
|
||||
package state_native_test
|
||||
package state_native
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/util"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
)
|
||||
|
||||
func TestLatestBlockHash(t *testing.T) {
|
||||
t.Run("returns error before gloas", func(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisState(t, 1)
|
||||
st := &BeaconState{version: version.Fulu}
|
||||
_, err := st.LatestBlockHash()
|
||||
require.ErrorContains(t, "is not supported", err)
|
||||
})
|
||||
|
||||
t.Run("returns zero hash when unset", func(t *testing.T) {
|
||||
st, err := state_native.InitializeFromProtoGloas(ðpb.BeaconStateGloas{})
|
||||
st, err := InitializeFromProtoGloas(ðpb.BeaconStateGloas{})
|
||||
require.NoError(t, err)
|
||||
|
||||
got, err := st.LatestBlockHash()
|
||||
@@ -33,7 +34,7 @@ func TestLatestBlockHash(t *testing.T) {
|
||||
var want [32]byte
|
||||
copy(want[:], hashBytes)
|
||||
|
||||
st, err := state_native.InitializeFromProtoGloas(ðpb.BeaconStateGloas{
|
||||
st, err := InitializeFromProtoGloas(ðpb.BeaconStateGloas{
|
||||
LatestBlockHash: hashBytes,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
@@ -46,17 +47,14 @@ func TestLatestBlockHash(t *testing.T) {
|
||||
|
||||
func TestBuilderPubkey(t *testing.T) {
|
||||
t.Run("returns error before gloas", func(t *testing.T) {
|
||||
stIface, _ := util.DeterministicGenesisState(t, 1)
|
||||
native, ok := stIface.(*state_native.BeaconState)
|
||||
require.Equal(t, true, ok)
|
||||
|
||||
_, err := native.BuilderPubkey(0)
|
||||
st := &BeaconState{version: version.Fulu}
|
||||
_, err := st.BuilderPubkey(0)
|
||||
require.ErrorContains(t, "is not supported", err)
|
||||
})
|
||||
|
||||
t.Run("returns pubkey copy", func(t *testing.T) {
|
||||
pubkey := bytes.Repeat([]byte{0xAA}, 48)
|
||||
stIface, err := state_native.InitializeFromProtoGloas(ðpb.BeaconStateGloas{
|
||||
stIface, err := InitializeFromProtoGloas(ðpb.BeaconStateGloas{
|
||||
Builders: []*ethpb.Builder{
|
||||
{
|
||||
Pubkey: pubkey,
|
||||
@@ -80,12 +78,12 @@ func TestBuilderPubkey(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("out of range returns error", func(t *testing.T) {
|
||||
stIface, err := state_native.InitializeFromProtoGloas(ðpb.BeaconStateGloas{
|
||||
stIface, err := InitializeFromProtoGloas(ðpb.BeaconStateGloas{
|
||||
Builders: []*ethpb.Builder{},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
st := stIface.(*state_native.BeaconState)
|
||||
st := stIface.(*BeaconState)
|
||||
_, err = st.BuilderPubkey(1)
|
||||
require.ErrorContains(t, "out of range", err)
|
||||
})
|
||||
@@ -93,7 +91,7 @@ func TestBuilderPubkey(t *testing.T) {
|
||||
|
||||
func TestBuilderHelpers(t *testing.T) {
|
||||
t.Run("is active builder", func(t *testing.T) {
|
||||
st, err := state_native.InitializeFromProtoGloas(ðpb.BeaconStateGloas{
|
||||
st, err := InitializeFromProtoGloas(ðpb.BeaconStateGloas{
|
||||
Builders: []*ethpb.Builder{
|
||||
{
|
||||
Balance: 10,
|
||||
@@ -120,7 +118,7 @@ func TestBuilderHelpers(t *testing.T) {
|
||||
},
|
||||
FinalizedCheckpoint: ðpb.Checkpoint{Epoch: 2},
|
||||
}
|
||||
stInactive, err := state_native.InitializeFromProtoGloas(stProto)
|
||||
stInactive, err := InitializeFromProtoGloas(stProto)
|
||||
require.NoError(t, err)
|
||||
|
||||
active, err = stInactive.IsActiveBuilder(0)
|
||||
@@ -129,7 +127,7 @@ func TestBuilderHelpers(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("can builder cover bid", func(t *testing.T) {
|
||||
stIface, err := state_native.InitializeFromProtoGloas(ðpb.BeaconStateGloas{
|
||||
stIface, err := InitializeFromProtoGloas(ðpb.BeaconStateGloas{
|
||||
Builders: []*ethpb.Builder{
|
||||
{
|
||||
Balance: primitives.Gwei(params.BeaconConfig().MinDepositAmount + 50),
|
||||
@@ -147,7 +145,7 @@ func TestBuilderHelpers(t *testing.T) {
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
st := stIface.(*state_native.BeaconState)
|
||||
st := stIface.(*BeaconState)
|
||||
ok, err := st.CanBuilderCoverBid(0, 20)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, ok)
|
||||
@@ -159,10 +157,245 @@ func TestBuilderHelpers(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestBuilderPendingPayments_UnsupportedVersion(t *testing.T) {
|
||||
stIface, err := state_native.InitializeFromProtoElectra(ðpb.BeaconStateElectra{})
|
||||
stIface, err := InitializeFromProtoElectra(ðpb.BeaconStateElectra{})
|
||||
require.NoError(t, err)
|
||||
st := stIface.(*state_native.BeaconState)
|
||||
st := stIface.(*BeaconState)
|
||||
|
||||
_, err = st.BuilderPendingPayments()
|
||||
require.ErrorContains(t, "BuilderPendingPayments", err)
|
||||
}
|
||||
|
||||
func TestIsParentBlockFull(t *testing.T) {
|
||||
t.Run("returns error before gloas", func(t *testing.T) {
|
||||
st := &BeaconState{version: version.Fulu}
|
||||
_, err := st.IsParentBlockFull()
|
||||
require.ErrorContains(t, "is not supported", err)
|
||||
})
|
||||
|
||||
t.Run("returns false when bid is nil", func(t *testing.T) {
|
||||
st := &BeaconState{version: version.Gloas}
|
||||
got, err := st.IsParentBlockFull()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, got)
|
||||
})
|
||||
|
||||
t.Run("returns true when hashes match", func(t *testing.T) {
|
||||
hash := bytes.Repeat([]byte{0xAB}, 32)
|
||||
st := &BeaconState{
|
||||
version: version.Gloas,
|
||||
latestExecutionPayloadBid: ðpb.ExecutionPayloadBid{
|
||||
BlockHash: hash,
|
||||
},
|
||||
latestBlockHash: hash,
|
||||
}
|
||||
|
||||
got, err := st.IsParentBlockFull()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, got)
|
||||
})
|
||||
|
||||
t.Run("returns false when hashes differ", func(t *testing.T) {
|
||||
hash := bytes.Repeat([]byte{0xAB}, 32)
|
||||
other := bytes.Repeat([]byte{0xCD}, 32)
|
||||
st := &BeaconState{
|
||||
version: version.Gloas,
|
||||
latestExecutionPayloadBid: ðpb.ExecutionPayloadBid{
|
||||
BlockHash: hash,
|
||||
},
|
||||
latestBlockHash: other,
|
||||
}
|
||||
|
||||
got, err := st.IsParentBlockFull()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, got)
|
||||
})
|
||||
}
|
||||
|
||||
func TestAppendBuilderWithdrawals(t *testing.T) {
|
||||
t.Run("errors when prior withdrawals exceed limit", func(t *testing.T) {
|
||||
st := &BeaconState{}
|
||||
limit := params.BeaconConfig().MaxWithdrawalsPerPayload - 1
|
||||
withdrawals := make([]*enginev1.Withdrawal, limit+1)
|
||||
|
||||
nextIndex, processed, err := st.appendBuilderWithdrawals(5, &withdrawals)
|
||||
require.ErrorContains(t, "exceeds limit", err)
|
||||
require.Equal(t, uint64(5), nextIndex)
|
||||
require.Equal(t, uint64(0), processed)
|
||||
require.Equal(t, int(limit+1), len(withdrawals))
|
||||
})
|
||||
|
||||
t.Run("appends builder withdrawals and increments index", func(t *testing.T) {
|
||||
st := &BeaconState{
|
||||
builderPendingWithdrawals: []*ethpb.BuilderPendingWithdrawal{
|
||||
{BuilderIndex: 1, FeeRecipient: []byte{0x01}, Amount: 11},
|
||||
{BuilderIndex: 2, FeeRecipient: []byte{0x02}, Amount: 22},
|
||||
{BuilderIndex: 3, FeeRecipient: []byte{0x03}, Amount: 33},
|
||||
},
|
||||
}
|
||||
withdrawals := []*enginev1.Withdrawal{
|
||||
{Index: 7, ValidatorIndex: 9, Address: []byte{0xAA}, Amount: 99},
|
||||
}
|
||||
|
||||
nextIndex, processed, err := st.appendBuilderWithdrawals(10, &withdrawals)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(13), nextIndex)
|
||||
require.Equal(t, uint64(3), processed)
|
||||
require.Equal(t, 4, len(withdrawals))
|
||||
|
||||
require.DeepEqual(t, &enginev1.Withdrawal{
|
||||
Index: 10,
|
||||
ValidatorIndex: primitives.BuilderIndex(1).ToValidatorIndex(),
|
||||
Address: []byte{0x01},
|
||||
Amount: 11,
|
||||
}, withdrawals[1])
|
||||
require.DeepEqual(t, &enginev1.Withdrawal{
|
||||
Index: 11,
|
||||
ValidatorIndex: primitives.BuilderIndex(2).ToValidatorIndex(),
|
||||
Address: []byte{0x02},
|
||||
Amount: 22,
|
||||
}, withdrawals[2])
|
||||
require.DeepEqual(t, &enginev1.Withdrawal{
|
||||
Index: 12,
|
||||
ValidatorIndex: primitives.BuilderIndex(3).ToValidatorIndex(),
|
||||
Address: []byte{0x03},
|
||||
Amount: 33,
|
||||
}, withdrawals[3])
|
||||
})
|
||||
|
||||
t.Run("respects per-payload limit", func(t *testing.T) {
|
||||
limit := params.BeaconConfig().MaxWithdrawalsPerPayload - 1
|
||||
st := &BeaconState{
|
||||
builderPendingWithdrawals: []*ethpb.BuilderPendingWithdrawal{
|
||||
{BuilderIndex: 4, FeeRecipient: []byte{0x04}, Amount: 44},
|
||||
{BuilderIndex: 5, FeeRecipient: []byte{0x05}, Amount: 55},
|
||||
},
|
||||
}
|
||||
withdrawals := make([]*enginev1.Withdrawal, limit-1)
|
||||
|
||||
nextIndex, processed, err := st.appendBuilderWithdrawals(20, &withdrawals)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(21), nextIndex)
|
||||
require.Equal(t, uint64(1), processed)
|
||||
require.Equal(t, int(limit), len(withdrawals))
|
||||
require.DeepEqual(t, &enginev1.Withdrawal{
|
||||
Index: 20,
|
||||
ValidatorIndex: primitives.BuilderIndex(4).ToValidatorIndex(),
|
||||
Address: []byte{0x04},
|
||||
Amount: 44,
|
||||
}, withdrawals[len(withdrawals)-1])
|
||||
})
|
||||
|
||||
t.Run("does not append when already at limit", func(t *testing.T) {
|
||||
limit := params.BeaconConfig().MaxWithdrawalsPerPayload - 1
|
||||
if limit == 0 {
|
||||
t.Skip("withdrawals limit too small")
|
||||
}
|
||||
st := &BeaconState{
|
||||
builderPendingWithdrawals: []*ethpb.BuilderPendingWithdrawal{
|
||||
{BuilderIndex: 6, FeeRecipient: []byte{0x06}, Amount: 66},
|
||||
},
|
||||
}
|
||||
withdrawals := make([]*enginev1.Withdrawal, limit)
|
||||
|
||||
nextIndex, processed, err := st.appendBuilderWithdrawals(30, &withdrawals)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(30), nextIndex)
|
||||
require.Equal(t, uint64(0), processed)
|
||||
require.Equal(t, int(limit), len(withdrawals))
|
||||
})
|
||||
}
|
||||
|
||||
func TestAppendBuildersSweepWithdrawals(t *testing.T) {
|
||||
t.Run("errors when prior withdrawals exceed limit", func(t *testing.T) {
|
||||
st := &BeaconState{}
|
||||
limit := params.BeaconConfig().MaxWithdrawalsPerPayload - 1
|
||||
withdrawals := make([]*enginev1.Withdrawal, limit+1)
|
||||
|
||||
nextIndex, nextBuilderIndex, err := st.appendBuildersSweepWithdrawals(5, &withdrawals)
|
||||
require.ErrorContains(t, "exceeds limit", err)
|
||||
require.Equal(t, uint64(5), nextIndex)
|
||||
require.Equal(t, primitives.BuilderIndex(0), nextBuilderIndex)
|
||||
require.Equal(t, int(limit+1), len(withdrawals))
|
||||
})
|
||||
|
||||
t.Run("appends eligible builders, skips ineligible", func(t *testing.T) {
|
||||
epoch := primitives.Epoch(3)
|
||||
st := &BeaconState{
|
||||
slot: slots.UnsafeEpochStart(epoch),
|
||||
nextWithdrawalBuilderIndex: 2,
|
||||
builders: []*ethpb.Builder{
|
||||
{ExecutionAddress: []byte{0x01}, Balance: 0, WithdrawableEpoch: epoch},
|
||||
{ExecutionAddress: []byte{0x02}, Balance: 10, WithdrawableEpoch: epoch + 1},
|
||||
{ExecutionAddress: []byte{0x03}, Balance: 20, WithdrawableEpoch: epoch},
|
||||
},
|
||||
}
|
||||
withdrawals := []*enginev1.Withdrawal{}
|
||||
|
||||
nextIndex, nextBuilderIndex, err := st.appendBuildersSweepWithdrawals(100, &withdrawals)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(101), nextIndex)
|
||||
require.Equal(t, primitives.BuilderIndex(2), nextBuilderIndex)
|
||||
require.Equal(t, 1, len(withdrawals))
|
||||
require.DeepEqual(t, &enginev1.Withdrawal{
|
||||
Index: 100,
|
||||
ValidatorIndex: primitives.BuilderIndex(2).ToValidatorIndex(),
|
||||
Address: []byte{0x03},
|
||||
Amount: 20,
|
||||
}, withdrawals[0])
|
||||
})
|
||||
|
||||
t.Run("respects max builders per sweep", func(t *testing.T) {
|
||||
cfg := params.BeaconConfig()
|
||||
max := int(cfg.MaxBuildersPerWithdrawalsSweep)
|
||||
epoch := primitives.Epoch(1)
|
||||
builders := make([]*ethpb.Builder, max+2)
|
||||
for i := range builders {
|
||||
builders[i] = ðpb.Builder{
|
||||
ExecutionAddress: []byte{byte(i + 1)},
|
||||
Balance: 1,
|
||||
WithdrawableEpoch: epoch,
|
||||
}
|
||||
}
|
||||
start := len(builders) - 1
|
||||
st := &BeaconState{
|
||||
slot: slots.UnsafeEpochStart(epoch),
|
||||
nextWithdrawalBuilderIndex: primitives.BuilderIndex(start),
|
||||
builders: builders,
|
||||
}
|
||||
withdrawals := []*enginev1.Withdrawal{}
|
||||
|
||||
nextIndex, nextBuilderIndex, err := st.appendBuildersSweepWithdrawals(7, &withdrawals)
|
||||
require.NoError(t, err)
|
||||
limit := int(cfg.MaxWithdrawalsPerPayload - 1)
|
||||
expectedCount := min(max, limit)
|
||||
require.Equal(t, uint64(7)+uint64(expectedCount), nextIndex)
|
||||
require.Equal(t, expectedCount, len(withdrawals))
|
||||
expectedNext := primitives.BuilderIndex((uint64(start) + uint64(expectedCount)) % uint64(len(builders)))
|
||||
require.Equal(t, expectedNext, nextBuilderIndex)
|
||||
})
|
||||
|
||||
t.Run("stops when payload limit reached", func(t *testing.T) {
|
||||
cfg := params.BeaconConfig()
|
||||
limit := cfg.MaxWithdrawalsPerPayload - 1
|
||||
if limit < 1 {
|
||||
t.Skip("withdrawals limit too small")
|
||||
}
|
||||
epoch := primitives.Epoch(2)
|
||||
builders := []*ethpb.Builder{
|
||||
{ExecutionAddress: []byte{0x0A}, Balance: 3, WithdrawableEpoch: epoch},
|
||||
{ExecutionAddress: []byte{0x0B}, Balance: 4, WithdrawableEpoch: epoch},
|
||||
}
|
||||
st := &BeaconState{
|
||||
slot: slots.UnsafeEpochStart(epoch),
|
||||
nextWithdrawalBuilderIndex: 0,
|
||||
builders: builders,
|
||||
}
|
||||
withdrawals := make([]*enginev1.Withdrawal, limit)
|
||||
|
||||
nextIndex, nextBuilderIndex, err := st.appendBuildersSweepWithdrawals(20, &withdrawals)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(20), nextIndex)
|
||||
require.Equal(t, int(limit), len(withdrawals))
|
||||
require.Equal(t, primitives.BuilderIndex(0), nextBuilderIndex)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -133,11 +133,22 @@ func (b *BeaconState) appendPendingPartialWithdrawals(withdrawalIndex uint64, wi
|
||||
return withdrawalIndex, 0, nil
|
||||
}
|
||||
|
||||
cfg := params.BeaconConfig()
|
||||
withdrawalsLimit := min(
|
||||
len(*withdrawals)+int(cfg.MaxPendingPartialsPerWithdrawalsSweep),
|
||||
int(cfg.MaxWithdrawalsPerPayload-1),
|
||||
)
|
||||
if len(*withdrawals) > withdrawalsLimit {
|
||||
return withdrawalIndex, 0, fmt.Errorf("prior withdrawals length %d exceeds limit %d", len(*withdrawals), withdrawalsLimit)
|
||||
}
|
||||
|
||||
ws := *withdrawals
|
||||
epoch := slots.ToEpoch(b.slot)
|
||||
var processedPartialWithdrawalsCount uint64
|
||||
for _, w := range b.pendingPartialWithdrawals {
|
||||
if w.WithdrawableEpoch > epoch || len(ws) >= int(params.BeaconConfig().MaxPendingPartialsPerWithdrawalsSweep) {
|
||||
isWithdrawable := w.WithdrawableEpoch <= epoch
|
||||
hasReachedLimit := len(ws) >= withdrawalsLimit
|
||||
if !isWithdrawable || hasReachedLimit {
|
||||
break
|
||||
}
|
||||
|
||||
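The new limit bounds the partial-withdrawal pass by both the per-sweep cap and the per-payload cap. As a worked example with mainnet-style values used only for illustration (MaxWithdrawalsPerPayload = 16, MaxPendingPartialsPerWithdrawalsSweep = 8): with two prior withdrawals the limit is min(2+8, 16-1) = 10, so at most eight more pending partials can be appended in this pass.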
@@ -149,7 +160,7 @@ func (b *BeaconState) appendPendingPartialWithdrawals(withdrawalIndex uint64, wi
|
||||
if err != nil {
|
||||
return withdrawalIndex, 0, fmt.Errorf("could not retrieve balance at index %d: %w", w.Index, err)
|
||||
}
|
||||
hasSufficientEffectiveBalance := v.EffectiveBalance() >= params.BeaconConfig().MinActivationBalance
|
||||
hasSufficientEffectiveBalance := v.EffectiveBalance() >= cfg.MinActivationBalance
|
||||
var totalWithdrawn uint64
|
||||
for _, wi := range ws {
|
||||
if wi.ValidatorIndex == w.Index {
|
||||
@@ -160,9 +171,9 @@ func (b *BeaconState) appendPendingPartialWithdrawals(withdrawalIndex uint64, wi
|
||||
if err != nil {
|
||||
return withdrawalIndex, 0, errors.Wrapf(err, "failed to subtract balance %d with total withdrawn %d", vBal, totalWithdrawn)
|
||||
}
|
||||
hasExcessBalance := balance > params.BeaconConfig().MinActivationBalance
|
||||
if v.ExitEpoch() == params.BeaconConfig().FarFutureEpoch && hasSufficientEffectiveBalance && hasExcessBalance {
|
||||
amount := min(balance-params.BeaconConfig().MinActivationBalance, w.Amount)
|
||||
hasExcessBalance := balance > cfg.MinActivationBalance
|
||||
if v.ExitEpoch() == cfg.FarFutureEpoch && hasSufficientEffectiveBalance && hasExcessBalance {
|
||||
amount := min(balance-cfg.MinActivationBalance, w.Amount)
|
||||
ws = append(ws, &enginev1.Withdrawal{
|
||||
Index: withdrawalIndex,
|
||||
ValidatorIndex: w.Index,
|
||||
@@ -183,7 +194,7 @@ func (b *BeaconState) appendValidatorsSweepWithdrawals(withdrawalIndex uint64, w
|
||||
validatorIndex := b.nextWithdrawalValidatorIndex
|
||||
validatorsLen := b.validatorsLen()
|
||||
epoch := slots.ToEpoch(b.slot)
|
||||
bound := min(uint64(validatorsLen), params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep)
|
||||
bound := min(validatorsLen, int(params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep))
|
||||
for range bound {
|
||||
val, err := b.validatorAtIndexReadOnly(validatorIndex)
|
||||
if err != nil {
|
||||
@@ -222,7 +233,7 @@ func (b *BeaconState) appendValidatorsSweepWithdrawals(withdrawalIndex uint64, w
|
||||
})
|
||||
withdrawalIndex++
|
||||
}
|
||||
if uint64(len(ws)) == params.BeaconConfig().MaxWithdrawalsPerPayload {
|
||||
if len(ws) == int(params.BeaconConfig().MaxWithdrawalsPerPayload) {
|
||||
break
|
||||
}
|
||||
validatorIndex += 1
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package state_native
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native/types"
|
||||
@@ -8,8 +9,10 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
pkgerrors "github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// RotateBuilderPendingPayments rotates the queue by dropping slots per epoch payments from the
|
||||
@@ -82,20 +85,20 @@ func (b *BeaconState) SetExecutionPayloadBid(h interfaces.ROExecutionPayloadBid)
|
||||
parentBlockRoot := h.ParentBlockRoot()
|
||||
blockHash := h.BlockHash()
|
||||
randao := h.PrevRandao()
|
||||
blobKzgCommitments := h.BlobKzgCommitments()
|
||||
blobKzgCommitmentsRoot := h.BlobKzgCommitmentsRoot()
|
||||
feeRecipient := h.FeeRecipient()
|
||||
b.latestExecutionPayloadBid = ðpb.ExecutionPayloadBid{
|
||||
ParentBlockHash: parentBlockHash[:],
|
||||
ParentBlockRoot: parentBlockRoot[:],
|
||||
BlockHash: blockHash[:],
|
||||
PrevRandao: randao[:],
|
||||
GasLimit: h.GasLimit(),
|
||||
BuilderIndex: h.BuilderIndex(),
|
||||
Slot: h.Slot(),
|
||||
Value: h.Value(),
|
||||
ExecutionPayment: h.ExecutionPayment(),
|
||||
BlobKzgCommitments: blobKzgCommitments,
|
||||
FeeRecipient: feeRecipient[:],
|
||||
ParentBlockHash: parentBlockHash[:],
|
||||
ParentBlockRoot: parentBlockRoot[:],
|
||||
BlockHash: blockHash[:],
|
||||
PrevRandao: randao[:],
|
||||
GasLimit: h.GasLimit(),
|
||||
BuilderIndex: h.BuilderIndex(),
|
||||
Slot: h.Slot(),
|
||||
Value: h.Value(),
|
||||
ExecutionPayment: h.ExecutionPayment(),
|
||||
BlobKzgCommitmentsRoot: blobKzgCommitmentsRoot[:],
|
||||
FeeRecipient: feeRecipient[:],
|
||||
}
|
||||
b.markFieldAsDirty(types.LatestExecutionPayloadBid)
|
||||
|
||||
@@ -161,3 +164,154 @@ func (b *BeaconState) UpdateExecutionPayloadAvailabilityAtIndex(idx uint64, val
|
||||
b.markFieldAsDirty(types.ExecutionPayloadAvailability)
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetPayloadExpectedWithdrawals stores the expected withdrawals for the next payload.
|
||||
func (b *BeaconState) SetPayloadExpectedWithdrawals(withdrawals []*enginev1.Withdrawal) error {
|
||||
if b.version < version.Gloas {
|
||||
return errNotSupported("SetPayloadExpectedWithdrawals", b.version)
|
||||
}
|
||||
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
b.payloadExpectedWithdrawals = withdrawals
|
||||
b.markFieldAsDirty(types.PayloadExpectedWithdrawals)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// DequeueBuilderPendingWithdrawals removes processed builder withdrawals from the front of the queue.
|
||||
func (b *BeaconState) DequeueBuilderPendingWithdrawals(n uint64) error {
|
||||
if b.version < version.Gloas {
|
||||
return errNotSupported("DequeueBuilderPendingWithdrawals", b.version)
|
||||
}
|
||||
|
||||
if n == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
if n > uint64(len(b.builderPendingWithdrawals)) {
|
||||
return errors.New("cannot dequeue more builder withdrawals than are in the queue")
|
||||
}
|
||||
|
||||
if b.sharedFieldReferences[types.BuilderPendingWithdrawals].Refs() > 1 {
|
||||
withdrawals := make([]*ethpb.BuilderPendingWithdrawal, len(b.builderPendingWithdrawals))
|
||||
copy(withdrawals, b.builderPendingWithdrawals)
|
||||
b.builderPendingWithdrawals = withdrawals
|
||||
b.sharedFieldReferences[types.BuilderPendingWithdrawals].MinusRef()
|
||||
b.sharedFieldReferences[types.BuilderPendingWithdrawals] = stateutil.NewRef(1)
|
||||
}
|
||||
|
||||
b.builderPendingWithdrawals = b.builderPendingWithdrawals[n:]
|
||||
b.markFieldAsDirty(types.BuilderPendingWithdrawals)
|
||||
b.rebuildTrie[types.BuilderPendingWithdrawals] = true
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetNextWithdrawalBuilderIndex sets the next builder index for the withdrawals sweep.
|
||||
func (b *BeaconState) SetNextWithdrawalBuilderIndex(index primitives.BuilderIndex) error {
|
||||
if b.version < version.Gloas {
|
||||
return errNotSupported("SetNextWithdrawalBuilderIndex", b.version)
|
||||
}
|
||||
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
b.nextWithdrawalBuilderIndex = index
|
||||
b.markFieldAsDirty(types.NextWithdrawalBuilderIndex)
|
||||
return nil
|
||||
}
|
||||
|
||||
// DecreaseWithdrawalBalances applies withdrawal balance decreases for validators and builders.
|
||||
// This method holds the state lock for the full batch to avoid lock churn.
|
||||
func (b *BeaconState) DecreaseWithdrawalBalances(withdrawals []*enginev1.Withdrawal) error {
|
||||
if b.version < version.Gloas {
|
||||
return errNotSupported("DecreaseWithdrawalBalances", b.version)
|
||||
}
|
||||
if len(withdrawals) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
var (
|
||||
balanceIndices []uint64
|
||||
builderIndices []uint64
|
||||
)
|
||||
|
||||
for _, withdrawal := range withdrawals {
|
||||
if withdrawal == nil {
|
||||
return errors.New("withdrawal is nil")
|
||||
}
|
||||
if withdrawal.Amount == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
if withdrawal.ValidatorIndex.IsBuilderIndex() {
|
||||
builderIndex := withdrawal.ValidatorIndex.ToBuilderIndex()
|
||||
if err := b.decreaseBuilderBalanceLockFree(builderIndex, withdrawal.Amount); err != nil {
|
||||
return err
|
||||
}
|
||||
builderIndices = append(builderIndices, uint64(builderIndex))
|
||||
continue
|
||||
}
|
||||
|
||||
balAtIdx, err := b.balanceAtIndex(withdrawal.ValidatorIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
newBal := decreaseBalanceWithVal(balAtIdx, withdrawal.Amount)
|
||||
if err := b.balancesMultiValue.UpdateAt(b, uint64(withdrawal.ValidatorIndex), newBal); err != nil {
|
||||
return pkgerrors.Wrap(err, "could not update balances")
|
||||
}
|
||||
balanceIndices = append(balanceIndices, uint64(withdrawal.ValidatorIndex))
|
||||
}
|
||||
|
||||
if len(balanceIndices) > 0 {
|
||||
b.markFieldAsDirty(types.Balances)
|
||||
b.addDirtyIndices(types.Balances, balanceIndices)
|
||||
}
|
||||
if len(builderIndices) > 0 {
|
||||
b.markFieldAsDirty(types.Builders)
|
||||
b.addDirtyIndices(types.Builders, builderIndices)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *BeaconState) decreaseBuilderBalanceLockFree(builderIndex primitives.BuilderIndex, amount uint64) error {
|
||||
idx := uint64(builderIndex)
|
||||
if idx >= uint64(len(b.builders)) {
|
||||
return fmt.Errorf("builder index %d out of range (len=%d)", builderIndex, len(b.builders))
|
||||
}
|
||||
|
||||
if b.sharedFieldReferences[types.Builders].Refs() > 1 {
|
||||
builders := make([]*ethpb.Builder, len(b.builders))
|
||||
copy(builders, b.builders)
|
||||
b.builders = builders
|
||||
b.sharedFieldReferences[types.Builders].MinusRef()
|
||||
b.sharedFieldReferences[types.Builders] = stateutil.NewRef(1)
|
||||
}
|
||||
|
||||
builder := b.builders[idx]
|
||||
bal := uint64(builder.Balance)
|
||||
if amount >= bal {
|
||||
builder.Balance = 0
|
||||
} else {
|
||||
builder.Balance = primitives.Gwei(bal - amount)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func decreaseBalanceWithVal(currBalance, delta uint64) uint64 {
|
||||
if delta > currBalance {
|
||||
return 0
|
||||
}
|
||||
return currBalance - delta
|
||||
}
|
||||
|
||||
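decreaseBalanceWithVal saturates at zero rather than letting the uint64 underflow; two illustrative calls against the helper defined above:

    _ = decreaseBalanceWithVal(10, 4) // 6: normal decrease
    _ = decreaseBalanceWithVal(3, 5)  // 0: a delta larger than the balance saturates at zero
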
@@ -8,23 +8,24 @@ import (
	"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stateutil"
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v7/runtime/version"
	"github.com/OffchainLabs/prysm/v7/testing/require"
)

type testExecutionPayloadBid struct {
	parentBlockHash    [32]byte
	parentBlockRoot    [32]byte
	blockHash          [32]byte
	prevRandao         [32]byte
	blobKzgCommitments [][]byte
	feeRecipient       [20]byte
	gasLimit           uint64
	builderIndex       primitives.BuilderIndex
	slot               primitives.Slot
	value              primitives.Gwei
	executionPayment   primitives.Gwei
	parentBlockHash        [32]byte
	parentBlockRoot        [32]byte
	blockHash              [32]byte
	prevRandao             [32]byte
	blobKzgCommitmentsRoot [32]byte
	feeRecipient           [20]byte
	gasLimit               uint64
	builderIndex           primitives.BuilderIndex
	slot                   primitives.Slot
	value                  primitives.Gwei
	executionPayment       primitives.Gwei
}

func (t testExecutionPayloadBid) ParentBlockHash() [32]byte { return t.parentBlockHash }
@@ -40,12 +41,9 @@ func (t testExecutionPayloadBid) Value() primitives.Gwei { return t.value }
func (t testExecutionPayloadBid) ExecutionPayment() primitives.Gwei {
	return t.executionPayment
}
func (t testExecutionPayloadBid) BlobKzgCommitments() [][]byte { return t.blobKzgCommitments }
func (t testExecutionPayloadBid) BlobKzgCommitmentCount() uint64 {
	return uint64(len(t.blobKzgCommitments))
}
func (t testExecutionPayloadBid) FeeRecipient() [20]byte { return t.feeRecipient }
func (t testExecutionPayloadBid) IsNil() bool            { return false }
func (t testExecutionPayloadBid) BlobKzgCommitmentsRoot() [32]byte { return t.blobKzgCommitmentsRoot }
func (t testExecutionPayloadBid) FeeRecipient() [20]byte           { return t.feeRecipient }
func (t testExecutionPayloadBid) IsNil() bool                      { return false }

func TestSetExecutionPayloadBid(t *testing.T) {
	t.Run("previous fork returns expected error", func(t *testing.T) {
@@ -60,7 +58,7 @@ func TestSetExecutionPayloadBid(t *testing.T) {
		parentBlockRoot = [32]byte(bytes.Repeat([]byte{0xCD}, 32))
		blockHash       = [32]byte(bytes.Repeat([]byte{0xEF}, 32))
		prevRandao      = [32]byte(bytes.Repeat([]byte{0x11}, 32))
		blobCommitments = [][]byte{bytes.Repeat([]byte{0x22}, 48)}
		blobRoot        = [32]byte(bytes.Repeat([]byte{0x22}, 32))
		feeRecipient    [20]byte
	)
	copy(feeRecipient[:], bytes.Repeat([]byte{0x33}, len(feeRecipient)))
@@ -69,17 +67,17 @@ func TestSetExecutionPayloadBid(t *testing.T) {
		dirtyFields: make(map[types.FieldIndex]bool),
	}
	bid := testExecutionPayloadBid{
		parentBlockHash:    parentBlockHash,
		parentBlockRoot:    parentBlockRoot,
		blockHash:          blockHash,
		prevRandao:         prevRandao,
		blobKzgCommitments: blobCommitments,
		feeRecipient:       feeRecipient,
		gasLimit:           123,
		builderIndex:       7,
		slot:               9,
		value:              11,
		executionPayment:   22,
		parentBlockHash:        parentBlockHash,
		parentBlockRoot:        parentBlockRoot,
		blockHash:              blockHash,
		prevRandao:             prevRandao,
		blobKzgCommitmentsRoot: blobRoot,
		feeRecipient:           feeRecipient,
		gasLimit:               123,
		builderIndex:           7,
		slot:                   9,
		value:                  11,
		executionPayment:       22,
	}

	require.NoError(t, st.SetExecutionPayloadBid(bid))
@@ -89,7 +87,7 @@ func TestSetExecutionPayloadBid(t *testing.T) {
	require.DeepEqual(t, parentBlockRoot[:], st.latestExecutionPayloadBid.ParentBlockRoot)
	require.DeepEqual(t, blockHash[:], st.latestExecutionPayloadBid.BlockHash)
	require.DeepEqual(t, prevRandao[:], st.latestExecutionPayloadBid.PrevRandao)
	require.DeepEqual(t, blobCommitments, st.latestExecutionPayloadBid.BlobKzgCommitments)
	require.DeepEqual(t, blobRoot[:], st.latestExecutionPayloadBid.BlobKzgCommitmentsRoot)
	require.DeepEqual(t, feeRecipient[:], st.latestExecutionPayloadBid.FeeRecipient)
	require.Equal(t, uint64(123), st.latestExecutionPayloadBid.GasLimit)
	require.Equal(t, primitives.BuilderIndex(7), st.latestExecutionPayloadBid.BuilderIndex)
@@ -232,6 +230,235 @@ func TestRotateBuilderPendingPayments_UnsupportedVersion(t *testing.T) {
	require.ErrorContains(t, "RotateBuilderPendingPayments", err)
}

func TestSetPayloadExpectedWithdrawals(t *testing.T) {
	t.Run("previous fork returns expected error", func(t *testing.T) {
		st := &BeaconState{version: version.Fulu}
		err := st.SetPayloadExpectedWithdrawals([]*enginev1.Withdrawal{})
		require.ErrorContains(t, "SetPayloadExpectedWithdrawals", err)
	})

	t.Run("allows nil input and marks dirty", func(t *testing.T) {
		st := &BeaconState{
			version:     version.Gloas,
			dirtyFields: make(map[types.FieldIndex]bool),
		}

		require.NoError(t, st.SetPayloadExpectedWithdrawals(nil))
		require.Equal(t, true, st.payloadExpectedWithdrawals == nil)
		require.Equal(t, true, st.dirtyFields[types.PayloadExpectedWithdrawals])
	})

	t.Run("sets and marks dirty", func(t *testing.T) {
		st := &BeaconState{
			version:                    version.Gloas,
			dirtyFields:                make(map[types.FieldIndex]bool),
			payloadExpectedWithdrawals: []*enginev1.Withdrawal{{Index: 1}, {Index: 2}},
		}

		withdrawals := []*enginev1.Withdrawal{{Index: 3}}
		require.NoError(t, st.SetPayloadExpectedWithdrawals(withdrawals))

		require.DeepEqual(t, withdrawals, st.payloadExpectedWithdrawals)
		require.Equal(t, true, st.dirtyFields[types.PayloadExpectedWithdrawals])
	})
}

func TestDecreaseWithdrawalBalances(t *testing.T) {
	t.Run("previous fork returns expected error", func(t *testing.T) {
		st := &BeaconState{version: version.Fulu}
		err := st.DecreaseWithdrawalBalances([]*enginev1.Withdrawal{{}})
		require.ErrorContains(t, "DecreaseWithdrawalBalances", err)
	})

	t.Run("rejects nil withdrawal", func(t *testing.T) {
		st := &BeaconState{version: version.Gloas}
		err := st.DecreaseWithdrawalBalances([]*enginev1.Withdrawal{nil})
		require.ErrorContains(t, "withdrawal is nil", err)
	})

	t.Run("no-op on empty input", func(t *testing.T) {
		st := &BeaconState{
			version:      version.Gloas,
			dirtyFields:  make(map[types.FieldIndex]bool),
			dirtyIndices: make(map[types.FieldIndex][]uint64),
			rebuildTrie:  make(map[types.FieldIndex]bool),
		}

		require.NoError(t, st.DecreaseWithdrawalBalances(nil))
		require.Equal(t, 0, len(st.dirtyFields))
		require.Equal(t, 0, len(st.dirtyIndices))
	})

	t.Run("updates validator and builder balances and tracks dirty indices", func(t *testing.T) {
		st := &BeaconState{
			version:      version.Gloas,
			dirtyFields:  make(map[types.FieldIndex]bool),
			dirtyIndices: make(map[types.FieldIndex][]uint64),
			rebuildTrie:  make(map[types.FieldIndex]bool),
			sharedFieldReferences: map[types.FieldIndex]*stateutil.Reference{
				types.Builders: stateutil.NewRef(1),
			},
			balancesMultiValue: NewMultiValueBalances([]uint64{100, 200, 300}),
			builders: []*ethpb.Builder{
				{Balance: 1000},
				{Balance: 50},
			},
		}

		withdrawals := []*enginev1.Withdrawal{
			{ValidatorIndex: primitives.ValidatorIndex(1), Amount: 20},
			{ValidatorIndex: primitives.BuilderIndex(1).ToValidatorIndex(), Amount: 30},
			{ValidatorIndex: primitives.ValidatorIndex(2), Amount: 400},
			{ValidatorIndex: primitives.BuilderIndex(0).ToValidatorIndex(), Amount: 2000},
			{ValidatorIndex: primitives.ValidatorIndex(0), Amount: 0},
		}

		require.NoError(t, st.DecreaseWithdrawalBalances(withdrawals))

		require.DeepEqual(t, []uint64{100, 180, 0}, st.Balances())
		require.Equal(t, primitives.Gwei(0), st.builders[0].Balance)
		require.Equal(t, primitives.Gwei(20), st.builders[1].Balance)

		require.Equal(t, true, st.dirtyFields[types.Balances])
		require.Equal(t, true, st.dirtyFields[types.Builders])
		require.DeepEqual(t, []uint64{1, 2}, st.dirtyIndices[types.Balances])
		require.DeepEqual(t, []uint64{1, 0}, st.dirtyIndices[types.Builders])
	})

	t.Run("returns error on builder index out of range", func(t *testing.T) {
		st := &BeaconState{
			version:      version.Gloas,
			dirtyFields:  make(map[types.FieldIndex]bool),
			dirtyIndices: make(map[types.FieldIndex][]uint64),
			rebuildTrie:  make(map[types.FieldIndex]bool),
			sharedFieldReferences: map[types.FieldIndex]*stateutil.Reference{
				types.Builders: stateutil.NewRef(1),
			},
			builders: []*ethpb.Builder{{Balance: 5}},
		}

		err := st.DecreaseWithdrawalBalances([]*enginev1.Withdrawal{
			{ValidatorIndex: primitives.BuilderIndex(2).ToValidatorIndex(), Amount: 1},
		})
		require.ErrorContains(t, "out of range", err)
		require.Equal(t, false, st.dirtyFields[types.Builders])
		require.Equal(t, 0, len(st.dirtyIndices[types.Builders]))
	})
}

func TestDequeueBuilderPendingWithdrawals(t *testing.T) {
	t.Run("previous fork returns expected error", func(t *testing.T) {
		st := &BeaconState{version: version.Fulu}
		err := st.DequeueBuilderPendingWithdrawals(1)
		require.ErrorContains(t, "DequeueBuilderPendingWithdrawals", err)
	})

	t.Run("returns error when dequeueing more than length", func(t *testing.T) {
		st := &BeaconState{
			version:     version.Gloas,
			dirtyFields: make(map[types.FieldIndex]bool),
			sharedFieldReferences: map[types.FieldIndex]*stateutil.Reference{
				types.BuilderPendingWithdrawals: stateutil.NewRef(1),
			},
			builderPendingWithdrawals: []*ethpb.BuilderPendingWithdrawal{{Amount: 1}},
		}

		err := st.DequeueBuilderPendingWithdrawals(2)
		require.ErrorContains(t, "cannot dequeue more builder withdrawals", err)
		require.Equal(t, 1, len(st.builderPendingWithdrawals))
		require.Equal(t, false, st.dirtyFields[types.BuilderPendingWithdrawals])
	})

	t.Run("no-op on zero", func(t *testing.T) {
		st := &BeaconState{
			version:     version.Gloas,
			dirtyFields: make(map[types.FieldIndex]bool),
			sharedFieldReferences: map[types.FieldIndex]*stateutil.Reference{
				types.BuilderPendingWithdrawals: stateutil.NewRef(1),
			},
			builderPendingWithdrawals: []*ethpb.BuilderPendingWithdrawal{{Amount: 1}},
		}

		require.NoError(t, st.DequeueBuilderPendingWithdrawals(0))
		require.Equal(t, 1, len(st.builderPendingWithdrawals))
		require.Equal(t, false, st.dirtyFields[types.BuilderPendingWithdrawals])
		require.Equal(t, false, st.rebuildTrie[types.BuilderPendingWithdrawals])
	})

	t.Run("dequeues and marks dirty", func(t *testing.T) {
		st := &BeaconState{
			version:     version.Gloas,
			dirtyFields: make(map[types.FieldIndex]bool),
			sharedFieldReferences: map[types.FieldIndex]*stateutil.Reference{
				types.BuilderPendingWithdrawals: stateutil.NewRef(1),
			},
			builderPendingWithdrawals: []*ethpb.BuilderPendingWithdrawal{
				{Amount: 1},
				{Amount: 2},
				{Amount: 3},
			},
			rebuildTrie: make(map[types.FieldIndex]bool),
		}

		require.NoError(t, st.DequeueBuilderPendingWithdrawals(2))
		require.Equal(t, 1, len(st.builderPendingWithdrawals))
		require.Equal(t, primitives.Gwei(3), st.builderPendingWithdrawals[0].Amount)
		require.Equal(t, true, st.dirtyFields[types.BuilderPendingWithdrawals])
		require.Equal(t, true, st.rebuildTrie[types.BuilderPendingWithdrawals])
	})

	t.Run("copy-on-write preserves shared state", func(t *testing.T) {
		sharedRef := stateutil.NewRef(2)
		sharedWithdrawals := []*ethpb.BuilderPendingWithdrawal{
			{Amount: 1},
			{Amount: 2},
			{Amount: 3},
		}

		st1 := &BeaconState{
			version:     version.Gloas,
			dirtyFields: make(map[types.FieldIndex]bool),
			sharedFieldReferences: map[types.FieldIndex]*stateutil.Reference{
				types.BuilderPendingWithdrawals: sharedRef,
			},
			builderPendingWithdrawals: sharedWithdrawals,
			rebuildTrie:               make(map[types.FieldIndex]bool),
		}
		st2 := &BeaconState{
			sharedFieldReferences: map[types.FieldIndex]*stateutil.Reference{
				types.BuilderPendingWithdrawals: sharedRef,
			},
			builderPendingWithdrawals: sharedWithdrawals,
		}

		require.NoError(t, st1.DequeueBuilderPendingWithdrawals(2))
		require.Equal(t, primitives.Gwei(3), st1.builderPendingWithdrawals[0].Amount)
		require.Equal(t, 3, len(st2.builderPendingWithdrawals))
		require.Equal(t, primitives.Gwei(1), st2.builderPendingWithdrawals[0].Amount)
		require.Equal(t, uint(1), st1.sharedFieldReferences[types.BuilderPendingWithdrawals].Refs())
		require.Equal(t, uint(1), st2.sharedFieldReferences[types.BuilderPendingWithdrawals].Refs())
	})
}
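The copy-on-write test above relies on the reference counting provided by stateutil. The following is a hypothetical sketch of that pattern; the `stateutil.Reference` calls (`Refs`, `MinusRef`, `NewRef`) appear in this diff, but the helper itself is illustrative only:

```go
// copyOnWrite returns a privately owned slice (and a fresh reference) when the
// backing array is shared by more than one state; otherwise it returns the
// inputs unchanged so the caller can mutate in place.
func copyOnWrite(ref *stateutil.Reference, shared []*ethpb.BuilderPendingWithdrawal) (*stateutil.Reference, []*ethpb.BuilderPendingWithdrawal) {
	if ref.Refs() <= 1 {
		return ref, shared // sole owner: safe to mutate in place
	}
	private := make([]*ethpb.BuilderPendingWithdrawal, len(shared))
	copy(private, shared)
	ref.MinusRef() // release this state's claim on the shared slice
	return stateutil.NewRef(1), private
}
```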
func TestSetNextWithdrawalBuilderIndex(t *testing.T) {
	t.Run("previous fork returns expected error", func(t *testing.T) {
		st := &BeaconState{version: version.Fulu}
		err := st.SetNextWithdrawalBuilderIndex(1)
		require.ErrorContains(t, "SetNextWithdrawalBuilderIndex", err)
	})

	t.Run("sets and marks dirty", func(t *testing.T) {
		st := &BeaconState{
			version:     version.Gloas,
			dirtyFields: make(map[types.FieldIndex]bool),
		}

		require.NoError(t, st.SetNextWithdrawalBuilderIndex(7))
		require.Equal(t, primitives.BuilderIndex(7), st.nextWithdrawalBuilderIndex)
		require.Equal(t, true, st.dirtyFields[types.NextWithdrawalBuilderIndex])
	})
}

func TestAppendBuilderPendingWithdrawal_CopyOnWrite(t *testing.T) {
	wd := &ethpb.BuilderPendingWithdrawal{
		FeeRecipient: make([]byte, 20),

@@ -1027,10 +1027,10 @@ func TestGetVerifyingStateEdgeCases(t *testing.T) {
			sc: signatureCache,
			sr: &mockStateByRooter{sbr: sbrErrorIfCalled(t)}, // Should not be called
			hsp: &mockHeadStateProvider{
				headRoot:          parentRoot[:],    // Same as parent
				headSlot:          32,               // Epoch 1
				headState:         fuluState.Copy(), // HeadState (not ReadOnly) for ProcessSlots
				headStateReadOnly: nil,              // Should not use ReadOnly path
				headRoot:          parentRoot[:],    // Same as parent
				headSlot:          32,               // Epoch 1
				headState:         fuluState.Copy(), // HeadState (not ReadOnly) for ProcessSlots
				headStateReadOnly: nil,              // Should not use ReadOnly path
			},
			fc: &mockForkchoicer{
				// Return same root for both to simulate same chain
@@ -1045,8 +1045,8 @@ func TestGetVerifyingStateEdgeCases(t *testing.T) {
	// Wrap to detect HeadState call
	originalHsp := initializer.shared.hsp.(*mockHeadStateProvider)
	wrappedHsp := &mockHeadStateProvider{
		headRoot: originalHsp.headRoot,
		headSlot: originalHsp.headSlot,
		headRoot:  originalHsp.headRoot,
		headSlot:  originalHsp.headSlot,
		headState: originalHsp.headState,
	}
	initializer.shared.hsp = &headStateCallTracker{
@@ -1,3 +0,0 @@
### Added

- Set beacon node options after reading the config file.
@@ -1,3 +0,0 @@
### Fixed

- Fixed a bug where `cmd/beacon-chain/execution` was being ignored by `hack/gen-logs.sh` due to a `.gitignore` rule.
@@ -1,3 +0,0 @@
### Changed

- Fixed the logging issue described in #16314.
@@ -1,3 +0,0 @@
### Added

- New beacon API endpoint `eth/v2/node/version`.
@@ -1,7 +0,0 @@
### Changed

- gRPC fallback now matches the REST API implementation and also checks for and connects only to synced nodes.

### Removed

- gRPC resolver for load balancing; the new implementation matches the REST API's, so the resolver is removed and hosts are handled the same way for consistency.
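To illustrate the "only synced nodes" behaviour described in the fallback entry above, here is a minimal sketch of a health check against the standard beacon node API. This is not Prysm's connection-provider code; the helper name and structure are hypothetical, only the `/eth/v1/node/syncing` endpoint is standard:

```go
package main

import (
	"context"
	"encoding/json"
	"net/http"
)

// isSynced reports whether a candidate beacon node is safe to fail over to,
// based on the standard /eth/v1/node/syncing endpoint.
func isSynced(ctx context.Context, baseURL string) (bool, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, baseURL+"/eth/v1/node/syncing", nil)
	if err != nil {
		return false, err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return false, err
	}
	defer resp.Body.Close()

	var body struct {
		Data struct {
			IsSyncing bool `json:"is_syncing"`
		} `json:"data"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&body); err != nil {
		return false, err
	}
	// A node still syncing is skipped; only fully synced candidates are used.
	return !body.Data.IsSyncing, nil
}
```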
@@ -1,11 +0,0 @@
### Ignored

- Moved the healthy-node discovery logic to the connection provider, plus various naming cleanups.

### Changed

- Improved node fallback logs.

### Fixed

- A potential race condition when switching hosts quickly and reconnecting to the same host on an old connection.
@@ -1,3 +0,0 @@
### Changed

- Improved integration with ethspecify so spec references (specrefs) can be used throughout the codebase.
@@ -1,2 +0,0 @@
### Ignored
- Remove unused `HighestBlockDelay` method in forkchoice.
@@ -1,2 +0,0 @@
### Ignored
- Remove unused method in forkchoice.
@@ -1,2 +0,0 @@
### Ignored
- Remove unused map in forkchoice.
@@ -1,3 +0,0 @@
### Fixed

- Fixed some database slices that were used outside of a read transaction. See the [bbolt README](https://github.com/etcd-io/bbolt/blob/7b38172caf8cde993d187be4b8738fbe9266fde8/README.md?plain=1#L852) for more on this caveat.
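The bbolt caveat referenced above is that byte slices returned by `Get` are only valid for the lifetime of the transaction. A minimal sketch of the safe pattern follows; the database path, bucket, and key names are made up for illustration:

```go
package main

import (
	"fmt"
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("example.db", 0o600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var value []byte
	err = db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("blocks"))
		if b == nil {
			return nil
		}
		// Get returns memory owned by the transaction; copy it before the
		// closure returns so the slice stays valid afterwards.
		value = append([]byte(nil), b.Get([]byte("head"))...)
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("copied %d bytes\n", len(value))
}
```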
@@ -1,3 +0,0 @@
### Ignored

- Updated golangci to run lint on tests too.
@@ -1,2 +0,0 @@
### Ignored
- Close opened hosts in test helpers.
@@ -1,3 +0,0 @@
### Ignored

- Add handy documentation for the SSZ Query package (`encoding/ssz/query`).
@@ -1,3 +0,0 @@
### Changed

- Moved blob KZG commitments into `ExecutionPayloadBid` and removed them from `ExecutionPayloadEnvelope` for Gloas.

2
changelog/tt_process-withdrawals-gloas.md
Normal file
@@ -0,0 +1,2 @@
### Added
- Add modified process withdrawals for Gloas.
@@ -1,3 +0,0 @@
### Added

- Add a read-only wrapper for the execution payload envelope for Gloas.
@@ -1,2 +0,0 @@
### Ignored
- Run `go fmt`.
@@ -1,9 +1,5 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package execution

import "github.com/sirupsen/logrus"

// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "cmd/beacon-chain/execution")
var log = logrus.WithField("prefix", "execution")

@@ -188,8 +188,8 @@ func before(ctx *cli.Context) error {
		return errors.Wrap(err, "failed to parse log vmodule")
	}

	// set the global logging level and data
	logs.SetLoggingLevelAndData(verbosityLevel, vmodule, maxLevel, ctx.Bool(flags.DisableEphemeralLogFile.Name))
	// set the global logging level to allow for the highest verbosity requested
	logs.SetLoggingLevel(max(verbosityLevel, maxLevel))

	format := ctx.String(cmd.LogFormat.Name)
	switch format {
@@ -210,7 +210,6 @@ func before(ctx *cli.Context) error {
			Formatter:     formatter,
			Writer:        os.Stderr,
			AllowedLevels: logrus.AllLevels[:max(verbosityLevel, maxLevel)+1],
			Identifier:    logs.LogTargetUser,
		})
	case "fluentd":
		f := joonix.NewFormatter()
@@ -368,8 +367,17 @@ func startNode(ctx *cli.Context, cancel context.CancelFunc) error {
		backfill.BeaconNodeOptions,
		das.BeaconNodeOptions,
	}
	for _, of := range optFuncs {
		ofo, err := of(ctx)
		if err != nil {
			return err
		}
		if ofo != nil {
			opts = append(opts, ofo...)
		}
	}

	beacon, err := node.New(ctx, cancel, optFuncs, opts...)
	beacon, err := node.New(ctx, cancel, opts...)
	if err != nil {
		return fmt.Errorf("unable to start beacon node: %w", err)
	}
@@ -164,8 +164,8 @@ func main() {
		return errors.Wrap(err, "failed to parse log vmodule")
	}

	// set the global logging level and data
	logs.SetLoggingLevelAndData(verbosityLevel, vmodule, maxLevel, ctx.Bool(flags.DisableEphemeralLogFile.Name))
	// set the global logging level to allow for the highest verbosity requested
	logs.SetLoggingLevel(max(maxLevel, verbosityLevel))

	logFileName := ctx.String(cmd.LogFileName.Name)

@@ -188,7 +188,6 @@ func main() {
			Formatter:     formatter,
			Writer:        os.Stderr,
			AllowedLevels: logrus.AllLevels[:max(verbosityLevel, maxLevel)+1],
			Identifier:    logs.LogTargetUser,
		})
	case "fluentd":
		f := joonix.NewFormatter()
@@ -130,6 +130,7 @@ type BeaconChainConfig struct {
	MaxWithdrawalsPerPayload         uint64 `yaml:"MAX_WITHDRAWALS_PER_PAYLOAD" spec:"true"`          // MaxWithdrawalsPerPayload defines the maximum number of withdrawals in a block.
	MaxBlsToExecutionChanges         uint64 `yaml:"MAX_BLS_TO_EXECUTION_CHANGES" spec:"true"`         // MaxBlsToExecutionChanges defines the maximum number of BLS-to-execution-change objects in a block.
	MaxValidatorsPerWithdrawalsSweep uint64 `yaml:"MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP" spec:"true"` // MaxValidatorsPerWithdrawalsSweep bounds the size of the sweep searching for withdrawals per slot.
	MaxBuildersPerWithdrawalsSweep   uint64 `yaml:"MAX_BUILDERS_PER_WITHDRAWALS_SWEEP" spec:"true"`   // MaxBuildersPerWithdrawalsSweep bounds the size of the builder withdrawals sweep per slot.

	// BLS domain values.
	DomainBeaconProposer [4]byte `yaml:"DOMAIN_BEACON_PROPOSER" spec:"true"` // DomainBeaconProposer defines the BLS signature domain for beacon proposal verification.
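For context, here is a hypothetical sketch of how a per-slot bound such as MaxBuildersPerWithdrawalsSweep is typically consumed. Only the two config fields come from this diff (accessed through `params.BeaconConfig()` from `config/params`); the function, its parameters, and the sweep order are illustrative, not Prysm's implementation:

```go
// sweepBuilders walks at most MaxBuildersPerWithdrawalsSweep builders starting at
// nextIndex and collects the indices for which dueWithdrawal reports true, while
// also respecting the per-payload withdrawal cap.
func sweepBuilders(nextIndex, builderCount uint64, dueWithdrawal func(uint64) bool) []uint64 {
	cfg := params.BeaconConfig()
	var hits []uint64
	for i := uint64(0); i < cfg.MaxBuildersPerWithdrawalsSweep && builderCount > 0; i++ {
		if uint64(len(hits)) >= cfg.MaxWithdrawalsPerPayload {
			break // the payload is full, stop sweeping early
		}
		idx := (nextIndex + i) % builderCount
		if dueWithdrawal(idx) {
			hits = append(hits, idx)
		}
	}
	return hits
}
```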
@@ -194,6 +194,7 @@ func ConfigToYaml(cfg *BeaconChainConfig) []byte {
		fmt.Sprintf("SHARD_COMMITTEE_PERIOD: %d", cfg.ShardCommitteePeriod),
		fmt.Sprintf("MIN_VALIDATOR_WITHDRAWABILITY_DELAY: %d", cfg.MinValidatorWithdrawabilityDelay),
		fmt.Sprintf("MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP: %d", cfg.MaxValidatorsPerWithdrawalsSweep),
		fmt.Sprintf("MAX_BUILDERS_PER_WITHDRAWALS_SWEEP: %d", cfg.MaxBuildersPerWithdrawalsSweep),
		fmt.Sprintf("MAX_SEED_LOOKAHEAD: %d", cfg.MaxSeedLookahead),
		fmt.Sprintf("EJECTION_BALANCE: %d", cfg.EjectionBalance),
		fmt.Sprintf("MIN_PER_EPOCH_CHURN_LIMIT: %d", cfg.MinPerEpochChurnLimit),

@@ -175,6 +175,7 @@ var mainnetBeaconConfig = &BeaconChainConfig{
	MaxWithdrawalsPerPayload:         16,
	MaxBlsToExecutionChanges:         16,
	MaxValidatorsPerWithdrawalsSweep: 16384,
	MaxBuildersPerWithdrawalsSweep:   16384,

	// BLS domain values.
	DomainBeaconProposer: bytesutil.Uint32ToBytes4(0x00000000),

@@ -94,6 +94,7 @@ func compareConfigs(t *testing.T, expected, actual *params.BeaconChainConfig) {
	require.DeepEqual(t, expected.MaxDeposits, actual.MaxDeposits)
	require.DeepEqual(t, expected.MaxVoluntaryExits, actual.MaxVoluntaryExits)
	require.DeepEqual(t, expected.MaxWithdrawalsPerPayload, actual.MaxWithdrawalsPerPayload)
	require.DeepEqual(t, expected.MaxBuildersPerWithdrawalsSweep, actual.MaxBuildersPerWithdrawalsSweep)
	require.DeepEqual(t, expected.DomainBeaconProposer, actual.DomainBeaconProposer)
	require.DeepEqual(t, expected.DomainRandao, actual.DomainRandao)
	require.DeepEqual(t, expected.DomainBeaconAttester, actual.DomainBeaconAttester)

@@ -4,7 +4,6 @@ go_library(
    name = "go_default_library",
    srcs = [
        "execution.go",
        "execution_payload_envelope.go",
        "factory.go",
        "get_payload.go",
        "getters.go",
@@ -46,7 +45,6 @@ go_library(
go_test(
    name = "go_default_test",
    srcs = [
        "execution_payload_envelope_test.go",
        "execution_test.go",
        "factory_test.go",
        "getters_test.go",
@@ -1,131 +0,0 @@
package blocks

import (
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
	field_params "github.com/OffchainLabs/prysm/v7/config/fieldparams"
	consensus_types "github.com/OffchainLabs/prysm/v7/consensus-types"
	"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"google.golang.org/protobuf/proto"
)

type signedExecutionPayloadEnvelope struct {
	s *ethpb.SignedExecutionPayloadEnvelope
}

type executionPayloadEnvelope struct {
	p *ethpb.ExecutionPayloadEnvelope
}

// WrappedROSignedExecutionPayloadEnvelope wraps a signed execution payload envelope proto in a read-only interface.
func WrappedROSignedExecutionPayloadEnvelope(s *ethpb.SignedExecutionPayloadEnvelope) (interfaces.ROSignedExecutionPayloadEnvelope, error) {
	w := signedExecutionPayloadEnvelope{s: s}
	if w.IsNil() {
		return nil, consensus_types.ErrNilObjectWrapped
	}
	return w, nil
}

// WrappedROExecutionPayloadEnvelope wraps an execution payload envelope proto in a read-only interface.
func WrappedROExecutionPayloadEnvelope(p *ethpb.ExecutionPayloadEnvelope) (interfaces.ROExecutionPayloadEnvelope, error) {
	w := &executionPayloadEnvelope{p: p}
	if w.IsNil() {
		return nil, consensus_types.ErrNilObjectWrapped
	}
	return w, nil
}

// Envelope returns the execution payload envelope as a read-only interface.
func (s signedExecutionPayloadEnvelope) Envelope() (interfaces.ROExecutionPayloadEnvelope, error) {
	return WrappedROExecutionPayloadEnvelope(s.s.Message)
}

// Signature returns the BLS signature as a 96-byte array.
func (s signedExecutionPayloadEnvelope) Signature() [field_params.BLSSignatureLength]byte {
	return [field_params.BLSSignatureLength]byte(s.s.Signature)
}

// IsNil reports whether the signed envelope or its contents are invalid.
func (s signedExecutionPayloadEnvelope) IsNil() bool {
	if s.s == nil {
		return true
	}
	if len(s.s.Signature) != field_params.BLSSignatureLength {
		return true
	}
	if len(s.s.Message.BeaconBlockRoot) != field_params.RootLength {
		return true
	}
	if len(s.s.Message.StateRoot) != field_params.RootLength {
		return true
	}
	if s.s.Message.ExecutionRequests == nil {
		return true
	}
	if s.s.Message.Payload == nil {
		return true
	}
	w := executionPayloadEnvelope{p: s.s.Message}
	return w.IsNil()
}

// SigningRoot computes the signing root for the signed envelope with the provided domain.
func (s signedExecutionPayloadEnvelope) SigningRoot(domain []byte) (root [32]byte, err error) {
	return signing.ComputeSigningRoot(s.s.Message, domain)
}

// Proto returns the underlying protobuf message.
func (s signedExecutionPayloadEnvelope) Proto() proto.Message {
	return s.s
}

// IsNil reports whether the envelope or its required fields are invalid.
func (p *executionPayloadEnvelope) IsNil() bool {
	if p.p == nil {
		return true
	}
	if p.p.Payload == nil {
		return true
	}
	if len(p.p.BeaconBlockRoot) != field_params.RootLength {
		return true
	}
	return false
}

// IsBlinded reports whether the envelope contains a blinded payload.
func (p *executionPayloadEnvelope) IsBlinded() bool {
	return false
}

// Execution returns the execution payload as a read-only interface.
func (p *executionPayloadEnvelope) Execution() (interfaces.ExecutionData, error) {
	return WrappedExecutionPayloadDeneb(p.p.Payload)
}

// ExecutionRequests returns the execution requests attached to the envelope.
func (p *executionPayloadEnvelope) ExecutionRequests() *enginev1.ExecutionRequests {
	return ethpb.CopyExecutionRequests(p.p.ExecutionRequests)
}

// BuilderIndex returns the proposer/builder index for the envelope.
func (p *executionPayloadEnvelope) BuilderIndex() primitives.BuilderIndex {
	return p.p.BuilderIndex
}

// BeaconBlockRoot returns the beacon block root referenced by the envelope.
func (p *executionPayloadEnvelope) BeaconBlockRoot() [field_params.RootLength]byte {
	return [field_params.RootLength]byte(p.p.BeaconBlockRoot)
}

// Slot returns the slot of the envelope.
func (p *executionPayloadEnvelope) Slot() primitives.Slot {
	return p.p.Slot
}

// StateRoot returns the state root carried by the envelope.
func (p *executionPayloadEnvelope) StateRoot() [field_params.RootLength]byte {
	return [field_params.RootLength]byte(p.p.StateRoot)
}
Some files were not shown because too many files have changed in this diff.