Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-24 04:38:07 -05:00)
Compare commits: mv-core-re ... gRPC-fallb (38 commits)
Commits in this comparison (SHA1):
fe7b2bf20e, 7cc6ded31a, 5fd3300fdb, f74a9cb3ec, 3d903d5d75, 5f335b1b58, 1f7f7c6833, 4e952354d1,
5268da43f1, a6fc327cfb, cf04b457a6, 4586b0accf, 3a71ad2ec1, 588766e520, ecc19bc6ed, 164d2d50fd,
1355a9ff4d, 34478f30c8, 214b4428e6, db82f3cc9d, 9c874037d1, 9bb231fb3b, 6432140603, 5e0a9ff992,
0bfd661baf, b21acc0bbb, f6f65987c6, d26cdd74ee, d1905cb018, 9f828bdd88, 17413b52ed, a651e7f0ac,
3e1cb45e92, fc2dcb0e88, 888db581dd, f1d2ee72e2, 31f18b9f60, 6462c997e9
@@ -3,13 +3,16 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = [
        "grpc_connection_provider.go",
        "grpcutils.go",
        "log.go",
        "mock_grpc_provider.go",
        "parameters.go",
    ],
    importpath = "github.com/OffchainLabs/prysm/v7/api/grpc",
    visibility = ["//visibility:public"],
    deps = [
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@org_golang_google_grpc//:go_default_library",
        "@org_golang_google_grpc//metadata:go_default_library",
@@ -18,12 +21,17 @@ go_library(

go_test(
    name = "go_default_test",
    srcs = ["grpcutils_test.go"],
    srcs = [
        "grpc_connection_provider_test.go",
        "grpcutils_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        "//testing/assert:go_default_library",
        "//testing/require:go_default_library",
        "@com_github_sirupsen_logrus//hooks/test:go_default_library",
        "@org_golang_google_grpc//:go_default_library",
        "@org_golang_google_grpc//credentials/insecure:go_default_library",
        "@org_golang_google_grpc//metadata:go_default_library",
    ],
)
172 api/grpc/grpc_connection_provider.go Normal file
@@ -0,0 +1,172 @@
package grpc

import (
    "context"
    "strings"
    "sync"
    "sync/atomic"

    pkgErrors "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
    "google.golang.org/grpc"
)

// GrpcConnectionProvider manages gRPC connections for failover support.
// It allows switching between different beacon node endpoints when the current one becomes unavailable.
// Only one connection is maintained at a time - when switching hosts, the old connection is closed.
type GrpcConnectionProvider interface {
    // CurrentConn returns the currently active gRPC connection.
    // The connection is created lazily on first call.
    // Returns nil if the provider has been closed.
    CurrentConn() *grpc.ClientConn
    // CurrentHost returns the address of the currently active endpoint.
    CurrentHost() string
    // Hosts returns all configured endpoint addresses.
    Hosts() []string
    // SetHost switches to the endpoint at the given index.
    // The new connection is created lazily on next CurrentConn() call.
    SetHost(index int) error
    // Close closes the current connection.
    Close() error
}

type grpcConnectionProvider struct {
    // Immutable after construction - no lock needed for reads
    endpoints []string
    ctx       context.Context
    dialOpts  []grpc.DialOption

    // Current connection state (protected by mu)
    currentIndex uint64
    conn         *grpc.ClientConn

    mu     sync.Mutex
    closed atomic.Bool
}

// NewGrpcConnectionProvider creates a new connection provider that manages gRPC connections.
// The endpoint parameter can be a comma-separated list of addresses (e.g., "host1:4000,host2:4000").
// Only one connection is maintained at a time, created lazily on first use.
func NewGrpcConnectionProvider(
    ctx context.Context,
    endpoint string,
    dialOpts []grpc.DialOption,
) (GrpcConnectionProvider, error) {
    endpoints := parseEndpoints(endpoint)
    if len(endpoints) == 0 {
        return nil, pkgErrors.New("no gRPC endpoints provided")
    }

    log.WithFields(logrus.Fields{
        "endpoints": endpoints,
        "count":     len(endpoints),
    }).Info("Initialized gRPC connection provider with multiple endpoints")

    return &grpcConnectionProvider{
        endpoints: endpoints,
        ctx:       ctx,
        dialOpts:  dialOpts,
    }, nil
}

// parseEndpoints splits a comma-separated endpoint string into individual endpoints.
func parseEndpoints(endpoint string) []string {
    if endpoint == "" {
        return nil
    }
    var endpoints []string
    for p := range strings.SplitSeq(endpoint, ",") {
        if p = strings.TrimSpace(p); p != "" {
            endpoints = append(endpoints, p)
        }
    }
    return endpoints
}

func (p *grpcConnectionProvider) CurrentConn() *grpc.ClientConn {
    if p.closed.Load() {
        return nil
    }

    p.mu.Lock()
    defer p.mu.Unlock()

    // Return existing connection if available
    if p.conn != nil {
        return p.conn
    }

    // Create connection lazily
    ep := p.endpoints[p.currentIndex]
    conn, err := grpc.DialContext(p.ctx, ep, p.dialOpts...)
    if err != nil {
        log.WithError(err).WithField("endpoint", ep).Error("Failed to create gRPC connection")
        return nil
    }

    p.conn = conn
    log.WithField("endpoint", ep).Debug("Created gRPC connection")
    return conn
}

func (p *grpcConnectionProvider) CurrentHost() string {
    p.mu.Lock()
    defer p.mu.Unlock()
    return p.endpoints[p.currentIndex]
}

func (p *grpcConnectionProvider) Hosts() []string {
    // Return a copy to maintain immutability
    hosts := make([]string, len(p.endpoints))
    copy(hosts, p.endpoints)
    return hosts
}

func (p *grpcConnectionProvider) SetHost(index int) error {
    if index < 0 || index >= len(p.endpoints) {
        return pkgErrors.Errorf("invalid host index %d, must be between 0 and %d", index, len(p.endpoints)-1)
    }

    p.mu.Lock()
    defer p.mu.Unlock()

    if uint64(index) == p.currentIndex {
        return nil // Already on this host
    }

    oldHost := p.endpoints[p.currentIndex]

    // Close existing connection if any
    if p.conn != nil {
        if err := p.conn.Close(); err != nil {
            log.WithError(err).WithField("endpoint", oldHost).Debug("Failed to close previous connection")
        }
        p.conn = nil
    }

    p.currentIndex = uint64(index)

    log.WithFields(logrus.Fields{
        "previousHost": oldHost,
        "newHost":      p.endpoints[index],
    }).Debug("Switched gRPC endpoint")
    return nil
}

func (p *grpcConnectionProvider) Close() error {
    p.mu.Lock()
    defer p.mu.Unlock()

    if p.closed.Load() {
        return nil
    }
    p.closed.Store(true)

    if p.conn != nil {
        if err := p.conn.Close(); err != nil {
            return pkgErrors.Wrapf(err, "failed to close connection to %s", p.endpoints[p.currentIndex])
        }
        p.conn = nil
    }
    return nil
}
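The interface comments above describe lazy dialing and single-connection switching; the sketch below shows how a caller might drive failover on top of it. This is an illustration only and not part of the diff: the call parameter stands in for whatever RPC the caller actually issues, and the import path is the one declared in the BUILD file above.

package main

import (
    "context"
    "errors"

    grpcapi "github.com/OffchainLabs/prysm/v7/api/grpc"
    "google.golang.org/grpc"
)

// callWithFailover is a sketch of caller-side failover on top of the provider:
// walk the configured hosts, switch with SetHost, and retry the supplied call
// against the lazily created connection.
func callWithFailover(ctx context.Context, p grpcapi.GrpcConnectionProvider, call func(context.Context, *grpc.ClientConn) error) error {
    lastErr := errors.New("no gRPC endpoints configured")
    for i := range p.Hosts() {
        if err := p.SetHost(i); err != nil {
            return err
        }
        conn := p.CurrentConn() // dialed lazily; nil if dialing failed or the provider is closed
        if conn == nil {
            lastErr = errors.New("could not connect to " + p.CurrentHost())
            continue
        }
        if lastErr = call(ctx, conn); lastErr == nil {
            return nil
        }
    }
    return lastErr
}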
203 api/grpc/grpc_connection_provider_test.go Normal file
@@ -0,0 +1,203 @@
package grpc

import (
    "context"
    "net"
    "strings"
    "testing"

    "github.com/OffchainLabs/prysm/v7/testing/assert"
    "github.com/OffchainLabs/prysm/v7/testing/require"
    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"
)

func TestParseEndpoints(t *testing.T) {
    tests := []struct {
        name     string
        input    string
        expected []string
    }{
        {"single endpoint", "localhost:4000", []string{"localhost:4000"}},
        {"multiple endpoints", "host1:4000,host2:4000,host3:4000", []string{"host1:4000", "host2:4000", "host3:4000"}},
        {"endpoints with spaces", "host1:4000, host2:4000 , host3:4000", []string{"host1:4000", "host2:4000", "host3:4000"}},
        {"empty string", "", nil},
        {"only commas", ",,,", nil},
        {"trailing comma", "host1:4000,host2:4000,", []string{"host1:4000", "host2:4000"}},
        {"leading comma", ",host1:4000,host2:4000", []string{"host1:4000", "host2:4000"}},
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            assert.DeepEqual(t, tt.expected, parseEndpoints(tt.input))
        })
    }
}

func TestNewGrpcConnectionProvider_Errors(t *testing.T) {
    t.Run("no endpoints", func(t *testing.T) {
        dialOpts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
        _, err := NewGrpcConnectionProvider(context.Background(), "", dialOpts)
        require.ErrorContains(t, "no gRPC endpoints provided", err)
    })
}

func TestGrpcConnectionProvider_LazyConnection(t *testing.T) {
    // Start only one server but configure provider with two endpoints
    lis, err := net.Listen("tcp", "127.0.0.1:0")
    require.NoError(t, err)
    server := grpc.NewServer()
    go func() { _ = server.Serve(lis) }()
    defer server.Stop()

    validAddr := lis.Addr().String()
    invalidAddr := "127.0.0.1:1" // Port 1 is unlikely to be listening

    // Provider should succeed even though second endpoint is invalid (lazy connections)
    endpoint := validAddr + "," + invalidAddr
    ctx := context.Background()
    dialOpts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
    provider, err := NewGrpcConnectionProvider(ctx, endpoint, dialOpts)
    require.NoError(t, err, "Provider creation should succeed with lazy connections")
    defer func() { _ = provider.Close() }()

    // First endpoint should work
    conn := provider.CurrentConn()
    assert.NotNil(t, conn, "First connection should be created lazily")
}

func TestGrpcConnectionProvider_SingleConnectionModel(t *testing.T) {
    // Create provider with 3 endpoints
    var addrs []string
    var servers []*grpc.Server

    for range 3 {
        lis, err := net.Listen("tcp", "127.0.0.1:0")
        require.NoError(t, err)
        server := grpc.NewServer()
        go func() { _ = server.Serve(lis) }()
        addrs = append(addrs, lis.Addr().String())
        servers = append(servers, server)
    }
    defer func() {
        for _, s := range servers {
            s.Stop()
        }
    }()

    endpoint := strings.Join(addrs, ",")
    ctx := context.Background()
    dialOpts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
    provider, err := NewGrpcConnectionProvider(ctx, endpoint, dialOpts)
    require.NoError(t, err)
    defer func() { _ = provider.Close() }()

    // Access the internal state to verify single connection behavior
    p := provider.(*grpcConnectionProvider)

    // Initially no connection
    p.mu.Lock()
    assert.Equal(t, (*grpc.ClientConn)(nil), p.conn, "Connection should be nil before access")
    p.mu.Unlock()

    // Access connection - should create one
    conn0 := provider.CurrentConn()
    assert.NotNil(t, conn0)

    p.mu.Lock()
    assert.NotNil(t, p.conn, "Connection should be created after CurrentConn()")
    firstConn := p.conn
    p.mu.Unlock()

    // Call CurrentConn again - should return same connection
    conn0Again := provider.CurrentConn()
    assert.Equal(t, conn0, conn0Again, "Should return same connection")

    // Switch to different host - old connection should be closed, new one created lazily
    require.NoError(t, provider.SetHost(1))

    p.mu.Lock()
    assert.Equal(t, (*grpc.ClientConn)(nil), p.conn, "Connection should be nil after SetHost (lazy)")
    p.mu.Unlock()

    // Get new connection
    conn1 := provider.CurrentConn()
    assert.NotNil(t, conn1)
    assert.NotEqual(t, firstConn, conn1, "Should be a different connection after switching hosts")
}

// testProvider creates a provider with n test servers and returns cleanup function.
func testProvider(t *testing.T, n int) (GrpcConnectionProvider, []string, func()) {
    var addrs []string
    var cleanups []func()

    for range n {
        lis, err := net.Listen("tcp", "127.0.0.1:0")
        require.NoError(t, err)
        server := grpc.NewServer()
        go func() { _ = server.Serve(lis) }()
        addrs = append(addrs, lis.Addr().String())
        cleanups = append(cleanups, server.Stop)
    }

    endpoint := strings.Join(addrs, ",")

    ctx := context.Background()
    dialOpts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
    provider, err := NewGrpcConnectionProvider(ctx, endpoint, dialOpts)
    require.NoError(t, err)

    cleanup := func() {
        _ = provider.Close()
        for _, c := range cleanups {
            c()
        }
    }
    return provider, addrs, cleanup
}

func TestGrpcConnectionProvider(t *testing.T) {
    provider, addrs, cleanup := testProvider(t, 3)
    defer cleanup()

    t.Run("initial state", func(t *testing.T) {
        assert.Equal(t, 3, len(provider.Hosts()))
        assert.Equal(t, addrs[0], provider.CurrentHost())
        assert.NotNil(t, provider.CurrentConn())
    })

    t.Run("SetHost", func(t *testing.T) {
        require.NoError(t, provider.SetHost(1))
        assert.Equal(t, addrs[1], provider.CurrentHost())
        assert.NotNil(t, provider.CurrentConn()) // New connection created lazily
        require.NoError(t, provider.SetHost(0))
        assert.Equal(t, addrs[0], provider.CurrentHost())
        require.ErrorContains(t, "invalid host index", provider.SetHost(-1))
        require.ErrorContains(t, "invalid host index", provider.SetHost(3))
    })

    t.Run("SetHost circular", func(t *testing.T) {
        // Test round-robin style switching using SetHost with manual index
        indices := []int{1, 2, 0, 1} // Simulate circular switching
        for i, idx := range indices {
            require.NoError(t, provider.SetHost(idx))
            assert.Equal(t, addrs[idx], provider.CurrentHost(), "iteration %d", i)
        }
    })

    t.Run("Hosts returns copy", func(t *testing.T) {
        hosts := provider.Hosts()
        original := hosts[0]
        hosts[0] = "modified"
        assert.Equal(t, original, provider.Hosts()[0])
    })
}

func TestGrpcConnectionProvider_Close(t *testing.T) {
    provider, _, cleanup := testProvider(t, 1)
    defer cleanup()

    assert.NotNil(t, provider.CurrentConn())
    require.NoError(t, provider.Close())
    assert.Equal(t, (*grpc.ClientConn)(nil), provider.CurrentConn())
    require.NoError(t, provider.Close()) // Double close is safe
}
20 api/grpc/mock_grpc_provider.go Normal file
@@ -0,0 +1,20 @@
package grpc

import "google.golang.org/grpc"

// MockGrpcProvider implements GrpcConnectionProvider for testing.
type MockGrpcProvider struct {
    MockConn  *grpc.ClientConn
    MockHosts []string
}

func (m *MockGrpcProvider) CurrentConn() *grpc.ClientConn { return m.MockConn }
func (m *MockGrpcProvider) CurrentHost() string {
    if len(m.MockHosts) > 0 {
        return m.MockHosts[0]
    }
    return ""
}
func (m *MockGrpcProvider) Hosts() []string   { return m.MockHosts }
func (m *MockGrpcProvider) SetHost(int) error { return nil }
func (m *MockGrpcProvider) Close() error      { return nil }
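As a small sketch (not part of the diff), a compile-time assertion like the following is one way a consumer could document that the mock stays in sync with the interface above; the external test package and import alias are illustrative.

package grpc_test

import (
    grpcapi "github.com/OffchainLabs/prysm/v7/api/grpc"
)

// Compile-time check that the mock satisfies the provider interface, so tests
// can inject it wherever a GrpcConnectionProvider is expected.
var _ grpcapi.GrpcConnectionProvider = (*grpcapi.MockGrpcProvider)(nil)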
33 api/rest/BUILD.bazel Normal file
@@ -0,0 +1,33 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = [
        "mock_rest_provider.go",
        "rest_connection_provider.go",
        "rest_handler.go",
    ],
    importpath = "github.com/OffchainLabs/prysm/v7/api/rest",
    visibility = ["//visibility:public"],
    deps = [
        "//api:go_default_library",
        "//api/apiutil:go_default_library",
        "//api/client:go_default_library",
        "//config/params:go_default_library",
        "//network/httputil:go_default_library",
        "//runtime/version:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@io_opentelemetry_go_contrib_instrumentation_net_http_otelhttp//:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["rest_connection_provider_test.go"],
    embed = [":go_default_library"],
    deps = [
        "//testing/assert:go_default_library",
        "//testing/require:go_default_library",
    ],
)
23 api/rest/mock_rest_provider.go Normal file
@@ -0,0 +1,23 @@
package rest

import "net/http"

// MockRestProvider implements RestConnectionProvider for testing.
type MockRestProvider struct {
    MockClient  *http.Client
    MockHandler RestHandler
    MockHosts   []string
    HostIndex   int
}

func (m *MockRestProvider) HttpClient() *http.Client { return m.MockClient }
func (m *MockRestProvider) RestHandler() RestHandler { return m.MockHandler }
func (m *MockRestProvider) CurrentHost() string {
    if len(m.MockHosts) > 0 {
        return m.MockHosts[m.HostIndex%len(m.MockHosts)]
    }
    return ""
}
func (m *MockRestProvider) Hosts() []string         { return m.MockHosts }
func (m *MockRestProvider) SetHost(index int) error { m.HostIndex = index; return nil }
func (m *MockRestProvider) NextHost()               { m.HostIndex = (m.HostIndex + 1) % len(m.MockHosts) }
177 api/rest/rest_connection_provider.go Normal file
@@ -0,0 +1,177 @@
package rest

import (
    "net/http"
    "strings"
    "sync/atomic"
    "time"

    "github.com/OffchainLabs/prysm/v7/api/client"
    pkgErrors "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
    "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

var log = logrus.WithField("prefix", "rest")

// RestConnectionProvider manages HTTP client configuration for REST API with failover support.
// It allows switching between different beacon node REST endpoints when the current one becomes unavailable.
type RestConnectionProvider interface {
    // HttpClient returns the configured HTTP client with headers, timeout, and optional tracing.
    HttpClient() *http.Client
    // RestHandler returns the REST handler for making API requests.
    RestHandler() RestHandler
    // CurrentHost returns the current REST API endpoint URL.
    CurrentHost() string
    // Hosts returns all configured REST API endpoint URLs.
    Hosts() []string
    // SetHost switches to the endpoint at the given index.
    SetHost(index int) error
    // NextHost switches to the next endpoint in round-robin fashion.
    NextHost()
}

// RestConnectionProviderOption is a functional option for configuring the REST connection provider.
type RestConnectionProviderOption func(*restConnectionProvider)

// WithHttpTimeout sets the HTTP client timeout.
func WithHttpTimeout(timeout time.Duration) RestConnectionProviderOption {
    return func(p *restConnectionProvider) {
        p.timeout = timeout
    }
}

// WithHttpHeaders sets custom HTTP headers to include in all requests.
func WithHttpHeaders(headers map[string][]string) RestConnectionProviderOption {
    return func(p *restConnectionProvider) {
        p.headers = headers
    }
}

// WithTracing enables OpenTelemetry tracing for HTTP requests.
func WithTracing() RestConnectionProviderOption {
    return func(p *restConnectionProvider) {
        p.enableTracing = true
    }
}

type restConnectionProvider struct {
    endpoints     []string
    httpClient    *http.Client
    restHandler   RestHandler
    currentIndex  atomic.Uint64
    timeout       time.Duration
    headers       map[string][]string
    enableTracing bool
}

// NewRestConnectionProvider creates a new REST connection provider that manages HTTP client configuration.
// The endpoint parameter can be a comma-separated list of URLs (e.g., "http://host1:3500,http://host2:3500").
func NewRestConnectionProvider(endpoint string, opts ...RestConnectionProviderOption) (RestConnectionProvider, error) {
    endpoints := parseEndpoints(endpoint)
    if len(endpoints) == 0 {
        return nil, pkgErrors.New("no REST API endpoints provided")
    }

    p := &restConnectionProvider{
        endpoints: endpoints,
    }

    for _, opt := range opts {
        opt(p)
    }

    // Build the HTTP transport chain
    var transport http.RoundTripper = http.DefaultTransport

    // Add custom headers if configured
    if len(p.headers) > 0 {
        transport = client.NewCustomHeadersTransport(transport, p.headers)
    }

    // Add tracing if enabled
    if p.enableTracing {
        transport = otelhttp.NewTransport(transport)
    }

    p.httpClient = &http.Client{
        Timeout:   p.timeout,
        Transport: transport,
    }

    // Create the REST handler with the HTTP client and initial host
    p.restHandler = newRestHandler(*p.httpClient, endpoints[0])

    log.WithFields(logrus.Fields{
        "endpoints": endpoints,
        "count":     len(endpoints),
    }).Info("Initialized REST connection provider with endpoints")

    return p, nil
}

// parseEndpoints splits a comma-separated endpoint string into individual endpoints.
func parseEndpoints(endpoint string) []string {
    if endpoint == "" {
        return nil
    }
    var endpoints []string
    for p := range strings.SplitSeq(endpoint, ",") {
        if p = strings.TrimSpace(p); p != "" {
            endpoints = append(endpoints, p)
        }
    }
    return endpoints
}

func (p *restConnectionProvider) HttpClient() *http.Client {
    return p.httpClient
}

func (p *restConnectionProvider) RestHandler() RestHandler {
    return p.restHandler
}

func (p *restConnectionProvider) CurrentHost() string {
    idx := p.currentIndex.Load() % uint64(len(p.endpoints))
    return p.endpoints[idx]
}

func (p *restConnectionProvider) Hosts() []string {
    // Return a copy to maintain immutability
    hosts := make([]string, len(p.endpoints))
    copy(hosts, p.endpoints)
    return hosts
}

func (p *restConnectionProvider) SetHost(index int) error {
    if index < 0 || index >= len(p.endpoints) {
        return pkgErrors.Errorf("invalid host index %d, must be between 0 and %d", index, len(p.endpoints)-1)
    }

    oldIdx := p.currentIndex.Load()
    p.currentIndex.Store(uint64(index))

    // Update the rest handler's host
    p.restHandler.SetHost(p.endpoints[index])

    log.WithFields(logrus.Fields{
        "previousHost": p.endpoints[oldIdx%uint64(len(p.endpoints))],
        "newHost":      p.endpoints[index],
    }).Debug("Trying REST endpoint")
    return nil
}

func (p *restConnectionProvider) NextHost() {
    oldIdx := p.currentIndex.Load()
    newIdx := (oldIdx + 1) % uint64(len(p.endpoints))
    p.currentIndex.Store(newIdx)

    // Update the rest handler's host
    p.restHandler.SetHost(p.endpoints[newIdx])

    log.WithFields(logrus.Fields{
        "previousHost": p.endpoints[oldIdx],
        "newHost":      p.endpoints[newIdx],
    }).Debug("Switched to next REST endpoint")
}
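A minimal construction sketch for the functional options above (not part of the diff; the endpoint URLs, header value, and timeout are placeholders):

package main

import (
    "time"

    "github.com/OffchainLabs/prysm/v7/api/rest"
)

// buildRestProvider shows how the options compose: comma-separated endpoints
// enable round-robin failover via NextHost, and each option tweaks the shared
// HTTP client built inside NewRestConnectionProvider.
func buildRestProvider() (rest.RestConnectionProvider, error) {
    return rest.NewRestConnectionProvider(
        "http://beacon-1:3500,http://beacon-2:3500",
        rest.WithHttpTimeout(10*time.Second),
        rest.WithHttpHeaders(map[string][]string{"Authorization": {"Bearer <token>"}}),
        rest.WithTracing(),
    )
}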
85 api/rest/rest_connection_provider_test.go Normal file
@@ -0,0 +1,85 @@
package rest

import (
    "testing"

    "github.com/OffchainLabs/prysm/v7/testing/assert"
    "github.com/OffchainLabs/prysm/v7/testing/require"
)

func TestParseEndpoints(t *testing.T) {
    tests := []struct {
        name     string
        input    string
        expected []string
    }{
        {"single endpoint", "http://localhost:3500", []string{"http://localhost:3500"}},
        {"multiple endpoints", "http://host1:3500,http://host2:3500,http://host3:3500", []string{"http://host1:3500", "http://host2:3500", "http://host3:3500"}},
        {"endpoints with spaces", "http://host1:3500, http://host2:3500 , http://host3:3500", []string{"http://host1:3500", "http://host2:3500", "http://host3:3500"}},
        {"empty string", "", nil},
        {"only commas", ",,,", nil},
        {"trailing comma", "http://host1:3500,http://host2:3500,", []string{"http://host1:3500", "http://host2:3500"}},
        {"leading comma", ",http://host1:3500,http://host2:3500", []string{"http://host1:3500", "http://host2:3500"}},
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            assert.DeepEqual(t, tt.expected, parseEndpoints(tt.input))
        })
    }
}

func TestNewRestConnectionProvider_Errors(t *testing.T) {
    t.Run("no endpoints", func(t *testing.T) {
        _, err := NewRestConnectionProvider("")
        require.ErrorContains(t, "no REST API endpoints provided", err)
    })
}

func TestRestConnectionProvider(t *testing.T) {
    provider, err := NewRestConnectionProvider("http://host1:3500,http://host2:3500,http://host3:3500")
    require.NoError(t, err)

    t.Run("initial state", func(t *testing.T) {
        assert.Equal(t, 3, len(provider.Hosts()))
        assert.Equal(t, "http://host1:3500", provider.CurrentHost())
        assert.NotNil(t, provider.HttpClient())
    })

    t.Run("SetHost", func(t *testing.T) {
        require.NoError(t, provider.SetHost(1))
        assert.Equal(t, "http://host2:3500", provider.CurrentHost())
        require.NoError(t, provider.SetHost(0))
        assert.Equal(t, "http://host1:3500", provider.CurrentHost())
        require.ErrorContains(t, "invalid host index", provider.SetHost(-1))
        require.ErrorContains(t, "invalid host index", provider.SetHost(3))
    })

    t.Run("NextHost circular", func(t *testing.T) {
        require.NoError(t, provider.SetHost(0)) // Reset to start
        expected := []string{"http://host2:3500", "http://host3:3500", "http://host1:3500", "http://host2:3500"}
        for i, exp := range expected {
            provider.NextHost()
            assert.Equal(t, exp, provider.CurrentHost(), "iteration %d", i)
        }
    })

    t.Run("Hosts returns copy", func(t *testing.T) {
        hosts := provider.Hosts()
        original := hosts[0]
        hosts[0] = "modified"
        assert.Equal(t, original, provider.Hosts()[0])
    })
}

func TestRestConnectionProvider_WithOptions(t *testing.T) {
    headers := map[string][]string{"Authorization": {"Bearer token"}}
    provider, err := NewRestConnectionProvider(
        "http://localhost:3500",
        WithHttpHeaders(headers),
        WithHttpTimeout(30000000000), // 30 seconds in nanoseconds
        WithTracing(),
    )
    require.NoError(t, err)
    assert.NotNil(t, provider.HttpClient())
    assert.Equal(t, "http://localhost:3500", provider.CurrentHost())
}
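The NextHost test above exercises round-robin switching; a caller-side retry loop might look like the following sketch. It is illustrative only and not part of the diff: the checkHealth parameter is a placeholder for a real probe against the current handler.

package main

import (
    "context"
    "errors"

    "github.com/OffchainLabs/prysm/v7/api/rest"
)

// firstHealthyHost probes each configured endpoint once, advancing with NextHost,
// and returns the first host that passes the supplied health check.
func firstHealthyHost(ctx context.Context, p rest.RestConnectionProvider, checkHealth func(context.Context, rest.RestHandler) error) (string, error) {
    lastErr := errors.New("no REST endpoints configured")
    for range p.Hosts() {
        if lastErr = checkHealth(ctx, p.RestHandler()); lastErr == nil {
            return p.CurrentHost(), nil
        }
        p.NextHost() // round-robin to the next endpoint, mirroring the test above
    }
    return "", lastErr
}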
@@ -1,4 +1,4 @@
package beacon_api
package rest

import (
    "bytes"
@@ -21,6 +21,7 @@ import (

type reqOption func(*http.Request)

// RestHandler defines the interface for making REST API requests.
type RestHandler interface {
    Get(ctx context.Context, endpoint string, resp any) error
    GetStatusCode(ctx context.Context, endpoint string) (int, error)
@@ -32,26 +33,31 @@ type RestHandler interface {
    SetHost(host string)
}

type BeaconApiRestHandler struct {
type restHandler struct {
    client       http.Client
    host         string
    reqOverrides []reqOption
}

// NewBeaconApiRestHandler returns a RestHandler
func NewBeaconApiRestHandler(client http.Client, host string) RestHandler {
    brh := &BeaconApiRestHandler{
// newRestHandler returns a RestHandler (internal use)
func newRestHandler(client http.Client, host string) RestHandler {
    return NewRestHandler(client, host)
}

// NewRestHandler returns a RestHandler
func NewRestHandler(client http.Client, host string) RestHandler {
    rh := &restHandler{
        client: client,
        host:   host,
    }
    brh.appendAcceptOverride()
    return brh
    rh.appendAcceptOverride()
    return rh
}

// appendAcceptOverride enables the Accept header to be customized at runtime via an environment variable.
// This is specified as an env var because it is a niche option that prysm may use for performance testing or debugging
// bug which users are unlikely to need. Using an env var keeps the set of user-facing flags cleaner.
func (c *BeaconApiRestHandler) appendAcceptOverride() {
func (c *restHandler) appendAcceptOverride() {
    if accept := os.Getenv(params.EnvNameOverrideAccept); accept != "" {
        c.reqOverrides = append(c.reqOverrides, func(req *http.Request) {
            req.Header.Set("Accept", accept)
@@ -60,18 +66,18 @@ func (c *BeaconApiRestHandler) appendAcceptOverride() {
}

// HttpClient returns the underlying HTTP client of the handler
func (c *BeaconApiRestHandler) HttpClient() *http.Client {
func (c *restHandler) HttpClient() *http.Client {
    return &c.client
}

// Host returns the underlying HTTP host
func (c *BeaconApiRestHandler) Host() string {
func (c *restHandler) Host() string {
    return c.host
}

// Get sends a GET request and decodes the response body as a JSON object into the passed in object.
// If an HTTP error is returned, the body is decoded as a DefaultJsonError JSON object and returned as the first return value.
func (c *BeaconApiRestHandler) Get(ctx context.Context, endpoint string, resp any) error {
func (c *restHandler) Get(ctx context.Context, endpoint string, resp any) error {
    url := c.host + endpoint
    req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
    if err != nil {
@@ -94,7 +100,7 @@ func (c *BeaconApiRestHandler) Get(ctx context.Context, endpoint string, resp an
// GetStatusCode sends a GET request and returns only the HTTP status code.
// This is useful for endpoints like /eth/v1/node/health that communicate status via HTTP codes
// (200 = ready, 206 = syncing, 503 = unavailable) rather than response bodies.
func (c *BeaconApiRestHandler) GetStatusCode(ctx context.Context, endpoint string) (int, error) {
func (c *restHandler) GetStatusCode(ctx context.Context, endpoint string) (int, error) {
    url := c.host + endpoint
    req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
    if err != nil {
@@ -113,7 +119,7 @@ func (c *BeaconApiRestHandler) GetStatusCode(ctx context.Context, endpoint strin
    return httpResp.StatusCode, nil
}

func (c *BeaconApiRestHandler) GetSSZ(ctx context.Context, endpoint string) ([]byte, http.Header, error) {
func (c *restHandler) GetSSZ(ctx context.Context, endpoint string) ([]byte, http.Header, error) {
    url := c.host + endpoint
    req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
    if err != nil {
@@ -168,7 +174,7 @@ func (c *BeaconApiRestHandler) GetSSZ(ctx context.Context, endpoint string) ([]b

// Post sends a POST request and decodes the response body as a JSON object into the passed in object.
// If an HTTP error is returned, the body is decoded as a DefaultJsonError JSON object and returned as the first return value.
func (c *BeaconApiRestHandler) Post(
func (c *restHandler) Post(
    ctx context.Context,
    apiEndpoint string,
    headers map[string]string,
@@ -204,7 +210,7 @@ func (c *BeaconApiRestHandler) Post(
}

// PostSSZ sends a POST request and prefers an SSZ (application/octet-stream) response body.
func (c *BeaconApiRestHandler) PostSSZ(
func (c *restHandler) PostSSZ(
    ctx context.Context,
    apiEndpoint string,
    headers map[string]string,
@@ -305,6 +311,6 @@ func decodeResp(httpResp *http.Response, resp any) error {
    return nil
}

func (c *BeaconApiRestHandler) SetHost(host string) {
func (c *restHandler) SetHost(host string) {
    c.host = host
}
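The GetStatusCode comment above spells out the health-endpoint semantics; the sketch below shows one way a caller might interpret them. It is illustrative only, not part of the diff, and assumes the RestHandler interface exported by the api/rest package after this rename.

package main

import (
    "context"
    "net/http"

    "github.com/OffchainLabs/prysm/v7/api/rest"
)

// isNodeReady classifies beacon node health from the status code of
// /eth/v1/node/health (200 = ready, 206 = syncing, 503 = unavailable),
// following the GetStatusCode comment above.
func isNodeReady(ctx context.Context, h rest.RestHandler) (bool, error) {
    code, err := h.GetStatusCode(ctx, "/eth/v1/node/health")
    if err != nil {
        return false, err
    }
    return code == http.StatusOK, nil
}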
@@ -3,6 +3,7 @@ package altair
import (
    "context"

    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/state"
    "github.com/OffchainLabs/prysm/v7/config/params"
@@ -23,7 +24,7 @@ func ProcessPreGenesisDeposits(
    if err != nil {
        return nil, errors.Wrap(err, "could not process deposit")
    }
    beaconState, err = helpers.ActivateValidatorWithEffectiveBalance(beaconState, deposits)
    beaconState, err = blocks.ActivateValidatorWithEffectiveBalance(beaconState, deposits)
    if err != nil {
        return nil, err
    }
@@ -36,7 +37,7 @@ func ProcessDeposits(
    beaconState state.BeaconState,
    deposits []*ethpb.Deposit,
) (state.BeaconState, error) {
    allSignaturesVerified, err := helpers.BatchVerifyDepositsSignatures(ctx, deposits)
    allSignaturesVerified, err := blocks.BatchVerifyDepositsSignatures(ctx, deposits)
    if err != nil {
        return nil, err
    }
@@ -81,7 +82,7 @@ func ProcessDeposits(
//        signature=deposit.data.signature,
//    )
func ProcessDeposit(beaconState state.BeaconState, deposit *ethpb.Deposit, allSignaturesVerified bool) (state.BeaconState, error) {
    if err := helpers.VerifyDeposit(beaconState, deposit); err != nil {
    if err := blocks.VerifyDeposit(beaconState, deposit); err != nil {
        if deposit == nil || deposit.Data == nil {
            return nil, err
        }
@@ -121,7 +122,7 @@ func ApplyDeposit(beaconState state.BeaconState, data *ethpb.Deposit_Data, allSi
    index, ok := beaconState.ValidatorIndexByPubkey(bytesutil.ToBytes48(pubKey))
    if !ok {
        if !allSignaturesVerified {
            valid, err := helpers.IsValidDepositSignature(data)
            valid, err := blocks.IsValidDepositSignature(data)
            if err != nil {
                return nil, err
            }
@@ -5,6 +5,7 @@ go_library(
    srcs = [
        "attestation.go",
        "attester_slashing.go",
        "deposit.go",
        "error.go",
        "eth1_data.go",
        "exit.go",
@@ -20,7 +21,6 @@ go_library(
    importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks",
    visibility = ["//visibility:public"],
    deps = [
        "//beacon-chain/core/gloas:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/signing:go_default_library",
        "//beacon-chain/core/time:go_default_library",
@@ -33,6 +33,8 @@ go_library(
        "//consensus-types/interfaces:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//container/slice:go_default_library",
        "//container/trie:go_default_library",
        "//contracts/deposit:go_default_library",
        "//crypto/bls:go_default_library",
        "//crypto/hash:go_default_library",
        "//encoding/bytesutil:go_default_library",
@@ -59,6 +61,7 @@ go_test(
        "attester_slashing_test.go",
        "block_operations_fuzz_test.go",
        "block_regression_test.go",
        "deposit_test.go",
        "eth1_data_test.go",
        "exit_test.go",
        "exports_test.go",
@@ -87,6 +90,7 @@ go_test(
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/interfaces:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//container/trie:go_default_library",
        "//crypto/bls:go_default_library",
        "//crypto/bls/common:go_default_library",
        "//crypto/hash:go_default_library",
@@ -3,7 +3,6 @@ package blocks
import (
    "testing"

    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
    v "github.com/OffchainLabs/prysm/v7/beacon-chain/core/validators"
    state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
    fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
@@ -319,7 +318,7 @@ func TestFuzzverifyDeposit_10000(t *testing.T) {
        fuzzer.Fuzz(deposit)
        s, err := state_native.InitializeFromProtoUnsafePhase0(state)
        require.NoError(t, err)
        err = helpers.VerifyDeposit(s, deposit)
        err = VerifyDeposit(s, deposit)
        _ = err
        fuzz.FreeMemory(i)
    }
@@ -1,4 +1,4 @@
package helpers
package blocks

import (
    "context"
@@ -1,9 +1,9 @@
package helpers_test
package blocks_test

import (
    "testing"

    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
    state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
    fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
@@ -45,7 +45,7 @@ func TestBatchVerifyDepositsSignatures_Ok(t *testing.T) {

    deposit.Proof = proof
    require.NoError(t, err)
    verified, err := helpers.BatchVerifyDepositsSignatures(t.Context(), []*ethpb.Deposit{deposit})
    verified, err := blocks.BatchVerifyDepositsSignatures(t.Context(), []*ethpb.Deposit{deposit})
    require.NoError(t, err)
    require.Equal(t, true, verified)
}
@@ -68,7 +68,7 @@ func TestBatchVerifyDepositsSignatures_InvalidSignature(t *testing.T) {

    deposit.Proof = proof
    require.NoError(t, err)
    verified, err := helpers.BatchVerifyDepositsSignatures(t.Context(), []*ethpb.Deposit{deposit})
    verified, err := blocks.BatchVerifyDepositsSignatures(t.Context(), []*ethpb.Deposit{deposit})
    require.NoError(t, err)
    require.Equal(t, false, verified)
}
@@ -99,7 +99,7 @@ func TestVerifyDeposit_MerkleBranchFailsVerification(t *testing.T) {
    })
    require.NoError(t, err)
    want := "deposit root did not verify"
    err = helpers.VerifyDeposit(beaconState, deposit)
    err = blocks.VerifyDeposit(beaconState, deposit)
    require.ErrorContains(t, want, err)
}

@@ -123,7 +123,7 @@ func TestIsValidDepositSignature_Ok(t *testing.T) {
    require.NoError(t, err)
    sig := sk.Sign(sr[:])
    depositData.Signature = sig.Marshal()
    valid, err := helpers.IsValidDepositSignature(depositData)
    valid, err := blocks.IsValidDepositSignature(depositData)
    require.NoError(t, err)
    require.Equal(t, true, valid)
}
@@ -163,7 +163,7 @@ func TestBatchVerifyPendingDepositsSignatures_Ok(t *testing.T) {
    sig2 := sk2.Sign(sr2[:])
    pendingDeposit2.Signature = sig2.Marshal()

    verified, err := helpers.BatchVerifyPendingDepositsSignatures(t.Context(), []*ethpb.PendingDeposit{pendingDeposit, pendingDeposit2})
    verified, err := blocks.BatchVerifyPendingDepositsSignatures(t.Context(), []*ethpb.PendingDeposit{pendingDeposit, pendingDeposit2})
    require.NoError(t, err)
    require.Equal(t, true, verified)
}
@@ -174,7 +174,7 @@ func TestBatchVerifyPendingDepositsSignatures_InvalidSignature(t *testing.T) {
        WithdrawalCredentials: make([]byte, 32),
        Signature:             make([]byte, 96),
    }
    verified, err := helpers.BatchVerifyPendingDepositsSignatures(t.Context(), []*ethpb.PendingDeposit{pendingDeposit})
    verified, err := blocks.BatchVerifyPendingDepositsSignatures(t.Context(), []*ethpb.PendingDeposit{pendingDeposit})
    require.NoError(t, err)
    require.Equal(t, false, verified)
}
@@ -4,7 +4,6 @@ import (
    "context"
    "fmt"

    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
@@ -12,7 +11,6 @@ import (
    "github.com/OffchainLabs/prysm/v7/beacon-chain/state"
    "github.com/OffchainLabs/prysm/v7/config/params"
    ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v7/runtime/version"
    "github.com/OffchainLabs/prysm/v7/time/slots"
    "github.com/pkg/errors"
    "google.golang.org/protobuf/proto"
@@ -128,16 +126,7 @@ func processProposerSlashing(
    if exitInfo == nil {
        return nil, errors.New("exit info is required to process proposer slashing")
    }

    var err error
    // [New in Gloas:EIP7732]: remove the BuilderPendingPayment corresponding to the slashed proposer within 2 epoch window
    if beaconState.Version() >= version.Gloas {
        err = gloas.RemoveBuilderPendingPayment(beaconState, slashing.Header_1.Header)
        if err != nil {
            return nil, err
        }
    }

    beaconState, err = validators.SlashValidator(ctx, beaconState, slashing.Header_1.Header.ProposerIndex, exitInfo)
    if err != nil {
        return nil, errors.Wrapf(err, "could not slash proposer index %d", slashing.Header_1.Header.ProposerIndex)
@@ -14,11 +14,13 @@ go_library(
        "transition.go",
        "upgrade.go",
        "validator.go",
        "withdrawals.go",
    ],
    importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/core/electra",
    visibility = ["//visibility:public"],
    deps = [
        "//beacon-chain/core/altair:go_default_library",
        "//beacon-chain/core/blocks:go_default_library",
        "//beacon-chain/core/epoch:go_default_library",
        "//beacon-chain/core/epoch/precompute:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
@@ -41,6 +43,8 @@ go_library(
        "//proto/prysm/v1alpha1/attestation:go_default_library",
        "//runtime/version:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_ethereum_go_ethereum//common/math:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
    ],
@@ -60,12 +64,12 @@ go_test(
        "transition_test.go",
        "upgrade_test.go",
        "validator_test.go",
        "withdrawals_test.go",
    ],
    data = glob(["testdata/**"]),
    embed = [":go_default_library"],
    deps = [
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/requests:go_default_library",
        "//beacon-chain/core/signing:go_default_library",
        "//beacon-chain/core/time:go_default_library",
        "//beacon-chain/state:go_default_library",
@@ -79,12 +83,16 @@ go_test(
        "//encoding/bytesutil:go_default_library",
        "//proto/engine/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//testing/assert:go_default_library",
        "//testing/fuzz:go_default_library",
        "//testing/require:go_default_library",
        "//testing/util:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_ethereum_go_ethereum//common:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_google_gofuzz//:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@com_github_sirupsen_logrus//hooks/test:go_default_library",
    ],
)
@@ -3,14 +3,19 @@ package electra
import (
    "bytes"
    "context"
    "fmt"

    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/state"
    state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
    "github.com/OffchainLabs/prysm/v7/config/params"
    "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
    "github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
    enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
    eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v7/time/slots"
    "github.com/ethereum/go-ethereum/common/math"
    "github.com/pkg/errors"
)

@@ -90,6 +95,217 @@ func ProcessPendingConsolidations(ctx context.Context, st state.BeaconState) err
    return nil
}

// ProcessConsolidationRequests implements the spec definition below. This method makes mutating
// calls to the beacon state.
//
// def process_consolidation_request(
//     state: BeaconState,
//     consolidation_request: ConsolidationRequest
// ) -> None:
//     if is_valid_switch_to_compounding_request(state, consolidation_request):
//         validator_pubkeys = [v.pubkey for v in state.validators]
//         request_source_pubkey = consolidation_request.source_pubkey
//         source_index = ValidatorIndex(validator_pubkeys.index(request_source_pubkey))
//         switch_to_compounding_validator(state, source_index)
//         return
//
//     # Verify that source != target, so a consolidation cannot be used as an exit.
//     if consolidation_request.source_pubkey == consolidation_request.target_pubkey:
//         return
//     # If the pending consolidations queue is full, consolidation requests are ignored
//     if len(state.pending_consolidations) == PENDING_CONSOLIDATIONS_LIMIT:
//         return
//     # If there is too little available consolidation churn limit, consolidation requests are ignored
//     if get_consolidation_churn_limit(state) <= MIN_ACTIVATION_BALANCE:
//         return
//
//     validator_pubkeys = [v.pubkey for v in state.validators]
//     # Verify pubkeys exists
//     request_source_pubkey = consolidation_request.source_pubkey
//     request_target_pubkey = consolidation_request.target_pubkey
//     if request_source_pubkey not in validator_pubkeys:
//         return
//     if request_target_pubkey not in validator_pubkeys:
//         return
//     source_index = ValidatorIndex(validator_pubkeys.index(request_source_pubkey))
//     target_index = ValidatorIndex(validator_pubkeys.index(request_target_pubkey))
//     source_validator = state.validators[source_index]
//     target_validator = state.validators[target_index]
//
//     # Verify source withdrawal credentials
//     has_correct_credential = has_execution_withdrawal_credential(source_validator)
//     is_correct_source_address = (
//         source_validator.withdrawal_credentials[12:] == consolidation_request.source_address
//     )
//     if not (has_correct_credential and is_correct_source_address):
//         return
//
//     # Verify that target has compounding withdrawal credentials
//     if not has_compounding_withdrawal_credential(target_validator):
//         return
//
//     # Verify the source and the target are active
//     current_epoch = get_current_epoch(state)
//     if not is_active_validator(source_validator, current_epoch):
//         return
//     if not is_active_validator(target_validator, current_epoch):
//         return
//     # Verify exits for source and target have not been initiated
//     if source_validator.exit_epoch != FAR_FUTURE_EPOCH:
//         return
//     if target_validator.exit_epoch != FAR_FUTURE_EPOCH:
//         return
//
//     # Verify the source has been active long enough
//     if current_epoch < source_validator.activation_epoch + SHARD_COMMITTEE_PERIOD:
//         return
//
//     # Verify the source has no pending withdrawals in the queue
//     if get_pending_balance_to_withdraw(state, source_index) > 0:
//         return
//     # Initiate source validator exit and append pending consolidation
//     source_validator.exit_epoch = compute_consolidation_epoch_and_update_churn(
//         state, source_validator.effective_balance
//     )
//     source_validator.withdrawable_epoch = Epoch(
//         source_validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY
//     )
//     state.pending_consolidations.append(PendingConsolidation(
//         source_index=source_index,
//         target_index=target_index
//     ))
func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, reqs []*enginev1.ConsolidationRequest) error {
    if len(reqs) == 0 || st == nil {
        return nil
    }
    curEpoch := slots.ToEpoch(st.Slot())
    ffe := params.BeaconConfig().FarFutureEpoch
    minValWithdrawDelay := params.BeaconConfig().MinValidatorWithdrawabilityDelay
    pcLimit := params.BeaconConfig().PendingConsolidationsLimit

    for _, cr := range reqs {
        if cr == nil {
            return errors.New("nil consolidation request")
        }
        if ctx.Err() != nil {
            return fmt.Errorf("cannot process consolidation requests: %w", ctx.Err())
        }
        if IsValidSwitchToCompoundingRequest(st, cr) {
            srcIdx, ok := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(cr.SourcePubkey))
            if !ok {
                log.Error("Failed to find source validator index")
                continue
            }
            if err := SwitchToCompoundingValidator(st, srcIdx); err != nil {
                log.WithError(err).Error("Failed to switch to compounding validator")
            }
            continue
        }
        sourcePubkey := bytesutil.ToBytes48(cr.SourcePubkey)
        targetPubkey := bytesutil.ToBytes48(cr.TargetPubkey)
        if sourcePubkey == targetPubkey {
            continue
        }

        if npc, err := st.NumPendingConsolidations(); err != nil {
            return fmt.Errorf("failed to fetch number of pending consolidations: %w", err) // This should never happen.
        } else if npc >= pcLimit {
            continue
        }

        activeBal, err := helpers.TotalActiveBalance(st)
        if err != nil {
            return err
        }
        churnLimit := helpers.ConsolidationChurnLimit(primitives.Gwei(activeBal))
        if churnLimit <= primitives.Gwei(params.BeaconConfig().MinActivationBalance) {
            continue
        }

        srcIdx, ok := st.ValidatorIndexByPubkey(sourcePubkey)
        if !ok {
            continue
        }
        tgtIdx, ok := st.ValidatorIndexByPubkey(targetPubkey)
        if !ok {
            continue
        }

        srcV, err := st.ValidatorAtIndex(srcIdx)
        if err != nil {
            return fmt.Errorf("failed to fetch source validator: %w", err) // This should never happen.
        }

        roSrcV, err := state_native.NewValidator(srcV)
        if err != nil {
            return err
        }

        tgtV, err := st.ValidatorAtIndexReadOnly(tgtIdx)
        if err != nil {
            return fmt.Errorf("failed to fetch target validator: %w", err) // This should never happen.
        }

        // Verify source withdrawal credentials
        if !roSrcV.HasExecutionWithdrawalCredentials() {
            continue
        }
        // Confirm source_validator.withdrawal_credentials[12:] == consolidation_request.source_address
        if len(srcV.WithdrawalCredentials) != 32 || len(cr.SourceAddress) != 20 || !bytes.HasSuffix(srcV.WithdrawalCredentials, cr.SourceAddress) {
            continue
        }

        // Target validator must have their withdrawal credentials set appropriately.
        if !tgtV.HasCompoundingWithdrawalCredentials() {
            continue
        }

        // Both validators must be active.
        if !helpers.IsActiveValidator(srcV, curEpoch) || !helpers.IsActiveValidatorUsingTrie(tgtV, curEpoch) {
            continue
        }
        // Neither validator is exiting.
        if srcV.ExitEpoch != ffe || tgtV.ExitEpoch() != ffe {
            continue
        }

        e, overflow := math.SafeAdd(uint64(srcV.ActivationEpoch), uint64(params.BeaconConfig().ShardCommitteePeriod))
        if overflow {
            log.Error("Overflow when adding activation epoch and shard committee period")
            continue
        }
        if uint64(curEpoch) < e {
            continue
        }
        hasBal, err := st.HasPendingBalanceToWithdraw(srcIdx)
        if err != nil {
            log.WithError(err).Error("Failed to fetch pending balance to withdraw")
            continue
        }
        if hasBal {
            continue
        }

        // Initiate the exit of the source validator.
        exitEpoch, err := ComputeConsolidationEpochAndUpdateChurn(ctx, st, primitives.Gwei(srcV.EffectiveBalance))
        if err != nil {
            log.WithError(err).Error("Failed to compute consolidation epoch")
            continue
        }
        srcV.ExitEpoch = exitEpoch
        srcV.WithdrawableEpoch = exitEpoch + minValWithdrawDelay
        if err := st.UpdateValidatorAtIndex(srcIdx, srcV); err != nil {
            return fmt.Errorf("failed to update validator: %w", err) // This should never happen.
        }

        if err := st.AppendPendingConsolidation(&eth.PendingConsolidation{SourceIndex: srcIdx, TargetIndex: tgtIdx}); err != nil {
            return fmt.Errorf("failed to append pending consolidation: %w", err) // This should never happen.
        }
    }

    return nil
}

// IsValidSwitchToCompoundingRequest returns true if the given consolidation request is valid for switching to compounding.
//
// Spec code:
@@ -5,7 +5,6 @@ import (
    "testing"

    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/electra"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/requests"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/state"
    state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
    "github.com/OffchainLabs/prysm/v7/config/params"
@@ -458,7 +457,7 @@ func TestProcessConsolidationRequests(t *testing.T) {

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            err := requests.ProcessConsolidationRequests(context.TODO(), tt.state, tt.reqs)
            err := electra.ProcessConsolidationRequests(context.TODO(), tt.state, tt.reqs)
            if (err != nil) != tt.wantErr {
                t.Errorf("ProcessWithdrawalRequests() error = %v, wantErr %v", err, tt.wantErr)
                return
@@ -3,6 +3,7 @@ package electra
import (
    "context"

    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/state"
@@ -36,7 +37,7 @@ func ProcessDeposits(
    defer span.End()
    // Attempt to verify all deposit signatures at once, if this fails then fall back to processing
    // individual deposits with signature verification enabled.
    allSignaturesVerified, err := helpers.BatchVerifyDepositsSignatures(ctx, deposits)
    allSignaturesVerified, err := blocks.BatchVerifyDepositsSignatures(ctx, deposits)
    if err != nil {
        return nil, errors.Wrap(err, "could not verify deposit signatures in batch")
    }
@@ -81,7 +82,7 @@ func ProcessDeposits(
//        signature=deposit.data.signature,
//    )
func ProcessDeposit(beaconState state.BeaconState, deposit *ethpb.Deposit, allSignaturesVerified bool) (state.BeaconState, error) {
    if err := helpers.VerifyDeposit(beaconState, deposit); err != nil {
    if err := blocks.VerifyDeposit(beaconState, deposit); err != nil {
        if deposit == nil || deposit.Data == nil {
            return nil, err
        }
@@ -376,7 +377,7 @@ func batchProcessNewPendingDeposits(ctx context.Context, state state.BeaconState
        return nil
    }

    allSignaturesVerified, err := helpers.BatchVerifyPendingDepositsSignatures(ctx, pendingDeposits)
    allSignaturesVerified, err := blocks.BatchVerifyPendingDepositsSignatures(ctx, pendingDeposits)
    if err != nil {
        return errors.Wrap(err, "batch signature verification failed")
    }
@@ -385,7 +386,7 @@ func batchProcessNewPendingDeposits(ctx context.Context, state state.BeaconState
        validSig := allSignaturesVerified

        if !allSignaturesVerified {
            validSig, err = helpers.IsValidDepositSignature(&ethpb.Deposit_Data{
            validSig, err = blocks.IsValidDepositSignature(&ethpb.Deposit_Data{
                PublicKey:             bytesutil.SafeCopyBytes(pd.PublicKey),
                WithdrawalCredentials: bytesutil.SafeCopyBytes(pd.WithdrawalCredentials),
                Amount:                pd.Amount,
@@ -440,7 +441,7 @@ func ApplyPendingDeposit(ctx context.Context, st state.BeaconState, deposit *eth
    defer span.End()
    index, ok := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(deposit.PublicKey))
    if !ok {
        verified, err := helpers.IsValidDepositSignature(&ethpb.Deposit_Data{
        verified, err := blocks.IsValidDepositSignature(&ethpb.Deposit_Data{
            PublicKey:             bytesutil.SafeCopyBytes(deposit.PublicKey),
            WithdrawalCredentials: bytesutil.SafeCopyBytes(deposit.WithdrawalCredentials),
            Amount:                deposit.Amount,
@@ -1,4 +1,4 @@
|
||||
package requests
|
||||
package electra
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
@@ -88,7 +88,7 @@ import (
|
||||
// withdrawable_epoch=withdrawable_epoch,
|
||||
// ))
|
||||
func ProcessWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []*enginev1.WithdrawalRequest) (state.BeaconState, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "requests.ProcessWithdrawalRequests")
|
||||
ctx, span := trace.StartSpan(ctx, "electra.ProcessWithdrawalRequests")
|
||||
defer span.End()
|
||||
currentEpoch := slots.ToEpoch(st.Slot())
|
||||
if len(wrs) == 0 {
|
||||
@@ -1,9 +1,9 @@
|
||||
package requests_test
|
||||
package electra_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/requests"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/electra"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
@@ -289,7 +289,7 @@ func TestProcessWithdrawRequests(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
|
||||
got, err := requests.ProcessWithdrawalRequests(t.Context(), tt.args.st, tt.args.wrs)
|
||||
got, err := electra.ProcessWithdrawalRequests(t.Context(), tt.args.st, tt.args.wrs)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("ProcessWithdrawalRequests() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
@@ -2,22 +2,16 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"bid.go",
|
||||
"pending_payment.go",
|
||||
"proposer_slashing.go",
|
||||
],
|
||||
srcs = ["bid.go"],
|
||||
importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//crypto/bls:go_default_library",
|
||||
"//crypto/bls/common:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
@@ -28,16 +22,10 @@ go_library(
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"bid_test.go",
|
||||
"pending_payment_test.go",
|
||||
"proposer_slashing_test.go",
|
||||
],
|
||||
srcs = ["bid_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
|
||||
@@ -1,76 +0,0 @@
|
||||
package gloas
|
||||
|
||||
import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// ProcessBuilderPendingPayments processes the builder pending payments from the previous epoch.
|
||||
// Spec v1.7.0-alpha.0 (pseudocode):
|
||||
// def process_builder_pending_payments(state: BeaconState) -> None:
|
||||
//
|
||||
// quorum = get_builder_payment_quorum_threshold(state)
|
||||
// for payment in state.builder_pending_payments[:SLOTS_PER_EPOCH]:
|
||||
// if payment.weight >= quorum:
|
||||
// state.builder_pending_withdrawals.append(payment.withdrawal)
|
||||
//
|
||||
// old_payments = state.builder_pending_payments[SLOTS_PER_EPOCH:]
|
||||
// new_payments = [BuilderPendingPayment() for _ in range(SLOTS_PER_EPOCH)]
|
||||
// state.builder_pending_payments = old_payments + new_payments
|
||||
func ProcessBuilderPendingPayments(state state.BeaconState) error {
|
||||
quorum, err := builderQuorumThreshold(state)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not compute builder payment quorum threshold")
|
||||
}
|
||||
|
||||
payments, err := state.BuilderPendingPayments()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get builder pending payments")
|
||||
}
|
||||
|
||||
slotsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch)
|
||||
var withdrawals []*ethpb.BuilderPendingWithdrawal
|
||||
for _, payment := range payments[:slotsPerEpoch] {
|
||||
if quorum > payment.Weight {
|
||||
continue
|
||||
}
|
||||
withdrawals = append(withdrawals, payment.Withdrawal)
|
||||
}
|
||||
|
||||
if err := state.AppendBuilderPendingWithdrawals(withdrawals); err != nil {
|
||||
return errors.Wrap(err, "could not append builder pending withdrawals")
|
||||
}
|
||||
|
||||
if err := state.RotateBuilderPendingPayments(); err != nil {
|
||||
return errors.Wrap(err, "could not rotate builder pending payments")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// builderQuorumThreshold calculates the quorum threshold for builder payments.
|
||||
// Spec v1.7.0-alpha.0 (pseudocode):
|
||||
// def get_builder_payment_quorum_threshold(state: BeaconState) -> uint64:
|
||||
//
|
||||
// per_slot_balance = get_total_active_balance(state) // SLOTS_PER_EPOCH
|
||||
// quorum = per_slot_balance * BUILDER_PAYMENT_THRESHOLD_NUMERATOR
|
||||
// return uint64(quorum // BUILDER_PAYMENT_THRESHOLD_DENOMINATOR)
|
||||
func builderQuorumThreshold(state state.ReadOnlyBeaconState) (primitives.Gwei, error) {
|
||||
activeBalance, err := helpers.TotalActiveBalance(state)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "could not get total active balance")
|
||||
}
|
||||
|
||||
cfg := params.BeaconConfig()
|
||||
slotsPerEpoch := uint64(cfg.SlotsPerEpoch)
|
||||
numerator := cfg.BuilderPaymentThresholdNumerator
|
||||
denominator := cfg.BuilderPaymentThresholdDenominator
|
||||
|
||||
activeBalancePerSlot := activeBalance / slotsPerEpoch
|
||||
quorum := (activeBalancePerSlot * numerator) / denominator
|
||||
return primitives.Gwei(quorum), nil
|
||||
}
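
A quick worked example of the integer arithmetic above, using made-up inputs (illustrative values only, not the real BeaconConfig; the helper is not part of the Prysm codebase):

// builderQuorumThresholdExample restates the computation above with hypothetical numbers.
func builderQuorumThresholdExample() uint64 {
	totalActiveBalance := uint64(1024) // hypothetical total active balance, in Gwei
	slotsPerEpoch := uint64(32)        // hypothetical SLOTS_PER_EPOCH
	numerator, denominator := uint64(6), uint64(10)
	perSlot := totalActiveBalance / slotsPerEpoch // 32
	return perSlot * numerator / denominator      // 19, integer division throughout
}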
@@ -1,119 +0,0 @@
package gloas

import (
"slices"
"testing"

"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/require"
)

func TestBuilderQuorumThreshold(t *testing.T) {
helpers.ClearCache()
cfg := params.BeaconConfig()

validators := []*ethpb.Validator{
{EffectiveBalance: cfg.MaxEffectiveBalance, ActivationEpoch: 0, ExitEpoch: 1},
{EffectiveBalance: cfg.MaxEffectiveBalance, ActivationEpoch: 0, ExitEpoch: 1},
}
st, err := state_native.InitializeFromProtoUnsafeGloas(&ethpb.BeaconStateGloas{Validators: validators})
require.NoError(t, err)

got, err := builderQuorumThreshold(st)
require.NoError(t, err)

total := uint64(len(validators)) * cfg.MaxEffectiveBalance
perSlot := total / uint64(cfg.SlotsPerEpoch)
want := (perSlot * cfg.BuilderPaymentThresholdNumerator) / cfg.BuilderPaymentThresholdDenominator
require.Equal(t, primitives.Gwei(want), got)
}

func TestProcessBuilderPendingPayments(t *testing.T) {
helpers.ClearCache()
cfg := params.BeaconConfig()

buildPayments := func(weights ...primitives.Gwei) []*ethpb.BuilderPendingPayment {
p := make([]*ethpb.BuilderPendingPayment, 2*int(cfg.SlotsPerEpoch))
for i := range p {
p[i] = &ethpb.BuilderPendingPayment{
Withdrawal: &ethpb.BuilderPendingWithdrawal{FeeRecipient: make([]byte, 20)},
}
}
for i, w := range weights {
p[i].Weight = w
p[i].Withdrawal.Amount = 1
}
return p
}

validators := []*ethpb.Validator{
{EffectiveBalance: cfg.MaxEffectiveBalance, ActivationEpoch: 0, ExitEpoch: 1},
{EffectiveBalance: cfg.MaxEffectiveBalance, ActivationEpoch: 0, ExitEpoch: 1},
}
pbSt, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Validators: validators})
require.NoError(t, err)

total := uint64(len(validators)) * cfg.MaxEffectiveBalance
perSlot := total / uint64(cfg.SlotsPerEpoch)
quorum := (perSlot * cfg.BuilderPaymentThresholdNumerator) / cfg.BuilderPaymentThresholdDenominator
slotsPerEpoch := int(cfg.SlotsPerEpoch)

t.Run("append qualifying withdrawals", func(t *testing.T) {
payments := buildPayments(primitives.Gwei(quorum+1), primitives.Gwei(quorum+2))
st := &testProcessState{BeaconState: pbSt, payments: payments}

require.NoError(t, ProcessBuilderPendingPayments(st))
require.Equal(t, 2, len(st.withdrawals))
require.Equal(t, payments[0].Withdrawal, st.withdrawals[0])
require.Equal(t, payments[1].Withdrawal, st.withdrawals[1])

require.Equal(t, 2*slotsPerEpoch, len(st.payments))
for i := slotsPerEpoch; i < 2*slotsPerEpoch; i++ {
require.Equal(t, primitives.Gwei(0), st.payments[i].Weight)
require.Equal(t, primitives.Gwei(0), st.payments[i].Withdrawal.Amount)
require.Equal(t, 20, len(st.payments[i].Withdrawal.FeeRecipient))
}
})

t.Run("no withdrawals when below quorum", func(t *testing.T) {
payments := buildPayments(primitives.Gwei(quorum - 1))
st := &testProcessState{BeaconState: pbSt, payments: payments}

require.NoError(t, ProcessBuilderPendingPayments(st))
require.Equal(t, 0, len(st.withdrawals))
})
}

type testProcessState struct {
state.BeaconState
payments []*ethpb.BuilderPendingPayment
withdrawals []*ethpb.BuilderPendingWithdrawal
}

func (t *testProcessState) BuilderPendingPayments() ([]*ethpb.BuilderPendingPayment, error) {
return t.payments, nil
}

func (t *testProcessState) AppendBuilderPendingWithdrawals(withdrawals []*ethpb.BuilderPendingWithdrawal) error {
t.withdrawals = append(t.withdrawals, withdrawals...)
return nil
}

func (t *testProcessState) RotateBuilderPendingPayments() error {
slotsPerEpoch := int(params.BeaconConfig().SlotsPerEpoch)
rotated := slices.Clone(t.payments[slotsPerEpoch:])
for range slotsPerEpoch {
rotated = append(rotated, &ethpb.BuilderPendingPayment{
Withdrawal: &ethpb.BuilderPendingWithdrawal{
FeeRecipient: make([]byte, 20),
},
})
}
t.payments = rotated
return nil
}
@@ -1,43 +0,0 @@
package gloas

import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
)

// RemoveBuilderPendingPayment removes the pending builder payment for the proposal slot.
// Spec v1.7.0 (pseudocode):
//
// slot = header_1.slot
// proposal_epoch = compute_epoch_at_slot(slot)
// if proposal_epoch == get_current_epoch(state):
// payment_index = SLOTS_PER_EPOCH + slot % SLOTS_PER_EPOCH
// state.builder_pending_payments[payment_index] = BuilderPendingPayment()
// elif proposal_epoch == get_previous_epoch(state):
// payment_index = slot % SLOTS_PER_EPOCH
// state.builder_pending_payments[payment_index] = BuilderPendingPayment()
func RemoveBuilderPendingPayment(st state.BeaconState, header *eth.BeaconBlockHeader) error {
proposalEpoch := slots.ToEpoch(header.Slot)
currentEpoch := time.CurrentEpoch(st)
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch

var paymentIndex primitives.Slot
if proposalEpoch == currentEpoch {
paymentIndex = slotsPerEpoch + header.Slot%slotsPerEpoch
} else if proposalEpoch+1 == currentEpoch {
paymentIndex = header.Slot % slotsPerEpoch
} else {
return nil
}

if err := st.ClearBuilderPendingPayment(paymentIndex); err != nil {
return errors.Wrap(err, "could not clear builder pending payment")
}

return nil
}
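
To make the index selection above concrete (hypothetical numbers, assuming SLOTS_PER_EPOCH = 32; this helper is illustrative and not part of the Prysm codebase):

// pendingPaymentIndexExample mirrors the branch logic above for a header at slot 70.
func pendingPaymentIndexExample() (current, previous uint64) {
	const slotsPerEpoch = 32
	headerSlot := uint64(70) // proposal in epoch 2
	// Proposal epoch equals the current epoch: second half of the two-epoch buffer.
	current = slotsPerEpoch + headerSlot%slotsPerEpoch // 32 + 6 = 38
	// Proposal epoch is the previous epoch: first half of the buffer.
	previous = headerSlot % slotsPerEpoch // 6
	return current, previous
}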
@@ -1,112 +0,0 @@
package gloas

import (
"bytes"
"testing"

"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/require"
)

func TestRemoveBuilderPendingPayment_CurrentEpoch(t *testing.T) {
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
stateSlot := slotsPerEpoch*2 + 1
headerSlot := slotsPerEpoch * 2

st := newGloasStateWithPayments(t, stateSlot)
paymentIndex := int(slotsPerEpoch + headerSlot%slotsPerEpoch)

setPendingPayment(t, st, paymentIndex, 123)

err := RemoveBuilderPendingPayment(st, &eth.BeaconBlockHeader{Slot: headerSlot})
require.NoError(t, err)

got := getPendingPayment(t, st, paymentIndex)
require.NotNil(t, got.Withdrawal)
require.DeepEqual(t, make([]byte, 20), got.Withdrawal.FeeRecipient)
require.Equal(t, uint64(0), uint64(got.Withdrawal.Amount))
}

func TestRemoveBuilderPendingPayment_PreviousEpoch(t *testing.T) {
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
stateSlot := slotsPerEpoch*2 + 1
headerSlot := slotsPerEpoch + 7

st := newGloasStateWithPayments(t, stateSlot)
paymentIndex := int(headerSlot % slotsPerEpoch)

setPendingPayment(t, st, paymentIndex, 456)

err := RemoveBuilderPendingPayment(st, &eth.BeaconBlockHeader{Slot: headerSlot})
require.NoError(t, err)

got := getPendingPayment(t, st, paymentIndex)
require.NotNil(t, got.Withdrawal)
require.DeepEqual(t, make([]byte, 20), got.Withdrawal.FeeRecipient)
require.Equal(t, uint64(0), uint64(got.Withdrawal.Amount))
}

func TestRemoveBuilderPendingPayment_OlderThanTwoEpoch(t *testing.T) {
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
stateSlot := slotsPerEpoch*4 + 1 // current epoch far ahead
headerSlot := slotsPerEpoch * 2 // two epochs behind

st := newGloasStateWithPayments(t, stateSlot)
paymentIndex := int(headerSlot % slotsPerEpoch)

original := getPendingPayment(t, st, paymentIndex)

err := RemoveBuilderPendingPayment(st, &eth.BeaconBlockHeader{Slot: headerSlot})
require.NoError(t, err)

after := getPendingPayment(t, st, paymentIndex)
require.DeepEqual(t, original.Withdrawal.FeeRecipient, after.Withdrawal.FeeRecipient)
require.Equal(t, original.Withdrawal.Amount, after.Withdrawal.Amount)
}

func newGloasStateWithPayments(t *testing.T, slot primitives.Slot) state.BeaconState {
t.Helper()

slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
paymentCount := int(slotsPerEpoch * 2)
payments := make([]*eth.BuilderPendingPayment, paymentCount)
for i := range payments {
payments[i] = &eth.BuilderPendingPayment{
Withdrawal: &eth.BuilderPendingWithdrawal{
FeeRecipient: bytes.Repeat([]byte{0x01}, 20),
Amount: 1,
},
}
}

st, err := state_native.InitializeFromProtoUnsafeGloas(&eth.BeaconStateGloas{
Slot: slot,
BuilderPendingPayments: payments,
})
require.NoError(t, err)
return st
}

func setPendingPayment(t *testing.T, st state.BeaconState, index int, amount uint64) {
t.Helper()

payment := &eth.BuilderPendingPayment{
Withdrawal: &eth.BuilderPendingWithdrawal{
FeeRecipient: bytes.Repeat([]byte{0x02}, 20),
Amount: primitives.Gwei(amount),
},
}
require.NoError(t, st.SetBuilderPendingPayment(primitives.Slot(index), payment))
}

func getPendingPayment(t *testing.T, st state.BeaconState, index int) *eth.BuilderPendingPayment {
t.Helper()

stateProto := st.ToProtoUnsafe().(*eth.BeaconStateGloas)

return stateProto.BuilderPendingPayments[index]
}
@@ -6,7 +6,6 @@ go_library(
"attestation.go",
"beacon_committee.go",
"block.go",
"deposit.go",
"genesis.go",
"legacy.go",
"log.go",
@@ -24,7 +23,6 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/cache:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/forkchoice/types:go_default_library",
"//beacon-chain/state:go_default_library",
@@ -33,7 +31,6 @@ go_library(
"//consensus-types/primitives:go_default_library",
"//container/slice:go_default_library",
"//container/trie:go_default_library",
"//contracts/deposit:go_default_library",
"//crypto/bls:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
@@ -57,7 +54,6 @@ go_test(
"attestation_test.go",
"beacon_committee_test.go",
"block_test.go",
"deposit_test.go",
"legacy_test.go",
"private_access_fuzz_noop_test.go", # keep
"private_access_test.go",
@@ -76,7 +72,6 @@ go_test(
tags = ["CI_race_detection"],
deps = [
"//beacon-chain/cache:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/forkchoice/types:go_default_library",
"//beacon-chain/state:go_default_library",
@@ -85,8 +80,6 @@ go_test(
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//container/slice:go_default_library",
"//container/trie:go_default_library",
"//crypto/bls:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",

@@ -1,52 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
name = "go_default_library",
srcs = [
"consolidations.go",
"log.go",
"withdrawals.go",
],
importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/core/requests",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/validators:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/bls/common:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_ethereum_go_ethereum//common/math:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)

go_test(
name = "go_default_test",
srcs = ["withdrawals_test.go"],
deps = [
":go_default_library",
"//beacon-chain/state:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
],
)
@@ -1,365 +0,0 @@
package requests

import (
"bytes"
"context"
"fmt"

"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/crypto/bls/common"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
prysmMath "github.com/OffchainLabs/prysm/v7/math"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/ethereum/go-ethereum/common/math"
"github.com/pkg/errors"
)

// ProcessConsolidationRequests implements the spec definition below. This method makes mutating
// calls to the beacon state.
//
// def process_consolidation_request(
// state: BeaconState,
// consolidation_request: ConsolidationRequest
// ) -> None:
// if is_valid_switch_to_compounding_request(state, consolidation_request):
// validator_pubkeys = [v.pubkey for v in state.validators]
// request_source_pubkey = consolidation_request.source_pubkey
// source_index = ValidatorIndex(validator_pubkeys.index(request_source_pubkey))
// switch_to_compounding_validator(state, source_index)
// return
//
// # Verify that source != target, so a consolidation cannot be used as an exit.
// if consolidation_request.source_pubkey == consolidation_request.target_pubkey:
// return
// # If the pending consolidations queue is full, consolidation requests are ignored
// if len(state.pending_consolidations) == PENDING_CONSOLIDATIONS_LIMIT:
// return
// # If there is too little available consolidation churn limit, consolidation requests are ignored
// if get_consolidation_churn_limit(state) <= MIN_ACTIVATION_BALANCE:
// return
//
// validator_pubkeys = [v.pubkey for v in state.validators]
// # Verify pubkeys exists
// request_source_pubkey = consolidation_request.source_pubkey
// request_target_pubkey = consolidation_request.target_pubkey
// if request_source_pubkey not in validator_pubkeys:
// return
// if request_target_pubkey not in validator_pubkeys:
// return
// source_index = ValidatorIndex(validator_pubkeys.index(request_source_pubkey))
// target_index = ValidatorIndex(validator_pubkeys.index(request_target_pubkey))
// source_validator = state.validators[source_index]
// target_validator = state.validators[target_index]
//
// # Verify source withdrawal credentials
// has_correct_credential = has_execution_withdrawal_credential(source_validator)
// is_correct_source_address = (
// source_validator.withdrawal_credentials[12:] == consolidation_request.source_address
// )
// if not (has_correct_credential and is_correct_source_address):
// return
//
// # Verify that target has compounding withdrawal credentials
// if not has_compounding_withdrawal_credential(target_validator):
// return
//
// # Verify the source and the target are active
// current_epoch = get_current_epoch(state)
// if not is_active_validator(source_validator, current_epoch):
// return
// if not is_active_validator(target_validator, current_epoch):
// return
// # Verify exits for source and target have not been initiated
// if source_validator.exit_epoch != FAR_FUTURE_EPOCH:
// return
// if target_validator.exit_epoch != FAR_FUTURE_EPOCH:
// return
//
// # Verify the source has been active long enough
// if current_epoch < source_validator.activation_epoch + SHARD_COMMITTEE_PERIOD:
// return
//
// # Verify the source has no pending withdrawals in the queue
// if get_pending_balance_to_withdraw(state, source_index) > 0:
// return
// # Initiate source validator exit and append pending consolidation
// source_validator.exit_epoch = compute_consolidation_epoch_and_update_churn(
// state, source_validator.effective_balance
// )
// source_validator.withdrawable_epoch = Epoch(
// source_validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY
// )
// state.pending_consolidations.append(PendingConsolidation(
// source_index=source_index,
// target_index=target_index
// ))
func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, reqs []*enginev1.ConsolidationRequest) error {
ctx, span := trace.StartSpan(ctx, "requests.ProcessConsolidationRequests")
defer span.End()

if len(reqs) == 0 || st == nil {
return nil
}
curEpoch := slots.ToEpoch(st.Slot())
ffe := params.BeaconConfig().FarFutureEpoch
minValWithdrawDelay := params.BeaconConfig().MinValidatorWithdrawabilityDelay
pcLimit := params.BeaconConfig().PendingConsolidationsLimit

for _, cr := range reqs {
if cr == nil {
return errors.New("nil consolidation request")
}
if ctx.Err() != nil {
return fmt.Errorf("cannot process consolidation requests: %w", ctx.Err())
}

if isValidSwitchToCompoundingRequest(st, cr) {
srcIdx, ok := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(cr.SourcePubkey))
if !ok {
log.Error("Failed to find source validator index")
continue
}
if err := switchToCompoundingValidator(st, srcIdx); err != nil {
log.WithError(err).Error("Failed to switch to compounding validator")
}
continue
}

sourcePubkey := bytesutil.ToBytes48(cr.SourcePubkey)
targetPubkey := bytesutil.ToBytes48(cr.TargetPubkey)
if sourcePubkey == targetPubkey {
continue
}

if npc, err := st.NumPendingConsolidations(); err != nil {
return fmt.Errorf("failed to fetch number of pending consolidations: %w", err) // This should never happen.
} else if npc >= pcLimit {
continue
}

activeBal, err := helpers.TotalActiveBalance(st)
if err != nil {
return err
}
churnLimit := helpers.ConsolidationChurnLimit(primitives.Gwei(activeBal))
if churnLimit <= primitives.Gwei(params.BeaconConfig().MinActivationBalance) {
continue
}

srcIdx, ok := st.ValidatorIndexByPubkey(sourcePubkey)
if !ok {
continue
}
tgtIdx, ok := st.ValidatorIndexByPubkey(targetPubkey)
if !ok {
continue
}

srcV, err := st.ValidatorAtIndex(srcIdx)
if err != nil {
return fmt.Errorf("failed to fetch source validator: %w", err) // This should never happen.
}

roSrcV, err := state_native.NewValidator(srcV)
if err != nil {
return err
}

tgtV, err := st.ValidatorAtIndexReadOnly(tgtIdx)
if err != nil {
return fmt.Errorf("failed to fetch target validator: %w", err) // This should never happen.
}

// Verify source withdrawal credentials.
if !roSrcV.HasExecutionWithdrawalCredentials() {
continue
}
// Confirm source_validator.withdrawal_credentials[12:] == consolidation_request.source_address.
if len(srcV.WithdrawalCredentials) != 32 || len(cr.SourceAddress) != 20 || !bytes.HasSuffix(srcV.WithdrawalCredentials, cr.SourceAddress) {
continue
}

// Target validator must have their withdrawal credentials set appropriately.
if !tgtV.HasCompoundingWithdrawalCredentials() {
continue
}

// Both validators must be active.
if !helpers.IsActiveValidator(srcV, curEpoch) || !helpers.IsActiveValidatorUsingTrie(tgtV, curEpoch) {
continue
}
// Neither validator is exiting.
if srcV.ExitEpoch != ffe || tgtV.ExitEpoch() != ffe {
continue
}

e, overflow := math.SafeAdd(uint64(srcV.ActivationEpoch), uint64(params.BeaconConfig().ShardCommitteePeriod))
if overflow {
log.Error("Overflow when adding activation epoch and shard committee period")
continue
}
if uint64(curEpoch) < e {
continue
}

hasBal, err := st.HasPendingBalanceToWithdraw(srcIdx)
if err != nil {
log.WithError(err).Error("Failed to fetch pending balance to withdraw")
continue
}
if hasBal {
continue
}

exitEpoch, err := computeConsolidationEpochAndUpdateChurn(st, primitives.Gwei(srcV.EffectiveBalance))
if err != nil {
log.WithError(err).Error("Failed to compute consolidation epoch")
continue
}
srcV.ExitEpoch = exitEpoch
srcV.WithdrawableEpoch = exitEpoch + minValWithdrawDelay
if err := st.UpdateValidatorAtIndex(srcIdx, srcV); err != nil {
return fmt.Errorf("failed to update validator: %w", err) // This should never happen.
}

if err := st.AppendPendingConsolidation(&eth.PendingConsolidation{SourceIndex: srcIdx, TargetIndex: tgtIdx}); err != nil {
return fmt.Errorf("failed to append pending consolidation: %w", err) // This should never happen.
}
}

return nil
}

func isValidSwitchToCompoundingRequest(st state.BeaconState, req *enginev1.ConsolidationRequest) bool {
if req.SourcePubkey == nil || req.TargetPubkey == nil {
return false
}

if !bytes.Equal(req.SourcePubkey, req.TargetPubkey) {
return false
}

srcIdx, ok := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(req.SourcePubkey))
if !ok {
return false
}
srcV, err := st.ValidatorAtIndexReadOnly(srcIdx)
if err != nil {
return false
}

sourceAddress := req.SourceAddress
withdrawalCreds := srcV.GetWithdrawalCredentials()
if len(withdrawalCreds) != 32 || len(sourceAddress) != 20 || !bytes.HasSuffix(withdrawalCreds, sourceAddress) {
return false
}

if !srcV.HasETH1WithdrawalCredentials() {
return false
}

curEpoch := slots.ToEpoch(st.Slot())
if !helpers.IsActiveValidatorUsingTrie(srcV, curEpoch) {
return false
}

if srcV.ExitEpoch() != params.BeaconConfig().FarFutureEpoch {
return false
}

return true
}

func switchToCompoundingValidator(st state.BeaconState, idx primitives.ValidatorIndex) error {
v, err := st.ValidatorAtIndex(idx)
if err != nil {
return err
}
if len(v.WithdrawalCredentials) == 0 {
return errors.New("validator has no withdrawal credentials")
}

v.WithdrawalCredentials[0] = params.BeaconConfig().CompoundingWithdrawalPrefixByte
if err := st.UpdateValidatorAtIndex(idx, v); err != nil {
return err
}
return queueExcessActiveBalance(st, idx)
}

func queueExcessActiveBalance(st state.BeaconState, idx primitives.ValidatorIndex) error {
bal, err := st.BalanceAtIndex(idx)
if err != nil {
return err
}

if bal > params.BeaconConfig().MinActivationBalance {
if err := st.UpdateBalancesAtIndex(idx, params.BeaconConfig().MinActivationBalance); err != nil {
return err
}
excessBalance := bal - params.BeaconConfig().MinActivationBalance
val, err := st.ValidatorAtIndexReadOnly(idx)
if err != nil {
return err
}
pk := val.PublicKey()
return st.AppendPendingDeposit(&eth.PendingDeposit{
PublicKey: pk[:],
WithdrawalCredentials: val.GetWithdrawalCredentials(),
Amount: excessBalance,
Signature: common.InfiniteSignature[:],
Slot: params.BeaconConfig().GenesisSlot,
})
}
return nil
}
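
For intuition about the excess-balance split above, a sketch with hypothetical Gwei amounts (not the real MinActivationBalance; the helper is illustrative and not part of the Prysm codebase):

// excessBalanceExample restates the split performed above with made-up numbers.
func excessBalanceExample() (kept, queued uint64) {
	const minActivationBalance = 32_000_000_000 // hypothetical 32 ETH in Gwei
	balance := uint64(40_000_000_000)           // hypothetical 40 ETH in Gwei
	if balance > minActivationBalance {
		// 32 ETH stays on the validator, 8 ETH is queued as a pending deposit.
		return minActivationBalance, balance - minActivationBalance
	}
	return balance, 0
}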

func computeConsolidationEpochAndUpdateChurn(st state.BeaconState, consolidationBalance primitives.Gwei) (primitives.Epoch, error) {
earliestEpoch, err := st.EarliestConsolidationEpoch()
if err != nil {
return 0, err
}
earliestConsolidationEpoch := max(earliestEpoch, helpers.ActivationExitEpoch(slots.ToEpoch(st.Slot())))

activeBal, err := helpers.TotalActiveBalance(st)
if err != nil {
return 0, err
}
perEpochConsolidationChurn := helpers.ConsolidationChurnLimit(primitives.Gwei(activeBal))

var consolidationBalanceToConsume primitives.Gwei
if earliestEpoch < earliestConsolidationEpoch {
consolidationBalanceToConsume = perEpochConsolidationChurn
} else {
consolidationBalanceToConsume, err = st.ConsolidationBalanceToConsume()
if err != nil {
return 0, err
}
}

if consolidationBalance > consolidationBalanceToConsume {
balanceToProcess := consolidationBalance - consolidationBalanceToConsume
additionalEpochs, err := prysmMath.Div64(uint64(balanceToProcess-1), uint64(perEpochConsolidationChurn))
if err != nil {
return 0, err
}
additionalEpochs++
earliestConsolidationEpoch += primitives.Epoch(additionalEpochs)
consolidationBalanceToConsume += primitives.Gwei(additionalEpochs) * perEpochConsolidationChurn
}

if err := st.SetConsolidationBalanceToConsume(consolidationBalanceToConsume - consolidationBalance); err != nil {
return 0, err
}
if err := st.SetEarliestConsolidationEpoch(earliestConsolidationEpoch); err != nil {
return 0, err
}

return earliestConsolidationEpoch, nil
}
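
A worked example of the churn arithmetic above, with hypothetical numbers (the helper is illustrative and not part of the Prysm codebase):

// consolidationChurnExample mirrors the epoch/remainder arithmetic above.
func consolidationChurnExample() (additionalEpochs, leftover uint64) {
	perEpochChurn := uint64(32)        // hypothetical churn limit per epoch, in Gwei
	balanceToConsume := uint64(32)     // hypothetical carry-over from earlier consolidations
	consolidationBalance := uint64(80) // hypothetical effective balance being consolidated
	if consolidationBalance > balanceToConsume {
		balanceToProcess := consolidationBalance - balanceToConsume // 48
		additionalEpochs = (balanceToProcess-1)/perEpochChurn + 1   // 2
		balanceToConsume += additionalEpochs * perEpochChurn        // 96
	}
	leftover = balanceToConsume - consolidationBalance // 16, carried into the next request
	return additionalEpochs, leftover
}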
@@ -1,9 +0,0 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package requests

import "github.com/sirupsen/logrus"

// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "beacon-chain/core/requests")
@@ -27,7 +27,6 @@ go_library(
"//beacon-chain/core/execution:go_default_library",
"//beacon-chain/core/fulu:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/requests:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/core/transition/interop:go_default_library",
"//beacon-chain/core/validators:go_default_library",
@@ -72,7 +71,6 @@ go_test(
"state_test.go",
"trailing_slot_state_cache_test.go",
"transition_fuzz_test.go",
"transition_gloas_test.go",
"transition_no_verify_sig_test.go",
"transition_test.go",
],
@@ -108,7 +106,6 @@ go_test(
"//time/slots:go_default_library",
"@com_github_google_gofuzz//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_stretchr_testify//require:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)

@@ -6,7 +6,6 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/electra"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
coreRequests "github.com/OffchainLabs/prysm/v7/beacon-chain/core/requests"
v "github.com/OffchainLabs/prysm/v7/beacon-chain/core/validators"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
@@ -108,7 +107,7 @@ func electraOperations(ctx context.Context, st state.BeaconState, block interfac
return nil, electra.NewExecReqError("nil withdrawal request")
}
}
st, err = coreRequests.ProcessWithdrawalRequests(ctx, st, requests.Withdrawals)
st, err = electra.ProcessWithdrawalRequests(ctx, st, requests.Withdrawals)
if err != nil {
return nil, electra.NewExecReqError(errors.Wrap(err, "could not process withdrawal requests").Error())
}
@@ -117,7 +116,7 @@ func electraOperations(ctx context.Context, st state.BeaconState, block interfac
return nil, electra.NewExecReqError("nil consolidation request")
}
}
if err := coreRequests.ProcessConsolidationRequests(ctx, st, requests.Consolidations); err != nil {
if err := electra.ProcessConsolidationRequests(ctx, st, requests.Consolidations); err != nil {
return nil, electra.NewExecReqError(errors.Wrap(err, "could not process consolidation requests").Error())
}
return st, nil

@@ -142,18 +142,6 @@ func ProcessSlot(ctx context.Context, state state.BeaconState) (state.BeaconStat
); err != nil {
return nil, err
}

// Spec v1.6.1 (pseudocode):
// # [New in Gloas:EIP7732]
// # Unset the next payload availability
// state.execution_payload_availability[(state.slot + 1) % SLOTS_PER_HISTORICAL_ROOT] = 0b0
if state.Version() >= version.Gloas {
index := uint64((state.Slot() + 1) % params.BeaconConfig().SlotsPerHistoricalRoot)
if err := state.UpdateExecutionPayloadAvailabilityAtIndex(index, 0x0); err != nil {
return nil, err
}
}

return state, nil
}
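
The availability field is a bitfield packed eight slots per byte, so unsetting the bit for slot+1 reduces to the byte/bit arithmetic sketched below (an illustrative helper matching the index math in the tests that follow; it is not the state-native implementation):

// clearNextPayloadAvailability zeroes the bit for (slot+1) % slotsPerHistoricalRoot
// in an LSB-first bitfield. Hypothetical sketch for illustration only.
func clearNextPayloadAvailability(availability []byte, slot, slotsPerHistoricalRoot uint64) {
	idx := (slot + 1) % slotsPerHistoricalRoot
	byteIdx := idx / 8
	bitMask := byte(1) << (idx % 8)
	availability[byteIdx] &^= bitMask // AND NOT: clear only that bit
}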
|
||||
|
||||
|
||||
@@ -1,141 +0,0 @@
|
||||
package transition
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestProcessSlot_GloasClearsNextPayloadAvailability(t *testing.T) {
|
||||
slot := primitives.Slot(10)
|
||||
cfg := params.BeaconConfig()
|
||||
nextIdx := uint64((slot + 1) % cfg.SlotsPerHistoricalRoot)
|
||||
byteIdx := nextIdx / 8
|
||||
bitMask := byte(1 << (nextIdx % 8))
|
||||
availability := bytes.Repeat([]byte{0xFF}, int(cfg.SlotsPerHistoricalRoot/8))
|
||||
st := newGloasState(t, slot, availability)
|
||||
|
||||
_, err := ProcessSlot(context.Background(), st)
|
||||
require.NoError(t, err)
|
||||
|
||||
post := st.ToProto().(*ethpb.BeaconStateGloas)
|
||||
require.Equal(t, byte(0xFF)&^bitMask, post.ExecutionPayloadAvailability[byteIdx])
|
||||
}
|
||||
|
||||
func TestProcessSlot_GloasClearsNextPayloadAvailability_Wrap(t *testing.T) {
|
||||
cfg := params.BeaconConfig()
|
||||
slot := primitives.Slot(cfg.SlotsPerHistoricalRoot - 1)
|
||||
availability := bytes.Repeat([]byte{0xFF}, int(cfg.SlotsPerHistoricalRoot/8))
|
||||
st := newGloasState(t, slot, availability)
|
||||
|
||||
_, err := ProcessSlot(context.Background(), st)
|
||||
require.NoError(t, err)
|
||||
|
||||
post := st.ToProto().(*ethpb.BeaconStateGloas)
|
||||
require.Equal(t, byte(0xFE), post.ExecutionPayloadAvailability[0])
|
||||
}
|
||||
|
||||
func TestProcessSlot_GloasAvailabilityUpdateError(t *testing.T) {
|
||||
slot := primitives.Slot(7)
|
||||
availability := make([]byte, 1)
|
||||
st := newGloasState(t, slot, availability)
|
||||
|
||||
_, err := ProcessSlot(context.Background(), st)
|
||||
cfg := params.BeaconConfig()
|
||||
idx := uint64((slot + 1) % cfg.SlotsPerHistoricalRoot)
|
||||
byteIdx := idx / 8
|
||||
require.EqualError(t, err, fmt.Sprintf(
|
||||
"bit index %d (byte index %d) out of range for execution payload availability length %d",
|
||||
idx, byteIdx, len(availability),
|
||||
))
|
||||
}
|
||||
|
||||
func newGloasState(t *testing.T, slot primitives.Slot, availability []byte) state.BeaconState {
|
||||
t.Helper()
|
||||
|
||||
cfg := params.BeaconConfig()
|
||||
protoState := ðpb.BeaconStateGloas{
|
||||
Slot: slot,
|
||||
LatestBlockHeader: testBeaconBlockHeader(),
|
||||
BlockRoots: make([][]byte, cfg.SlotsPerHistoricalRoot),
|
||||
StateRoots: make([][]byte, cfg.SlotsPerHistoricalRoot),
|
||||
RandaoMixes: make([][]byte, fieldparams.RandaoMixesLength),
|
||||
ExecutionPayloadAvailability: availability,
|
||||
BuilderPendingPayments: make([]*ethpb.BuilderPendingPayment, int(cfg.SlotsPerEpoch*2)),
|
||||
LatestExecutionPayloadBid: ðpb.ExecutionPayloadBid{
|
||||
ParentBlockHash: make([]byte, 32),
|
||||
ParentBlockRoot: make([]byte, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
PrevRandao: make([]byte, 32),
|
||||
FeeRecipient: make([]byte, 20),
|
||||
BlobKzgCommitmentsRoot: make([]byte, 32),
|
||||
},
|
||||
Eth1Data: ðpb.Eth1Data{
|
||||
DepositRoot: make([]byte, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
},
|
||||
PreviousEpochParticipation: []byte{},
|
||||
CurrentEpochParticipation: []byte{},
|
||||
JustificationBits: []byte{0},
|
||||
PreviousJustifiedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
CurrentJustifiedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
FinalizedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
CurrentSyncCommittee: ðpb.SyncCommittee{},
|
||||
NextSyncCommittee: ðpb.SyncCommittee{},
|
||||
}
|
||||
|
||||
for i := range protoState.BlockRoots {
|
||||
protoState.BlockRoots[i] = make([]byte, 32)
|
||||
}
|
||||
for i := range protoState.StateRoots {
|
||||
protoState.StateRoots[i] = make([]byte, 32)
|
||||
}
|
||||
for i := range protoState.RandaoMixes {
|
||||
protoState.RandaoMixes[i] = make([]byte, 32)
|
||||
}
|
||||
|
||||
for i := range protoState.BuilderPendingPayments {
|
||||
protoState.BuilderPendingPayments[i] = ðpb.BuilderPendingPayment{
|
||||
Withdrawal: ðpb.BuilderPendingWithdrawal{
|
||||
FeeRecipient: make([]byte, 20),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
pubkeys := make([][]byte, cfg.SyncCommitteeSize)
|
||||
for i := range pubkeys {
|
||||
pubkeys[i] = make([]byte, fieldparams.BLSPubkeyLength)
|
||||
}
|
||||
aggPubkey := make([]byte, fieldparams.BLSPubkeyLength)
|
||||
protoState.CurrentSyncCommittee = ðpb.SyncCommittee{
|
||||
Pubkeys: pubkeys,
|
||||
AggregatePubkey: aggPubkey,
|
||||
}
|
||||
protoState.NextSyncCommittee = ðpb.SyncCommittee{
|
||||
Pubkeys: pubkeys,
|
||||
AggregatePubkey: aggPubkey,
|
||||
}
|
||||
|
||||
st, err := state_native.InitializeFromProtoGloas(protoState)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, version.Gloas, st.Version())
|
||||
return st
|
||||
}
|
||||
|
||||
func testBeaconBlockHeader() *ethpb.BeaconBlockHeader {
|
||||
return ðpb.BeaconBlockHeader{
|
||||
ParentRoot: make([]byte, 32),
|
||||
StateRoot: make([]byte, 32),
|
||||
BodyRoot: make([]byte, 32),
|
||||
}
|
||||
}
|
||||
@@ -512,11 +512,6 @@ func (dcs *DataColumnStorage) Get(root [fieldparams.RootLength]byte, indices []u
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "data column sidecars file path open")
|
||||
}
|
||||
defer func() {
|
||||
if closeErr := file.Close(); closeErr != nil {
|
||||
log.WithError(closeErr).WithField("file", filePath).Error("Error closing file during Get")
|
||||
}
|
||||
}()
|
||||
|
||||
// Read file metadata.
|
||||
metadata, err := dcs.metadata(file)
|
||||
|
||||
@@ -7,7 +7,6 @@ go_library(
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//api/server/structs:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//monitoring/tracing/trace:go_default_library",
|
||||
"//network/httputil:go_default_library",
|
||||
|
||||
@@ -9,7 +9,6 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/api/server/structs"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
|
||||
"github.com/OffchainLabs/prysm/v7/network/httputil"
|
||||
@@ -182,16 +181,6 @@ func prepareConfigSpec() (map[string]any, error) {
|
||||
data[tag] = convertValueForJSON(val, tag)
|
||||
}
|
||||
|
||||
// Add Fulu preset values. These are compile-time constants from fieldparams,
|
||||
// not runtime configs, but are required by the /eth/v1/config/spec API.
|
||||
data["NUMBER_OF_COLUMNS"] = convertValueForJSON(reflect.ValueOf(uint64(fieldparams.NumberOfColumns)), "NUMBER_OF_COLUMNS")
|
||||
data["CELLS_PER_EXT_BLOB"] = convertValueForJSON(reflect.ValueOf(uint64(fieldparams.NumberOfColumns)), "CELLS_PER_EXT_BLOB")
|
||||
data["FIELD_ELEMENTS_PER_CELL"] = convertValueForJSON(reflect.ValueOf(uint64(fieldparams.CellsPerBlob)), "FIELD_ELEMENTS_PER_CELL")
|
||||
data["FIELD_ELEMENTS_PER_EXT_BLOB"] = convertValueForJSON(reflect.ValueOf(config.FieldElementsPerBlob*2), "FIELD_ELEMENTS_PER_EXT_BLOB")
|
||||
data["KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH"] = convertValueForJSON(reflect.ValueOf(uint64(4)), "KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH")
|
||||
// UPDATE_TIMEOUT is derived from SLOTS_PER_EPOCH * EPOCHS_PER_SYNC_COMMITTEE_PERIOD
|
||||
data["UPDATE_TIMEOUT"] = convertValueForJSON(reflect.ValueOf(uint64(config.SlotsPerEpoch)*uint64(config.EpochsPerSyncCommitteePeriod)), "UPDATE_TIMEOUT")
|
||||
|
||||
return data, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -132,7 +132,7 @@ func TestGetSpec(t *testing.T) {
|
||||
config.MinSyncCommitteeParticipants = 71
|
||||
config.ProposerReorgCutoffBPS = primitives.BP(121)
|
||||
config.AttestationDueBPS = primitives.BP(122)
|
||||
config.AggregateDueBPS = primitives.BP(123)
|
||||
config.AggregrateDueBPS = primitives.BP(123)
|
||||
config.ContributionDueBPS = primitives.BP(124)
|
||||
config.TerminalBlockHash = common.HexToHash("TerminalBlockHash")
|
||||
config.TerminalBlockHashActivationEpoch = 72
|
||||
@@ -170,8 +170,6 @@ func TestGetSpec(t *testing.T) {
|
||||
config.SyncMessageDueBPS = 103
|
||||
config.BuilderWithdrawalPrefixByte = byte('b')
|
||||
config.BuilderIndexSelfBuild = primitives.BuilderIndex(125)
|
||||
config.BuilderPaymentThresholdNumerator = 104
|
||||
config.BuilderPaymentThresholdDenominator = 105
|
||||
|
||||
var dbp [4]byte
|
||||
copy(dbp[:], []byte{'0', '0', '0', '1'})
|
||||
@@ -212,7 +210,7 @@ func TestGetSpec(t *testing.T) {
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp))
|
||||
data, ok := resp.Data.(map[string]any)
|
||||
require.Equal(t, true, ok)
|
||||
assert.Equal(t, 186, len(data))
|
||||
assert.Equal(t, 178, len(data))
|
||||
for k, v := range data {
|
||||
t.Run(k, func(t *testing.T) {
|
||||
switch k {
|
||||
@@ -470,7 +468,7 @@ func TestGetSpec(t *testing.T) {
|
||||
assert.Equal(t, "121", v)
|
||||
case "ATTESTATION_DUE_BPS":
|
||||
assert.Equal(t, "122", v)
|
||||
case "AGGREGATE_DUE_BPS":
|
||||
case "AGGREGRATE_DUE_BPS":
|
||||
assert.Equal(t, "123", v)
|
||||
case "CONTRIBUTION_DUE_BPS":
|
||||
assert.Equal(t, "124", v)
|
||||
@@ -590,26 +588,10 @@ func TestGetSpec(t *testing.T) {
|
||||
assert.Equal(t, "102", v)
|
||||
case "SYNC_MESSAGE_DUE_BPS":
|
||||
assert.Equal(t, "103", v)
|
||||
case "BUILDER_PAYMENT_THRESHOLD_NUMERATOR":
|
||||
assert.Equal(t, "104", v)
|
||||
case "BUILDER_PAYMENT_THRESHOLD_DENOMINATOR":
|
||||
assert.Equal(t, "105", v)
|
||||
case "BLOB_SCHEDULE":
|
||||
blobSchedule, ok := v.([]any)
|
||||
assert.Equal(t, true, ok)
|
||||
assert.Equal(t, 2, len(blobSchedule))
|
||||
case "FIELD_ELEMENTS_PER_CELL":
|
||||
assert.Equal(t, "64", v) // From fieldparams.CellsPerBlob
|
||||
case "FIELD_ELEMENTS_PER_EXT_BLOB":
|
||||
assert.Equal(t, "198", v) // FieldElementsPerBlob (99) * 2
|
||||
case "KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH":
|
||||
assert.Equal(t, "4", v) // Preset value
|
||||
case "CELLS_PER_EXT_BLOB":
|
||||
assert.Equal(t, "128", v) // From fieldparams.NumberOfColumns
|
||||
case "NUMBER_OF_COLUMNS":
|
||||
assert.Equal(t, "128", v) // From fieldparams.NumberOfColumns
|
||||
case "UPDATE_TIMEOUT":
|
||||
assert.Equal(t, "1782", v) // SlotsPerEpoch (27) * EpochsPerSyncCommitteePeriod (66)
|
||||
default:
|
||||
t.Errorf("Incorrect key: %s", k)
|
||||
}
|
||||
|
||||
@@ -9,10 +9,6 @@ import (
|
||||
type writeOnlyGloasFields interface {
|
||||
SetExecutionPayloadBid(h interfaces.ROExecutionPayloadBid) error
|
||||
SetBuilderPendingPayment(index primitives.Slot, payment *ethpb.BuilderPendingPayment) error
|
||||
ClearBuilderPendingPayment(index primitives.Slot) error
|
||||
RotateBuilderPendingPayments() error
|
||||
AppendBuilderPendingWithdrawals([]*ethpb.BuilderPendingWithdrawal) error
|
||||
UpdateExecutionPayloadAvailabilityAtIndex(idx uint64, val byte) error
|
||||
}
|
||||
|
||||
type readOnlyGloasFields interface {
|
||||
@@ -20,5 +16,4 @@ type readOnlyGloasFields interface {
|
||||
IsActiveBuilder(primitives.BuilderIndex) (bool, error)
|
||||
CanBuilderCoverBid(primitives.BuilderIndex, primitives.Gwei) (bool, error)
|
||||
LatestBlockHash() ([32]byte, error)
|
||||
BuilderPendingPayments() ([]*ethpb.BuilderPendingPayment, error)
|
||||
}
|
||||
|
||||
@@ -135,15 +135,3 @@ func (b *BeaconState) builderPendingBalanceToWithdraw(builderIndex primitives.Bu
|
||||
}
|
||||
return total
|
||||
}
|
||||
|
||||
// BuilderPendingPayments returns a copy of the builder pending payments.
|
||||
func (b *BeaconState) BuilderPendingPayments() ([]*ethpb.BuilderPendingPayment, error) {
|
||||
if b.version < version.Gloas {
|
||||
return nil, errNotSupported("BuilderPendingPayments", b.version)
|
||||
}
|
||||
|
||||
b.lock.RLock()
|
||||
defer b.lock.RUnlock()
|
||||
|
||||
return b.builderPendingPaymentsVal(), nil
|
||||
}
|
||||
|
||||
@@ -157,12 +157,3 @@ func TestBuilderHelpers(t *testing.T) {
|
||||
require.Equal(t, false, ok)
|
||||
})
|
||||
}
|
||||
|
||||
func TestBuilderPendingPayments_UnsupportedVersion(t *testing.T) {
|
||||
stIface, err := state_native.InitializeFromProtoElectra(ðpb.BeaconStateElectra{})
|
||||
require.NoError(t, err)
|
||||
st := stIface.(*state_native.BeaconState)
|
||||
|
||||
_, err = st.BuilderPendingPayments()
|
||||
require.ErrorContains(t, "BuilderPendingPayments", err)
|
||||
}
|
||||
|
||||
@@ -725,13 +725,3 @@ func ProtobufBeaconStateFulu(s any) (*ethpb.BeaconStateFulu, error) {
|
||||
}
|
||||
return pbState, nil
|
||||
}
|
||||
|
||||
// ProtobufBeaconStateGloas transforms an input into beacon state Gloas in the form of protobuf.
|
||||
// Error is returned if the input is not type protobuf beacon state.
|
||||
func ProtobufBeaconStateGloas(s any) (*ethpb.BeaconStateGloas, error) {
|
||||
pbState, ok := s.(*ethpb.BeaconStateGloas)
|
||||
if !ok {
|
||||
return nil, errors.New("input is not type pb.BeaconStateGloas")
|
||||
}
|
||||
return pbState, nil
|
||||
}
|
||||
|
||||
@@ -4,71 +4,12 @@ import (
"fmt"

"github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native/types"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stateutil"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
)

// RotateBuilderPendingPayments rotates the queue by dropping slots per epoch payments from the
// front and appending slots per epoch empty payments to the end.
// This implements: state.builder_pending_payments = state.builder_pending_payments[SLOTS_PER_EPOCH:] + [BuilderPendingPayment() for _ in range(SLOTS_PER_EPOCH)]
func (b *BeaconState) RotateBuilderPendingPayments() error {
if b.version < version.Gloas {
return errNotSupported("RotateBuilderPendingPayments", b.version)
}

b.lock.Lock()
defer b.lock.Unlock()

slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
copy(b.builderPendingPayments[:slotsPerEpoch], b.builderPendingPayments[slotsPerEpoch:2*slotsPerEpoch])

for i := slotsPerEpoch; i < primitives.Slot(len(b.builderPendingPayments)); i++ {
b.builderPendingPayments[i] = emptyBuilderPendingPayment
}

b.markFieldAsDirty(types.BuilderPendingPayments)
b.rebuildTrie[types.BuilderPendingPayments] = true
return nil
}
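The rotation above follows a simple "shift the second epoch to the front, zero the tail" queue update. A minimal standalone sketch of the same pattern on a plain int slice (a hypothetical illustration, not Prysm code):

```go
package main

import "fmt"

// rotate drops the first `window` entries and appends `window` zero values,
// reusing the backing array: copy the tail to the front, then clear the tail.
func rotate(q []int, window int) {
	copy(q[:window], q[window:2*window])
	for i := window; i < len(q); i++ {
		q[i] = 0
	}
}

func main() {
	q := []int{1, 2, 3, 4, 5, 6}
	rotate(q, 3)
	fmt.Println(q) // [4 5 6 0 0 0]
}
```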
// emptyBuilderPendingPayment is a shared zero-value payment used to clear entries.
var emptyBuilderPendingPayment = &ethpb.BuilderPendingPayment{
Withdrawal: &ethpb.BuilderPendingWithdrawal{
FeeRecipient: make([]byte, 20),
},
}

// AppendBuilderPendingWithdrawals appends builder pending withdrawals to the beacon state.
// If the withdrawals slice is shared, it copies the slice first to preserve references.
func (b *BeaconState) AppendBuilderPendingWithdrawals(withdrawals []*ethpb.BuilderPendingWithdrawal) error {
if b.version < version.Gloas {
return errNotSupported("AppendBuilderPendingWithdrawals", b.version)
}

if len(withdrawals) == 0 {
return nil
}

b.lock.Lock()
defer b.lock.Unlock()

pendingWithdrawals := b.builderPendingWithdrawals
if b.sharedFieldReferences[types.BuilderPendingWithdrawals].Refs() > 1 {
pendingWithdrawals = make([]*ethpb.BuilderPendingWithdrawal, 0, len(b.builderPendingWithdrawals)+len(withdrawals))
pendingWithdrawals = append(pendingWithdrawals, b.builderPendingWithdrawals...)
b.sharedFieldReferences[types.BuilderPendingWithdrawals].MinusRef()
b.sharedFieldReferences[types.BuilderPendingWithdrawals] = stateutil.NewRef(1)
}

b.builderPendingWithdrawals = append(pendingWithdrawals, withdrawals...)
b.markFieldAsDirty(types.BuilderPendingWithdrawals)
return nil
}

// SetExecutionPayloadBid sets the latest execution payload bid in the state.
func (b *BeaconState) SetExecutionPayloadBid(h interfaces.ROExecutionPayloadBid) error {
if b.version < version.Gloas {
@@ -102,25 +43,6 @@ func (b *BeaconState) SetExecutionPayloadBid(h interfaces.ROExecutionPayloadBid)
return nil
}

// ClearBuilderPendingPayment clears a builder pending payment at the specified index.
func (b *BeaconState) ClearBuilderPendingPayment(index primitives.Slot) error {
if b.version < version.Gloas {
return errNotSupported("ClearBuilderPendingPayment", b.version)
}

b.lock.Lock()
defer b.lock.Unlock()

if uint64(index) >= uint64(len(b.builderPendingPayments)) {
return fmt.Errorf("builder pending payments index %d out of range (len=%d)", index, len(b.builderPendingPayments))
}

b.builderPendingPayments[index] = emptyBuilderPendingPayment

b.markFieldAsDirty(types.BuilderPendingPayments)
return nil
}

// SetBuilderPendingPayment sets a builder pending payment at the specified index.
func (b *BeaconState) SetBuilderPendingPayment(index primitives.Slot, payment *ethpb.BuilderPendingPayment) error {
if b.version < version.Gloas {
@@ -139,25 +61,3 @@ func (b *BeaconState) SetBuilderPendingPayment(index primitives.Slot, payment *e
b.markFieldAsDirty(types.BuilderPendingPayments)
return nil
}

// UpdateExecutionPayloadAvailabilityAtIndex updates the execution payload availability bit at a specific index.
func (b *BeaconState) UpdateExecutionPayloadAvailabilityAtIndex(idx uint64, val byte) error {
b.lock.Lock()
defer b.lock.Unlock()

byteIndex := idx / 8
bitIndex := idx % 8

if byteIndex >= uint64(len(b.executionPayloadAvailability)) {
return fmt.Errorf("bit index %d (byte index %d) out of range for execution payload availability length %d", idx, byteIndex, len(b.executionPayloadAvailability))
}

if val != 0 {
b.executionPayloadAvailability[byteIndex] |= (1 << bitIndex)
} else {
b.executionPayloadAvailability[byteIndex] &^= (1 << bitIndex)
}

b.markFieldAsDirty(types.ExecutionPayloadAvailability)
return nil
}
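UpdateExecutionPayloadAvailabilityAtIndex above is plain byte/bit arithmetic: byte index = idx / 8, bit index = idx % 8, set with |= and clear with &^=. A standalone sketch of the same bit manipulation (illustrative helper names, not Prysm APIs):

```go
package main

import "fmt"

// setBit sets or clears bit idx in a bitfield stored as a byte slice.
func setBit(field []byte, idx uint64, val byte) {
	byteIndex := idx / 8
	bitIndex := idx % 8
	if val != 0 {
		field[byteIndex] |= 1 << bitIndex
	} else {
		field[byteIndex] &^= 1 << bitIndex
	}
}

func main() {
	field := make([]byte, 2)
	setBit(field, 9, 1)            // byte 1, bit 1
	fmt.Printf("%08b\n", field[1]) // 00000010
	setBit(field, 9, 0)
	fmt.Printf("%08b\n", field[1]) // 00000000
}
```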
@@ -5,8 +5,6 @@ import (
"testing"

"github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native/types"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stateutil"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
@@ -140,191 +138,3 @@ func TestSetBuilderPendingPayment(t *testing.T) {
require.Equal(t, false, st.dirtyFields[types.BuilderPendingPayments])
})
}

func TestClearBuilderPendingPayment(t *testing.T) {
t.Run("previous fork returns expected error", func(t *testing.T) {
st := &BeaconState{version: version.Fulu}
err := st.ClearBuilderPendingPayment(0)
require.ErrorContains(t, "is not supported", err)
})

t.Run("clears and marks dirty", func(t *testing.T) {
st := &BeaconState{
version: version.Gloas,
dirtyFields: make(map[types.FieldIndex]bool),
builderPendingPayments: make([]*ethpb.BuilderPendingPayment, 2),
}
st.builderPendingPayments[1] = &ethpb.BuilderPendingPayment{
Weight: 2,
Withdrawal: &ethpb.BuilderPendingWithdrawal{
Amount: 99,
BuilderIndex: 1,
},
}

require.NoError(t, st.ClearBuilderPendingPayment(1))
require.Equal(t, emptyBuilderPendingPayment, st.builderPendingPayments[1])
require.Equal(t, true, st.dirtyFields[types.BuilderPendingPayments])
})

t.Run("returns error on out of range index", func(t *testing.T) {
st := &BeaconState{
version: version.Gloas,
dirtyFields: make(map[types.FieldIndex]bool),
builderPendingPayments: make([]*ethpb.BuilderPendingPayment, 1),
}

err := st.ClearBuilderPendingPayment(2)

require.ErrorContains(t, "out of range", err)
require.Equal(t, false, st.dirtyFields[types.BuilderPendingPayments])
})
}

func TestRotateBuilderPendingPayments(t *testing.T) {
totalPayments := 2 * params.BeaconConfig().SlotsPerEpoch
payments := make([]*ethpb.BuilderPendingPayment, totalPayments)
for i := range payments {
idx := uint64(i)
payments[i] = &ethpb.BuilderPendingPayment{
Weight: primitives.Gwei(idx * 100e9),
Withdrawal: &ethpb.BuilderPendingWithdrawal{
FeeRecipient: make([]byte, 20),
Amount: primitives.Gwei(idx * 1e9),
BuilderIndex: primitives.BuilderIndex(idx + 100),
},
}
}

statePb, err := InitializeFromProtoUnsafeGloas(&ethpb.BeaconStateGloas{
BuilderPendingPayments: payments,
})
require.NoError(t, err)
st, ok := statePb.(*BeaconState)
require.Equal(t, true, ok)

oldPayments, err := st.BuilderPendingPayments()
require.NoError(t, err)
require.NoError(t, st.RotateBuilderPendingPayments())

newPayments, err := st.BuilderPendingPayments()
require.NoError(t, err)
slotsPerEpoch := int(params.BeaconConfig().SlotsPerEpoch)
for i := range slotsPerEpoch {
require.DeepEqual(t, oldPayments[slotsPerEpoch+i], newPayments[i])
}

for i := slotsPerEpoch; i < 2*slotsPerEpoch; i++ {
payment := newPayments[i]
require.Equal(t, primitives.Gwei(0), payment.Weight)
require.Equal(t, 20, len(payment.Withdrawal.FeeRecipient))
require.Equal(t, primitives.Gwei(0), payment.Withdrawal.Amount)
require.Equal(t, primitives.BuilderIndex(0), payment.Withdrawal.BuilderIndex)
}
}

func TestRotateBuilderPendingPayments_UnsupportedVersion(t *testing.T) {
st := &BeaconState{version: version.Electra}
err := st.RotateBuilderPendingPayments()
require.ErrorContains(t, "RotateBuilderPendingPayments", err)
}

func TestAppendBuilderPendingWithdrawal_CopyOnWrite(t *testing.T) {
wd := &ethpb.BuilderPendingWithdrawal{
FeeRecipient: make([]byte, 20),
Amount: 1,
BuilderIndex: 2,
}
statePb, err := InitializeFromProtoUnsafeGloas(&ethpb.BeaconStateGloas{
BuilderPendingWithdrawals: []*ethpb.BuilderPendingWithdrawal{wd},
})
require.NoError(t, err)

st, ok := statePb.(*BeaconState)
require.Equal(t, true, ok)

copied := st.Copy().(*BeaconState)
require.Equal(t, uint(2), st.sharedFieldReferences[types.BuilderPendingWithdrawals].Refs())

appended := &ethpb.BuilderPendingWithdrawal{
FeeRecipient: make([]byte, 20),
Amount: 4,
BuilderIndex: 5,
}
require.NoError(t, copied.AppendBuilderPendingWithdrawals([]*ethpb.BuilderPendingWithdrawal{appended}))

require.Equal(t, 1, len(st.builderPendingWithdrawals))
require.Equal(t, 2, len(copied.builderPendingWithdrawals))
require.DeepEqual(t, wd, copied.builderPendingWithdrawals[0])
require.DeepEqual(t, appended, copied.builderPendingWithdrawals[1])
require.DeepEqual(t, wd, st.builderPendingWithdrawals[0])
require.Equal(t, uint(1), st.sharedFieldReferences[types.BuilderPendingWithdrawals].Refs())
require.Equal(t, uint(1), copied.sharedFieldReferences[types.BuilderPendingWithdrawals].Refs())
}

func TestAppendBuilderPendingWithdrawals(t *testing.T) {
st := &BeaconState{
version: version.Gloas,
dirtyFields: make(map[types.FieldIndex]bool),
sharedFieldReferences: map[types.FieldIndex]*stateutil.Reference{
types.BuilderPendingWithdrawals: stateutil.NewRef(1),
},
builderPendingWithdrawals: make([]*ethpb.BuilderPendingWithdrawal, 0),
}

first := &ethpb.BuilderPendingWithdrawal{Amount: 1}
second := &ethpb.BuilderPendingWithdrawal{Amount: 2}
require.NoError(t, st.AppendBuilderPendingWithdrawals([]*ethpb.BuilderPendingWithdrawal{first, second}))

require.Equal(t, 2, len(st.builderPendingWithdrawals))
require.DeepEqual(t, first, st.builderPendingWithdrawals[0])
require.DeepEqual(t, second, st.builderPendingWithdrawals[1])
require.Equal(t, true, st.dirtyFields[types.BuilderPendingWithdrawals])
}

func TestAppendBuilderPendingWithdrawals_UnsupportedVersion(t *testing.T) {
st := &BeaconState{version: version.Electra}
err := st.AppendBuilderPendingWithdrawals([]*ethpb.BuilderPendingWithdrawal{{}})
require.ErrorContains(t, "AppendBuilderPendingWithdrawals", err)
}

func TestUpdateExecutionPayloadAvailabilityAtIndex_SetAndClear(t *testing.T) {
st := newGloasStateWithAvailability(t, make([]byte, 1024))

otherIdx := uint64(8) // byte 1, bit 0
idx := uint64(9) // byte 1, bit 1

require.NoError(t, st.UpdateExecutionPayloadAvailabilityAtIndex(otherIdx, 1))
require.Equal(t, byte(0x01), st.executionPayloadAvailability[1])

require.NoError(t, st.UpdateExecutionPayloadAvailabilityAtIndex(idx, 1))
require.Equal(t, byte(0x03), st.executionPayloadAvailability[1])

require.NoError(t, st.UpdateExecutionPayloadAvailabilityAtIndex(idx, 0))
require.Equal(t, byte(0x01), st.executionPayloadAvailability[1])
}

func TestUpdateExecutionPayloadAvailabilityAtIndex_OutOfRange(t *testing.T) {
st := newGloasStateWithAvailability(t, make([]byte, 1024))

idx := uint64(len(st.executionPayloadAvailability)) * 8
err := st.UpdateExecutionPayloadAvailabilityAtIndex(idx, 1)
require.ErrorContains(t, "out of range", err)

for _, b := range st.executionPayloadAvailability {
if b != 0 {
t.Fatalf("execution payload availability mutated on error")
}
}
}

func newGloasStateWithAvailability(t *testing.T, availability []byte) *BeaconState {
t.Helper()

st, err := InitializeFromProtoUnsafeGloas(&ethpb.BeaconStateGloas{
ExecutionPayloadAvailability: availability,
})
require.NoError(t, err)

return st.(*BeaconState)
}
@@ -16,7 +16,7 @@ go_library(
"//beacon-chain/state:__subpackages__",
],
deps = [
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/fieldparams:go_default_library",

@@ -3,7 +3,7 @@ package testing
import (
"testing"

"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
@@ -31,7 +31,7 @@ func GeneratePendingDeposit(t *testing.T, key common.SecretKey, amount uint64, w
Amount: dm.Amount,
Signature: sig.Marshal(),
}
valid, err := helpers.IsValidDepositSignature(depositData)
valid, err := blocks.IsValidDepositSignature(depositData)
require.NoError(t, err)
require.Equal(t, true, valid)
return &ethpb.PendingDeposit{
@@ -4,6 +4,9 @@ import (
"context"
"time"

"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/verification"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/crypto/bls"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing"
@@ -53,6 +56,32 @@ func (s *Service) verifierRoutine() {
}
}

// A routine that runs in the background to perform batch
// KZG verifications by draining the channel and processing all pending requests.
func (s *Service) kzgVerifierRoutine() {
for {
kzgBatch := make([]*kzgVerifier, 0, 1)
select {
case <-s.ctx.Done():
return
case kzg := <-s.kzgChan:
kzgBatch = append(kzgBatch, kzg)
}
for {
select {
case <-s.ctx.Done():
return
case kzg := <-s.kzgChan:
kzgBatch = append(kzgBatch, kzg)
continue
default:
verifyKzgBatch(kzgBatch)
}
break
}
}
}

func (s *Service) validateWithBatchVerifier(ctx context.Context, message string, set *bls.SignatureBatch) (pubsub.ValidationResult, error) {
_, span := trace.StartSpan(ctx, "sync.validateWithBatchVerifier")
defer span.End()
@@ -125,3 +154,71 @@ func performBatchAggregation(aggSet *bls.SignatureBatch) (*bls.SignatureBatch, e
}
return aggSet, nil
}

func (s *Service) validateWithKzgBatchVerifier(ctx context.Context, dataColumns []blocks.RODataColumn) (pubsub.ValidationResult, error) {
_, span := trace.StartSpan(ctx, "sync.validateWithKzgBatchVerifier")
defer span.End()

timeout := time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second

resChan := make(chan error, 1)
verificationSet := &kzgVerifier{dataColumns: dataColumns, resChan: resChan}
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()

select {
case s.kzgChan <- verificationSet:
case <-ctx.Done():
return pubsub.ValidationIgnore, ctx.Err()
}

select {
case <-ctx.Done():
return pubsub.ValidationIgnore, ctx.Err() // parent context canceled, give up
case err := <-resChan:
if err != nil {
log.WithError(err).Trace("Could not perform batch verification")
tracing.AnnotateError(span, err)
return s.validateUnbatchedColumnsKzg(ctx, dataColumns)
}
}
return pubsub.ValidationAccept, nil
}

func (s *Service) validateUnbatchedColumnsKzg(ctx context.Context, columns []blocks.RODataColumn) (pubsub.ValidationResult, error) {
_, span := trace.StartSpan(ctx, "sync.validateUnbatchedColumnsKzg")
defer span.End()
start := time.Now()
if err := peerdas.VerifyDataColumnsSidecarKZGProofs(columns); err != nil {
err = errors.Wrap(err, "could not verify")
tracing.AnnotateError(span, err)
return pubsub.ValidationReject, err
}
verification.DataColumnBatchKZGVerificationHistogram.WithLabelValues("fallback").Observe(float64(time.Since(start).Milliseconds()))
return pubsub.ValidationAccept, nil
}

func verifyKzgBatch(kzgBatch []*kzgVerifier) {
if len(kzgBatch) == 0 {
return
}

allDataColumns := make([]blocks.RODataColumn, 0, len(kzgBatch))
for _, kzgVerifier := range kzgBatch {
allDataColumns = append(allDataColumns, kzgVerifier.dataColumns...)
}

var verificationErr error
start := time.Now()
err := peerdas.VerifyDataColumnsSidecarKZGProofs(allDataColumns)
if err != nil {
verificationErr = errors.Wrap(err, "batch KZG verification failed")
} else {
verification.DataColumnBatchKZGVerificationHistogram.WithLabelValues("batch").Observe(float64(time.Since(start).Milliseconds()))
}

// Send the same result to all verifiers in the batch
for _, verifier := range kzgBatch {
verifier.resChan <- verificationErr
}
}
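The kzgVerifierRoutine above uses a common Go pattern: block on the channel for the first request, then drain whatever else is already queued with a non-blocking select before processing the whole batch at once. A self-contained sketch of that collect-then-process loop with a toy work item (hypothetical types, not the Prysm service):

```go
package main

import (
	"context"
	"fmt"
)

type job struct {
	id  int
	res chan error
}

// batchLoop blocks for one job, drains any queued jobs, then handles them together.
func batchLoop(ctx context.Context, jobs <-chan job) {
	for {
		batch := make([]job, 0, 1)
		select {
		case <-ctx.Done():
			return
		case j := <-jobs:
			batch = append(batch, j)
		}
	drain:
		for {
			select {
			case j := <-jobs:
				batch = append(batch, j)
			default:
				break drain
			}
		}
		fmt.Printf("processing batch of %d\n", len(batch))
		// Fan the (shared) result back out to every caller in the batch.
		for _, j := range batch {
			j.res <- nil
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	jobs := make(chan job, 16)
	go batchLoop(ctx, jobs)

	done := make(chan error, 1)
	jobs <- job{id: 1, res: done}
	<-done
}
```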
@@ -1,14 +1,339 @@
package sync

import (
"context"
"sync"
"testing"
"time"

"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/assert"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/testing/util"
pubsub "github.com/libp2p/go-libp2p-pubsub"
)

func TestValidateWithKzgBatchVerifier(t *testing.T) {
err := kzg.Start()
require.NoError(t, err)

tests := []struct {
name string
dataColumns []blocks.RODataColumn
expectedResult pubsub.ValidationResult
expectError bool
}{
{
name: "single valid data column",
dataColumns: createValidTestDataColumns(t, 1),
expectedResult: pubsub.ValidationAccept,
expectError: false,
},
{
name: "multiple valid data columns",
dataColumns: createValidTestDataColumns(t, 3),
expectedResult: pubsub.ValidationAccept,
expectError: false,
},
{
name: "single invalid data column",
dataColumns: createInvalidTestDataColumns(t, 1),
expectedResult: pubsub.ValidationReject,
expectError: true,
},
{
name: "empty data column slice",
dataColumns: []blocks.RODataColumn{},
expectedResult: pubsub.ValidationAccept,
expectError: false,
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ctx := t.Context()

service := &Service{
ctx: ctx,
kzgChan: make(chan *kzgVerifier, 100),
}
go service.kzgVerifierRoutine()

result, err := service.validateWithKzgBatchVerifier(ctx, tt.dataColumns)

require.Equal(t, tt.expectedResult, result)
if tt.expectError {
assert.NotNil(t, err)
} else {
assert.NoError(t, err)
}
})
}
}

func TestVerifierRoutine(t *testing.T) {
err := kzg.Start()
require.NoError(t, err)

t.Run("processes single request", func(t *testing.T) {
ctx := t.Context()

service := &Service{
ctx: ctx,
kzgChan: make(chan *kzgVerifier, 100),
}
go service.kzgVerifierRoutine()

dataColumns := createValidTestDataColumns(t, 1)
resChan := make(chan error, 1)
service.kzgChan <- &kzgVerifier{dataColumns: dataColumns, resChan: resChan}

select {
case err := <-resChan:
require.NoError(t, err)
case <-time.After(time.Second):
t.Fatal("timeout waiting for verification result")
}
})

t.Run("batches multiple requests", func(t *testing.T) {
ctx := t.Context()

service := &Service{
ctx: ctx,
kzgChan: make(chan *kzgVerifier, 100),
}
go service.kzgVerifierRoutine()

const numRequests = 5
resChans := make([]chan error, numRequests)

for i := range numRequests {
dataColumns := createValidTestDataColumns(t, 1)
resChan := make(chan error, 1)
resChans[i] = resChan
service.kzgChan <- &kzgVerifier{dataColumns: dataColumns, resChan: resChan}
}

for i := range numRequests {
select {
case err := <-resChans[i]:
require.NoError(t, err)
case <-time.After(time.Second):
t.Fatalf("timeout waiting for verification result %d", i)
}
}
})

t.Run("context cancellation stops routine", func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())

service := &Service{
ctx: ctx,
kzgChan: make(chan *kzgVerifier, 100),
}

routineDone := make(chan struct{})
go func() {
service.kzgVerifierRoutine()
close(routineDone)
}()

cancel()

select {
case <-routineDone:
case <-time.After(time.Second):
t.Fatal("timeout waiting for routine to exit")
}
})
}

func TestVerifyKzgBatch(t *testing.T) {
err := kzg.Start()
require.NoError(t, err)

t.Run("all valid data columns succeed", func(t *testing.T) {
dataColumns := createValidTestDataColumns(t, 3)
resChan := make(chan error, 1)
kzgVerifiers := []*kzgVerifier{{dataColumns: dataColumns, resChan: resChan}}

verifyKzgBatch(kzgVerifiers)

select {
case err := <-resChan:
require.NoError(t, err)
case <-time.After(time.Second):
t.Fatal("timeout waiting for batch verification")
}
})

t.Run("invalid proofs fail entire batch", func(t *testing.T) {
validColumns := createValidTestDataColumns(t, 1)
invalidColumns := createInvalidTestDataColumns(t, 1)
allColumns := append(validColumns, invalidColumns...)

resChan := make(chan error, 1)
kzgVerifiers := []*kzgVerifier{{dataColumns: allColumns, resChan: resChan}}

verifyKzgBatch(kzgVerifiers)

select {
case err := <-resChan:
assert.NotNil(t, err)
case <-time.After(time.Second):
t.Fatal("timeout waiting for batch verification")
}
})

t.Run("empty batch handling", func(t *testing.T) {
verifyKzgBatch([]*kzgVerifier{})
})
}

func TestKzgBatchVerifierConcurrency(t *testing.T) {
err := kzg.Start()
require.NoError(t, err)

ctx := t.Context()

service := &Service{
ctx: ctx,
kzgChan: make(chan *kzgVerifier, 100),
}
go service.kzgVerifierRoutine()

const numGoroutines = 10
const numRequestsPerGoroutine = 5

var wg sync.WaitGroup
wg.Add(numGoroutines)

// Multiple goroutines sending verification requests simultaneously
for i := range numGoroutines {
go func(goroutineID int) {
defer wg.Done()

for range numRequestsPerGoroutine {
dataColumns := createValidTestDataColumns(t, 1)
result, err := service.validateWithKzgBatchVerifier(ctx, dataColumns)
require.Equal(t, pubsub.ValidationAccept, result)
require.NoError(t, err)
}
}(i)
}

wg.Wait()
}

func TestKzgBatchVerifierFallback(t *testing.T) {
err := kzg.Start()
require.NoError(t, err)

t.Run("fallback handles mixed valid/invalid batch correctly", func(t *testing.T) {
ctx := t.Context()

service := &Service{
ctx: ctx,
kzgChan: make(chan *kzgVerifier, 100),
}
go service.kzgVerifierRoutine()

validColumns := createValidTestDataColumns(t, 1)
invalidColumns := createInvalidTestDataColumns(t, 1)

result, err := service.validateWithKzgBatchVerifier(ctx, validColumns)
require.Equal(t, pubsub.ValidationAccept, result)
require.NoError(t, err)

result, err = service.validateWithKzgBatchVerifier(ctx, invalidColumns)
require.Equal(t, pubsub.ValidationReject, result)
assert.NotNil(t, err)
})

t.Run("empty data columns fallback", func(t *testing.T) {
ctx := t.Context()

service := &Service{
ctx: ctx,
kzgChan: make(chan *kzgVerifier, 100),
}
go service.kzgVerifierRoutine()

result, err := service.validateWithKzgBatchVerifier(ctx, []blocks.RODataColumn{})
require.Equal(t, pubsub.ValidationAccept, result)
require.NoError(t, err)
})
}

func TestValidateWithKzgBatchVerifier_DeadlockOnTimeout(t *testing.T) {
err := kzg.Start()
require.NoError(t, err)

params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.SecondsPerSlot = 0
params.OverrideBeaconConfig(cfg)

ctx, cancel := context.WithCancel(t.Context())
defer cancel()

service := &Service{
ctx: ctx,
kzgChan: make(chan *kzgVerifier),
}
go service.kzgVerifierRoutine()

result, err := service.validateWithKzgBatchVerifier(context.Background(), nil)
require.Equal(t, pubsub.ValidationIgnore, result)
require.ErrorIs(t, err, context.DeadlineExceeded)

done := make(chan struct{})
go func() {
_, _ = service.validateWithKzgBatchVerifier(context.Background(), nil)
close(done)
}()

select {
case <-done:
case <-time.After(500 * time.Millisecond):
t.Fatal("validateWithKzgBatchVerifier blocked")
}
}

func TestValidateWithKzgBatchVerifier_ContextCanceledBeforeSend(t *testing.T) {
cancelledCtx, cancel := context.WithCancel(t.Context())
cancel()

service := &Service{
ctx: context.Background(),
kzgChan: make(chan *kzgVerifier),
}

done := make(chan struct{})
go func() {
result, err := service.validateWithKzgBatchVerifier(cancelledCtx, nil)
require.Equal(t, pubsub.ValidationIgnore, result)
require.ErrorIs(t, err, context.Canceled)
close(done)
}()

select {
case <-done:
case <-time.After(500 * time.Millisecond):
t.Fatal("validateWithKzgBatchVerifier did not return after context cancellation")
}

select {
case <-service.kzgChan:
t.Fatal("verificationSet was sent to kzgChan despite canceled context")
default:
}
}

func createValidTestDataColumns(t *testing.T, count int) []blocks.RODataColumn {
_, roSidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, count)
if len(roSidecars) >= count {
@@ -168,6 +168,7 @@ type Service struct {
syncContributionBitsOverlapLock sync.RWMutex
syncContributionBitsOverlapCache *lru.Cache
signatureChan chan *signatureVerifier
kzgChan chan *kzgVerifier
clockWaiter startup.ClockWaiter
initialSyncComplete chan struct{}
verifierWaiter *verification.InitializerWaiter
@@ -208,7 +209,10 @@ func NewService(ctx context.Context, opts ...Option) *Service {
}
// Initialize signature channel with configured limit
r.signatureChan = make(chan *signatureVerifier, r.cfg.batchVerifierLimit)

// Initialize KZG channel with fixed buffer size of 100.
// This buffer size is designed to handle burst traffic of data column gossip messages:
// - Data columns arrive less frequently than attestations (default batchVerifierLimit=1000)
r.kzgChan = make(chan *kzgVerifier, 100)
// Correctly remove it from our seen pending block map.
// The eviction method always assumes that the mutex is held.
r.slotToPendingBlocks.OnEvicted(func(s string, i any) {
@@ -261,6 +265,7 @@ func (s *Service) Start() {
s.newColumnsVerifier = newDataColumnsVerifierFromInitializer(v)

go s.verifierRoutine()
go s.kzgVerifierRoutine()
go s.startDiscoveryAndSubscriptions()
go s.processDataColumnLogs()
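On the producer side, validateWithKzgBatchVerifier hands work to the buffered kzgChan through a select guarded by a context timeout, so a full buffer degrades into a timed-out ignore instead of blocking gossip validation. A minimal sketch of that send pattern (hypothetical names, not the Prysm service):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

type request struct{ res chan error }

// submit tries to enqueue a request, giving up when the context expires.
func submit(ctx context.Context, queue chan<- request, r request) error {
	select {
	case queue <- r:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	queue := make(chan request, 1) // small buffer to demonstrate back-pressure
	queue <- request{}             // fill the buffer so the next send must wait

	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	err := submit(ctx, queue, request{res: make(chan error, 1)})
	fmt.Println(errors.Is(err, context.DeadlineExceeded)) // true: buffer full, send timed out
}
```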
@@ -144,9 +144,12 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs
}

// [REJECT] The sidecar's column data is valid as verified by `verify_data_column_sidecar_kzg_proofs(sidecar)`.
if err := verifier.SidecarKzgProofVerified(); err != nil {
return pubsub.ValidationReject, err
validationResult, err := s.validateWithKzgBatchVerifier(ctx, roDataColumns)
if validationResult != pubsub.ValidationAccept {
return validationResult, err
}
// Mark KZG verification as satisfied since we did it via batch verifier
verifier.SatisfyRequirement(verification.RequireSidecarKzgProofVerified)

// [IGNORE] The sidecar is the first sidecar for the tuple `(block_header.slot, block_header.proposer_index, sidecar.index)`
// with valid header signature, sidecar inclusion proof, and kzg proof.

@@ -71,7 +71,10 @@ func TestValidateDataColumn(t *testing.T) {
ctx: ctx,
newColumnsVerifier: newDataColumnsVerifier,
seenDataColumnCache: newSlotAwareCache(seenDataColumnSize),
kzgChan: make(chan *kzgVerifier, 100),
}
// Start the KZG verifier routine for batch verification
go service.kzgVerifierRoutine()

// Encode a `beaconBlock` message instead of expected.
buf := new(bytes.Buffer)
@@ -1,3 +0,0 @@
### Ignored

- Add `NewBeaconStateGloas()`.
@@ -1,3 +0,0 @@
### Added

- Flag `--log.vmodule` to set per-package verbosity levels for logging.
@@ -1,2 +0,0 @@
### Fixed
- Fixed a typo: AggregrateDueBPS -> AggregateDueBPS.
@@ -1,3 +0,0 @@
### Fixed

- Prevent authentication bypass on direct `/v2/validator/*` endpoints by enforcing auth checks for non-public routes.
7
changelog/james-prysm_grpc-fallback.md
Normal file
@@ -0,0 +1,7 @@
### Changed

- gRPC fallback now matches the REST API implementation and also checks that only synced nodes are connected to.

### Removed

- gRPC resolver for load balancing; the new implementation matches the REST API's, so the resolver is removed and fallback is handled the same way for consistency.
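The changelog entry above only states the intended behavior. The sketch below is a hypothetical illustration of "connect to the first synced host" fallback logic, with an invented isSynced probe standing in for whatever sync check the implementation actually uses; it is not the Prysm gRPC provider API:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// isSynced is a placeholder for a per-host syncing probe (assumption, not a real Prysm call).
type isSynced func(ctx context.Context, host string) bool

// pickSyncedHost returns the index of the first host that reports itself as synced.
func pickSyncedHost(ctx context.Context, hosts []string, synced isSynced) (int, error) {
	for i, h := range hosts {
		if synced(ctx, h) {
			return i, nil
		}
	}
	return -1, errors.New("no synced gRPC host available")
}

func main() {
	hosts := []string{"127.0.0.1:4000", "127.0.0.1:4001"}
	probe := func(_ context.Context, host string) bool { return host == "127.0.0.1:4001" }
	idx, err := pickSyncedHost(context.Background(), hosts, probe)
	fmt.Println(idx, err) // 1 <nil>
}
```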
@@ -1,3 +0,0 @@
### Ignored

- delayed head evaluator check to mid epoch for e2e.
@@ -1,3 +0,0 @@
### Ignored

- updating phase 0 constants for ethspecify
@@ -1,3 +0,0 @@
### Removed

- Batching of KZG verification for incoming via gossip data column sidecars
@@ -1,2 +0,0 @@
### Removed
- `--disable-get-blobs-v2` flag from help.
@@ -1,2 +0,0 @@
### Added
- Close opened file in data_column.go
@@ -1,3 +0,0 @@
### Added

- Added missing beacon config in fulu so that the presets don't go missing in /eth/v1/config/spec beacon api.
@@ -1,2 +0,0 @@
### Added
- Implement modified proposer slashing for gloas
@@ -1,3 +0,0 @@
### Added

- Add slot processing with execution payload availability updates
@@ -1,2 +0,0 @@
### Added
- add pending payments processing and quorum threshold, plus spectests and state hooks (rotate/append)
@@ -1,2 +0,0 @@
### Ignored
- Move withdrawal/consolidation request processing into `beacon-chain/core/requests` to avoid fork/package dependency cycles.
@@ -1,2 +0,0 @@
### Ignored
- Move deposit helpers from `beacon-chain/core/blocks` to `beacon-chain/core/helpers` (refactor only).
@@ -42,7 +42,6 @@ go_test(
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_urfave_cli_v2//:go_default_library",
"@org_uber_go_mock//gomock:go_default_library",
],
@@ -364,8 +364,7 @@ var (
}
// DisableGetBlobsV2 disables the engine_getBlobsV2 usage.
DisableGetBlobsV2 = &cli.BoolFlag{
Name: "disable-get-blobs-v2",
Usage: "Disables the engine_getBlobsV2 usage.",
Hidden: true,
Name: "disable-get-blobs-v2",
Usage: "Disables the engine_getBlobsV2 usage.",
}
)
@@ -8,7 +8,6 @@ import (
"os"
"path/filepath"
runtimeDebug "runtime/debug"
"strings"

"github.com/OffchainLabs/prysm/v7/beacon-chain/builder"
"github.com/OffchainLabs/prysm/v7/beacon-chain/node"
@@ -114,7 +113,6 @@ var appFlags = []cli.Flag{
cmd.PubsubQueueSize,
cmd.DataDirFlag,
cmd.VerbosityFlag,
cmd.LogVModuleFlag,
cmd.EnableTracingFlag,
cmd.TracingProcessNameFlag,
cmd.TracingEndpointFlag,
@@ -173,22 +171,13 @@ func before(ctx *cli.Context) error {
return errors.Wrap(err, "failed to load flags from config file")
}

// determine default log verbosity
// determine log verbosity
verbosity := ctx.String(cmd.VerbosityFlag.Name)
verbosityLevel, err := logrus.ParseLevel(verbosity)
if err != nil {
return errors.Wrap(err, "failed to parse log verbosity")
}

// determine per package verbosity. if not set, maxLevel will be 0.
vmoduleInput := strings.Join(ctx.StringSlice(cmd.LogVModuleFlag.Name), ",")
vmodule, maxLevel, err := cmd.ParseVModule(vmoduleInput)
if err != nil {
return errors.Wrap(err, "failed to parse log vmodule")
}

// set the global logging level to allow for the highest verbosity requested
logs.SetLoggingLevel(max(verbosityLevel, maxLevel))
logs.SetLoggingLevel(verbosityLevel)

format := ctx.String(cmd.LogFormat.Name)
switch format {
@@ -202,13 +191,11 @@ func before(ctx *cli.Context) error {
formatter.FullTimestamp = true
formatter.ForceFormatting = true
formatter.ForceColors = true
formatter.VModule = vmodule
formatter.BaseVerbosity = verbosityLevel

logrus.AddHook(&logs.WriterHook{
Formatter: formatter,
Writer: os.Stderr,
AllowedLevels: logrus.AllLevels[:max(verbosityLevel, maxLevel)+1],
AllowedLevels: logrus.AllLevels[:verbosityLevel+1],
})
case "fluentd":
f := joonix.NewFormatter()
@@ -232,7 +219,7 @@ func before(ctx *cli.Context) error {

logFileName := ctx.String(cmd.LogFileName.Name)
if logFileName != "" {
if err := logs.ConfigurePersistentLogging(logFileName, format, verbosityLevel, vmodule); err != nil {
if err := logs.ConfigurePersistentLogging(logFileName, format, verbosityLevel); err != nil {
log.WithError(err).Error("Failed to configuring logging to disk.")
}
}
@@ -169,6 +169,7 @@ var appHelpFlagGroups = []flagGroup{
flags.ExecutionJWTSecretFlag,
flags.JwtId,
flags.InteropMockEth1DataVotesFlag,
flags.DisableGetBlobsV2,
},
},
{ // Flags relevant to configuring beacon chain monitoring.
@@ -200,7 +201,6 @@ var appHelpFlagGroups = []flagGroup{
cmd.LogFileName,
cmd.VerbosityFlag,
flags.DisableEphemeralLogFile,
cmd.LogVModuleFlag,
},
},
{ // Feature flags.
@@ -86,7 +86,7 @@ func main() {

logFileName := ctx.String(cmd.LogFileName.Name)
if logFileName != "" {
if err := logs.ConfigurePersistentLogging(logFileName, format, level, map[string]logrus.Level{}); err != nil {
if err := logs.ConfigurePersistentLogging(logFileName, format, level); err != nil {
log.WithError(err).Error("Failed to configuring logging to disk.")
}
}
@@ -36,11 +36,6 @@ var (
Usage: "Logging verbosity. (trace, debug, info, warn, error, fatal, panic)",
Value: "info",
}
// LogVModuleFlag defines per-package log levels.
LogVModuleFlag = &cli.StringSliceFlag{
Name: "log.vmodule",
Usage: "Per-package log verbosity. packagePath=level entries separated by commas.",
}
// DataDirFlag defines a path on disk where Prysm databases are stored.
DataDirFlag = &cli.StringFlag{
Name: "datadir",
@@ -6,7 +6,6 @@ import (
"testing"

"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
)

@@ -238,41 +237,3 @@ func TestValidateNoArgs_SubcommandFlags(t *testing.T) {
err = app.Run([]string{"command", "bar", "subComm2", "--barfoo100", "garbage", "subComm4"})
require.ErrorContains(t, "unrecognized argument: garbage", err)
}

func TestParseVModule(t *testing.T) {
t.Run("valid", func(t *testing.T) {
input := "beacon-chain/p2p=error, beacon-chain/light-client=trace"
parsed, maxL, err := ParseVModule(input)
require.NoError(t, err)
require.Equal(t, logrus.ErrorLevel, parsed["beacon-chain/p2p"])
require.Equal(t, logrus.TraceLevel, parsed["beacon-chain/light-client"])
require.Equal(t, logrus.TraceLevel, maxL)
})

t.Run("empty", func(t *testing.T) {
parsed, maxL, err := ParseVModule(" ")
require.NoError(t, err)
require.IsNil(t, parsed)
require.Equal(t, logrus.PanicLevel, maxL)
})

t.Run("invalid", func(t *testing.T) {
tests := []string{
"beacon-chain/p2p",
"beacon-chain/p2p=",
"beacon-chain/p2p/=error",
"=info",
"beacon-chain/*=info",
"beacon-chain/p2p=meow",
"/beacon-chain/p2p=info",
"beacon-chain/../p2p=info",
"beacon-chain/p2p=info,",
"beacon-chain/p2p=info,beacon-chain/p2p=debug",
}
for _, input := range tests {
_, maxL, err := ParseVModule(input)
require.ErrorContains(t, "invalid", err)
require.Equal(t, logrus.PanicLevel, maxL)
}
})
}
@@ -4,7 +4,6 @@ import (
"bufio"
"fmt"
"os"
"path"
"runtime"
"strings"

@@ -103,63 +102,3 @@ func ExpandSingleEndpointIfFile(ctx *cli.Context, flag *cli.StringFlag) error {
}
return nil
}

// ParseVModule parses a comma-separated list of package=level entries.
func ParseVModule(input string) (map[string]logrus.Level, logrus.Level, error) {
var l logrus.Level
trimmed := strings.TrimSpace(input)
if trimmed == "" {
return nil, l, nil
}

parts := strings.Split(trimmed, ",")
result := make(map[string]logrus.Level, len(parts))
for _, raw := range parts {
entry := strings.TrimSpace(raw)
if entry == "" {
return nil, l, fmt.Errorf("invalid vmodule entry: empty segment")
}
kv := strings.Split(entry, "=")
if len(kv) != 2 {
return nil, l, fmt.Errorf("invalid vmodule entry %q: expected path=level", entry)
}
pkg := strings.TrimSpace(kv[0])
levelText := strings.TrimSpace(kv[1])
if pkg == "" {
return nil, l, fmt.Errorf("invalid vmodule entry %q: empty package path", entry)
}
if levelText == "" {
return nil, l, fmt.Errorf("invalid vmodule entry %q: empty level", entry)
}
if strings.Contains(pkg, "*") {
return nil, l, fmt.Errorf("invalid vmodule package path %q: wildcards are not allowed", pkg)
}
if strings.ContainsAny(pkg, " \t\n") {
return nil, l, fmt.Errorf("invalid vmodule package path %q: whitespace is not allowed", pkg)
}
if strings.HasPrefix(pkg, "/") {
return nil, l, fmt.Errorf("invalid vmodule package path %q: leading slash is not allowed", pkg)
}
cleaned := path.Clean(pkg)
if cleaned != pkg || pkg == "." || pkg == ".." {
return nil, l, fmt.Errorf("invalid vmodule package path %q: must be an absolute package path. (trailing slash not allowed)", pkg)
}
if _, exists := result[pkg]; exists {
return nil, l, fmt.Errorf("invalid vmodule package path %q: duplicate entry", pkg)
}
level, err := logrus.ParseLevel(levelText)
if err != nil {
return nil, l, fmt.Errorf("invalid vmodule level %q: must be one of panic, fatal, error, warn, info, debug, trace", levelText)
}
result[pkg] = level
}

maxLevel := logrus.PanicLevel
for _, lvl := range result {
if lvl > maxLevel {
maxLevel = lvl
}
}

return result, maxLevel, nil
}
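ParseVModule above (removed on this branch) accepts comma-separated `packagePath=level` entries and also reports the highest requested level. A stripped-down standalone sketch of the same parsing idea, without the path validation (illustrative only, not the removed Prysm function):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/sirupsen/logrus"
)

// parseVModule is a simplified "path=level,path=level" parser.
func parseVModule(input string) (map[string]logrus.Level, logrus.Level, error) {
	out := map[string]logrus.Level{}
	maxLevel := logrus.PanicLevel
	for _, entry := range strings.Split(input, ",") {
		kv := strings.SplitN(strings.TrimSpace(entry), "=", 2)
		if len(kv) != 2 {
			return nil, maxLevel, fmt.Errorf("invalid entry %q", entry)
		}
		lvl, err := logrus.ParseLevel(strings.TrimSpace(kv[1]))
		if err != nil {
			return nil, maxLevel, err
		}
		out[strings.TrimSpace(kv[0])] = lvl
		if lvl > maxLevel {
			maxLevel = lvl
		}
	}
	return out, maxLevel, nil
}

func main() {
	m, maxL, err := parseVModule("beacon-chain/p2p=error, beacon-chain/sync=trace")
	fmt.Println(m["beacon-chain/sync"], maxL, err) // trace trace <nil>
}
```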
@@ -9,7 +9,6 @@ import (
"os"
"path/filepath"
runtimeDebug "runtime/debug"
"strings"

"github.com/OffchainLabs/prysm/v7/cmd"
accountcommands "github.com/OffchainLabs/prysm/v7/cmd/validator/accounts"
@@ -96,7 +95,6 @@ var appFlags = []cli.Flag{
cmd.MinimalConfigFlag,
cmd.E2EConfigFlag,
cmd.VerbosityFlag,
cmd.LogVModuleFlag,
cmd.DataDirFlag,
cmd.ClearDB,
cmd.ForceClearDB,
@@ -152,22 +150,13 @@ func main() {
return err
}

// determine default log verbosity
// determine log verbosity
verbosity := ctx.String(cmd.VerbosityFlag.Name)
verbosityLevel, err := logrus.ParseLevel(verbosity)
if err != nil {
return errors.Wrap(err, "failed to parse log verbosity")
}

// determine per package verbosity. if not set, maxLevel will be 0.
vmoduleInput := strings.Join(ctx.StringSlice(cmd.LogVModuleFlag.Name), ",")
vmodule, maxLevel, err := cmd.ParseVModule(vmoduleInput)
if err != nil {
return errors.Wrap(err, "failed to parse log vmodule")
}

// set the global logging level to allow for the highest verbosity requested
logs.SetLoggingLevel(max(maxLevel, verbosityLevel))
logs.SetLoggingLevel(verbosityLevel)

logFileName := ctx.String(cmd.LogFileName.Name)

@@ -183,13 +172,11 @@ func main() {
formatter.FullTimestamp = true
formatter.ForceFormatting = true
formatter.ForceColors = true
formatter.VModule = vmodule
formatter.BaseVerbosity = verbosityLevel

logrus.AddHook(&logs.WriterHook{
Formatter: formatter,
Writer: os.Stderr,
AllowedLevels: logrus.AllLevels[:max(verbosityLevel, maxLevel)+1],
AllowedLevels: logrus.AllLevels[:verbosityLevel+1],
})
case "fluentd":
f := joonix.NewFormatter()
@@ -210,7 +197,7 @@ func main() {
}

if logFileName != "" {
if err := logs.ConfigurePersistentLogging(logFileName, format, verbosityLevel, vmodule); err != nil {
if err := logs.ConfigurePersistentLogging(logFileName, format, verbosityLevel); err != nil {
log.WithError(err).Error("Failed to configuring logging to disk.")
}
}

@@ -76,7 +76,6 @@ var appHelpFlagGroups = []flagGroup{
cmd.AcceptTosFlag,
cmd.ApiTimeoutFlag,
flags.DisableEphemeralLogFile,
cmd.LogVModuleFlag,
},
},
{
@@ -88,7 +88,7 @@ type BeaconChainConfig struct {
IntervalsPerSlot uint64 `yaml:"INTERVALS_PER_SLOT"` // IntervalsPerSlot defines the number of fork choice intervals in a slot defined in the fork choice spec.
ProposerReorgCutoffBPS primitives.BP `yaml:"PROPOSER_REORG_CUTOFF_BPS" spec:"true"` // ProposerReorgCutoffBPS defines the proposer reorg deadline in basis points of the slot.
AttestationDueBPS primitives.BP `yaml:"ATTESTATION_DUE_BPS" spec:"true"` // AttestationDueBPS defines the attestation due time in basis points of the slot.
AggregateDueBPS primitives.BP `yaml:"AGGREGATE_DUE_BPS" spec:"true"` // AggregateDueBPS defines the aggregate due time in basis points of the slot.
AggregrateDueBPS primitives.BP `yaml:"AGGREGRATE_DUE_BPS" spec:"true"` // AggregrateDueBPS defines the aggregate due time in basis points of the slot.
SyncMessageDueBPS primitives.BP `yaml:"SYNC_MESSAGE_DUE_BPS" spec:"true"` // SyncMessageDueBPS defines the sync message due time in basis points of the slot.
ContributionDueBPS primitives.BP `yaml:"CONTRIBUTION_DUE_BPS" spec:"true"` // ContributionDueBPS defines the contribution due time in basis points of the slot.

@@ -293,10 +293,6 @@ type BeaconChainConfig struct {
ValidatorCustodyRequirement uint64 `yaml:"VALIDATOR_CUSTODY_REQUIREMENT" spec:"true"` // ValidatorCustodyRequirement is the minimum number of custody groups an honest node with validators attached custodies and serves samples from
BalancePerAdditionalCustodyGroup uint64 `yaml:"BALANCE_PER_ADDITIONAL_CUSTODY_GROUP" spec:"true"` // BalancePerAdditionalCustodyGroup is the balance increment corresponding to one additional group to custody.

// Values introduced in Gloas upgrade
BuilderPaymentThresholdNumerator uint64 `yaml:"BUILDER_PAYMENT_THRESHOLD_NUMERATOR" spec:"true"` // BuilderPaymentThresholdNumerator is the numerator for builder payment quorum threshold calculation.
BuilderPaymentThresholdDenominator uint64 `yaml:"BUILDER_PAYMENT_THRESHOLD_DENOMINATOR" spec:"true"` // BuilderPaymentThresholdDenominator is the denominator for builder payment quorum threshold calculation.

// Networking Specific Parameters
MaxPayloadSize uint64 `yaml:"MAX_PAYLOAD_SIZE" spec:"true"` // MAX_PAYLOAD_SIZE is the maximum allowed size of uncompressed payload in gossip messages and rpc chunks.
AttestationSubnetCount uint64 `yaml:"ATTESTATION_SUBNET_COUNT" spec:"true"` // AttestationSubnetCount is the number of attestation subnets used in the gossipsub protocol.

@@ -243,7 +243,7 @@ func ConfigToYaml(cfg *BeaconChainConfig) []byte {
fmt.Sprintf("MAX_BLOBS_PER_BLOCK: %d", cfg.DeprecatedMaxBlobsPerBlock),
fmt.Sprintf("PROPOSER_REORG_CUTOFF_BPS: %d", cfg.ProposerReorgCutoffBPS),
fmt.Sprintf("ATTESTATION_DUE_BPS: %d", cfg.AttestationDueBPS),
fmt.Sprintf("AGGREGATE_DUE_BPS: %d", cfg.AggregateDueBPS),
fmt.Sprintf("AGGREGRATE_DUE_BPS: %d", cfg.AggregrateDueBPS),
fmt.Sprintf("SYNC_MESSAGE_DUE_BPS: %d", cfg.SyncMessageDueBPS),
fmt.Sprintf("CONTRIBUTION_DUE_BPS: %d", cfg.ContributionDueBPS),
}

@@ -24,6 +24,7 @@ import (
// These are variables that we don't use in Prysm. (i.e. future hardfork, light client... etc)
// IMPORTANT: Use one field per line and sort these alphabetically to reduce conflicts.
var placeholderFields = []string{
"AGGREGATE_DUE_BPS",
"AGGREGATE_DUE_BPS_GLOAS",
"ATTESTATION_DEADLINE",
"ATTESTATION_DUE_BPS_GLOAS",
@@ -98,7 +99,7 @@ func assertEqualConfigs(t *testing.T, name string, fields []string, expected, ac
assert.Equal(t, expected.HysteresisDownwardMultiplier, actual.HysteresisDownwardMultiplier, "%s: HysteresisDownwardMultiplier", name)
assert.Equal(t, expected.HysteresisUpwardMultiplier, actual.HysteresisUpwardMultiplier, "%s: HysteresisUpwardMultiplier", name)
assert.Equal(t, expected.AttestationDueBPS, actual.AttestationDueBPS, "%s: AttestationDueBPS", name)
assert.Equal(t, expected.AggregateDueBPS, actual.AggregateDueBPS, "%s: AggregateDueBPS", name)
assert.Equal(t, expected.AggregrateDueBPS, actual.AggregrateDueBPS, "%s: AggregrateDueBPS", name)
assert.Equal(t, expected.ContributionDueBPS, actual.ContributionDueBPS, "%s: ContributionDueBPS", name)
assert.Equal(t, expected.ProposerReorgCutoffBPS, actual.ProposerReorgCutoffBPS, "%s: ProposerReorgCutoffBPS", name)
assert.Equal(t, expected.SyncMessageDueBPS, actual.SyncMessageDueBPS, "%s: SyncMessageDueBPS", name)

@@ -123,7 +123,7 @@ var mainnetBeaconConfig = &BeaconChainConfig{
// Time-based protocol parameters.
ProposerReorgCutoffBPS: primitives.BP(1667),
AttestationDueBPS: primitives.BP(3333),
AggregateDueBPS: primitives.BP(6667),
AggregrateDueBPS: primitives.BP(6667),
SyncMessageDueBPS: primitives.BP(3333),
ContributionDueBPS: primitives.BP(6667),

@@ -331,11 +331,6 @@ var mainnetBeaconConfig = &BeaconChainConfig{
MinEpochsForDataColumnSidecarsRequest: 4096,
ValidatorCustodyRequirement: 8,
BalancePerAdditionalCustodyGroup: 32_000_000_000,

// Values related to gloas
BuilderPaymentThresholdNumerator: 6,
BuilderPaymentThresholdDenominator: 10,

// Values related to networking parameters.
MaxPayloadSize: 10 * 1 << 20, // 10 MiB
AttestationSubnetCount: 64,
@@ -30,7 +30,7 @@ func addLogWriter(w io.Writer) {
}

// ConfigurePersistentLogging adds a log-to-file writer. File content is identical to stdout.
func ConfigurePersistentLogging(logFileName string, format string, lvl logrus.Level, vmodule map[string]logrus.Level) error {
func ConfigurePersistentLogging(logFileName string, format string, lvl logrus.Level) error {
logrus.WithField("logFileName", logFileName).Info("Logs will be made persistent")
if err := file.MkdirAll(filepath.Dir(logFileName)); err != nil {
return err
@@ -47,13 +47,6 @@ func ConfigurePersistentLogging(logFileName string, format string, lvl logrus.Le
return nil
}

maxVmoduleLevel := logrus.PanicLevel
for _, level := range vmodule {
if level > maxVmoduleLevel {
maxVmoduleLevel = level
}
}

// Create formatter and writer hook
formatter := new(prefixed.TextFormatter)
formatter.TimestampFormat = "2006-01-02 15:04:05.00"
@@ -61,13 +54,11 @@ func ConfigurePersistentLogging(logFileName string, format string, lvl logrus.Le
// If persistent log files are written - we disable the log messages coloring because
// the colors are ANSI codes and seen as gibberish in the log files.
formatter.DisableColors = true
formatter.BaseVerbosity = lvl
formatter.VModule = vmodule

logrus.AddHook(&WriterHook{
Formatter: formatter,
Writer: f,
AllowedLevels: logrus.AllLevels[:max(lvl, maxVmoduleLevel)+1],
AllowedLevels: logrus.AllLevels[:lvl+1],
})

logrus.Info("File logging initialized")
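ConfigurePersistentLogging above wires a formatter plus a WriterHook into logrus so file output mirrors stdout at the allowed levels. The sketch below shows the underlying logrus hook mechanism with a minimal custom hook writing to any io.Writer; it is a generic illustration using the standard logrus Hook interface, not Prysm's WriterHook:

```go
package main

import (
	"io"
	"os"

	"github.com/sirupsen/logrus"
)

// writerHook sends formatted entries for the configured levels to an extra writer.
type writerHook struct {
	w      io.Writer
	levels []logrus.Level
}

func (h *writerHook) Levels() []logrus.Level { return h.levels }

func (h *writerHook) Fire(e *logrus.Entry) error {
	line, err := e.String() // formats the entry with its logger's formatter
	if err != nil {
		return err
	}
	_, err = io.WriteString(h.w, line)
	return err
}

func main() {
	f, err := os.CreateTemp("", "example-*.log")
	if err != nil {
		logrus.Fatal(err)
	}
	defer f.Close()

	// Mirror info and more severe levels to the file.
	logrus.AddHook(&writerHook{w: f, levels: logrus.AllLevels[:logrus.InfoLevel+1]})
	logrus.Info("written to the default output and to the log file")
}
```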
@@ -28,20 +28,20 @@ func TestMaskCredentialsLogging(t *testing.T) {
}
}

func TestConfigurePersistentLogging(t *testing.T) {
func TestConfigurePersistantLogging(t *testing.T) {
testParentDir := t.TempDir()

// 1. Test creation of file in an existing parent directory
logFileName := "test.log"
existingDirectory := "test-1-existing-testing-dir"

err := ConfigurePersistentLogging(fmt.Sprintf("%s/%s/%s", testParentDir, existingDirectory, logFileName), "text", logrus.InfoLevel, map[string]logrus.Level{})
err := ConfigurePersistentLogging(fmt.Sprintf("%s/%s/%s", testParentDir, existingDirectory, logFileName), "text", logrus.InfoLevel)
require.NoError(t, err)

// 2. Test creation of file along with parent directory
nonExistingDirectory := "test-2-non-existing-testing-dir"

err = ConfigurePersistentLogging(fmt.Sprintf("%s/%s/%s", testParentDir, nonExistingDirectory, logFileName), "text", logrus.InfoLevel, map[string]logrus.Level{})
err = ConfigurePersistentLogging(fmt.Sprintf("%s/%s/%s", testParentDir, nonExistingDirectory, logFileName), "text", logrus.InfoLevel)
require.NoError(t, err)

// 3. Test creation of file in an existing parent directory with a non-existing sub-directory
@@ -52,7 +52,7 @@ func TestConfigurePersistentLogging(t *testing.T) {
return
}

err = ConfigurePersistentLogging(fmt.Sprintf("%s/%s/%s/%s", testParentDir, existingDirectory, nonExistingSubDirectory, logFileName), "text", logrus.InfoLevel, map[string]logrus.Level{})
err = ConfigurePersistentLogging(fmt.Sprintf("%s/%s/%s/%s", testParentDir, existingDirectory, nonExistingSubDirectory, logFileName), "text", logrus.InfoLevel)
require.NoError(t, err)

//4. Create log file in a directory without 700 permissions
@@ -120,13 +120,6 @@ type TextFormatter struct {

// Timestamp format to use for display when a full timestamp is printed.
TimestampFormat string

// VModule overrides the allowed log level for exact package paths.
// When using VModule, you should also set BaseVerbosity to the default verbosity level provided by the user.
VModule map[string]logrus.Level

// BaseVerbosity is the default verbosity level for all packages.
BaseVerbosity logrus.Level
}

func getCompiledColor(main string, fallback string) func(string) string {
@@ -175,11 +168,6 @@ func (f *TextFormatter) SetColorScheme(colorScheme *ColorScheme) {
}

func (f *TextFormatter) Format(entry *logrus.Entry) ([]byte, error) {
// check for vmodule compatibility
if !f.shouldLog(entry) {
return []byte{}, nil
}

var b *bytes.Buffer
keys := make([]string, 0, len(entry.Data))
for k := range entry.Data {
@@ -248,39 +236,6 @@ func (f *TextFormatter) Format(entry *logrus.Entry) ([]byte, error) {
return b.Bytes(), nil
}

func (f *TextFormatter) shouldLog(entry *logrus.Entry) bool {
if len(f.VModule) == 0 {
return true
}
packagePath, ok := entry.Data["package"]
if !ok {
return entry.Level <= f.BaseVerbosity
}
packageName, ok := packagePath.(string)
if !ok {
return entry.Level <= f.BaseVerbosity
}

packageLevel := f.bestMatchLevel(packageName)

return entry.Level <= packageLevel
}

// bestMatchLevel returns the level of the most specific path that matches package name.
func (f *TextFormatter) bestMatchLevel(pkg string) logrus.Level {
bestLen := 0
bestLevel := f.BaseVerbosity
for k, v := range f.VModule {
if k == pkg || strings.HasPrefix(pkg, k+"/") {
if len(k) > bestLen {
bestLen = len(k)
bestLevel = v
}
}
}
return bestLevel
}

func (f *TextFormatter) printColored(b *bytes.Buffer, entry *logrus.Entry, keys []string, timestampFormat string, colorScheme *compiledColorScheme) (err error) {
var levelColor func(string) string
var levelText string

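The shouldLog/bestMatchLevel pair above gates every entry on a per-package verbosity map: the longest configured path that equals the entry's package, or is an ancestor of it, decides the level, and BaseVerbosity applies when nothing matches. The standalone sketch below illustrates only that matching rule; the package paths and levels in it are made-up examples, not values taken from this diff.

package main

import (
	"fmt"
	"strings"

	"github.com/sirupsen/logrus"
)

// bestMatch mirrors the longest-prefix rule shown above: the most specific
// configured path wins, and the base verbosity is the fallback.
func bestMatch(vmodule map[string]logrus.Level, base logrus.Level, pkg string) logrus.Level {
	bestLen := 0
	best := base
	for prefix, level := range vmodule {
		if prefix == pkg || strings.HasPrefix(pkg, prefix+"/") {
			if len(prefix) > bestLen {
				bestLen = len(prefix)
				best = level
			}
		}
	}
	return best
}

func main() {
	// Hypothetical configuration: noisier logging for the sync package tree only.
	vmodule := map[string]logrus.Level{
		"beacon-chain/sync":          logrus.DebugLevel,
		"beacon-chain/sync/backfill": logrus.TraceLevel,
	}
	fmt.Println(bestMatch(vmodule, logrus.InfoLevel, "beacon-chain/sync/backfill/pool")) // trace
	fmt.Println(bestMatch(vmodule, logrus.InfoLevel, "beacon-chain/sync"))               // debug
	fmt.Println(bestMatch(vmodule, logrus.InfoLevel, "validator/client"))                // info
}

An entry is then kept only when entry.Level <= packageLevel holds, i.e. when the entry is at least as severe as the resolved verbosity allows, which is the comparison performed in shouldLog above.
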
@@ -13,6 +13,10 @@ specrefs:

exceptions:
presets:
# Not implemented
- CELLS_PER_EXT_BLOB#fulu
- UPDATE_TIMEOUT#altair

# Not implemented: gloas (future fork)
- BUILDER_PENDING_WITHDRAWALS_LIMIT#gloas
- MAX_PAYLOAD_ATTESTATIONS#gloas
@@ -61,6 +65,14 @@ exceptions:
- PTC_TIMELINESS_INDEX#gloas

configs:
# Not implemented (placeholders)
- AGGREGATE_DUE_BPS#phase0
- ATTESTATION_DUE_BPS#phase0
- CONTRIBUTION_DUE_BPS#altair
- PROPOSER_REORG_CUTOFF_BPS#phase0
- SLOT_DURATION_MS#phase0
- SYNC_MESSAGE_DUE_BPS#altair

# Not implemented: gloas (future fork)
- AGGREGATE_DUE_BPS_GLOAS#gloas
- ATTESTATION_DUE_BPS_GLOAS#gloas
@@ -411,8 +423,10 @@ exceptions:
- update_proposer_boost_root#phase0

presets:
- CELLS_PER_EXT_BLOB#fulu
- BUILDER_PENDING_WITHDRAWALS_LIMIT#gloas
- BUILDER_REGISTRY_LIMIT#gloas
- MAX_BUILDERS_PER_WITHDRAWALS_SWEEP#gloas
- MAX_PAYLOAD_ATTESTATIONS#gloas
- PTC_SIZE#gloas
- UPDATE_TIMEOUT#altair

@@ -1,8 +1,5 @@
- name: AGGREGATE_DUE_BPS
sources:
- file: config/params/config.go
search: AggregateDueBPS\s+primitives.BP
regex: true
sources: []
spec: |
<spec config_var="AGGREGATE_DUE_BPS" fork="phase0" hash="7eaa811a">
AGGREGATE_DUE_BPS: uint64 = 6667
@@ -29,10 +26,7 @@
</spec>

- name: ATTESTATION_DUE_BPS
sources:
- file: config/params/config.go
search: AttestationDueBPS\s+primitives.BP
regex: true
sources: []
spec: |
<spec config_var="ATTESTATION_DUE_BPS" fork="phase0" hash="929dd1c9">
ATTESTATION_DUE_BPS: uint64 = 3333
@@ -178,10 +172,7 @@
</spec>

- name: CONTRIBUTION_DUE_BPS
sources:
- file: config/params/config.go
search: ContributionDueBPS\s+primitives.BP
regex: true
sources: []
spec: |
<spec config_var="CONTRIBUTION_DUE_BPS" fork="altair" hash="a3808203">
CONTRIBUTION_DUE_BPS: uint64 = 6667
@@ -558,10 +549,7 @@
</spec>

- name: PROPOSER_REORG_CUTOFF_BPS
sources:
- file: config/params/config.go
search: ProposerReorgCutoffBPS\s+primitives.BP
regex: true
sources: []
spec: |
<spec config_var="PROPOSER_REORG_CUTOFF_BPS" fork="phase0" hash="a487cc43">
PROPOSER_REORG_CUTOFF_BPS: uint64 = 1667
@@ -648,10 +636,7 @@
</spec>

- name: SLOT_DURATION_MS
sources:
- file: config/params/config.go
search: SlotDurationMilliseconds\s+uint64
regex: true
sources: []
spec: |
<spec config_var="SLOT_DURATION_MS" fork="phase0" hash="b6d4ba6d">
SLOT_DURATION_MS: uint64 = 12000
@@ -668,10 +653,7 @@
</spec>

- name: SYNC_MESSAGE_DUE_BPS
sources:
- file: config/params/config.go
search: SyncMessageDueBPS\s+primitives.BP
regex: true
sources: []
spec: |
<spec config_var="SYNC_MESSAGE_DUE_BPS" fork="altair" hash="791b29d8">
SYNC_MESSAGE_DUE_BPS: uint64 = 3333

@@ -4230,7 +4230,7 @@
sources:
- file: beacon-chain/core/electra/deposits.go
search: func IsValidDepositSignature(
- file: beacon-chain/core/helpers/deposit.go
- file: beacon-chain/core/blocks/deposit.go
search: func IsValidDepositSignature(
spec: |
<spec fn="is_valid_deposit_signature" fork="electra" hash="7347b754">
@@ -5388,7 +5388,7 @@

- name: process_consolidation_request
sources:
- file: beacon-chain/core/requests/consolidations.go
- file: beacon-chain/core/electra/consolidations.go
search: func ProcessConsolidationRequests(
spec: |
<spec fn="process_consolidation_request" fork="electra" hash="4da4b0fb">
@@ -6993,7 +6993,7 @@

- name: process_withdrawal_request
sources:
- file: beacon-chain/core/requests/withdrawals.go
- file: beacon-chain/core/electra/withdrawals.go
search: func ProcessWithdrawalRequests(
spec: |
<spec fn="process_withdrawal_request" fork="electra" hash="c21a0a53">

@@ -19,10 +19,7 @@
</spec>

- name: CELLS_PER_EXT_BLOB
sources:
- file: beacon-chain/rpc/eth/config/handlers.go
search: data\["CELLS_PER_EXT_BLOB"\]
regex: true
sources: []
spec: |
<spec preset_var="CELLS_PER_EXT_BLOB" fork="fulu" hash="217075f3">
CELLS_PER_EXT_BLOB = 128
@@ -688,16 +685,6 @@
TARGET_COMMITTEE_SIZE: uint64 = 128
</spec>

- name: UPDATE_TIMEOUT
sources:
- file: beacon-chain/rpc/eth/config/handlers.go
search: data\["UPDATE_TIMEOUT"\]
regex: true
spec: |
<spec preset_var="UPDATE_TIMEOUT" fork="altair" hash="e633d57e">
UPDATE_TIMEOUT = 8192
</spec>

- name: VALIDATOR_REGISTRY_LIMIT
sources:
- file: config/fieldparams/mainnet.go

@@ -296,7 +296,7 @@ func (r *testRunner) waitForMatchingHead(ctx context.Context, timeout time.Durat
}

func (r *testRunner) testCheckpointSync(ctx context.Context, g *errgroup.Group, i int, conns []*grpc.ClientConn, bnAPI, enr, minerEnr string) error {
matchTimeout := 5 * time.Minute
matchTimeout := 3 * time.Minute
ethNode := eth1.NewNode(i, minerEnr)
g.Go(func() error {
return ethNode.Start(ctx)

@@ -10,7 +10,6 @@ import (
"net/http"
"time"

"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
e2e "github.com/OffchainLabs/prysm/v7/testing/endtoend/params"
@@ -129,42 +128,8 @@ func finishedSyncing(_ *e2etypes.EvaluationContext, conns ...*grpc.ClientConn) e
return nil
}

// waitForMidEpoch waits until we're at least halfway into the current epoch
// and 3/4 into the current slot. This prevents race conditions at epoch
// boundaries and slot boundaries where different nodes may report different heads.
func waitForMidEpoch(conn *grpc.ClientConn) error {
beaconClient := eth.NewBeaconChainClient(conn)
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
secondsPerSlot := params.BeaconConfig().SecondsPerSlot
midEpochSlot := slotsPerEpoch / 2

for {
chainHead, err := beaconClient.GetChainHead(context.Background(), &emptypb.Empty{})
if err != nil {
return err
}
slotInEpoch := chainHead.HeadSlot % slotsPerEpoch
// If we're at least halfway into the epoch, we're safe
if slotInEpoch >= midEpochSlot {
// Wait 3/4 into the slot to ensure block propagation
time.Sleep(time.Duration(secondsPerSlot) * time.Second * 3 / 4)
return nil
}
// Wait for the remaining slots until mid-epoch
slotsToWait := midEpochSlot - slotInEpoch
time.Sleep(time.Duration(slotsToWait) * time.Duration(secondsPerSlot) * time.Second)
}
}

func allNodesHaveSameHead(_ *e2etypes.EvaluationContext, conns ...*grpc.ClientConn) error {
// Wait until we're at least halfway into the epoch to avoid race conditions
// at epoch boundaries where nodes may report different epochs.
if err := waitForMidEpoch(conns[0]); err != nil {
return errors.Wrap(err, "failed waiting for mid-epoch")
}

headEpochs := make([]primitives.Epoch, len(conns))
headBlockRoots := make([][]byte, len(conns))
justifiedRoots := make([][]byte, len(conns))
prevJustifiedRoots := make([][]byte, len(conns))
finalizedRoots := make([][]byte, len(conns))
@@ -181,7 +146,6 @@ func allNodesHaveSameHead(_ *e2etypes.EvaluationContext, conns ...*grpc.ClientCo
return errors.Wrapf(err, "connection number=%d", conIdx)
}
headEpochs[conIdx] = chainHead.HeadEpoch
headBlockRoots[conIdx] = chainHead.HeadBlockRoot
justifiedRoots[conIdx] = chainHead.JustifiedBlockRoot
prevJustifiedRoots[conIdx] = chainHead.PreviousJustifiedBlockRoot
finalizedRoots[conIdx] = chainHead.FinalizedBlockRoot
@@ -202,14 +166,6 @@ func allNodesHaveSameHead(_ *e2etypes.EvaluationContext, conns ...*grpc.ClientCo
headEpochs[i],
)
}
if !bytes.Equal(headBlockRoots[0], headBlockRoots[i]) {
return fmt.Errorf(
"received conflicting head block roots on node %d, expected %#x, received %#x",
i,
headBlockRoots[0],
headBlockRoots[i],
)
}
if !bytes.Equal(justifiedRoots[0], justifiedRoots[i]) {
return fmt.Errorf(
"received conflicting justified block roots on node %d, expected %#x, received %#x: %s and %s",

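The waitForMidEpoch helper shown above sizes its sleeps from the chain head: before the middle of the epoch it sleeps the remaining slots, and once past mid-epoch it sleeps three quarters of a slot so the head block can propagate before the evaluators compare heads. The sketch below reproduces one iteration of that arithmetic; the 32-slots-per-epoch and 12-second-slot figures are mainnet-style example values, not read from this diff.

package main

import (
	"fmt"
	"time"
)

// midEpochWait reproduces one iteration of the timing rule from waitForMidEpoch:
// before mid-epoch, wait the remaining slots; at or past mid-epoch, wait 3/4 of
// a slot so the head block has time to propagate.
func midEpochWait(headSlot, slotsPerEpoch, secondsPerSlot uint64) time.Duration {
	slotInEpoch := headSlot % slotsPerEpoch
	midEpochSlot := slotsPerEpoch / 2
	if slotInEpoch >= midEpochSlot {
		return time.Duration(secondsPerSlot) * time.Second * 3 / 4
	}
	return time.Duration(midEpochSlot-slotInEpoch) * time.Duration(secondsPerSlot) * time.Second
}

func main() {
	// Illustrative mainnet-style parameters: 32 slots per epoch, 12-second slots.
	fmt.Println(midEpochWait(5, 32, 12))  // slot 5 of the epoch: wait 11 slots = 2m12s
	fmt.Println(midEpochWait(20, 32, 12)) // past mid-epoch: wait 9s (3/4 of a slot)
}

With those numbers, a head at slot 5 of an epoch waits 11 slots (2m12s) before re-checking the chain head, while a head at slot 20 waits only 9s before the comparison proceeds.
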
@@ -200,10 +200,7 @@ go_test(
"fulu__sanity__blocks_test.go",
"fulu__sanity__slots_test.go",
"fulu__ssz_static__ssz_static_test.go",
"gloas__epoch_processing__process_builder_pending_payments_test.go",
"gloas__operations__execution_payload_header_test.go",
"gloas__operations__proposer_slashing_test.go",
"gloas__sanity__slots_test.go",
"gloas__ssz_static__ssz_static_test.go",
"phase0__epoch_processing__effective_balance_updates_test.go",
"phase0__epoch_processing__epoch_processing_test.go",
@@ -282,9 +279,7 @@ go_test(
"//testing/spectest/shared/fulu/rewards:go_default_library",
"//testing/spectest/shared/fulu/sanity:go_default_library",
"//testing/spectest/shared/fulu/ssz_static:go_default_library",
"//testing/spectest/shared/gloas/epoch_processing:go_default_library",
"//testing/spectest/shared/gloas/operations:go_default_library",
"//testing/spectest/shared/gloas/sanity:go_default_library",
"//testing/spectest/shared/gloas/ssz_static:go_default_library",
"//testing/spectest/shared/phase0/epoch_processing:go_default_library",
"//testing/spectest/shared/phase0/finality:go_default_library",

@@ -1,11 +0,0 @@
package mainnet

import (
"testing"

"github.com/OffchainLabs/prysm/v7/testing/spectest/shared/gloas/epoch_processing"
)

func TestMainnet_Gloas_EpochProcessing_ProcessBuilderPendingPayments(t *testing.T) {
epoch_processing.RunBuilderPendingPaymentsTests(t, "mainnet")
}
@@ -1,11 +0,0 @@
package mainnet

import (
"testing"

"github.com/OffchainLabs/prysm/v7/testing/spectest/shared/gloas/operations"
)

func TestMainnet_Gloas_Operations_ProposerSlashing(t *testing.T) {
operations.RunProposerSlashingTest(t, "mainnet")
}
@@ -1,11 +0,0 @@
package mainnet

import (
"testing"

"github.com/OffchainLabs/prysm/v7/testing/spectest/shared/gloas/sanity"
)

func TestMainnet_Gloas_Sanity_Slots(t *testing.T) {
sanity.RunSlotProcessingTests(t, "mainnet")
}
@@ -206,10 +206,7 @@ go_test(
"fulu__sanity__blocks_test.go",
"fulu__sanity__slots_test.go",
"fulu__ssz_static__ssz_static_test.go",
"gloas__epoch_processing__process_builder_pending_payments_test.go",
"gloas__operations__execution_payload_bid_test.go",
"gloas__operations__proposer_slashing_test.go",
"gloas__sanity__slots_test.go",
"gloas__ssz_static__ssz_static_test.go",
"phase0__epoch_processing__effective_balance_updates_test.go",
"phase0__epoch_processing__epoch_processing_test.go",
@@ -292,9 +289,7 @@ go_test(
"//testing/spectest/shared/fulu/rewards:go_default_library",
"//testing/spectest/shared/fulu/sanity:go_default_library",
"//testing/spectest/shared/fulu/ssz_static:go_default_library",
"//testing/spectest/shared/gloas/epoch_processing:go_default_library",
"//testing/spectest/shared/gloas/operations:go_default_library",
"//testing/spectest/shared/gloas/sanity:go_default_library",
"//testing/spectest/shared/gloas/ssz_static:go_default_library",
"//testing/spectest/shared/phase0/epoch_processing:go_default_library",
"//testing/spectest/shared/phase0/finality:go_default_library",

@@ -1,11 +0,0 @@
package minimal

import (
"testing"

"github.com/OffchainLabs/prysm/v7/testing/spectest/shared/gloas/epoch_processing"
)

func TestMinimal_Gloas_EpochProcessing_ProcessBuilderPendingPayments(t *testing.T) {
epoch_processing.RunBuilderPendingPaymentsTests(t, "minimal")
}
@@ -1,11 +0,0 @@
package minimal

import (
"testing"

"github.com/OffchainLabs/prysm/v7/testing/spectest/shared/gloas/operations"
)

func TestMinimal_Gloas_Operations_ProposerSlashing(t *testing.T) {
operations.RunProposerSlashingTest(t, "minimal")
}
@@ -1,11 +0,0 @@
package minimal

import (
"testing"

"github.com/OffchainLabs/prysm/v7/testing/spectest/shared/gloas/sanity"
)

func TestMinimal_Gloas_Sanity_Slots(t *testing.T) {
sanity.RunSlotProcessingTests(t, "minimal")
}
@@ -29,7 +29,6 @@ go_library(
"//beacon-chain/core/electra:go_default_library",
"//beacon-chain/core/gloas:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/requests:go_default_library",
"//beacon-chain/core/validators:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/params:go_default_library",

@@ -5,7 +5,7 @@ import (
"path"
"testing"

"github.com/OffchainLabs/prysm/v7/beacon-chain/core/requests"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/electra"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/testing/require"
@@ -32,7 +32,7 @@ func RunConsolidationTest(t *testing.T, config string, fork string, block blockW
if err != nil {
return nil, err
}
return nil, requests.ProcessConsolidationRequests(ctx, s, er.Consolidations)
return s, electra.ProcessConsolidationRequests(ctx, s, er.Consolidations)
})
})
}

Some files were not shown because too many files have changed in this diff