Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-24 04:38:07 -05:00)

Compare commits: gRPC-fallb...blocker-fo (15 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 62edbb7196 |  |
|  | d407387193 |  |
|  | 6a9bcbab3a |  |
|  | 1b2524b0fe |  |
|  | 67d11607ea |  |
|  | 4ff15fa988 |  |
|  | b37b3e26ba |  |
|  | 4ff9eb067c |  |
|  | d440aafacf |  |
|  | e336f7fe59 |  |
|  | fde63a217a |  |
|  | 055c6eb784 |  |
|  | d33389fb54 |  |
|  | ce72deb3c0 |  |
|  | ec48e6340c |  |
@@ -3,16 +3,13 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
    name = "go_default_library",
    srcs = [
        "grpc_connection_provider.go",
        "grpcutils.go",
        "log.go",
        "mock_grpc_provider.go",
        "parameters.go",
    ],
    importpath = "github.com/OffchainLabs/prysm/v7/api/grpc",
    visibility = ["//visibility:public"],
    deps = [
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@org_golang_google_grpc//:go_default_library",
        "@org_golang_google_grpc//metadata:go_default_library",
@@ -21,17 +18,12 @@ go_library(

go_test(
    name = "go_default_test",
    srcs = [
        "grpc_connection_provider_test.go",
        "grpcutils_test.go",
    ],
    srcs = ["grpcutils_test.go"],
    embed = [":go_default_library"],
    deps = [
        "//testing/assert:go_default_library",
        "//testing/require:go_default_library",
        "@com_github_sirupsen_logrus//hooks/test:go_default_library",
        "@org_golang_google_grpc//:go_default_library",
        "@org_golang_google_grpc//credentials/insecure:go_default_library",
        "@org_golang_google_grpc//metadata:go_default_library",
    ],
)
@@ -1,172 +0,0 @@
package grpc

import (
    "context"
    "strings"
    "sync"
    "sync/atomic"

    pkgErrors "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
    "google.golang.org/grpc"
)

// GrpcConnectionProvider manages gRPC connections for failover support.
// It allows switching between different beacon node endpoints when the current one becomes unavailable.
// Only one connection is maintained at a time - when switching hosts, the old connection is closed.
type GrpcConnectionProvider interface {
    // CurrentConn returns the currently active gRPC connection.
    // The connection is created lazily on first call.
    // Returns nil if the provider has been closed.
    CurrentConn() *grpc.ClientConn
    // CurrentHost returns the address of the currently active endpoint.
    CurrentHost() string
    // Hosts returns all configured endpoint addresses.
    Hosts() []string
    // SetHost switches to the endpoint at the given index.
    // The new connection is created lazily on next CurrentConn() call.
    SetHost(index int) error
    // Close closes the current connection.
    Close() error
}

type grpcConnectionProvider struct {
    // Immutable after construction - no lock needed for reads
    endpoints []string
    ctx       context.Context
    dialOpts  []grpc.DialOption

    // Current connection state (protected by mu)
    currentIndex uint64
    conn         *grpc.ClientConn

    mu     sync.Mutex
    closed atomic.Bool
}

// NewGrpcConnectionProvider creates a new connection provider that manages gRPC connections.
// The endpoint parameter can be a comma-separated list of addresses (e.g., "host1:4000,host2:4000").
// Only one connection is maintained at a time, created lazily on first use.
func NewGrpcConnectionProvider(
    ctx context.Context,
    endpoint string,
    dialOpts []grpc.DialOption,
) (GrpcConnectionProvider, error) {
    endpoints := parseEndpoints(endpoint)
    if len(endpoints) == 0 {
        return nil, pkgErrors.New("no gRPC endpoints provided")
    }

    log.WithFields(logrus.Fields{
        "endpoints": endpoints,
        "count":     len(endpoints),
    }).Info("Initialized gRPC connection provider with multiple endpoints")

    return &grpcConnectionProvider{
        endpoints: endpoints,
        ctx:       ctx,
        dialOpts:  dialOpts,
    }, nil
}

// parseEndpoints splits a comma-separated endpoint string into individual endpoints.
func parseEndpoints(endpoint string) []string {
    if endpoint == "" {
        return nil
    }
    var endpoints []string
    for p := range strings.SplitSeq(endpoint, ",") {
        if p = strings.TrimSpace(p); p != "" {
            endpoints = append(endpoints, p)
        }
    }
    return endpoints
}

func (p *grpcConnectionProvider) CurrentConn() *grpc.ClientConn {
    if p.closed.Load() {
        return nil
    }

    p.mu.Lock()
    defer p.mu.Unlock()

    // Return existing connection if available
    if p.conn != nil {
        return p.conn
    }

    // Create connection lazily
    ep := p.endpoints[p.currentIndex]
    conn, err := grpc.DialContext(p.ctx, ep, p.dialOpts...)
    if err != nil {
        log.WithError(err).WithField("endpoint", ep).Error("Failed to create gRPC connection")
        return nil
    }

    p.conn = conn
    log.WithField("endpoint", ep).Debug("Created gRPC connection")
    return conn
}

func (p *grpcConnectionProvider) CurrentHost() string {
    p.mu.Lock()
    defer p.mu.Unlock()
    return p.endpoints[p.currentIndex]
}

func (p *grpcConnectionProvider) Hosts() []string {
    // Return a copy to maintain immutability
    hosts := make([]string, len(p.endpoints))
    copy(hosts, p.endpoints)
    return hosts
}

func (p *grpcConnectionProvider) SetHost(index int) error {
    if index < 0 || index >= len(p.endpoints) {
        return pkgErrors.Errorf("invalid host index %d, must be between 0 and %d", index, len(p.endpoints)-1)
    }

    p.mu.Lock()
    defer p.mu.Unlock()

    if uint64(index) == p.currentIndex {
        return nil // Already on this host
    }

    oldHost := p.endpoints[p.currentIndex]

    // Close existing connection if any
    if p.conn != nil {
        if err := p.conn.Close(); err != nil {
            log.WithError(err).WithField("endpoint", oldHost).Debug("Failed to close previous connection")
        }
        p.conn = nil
    }

    p.currentIndex = uint64(index)

    log.WithFields(logrus.Fields{
        "previousHost": oldHost,
        "newHost":      p.endpoints[index],
    }).Debug("Switched gRPC endpoint")
    return nil
}

func (p *grpcConnectionProvider) Close() error {
    p.mu.Lock()
    defer p.mu.Unlock()

    if p.closed.Load() {
        return nil
    }
    p.closed.Store(true)

    if p.conn != nil {
        if err := p.conn.Close(); err != nil {
            return pkgErrors.Wrapf(err, "failed to close connection to %s", p.endpoints[p.currentIndex])
        }
        p.conn = nil
    }
    return nil
}
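Illustrative usage sketch (not part of the diff): one way a caller could walk the endpoints of the provider removed above until a connection is obtained. The endpoint addresses and the insecure credentials are assumptions made only for this example.

package main

import (
    "context"
    "errors"

    apigrpc "github.com/OffchainLabs/prysm/v7/api/grpc"
    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"
)

// dialWithFallback tries each configured host in order and returns the first
// connection the provider manages to create.
func dialWithFallback(ctx context.Context) (*grpc.ClientConn, error) {
    opts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
    provider, err := apigrpc.NewGrpcConnectionProvider(ctx, "beacon1:4000,beacon2:4000", opts)
    if err != nil {
        return nil, err
    }
    for i := range provider.Hosts() {
        if err := provider.SetHost(i); err != nil {
            return nil, err
        }
        if conn := provider.CurrentConn(); conn != nil {
            return conn, nil
        }
    }
    return nil, errors.New("all configured gRPC endpoints failed")
}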
@@ -1,203 +0,0 @@
package grpc

import (
    "context"
    "net"
    "strings"
    "testing"

    "github.com/OffchainLabs/prysm/v7/testing/assert"
    "github.com/OffchainLabs/prysm/v7/testing/require"
    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"
)

func TestParseEndpoints(t *testing.T) {
    tests := []struct {
        name     string
        input    string
        expected []string
    }{
        {"single endpoint", "localhost:4000", []string{"localhost:4000"}},
        {"multiple endpoints", "host1:4000,host2:4000,host3:4000", []string{"host1:4000", "host2:4000", "host3:4000"}},
        {"endpoints with spaces", "host1:4000, host2:4000 , host3:4000", []string{"host1:4000", "host2:4000", "host3:4000"}},
        {"empty string", "", nil},
        {"only commas", ",,,", nil},
        {"trailing comma", "host1:4000,host2:4000,", []string{"host1:4000", "host2:4000"}},
        {"leading comma", ",host1:4000,host2:4000", []string{"host1:4000", "host2:4000"}},
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            assert.DeepEqual(t, tt.expected, parseEndpoints(tt.input))
        })
    }
}

func TestNewGrpcConnectionProvider_Errors(t *testing.T) {
    t.Run("no endpoints", func(t *testing.T) {
        dialOpts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
        _, err := NewGrpcConnectionProvider(context.Background(), "", dialOpts)
        require.ErrorContains(t, "no gRPC endpoints provided", err)
    })
}

func TestGrpcConnectionProvider_LazyConnection(t *testing.T) {
    // Start only one server but configure provider with two endpoints
    lis, err := net.Listen("tcp", "127.0.0.1:0")
    require.NoError(t, err)
    server := grpc.NewServer()
    go func() { _ = server.Serve(lis) }()
    defer server.Stop()

    validAddr := lis.Addr().String()
    invalidAddr := "127.0.0.1:1" // Port 1 is unlikely to be listening

    // Provider should succeed even though second endpoint is invalid (lazy connections)
    endpoint := validAddr + "," + invalidAddr
    ctx := context.Background()
    dialOpts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
    provider, err := NewGrpcConnectionProvider(ctx, endpoint, dialOpts)
    require.NoError(t, err, "Provider creation should succeed with lazy connections")
    defer func() { _ = provider.Close() }()

    // First endpoint should work
    conn := provider.CurrentConn()
    assert.NotNil(t, conn, "First connection should be created lazily")
}

func TestGrpcConnectionProvider_SingleConnectionModel(t *testing.T) {
    // Create provider with 3 endpoints
    var addrs []string
    var servers []*grpc.Server

    for range 3 {
        lis, err := net.Listen("tcp", "127.0.0.1:0")
        require.NoError(t, err)
        server := grpc.NewServer()
        go func() { _ = server.Serve(lis) }()
        addrs = append(addrs, lis.Addr().String())
        servers = append(servers, server)
    }
    defer func() {
        for _, s := range servers {
            s.Stop()
        }
    }()

    endpoint := strings.Join(addrs, ",")
    ctx := context.Background()
    dialOpts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
    provider, err := NewGrpcConnectionProvider(ctx, endpoint, dialOpts)
    require.NoError(t, err)
    defer func() { _ = provider.Close() }()

    // Access the internal state to verify single connection behavior
    p := provider.(*grpcConnectionProvider)

    // Initially no connection
    p.mu.Lock()
    assert.Equal(t, (*grpc.ClientConn)(nil), p.conn, "Connection should be nil before access")
    p.mu.Unlock()

    // Access connection - should create one
    conn0 := provider.CurrentConn()
    assert.NotNil(t, conn0)

    p.mu.Lock()
    assert.NotNil(t, p.conn, "Connection should be created after CurrentConn()")
    firstConn := p.conn
    p.mu.Unlock()

    // Call CurrentConn again - should return same connection
    conn0Again := provider.CurrentConn()
    assert.Equal(t, conn0, conn0Again, "Should return same connection")

    // Switch to different host - old connection should be closed, new one created lazily
    require.NoError(t, provider.SetHost(1))

    p.mu.Lock()
    assert.Equal(t, (*grpc.ClientConn)(nil), p.conn, "Connection should be nil after SetHost (lazy)")
    p.mu.Unlock()

    // Get new connection
    conn1 := provider.CurrentConn()
    assert.NotNil(t, conn1)
    assert.NotEqual(t, firstConn, conn1, "Should be a different connection after switching hosts")
}

// testProvider creates a provider with n test servers and returns cleanup function.
func testProvider(t *testing.T, n int) (GrpcConnectionProvider, []string, func()) {
    var addrs []string
    var cleanups []func()

    for range n {
        lis, err := net.Listen("tcp", "127.0.0.1:0")
        require.NoError(t, err)
        server := grpc.NewServer()
        go func() { _ = server.Serve(lis) }()
        addrs = append(addrs, lis.Addr().String())
        cleanups = append(cleanups, server.Stop)
    }

    endpoint := strings.Join(addrs, ",")

    ctx := context.Background()
    dialOpts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
    provider, err := NewGrpcConnectionProvider(ctx, endpoint, dialOpts)
    require.NoError(t, err)

    cleanup := func() {
        _ = provider.Close()
        for _, c := range cleanups {
            c()
        }
    }
    return provider, addrs, cleanup
}

func TestGrpcConnectionProvider(t *testing.T) {
    provider, addrs, cleanup := testProvider(t, 3)
    defer cleanup()

    t.Run("initial state", func(t *testing.T) {
        assert.Equal(t, 3, len(provider.Hosts()))
        assert.Equal(t, addrs[0], provider.CurrentHost())
        assert.NotNil(t, provider.CurrentConn())
    })

    t.Run("SetHost", func(t *testing.T) {
        require.NoError(t, provider.SetHost(1))
        assert.Equal(t, addrs[1], provider.CurrentHost())
        assert.NotNil(t, provider.CurrentConn()) // New connection created lazily
        require.NoError(t, provider.SetHost(0))
        assert.Equal(t, addrs[0], provider.CurrentHost())
        require.ErrorContains(t, "invalid host index", provider.SetHost(-1))
        require.ErrorContains(t, "invalid host index", provider.SetHost(3))
    })

    t.Run("SetHost circular", func(t *testing.T) {
        // Test round-robin style switching using SetHost with manual index
        indices := []int{1, 2, 0, 1} // Simulate circular switching
        for i, idx := range indices {
            require.NoError(t, provider.SetHost(idx))
            assert.Equal(t, addrs[idx], provider.CurrentHost(), "iteration %d", i)
        }
    })

    t.Run("Hosts returns copy", func(t *testing.T) {
        hosts := provider.Hosts()
        original := hosts[0]
        hosts[0] = "modified"
        assert.Equal(t, original, provider.Hosts()[0])
    })
}

func TestGrpcConnectionProvider_Close(t *testing.T) {
    provider, _, cleanup := testProvider(t, 1)
    defer cleanup()

    assert.NotNil(t, provider.CurrentConn())
    require.NoError(t, provider.Close())
    assert.Equal(t, (*grpc.ClientConn)(nil), provider.CurrentConn())
    require.NoError(t, provider.Close()) // Double close is safe
}
@@ -1,20 +0,0 @@
package grpc

import "google.golang.org/grpc"

// MockGrpcProvider implements GrpcConnectionProvider for testing.
type MockGrpcProvider struct {
    MockConn  *grpc.ClientConn
    MockHosts []string
}

func (m *MockGrpcProvider) CurrentConn() *grpc.ClientConn { return m.MockConn }
func (m *MockGrpcProvider) CurrentHost() string {
    if len(m.MockHosts) > 0 {
        return m.MockHosts[0]
    }
    return ""
}
func (m *MockGrpcProvider) Hosts() []string   { return m.MockHosts }
func (m *MockGrpcProvider) SetHost(int) error { return nil }
func (m *MockGrpcProvider) Close() error      { return nil }
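For context, a minimal sketch of how a unit test could stand in the mock above for the real provider; the beaconClient type here is hypothetical and exists only for this example.

package grpc

import "testing"

// beaconClient is a hypothetical consumer of the provider interface.
type beaconClient struct {
    provider GrpcConnectionProvider
}

func TestBeaconClient_UsesCurrentHost(t *testing.T) {
    mock := &MockGrpcProvider{MockHosts: []string{"localhost:4000"}}
    c := &beaconClient{provider: mock}
    if got := c.provider.CurrentHost(); got != "localhost:4000" {
        t.Fatalf("unexpected host: %s", got)
    }
}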
@@ -1,33 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = [
        "mock_rest_provider.go",
        "rest_connection_provider.go",
        "rest_handler.go",
    ],
    importpath = "github.com/OffchainLabs/prysm/v7/api/rest",
    visibility = ["//visibility:public"],
    deps = [
        "//api:go_default_library",
        "//api/apiutil:go_default_library",
        "//api/client:go_default_library",
        "//config/params:go_default_library",
        "//network/httputil:go_default_library",
        "//runtime/version:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@io_opentelemetry_go_contrib_instrumentation_net_http_otelhttp//:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["rest_connection_provider_test.go"],
    embed = [":go_default_library"],
    deps = [
        "//testing/assert:go_default_library",
        "//testing/require:go_default_library",
    ],
)
@@ -1,23 +0,0 @@
package rest

import "net/http"

// MockRestProvider implements RestConnectionProvider for testing.
type MockRestProvider struct {
    MockClient  *http.Client
    MockHandler RestHandler
    MockHosts   []string
    HostIndex   int
}

func (m *MockRestProvider) HttpClient() *http.Client { return m.MockClient }
func (m *MockRestProvider) RestHandler() RestHandler { return m.MockHandler }
func (m *MockRestProvider) CurrentHost() string {
    if len(m.MockHosts) > 0 {
        return m.MockHosts[m.HostIndex%len(m.MockHosts)]
    }
    return ""
}
func (m *MockRestProvider) Hosts() []string         { return m.MockHosts }
func (m *MockRestProvider) SetHost(index int) error { m.HostIndex = index; return nil }
func (m *MockRestProvider) NextHost()               { m.HostIndex = (m.HostIndex + 1) % len(m.MockHosts) }
@@ -1,177 +0,0 @@
package rest

import (
    "net/http"
    "strings"
    "sync/atomic"
    "time"

    "github.com/OffchainLabs/prysm/v7/api/client"
    pkgErrors "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
    "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

var log = logrus.WithField("prefix", "rest")

// RestConnectionProvider manages HTTP client configuration for REST API with failover support.
// It allows switching between different beacon node REST endpoints when the current one becomes unavailable.
type RestConnectionProvider interface {
    // HttpClient returns the configured HTTP client with headers, timeout, and optional tracing.
    HttpClient() *http.Client
    // RestHandler returns the REST handler for making API requests.
    RestHandler() RestHandler
    // CurrentHost returns the current REST API endpoint URL.
    CurrentHost() string
    // Hosts returns all configured REST API endpoint URLs.
    Hosts() []string
    // SetHost switches to the endpoint at the given index.
    SetHost(index int) error
    // NextHost switches to the next endpoint in round-robin fashion.
    NextHost()
}

// RestConnectionProviderOption is a functional option for configuring the REST connection provider.
type RestConnectionProviderOption func(*restConnectionProvider)

// WithHttpTimeout sets the HTTP client timeout.
func WithHttpTimeout(timeout time.Duration) RestConnectionProviderOption {
    return func(p *restConnectionProvider) {
        p.timeout = timeout
    }
}

// WithHttpHeaders sets custom HTTP headers to include in all requests.
func WithHttpHeaders(headers map[string][]string) RestConnectionProviderOption {
    return func(p *restConnectionProvider) {
        p.headers = headers
    }
}

// WithTracing enables OpenTelemetry tracing for HTTP requests.
func WithTracing() RestConnectionProviderOption {
    return func(p *restConnectionProvider) {
        p.enableTracing = true
    }
}

type restConnectionProvider struct {
    endpoints     []string
    httpClient    *http.Client
    restHandler   RestHandler
    currentIndex  atomic.Uint64
    timeout       time.Duration
    headers       map[string][]string
    enableTracing bool
}

// NewRestConnectionProvider creates a new REST connection provider that manages HTTP client configuration.
// The endpoint parameter can be a comma-separated list of URLs (e.g., "http://host1:3500,http://host2:3500").
func NewRestConnectionProvider(endpoint string, opts ...RestConnectionProviderOption) (RestConnectionProvider, error) {
    endpoints := parseEndpoints(endpoint)
    if len(endpoints) == 0 {
        return nil, pkgErrors.New("no REST API endpoints provided")
    }

    p := &restConnectionProvider{
        endpoints: endpoints,
    }

    for _, opt := range opts {
        opt(p)
    }

    // Build the HTTP transport chain
    var transport http.RoundTripper = http.DefaultTransport

    // Add custom headers if configured
    if len(p.headers) > 0 {
        transport = client.NewCustomHeadersTransport(transport, p.headers)
    }

    // Add tracing if enabled
    if p.enableTracing {
        transport = otelhttp.NewTransport(transport)
    }

    p.httpClient = &http.Client{
        Timeout:   p.timeout,
        Transport: transport,
    }

    // Create the REST handler with the HTTP client and initial host
    p.restHandler = newRestHandler(*p.httpClient, endpoints[0])

    log.WithFields(logrus.Fields{
        "endpoints": endpoints,
        "count":     len(endpoints),
    }).Info("Initialized REST connection provider with endpoints")

    return p, nil
}

// parseEndpoints splits a comma-separated endpoint string into individual endpoints.
func parseEndpoints(endpoint string) []string {
    if endpoint == "" {
        return nil
    }
    var endpoints []string
    for p := range strings.SplitSeq(endpoint, ",") {
        if p = strings.TrimSpace(p); p != "" {
            endpoints = append(endpoints, p)
        }
    }
    return endpoints
}

func (p *restConnectionProvider) HttpClient() *http.Client {
    return p.httpClient
}

func (p *restConnectionProvider) RestHandler() RestHandler {
    return p.restHandler
}

func (p *restConnectionProvider) CurrentHost() string {
    idx := p.currentIndex.Load() % uint64(len(p.endpoints))
    return p.endpoints[idx]
}

func (p *restConnectionProvider) Hosts() []string {
    // Return a copy to maintain immutability
    hosts := make([]string, len(p.endpoints))
    copy(hosts, p.endpoints)
    return hosts
}

func (p *restConnectionProvider) SetHost(index int) error {
    if index < 0 || index >= len(p.endpoints) {
        return pkgErrors.Errorf("invalid host index %d, must be between 0 and %d", index, len(p.endpoints)-1)
    }

    oldIdx := p.currentIndex.Load()
    p.currentIndex.Store(uint64(index))

    // Update the rest handler's host
    p.restHandler.SetHost(p.endpoints[index])

    log.WithFields(logrus.Fields{
        "previousHost": p.endpoints[oldIdx%uint64(len(p.endpoints))],
        "newHost":      p.endpoints[index],
    }).Debug("Trying REST endpoint")
    return nil
}

func (p *restConnectionProvider) NextHost() {
    oldIdx := p.currentIndex.Load()
    newIdx := (oldIdx + 1) % uint64(len(p.endpoints))
    p.currentIndex.Store(newIdx)

    // Update the rest handler's host
    p.restHandler.SetHost(p.endpoints[newIdx])

    log.WithFields(logrus.Fields{
        "previousHost": p.endpoints[oldIdx],
        "newHost":      p.endpoints[newIdx],
    }).Debug("Switched to next REST endpoint")
}
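Illustrative usage sketch (not part of the diff): constructing the REST provider with the functional options above and rotating endpoints round-robin. The URLs and the header value are assumptions made only for this example.

package main

import (
    "fmt"
    "time"

    "github.com/OffchainLabs/prysm/v7/api/rest"
)

func main() {
    provider, err := rest.NewRestConnectionProvider(
        "http://beacon1:3500,http://beacon2:3500",
        rest.WithHttpTimeout(30*time.Second),
        rest.WithHttpHeaders(map[string][]string{"Authorization": {"Bearer token"}}),
    )
    if err != nil {
        panic(err)
    }
    fmt.Println(provider.CurrentHost()) // http://beacon1:3500
    provider.NextHost()                 // round-robin to the next endpoint on failure
    fmt.Println(provider.CurrentHost()) // http://beacon2:3500
}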
@@ -1,85 +0,0 @@
package rest

import (
    "testing"

    "github.com/OffchainLabs/prysm/v7/testing/assert"
    "github.com/OffchainLabs/prysm/v7/testing/require"
)

func TestParseEndpoints(t *testing.T) {
    tests := []struct {
        name     string
        input    string
        expected []string
    }{
        {"single endpoint", "http://localhost:3500", []string{"http://localhost:3500"}},
        {"multiple endpoints", "http://host1:3500,http://host2:3500,http://host3:3500", []string{"http://host1:3500", "http://host2:3500", "http://host3:3500"}},
        {"endpoints with spaces", "http://host1:3500, http://host2:3500 , http://host3:3500", []string{"http://host1:3500", "http://host2:3500", "http://host3:3500"}},
        {"empty string", "", nil},
        {"only commas", ",,,", nil},
        {"trailing comma", "http://host1:3500,http://host2:3500,", []string{"http://host1:3500", "http://host2:3500"}},
        {"leading comma", ",http://host1:3500,http://host2:3500", []string{"http://host1:3500", "http://host2:3500"}},
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            assert.DeepEqual(t, tt.expected, parseEndpoints(tt.input))
        })
    }
}

func TestNewRestConnectionProvider_Errors(t *testing.T) {
    t.Run("no endpoints", func(t *testing.T) {
        _, err := NewRestConnectionProvider("")
        require.ErrorContains(t, "no REST API endpoints provided", err)
    })
}

func TestRestConnectionProvider(t *testing.T) {
    provider, err := NewRestConnectionProvider("http://host1:3500,http://host2:3500,http://host3:3500")
    require.NoError(t, err)

    t.Run("initial state", func(t *testing.T) {
        assert.Equal(t, 3, len(provider.Hosts()))
        assert.Equal(t, "http://host1:3500", provider.CurrentHost())
        assert.NotNil(t, provider.HttpClient())
    })

    t.Run("SetHost", func(t *testing.T) {
        require.NoError(t, provider.SetHost(1))
        assert.Equal(t, "http://host2:3500", provider.CurrentHost())
        require.NoError(t, provider.SetHost(0))
        assert.Equal(t, "http://host1:3500", provider.CurrentHost())
        require.ErrorContains(t, "invalid host index", provider.SetHost(-1))
        require.ErrorContains(t, "invalid host index", provider.SetHost(3))
    })

    t.Run("NextHost circular", func(t *testing.T) {
        require.NoError(t, provider.SetHost(0)) // Reset to start
        expected := []string{"http://host2:3500", "http://host3:3500", "http://host1:3500", "http://host2:3500"}
        for i, exp := range expected {
            provider.NextHost()
            assert.Equal(t, exp, provider.CurrentHost(), "iteration %d", i)
        }
    })

    t.Run("Hosts returns copy", func(t *testing.T) {
        hosts := provider.Hosts()
        original := hosts[0]
        hosts[0] = "modified"
        assert.Equal(t, original, provider.Hosts()[0])
    })
}

func TestRestConnectionProvider_WithOptions(t *testing.T) {
    headers := map[string][]string{"Authorization": {"Bearer token"}}
    provider, err := NewRestConnectionProvider(
        "http://localhost:3500",
        WithHttpHeaders(headers),
        WithHttpTimeout(30000000000), // 30 seconds in nanoseconds
        WithTracing(),
    )
    require.NoError(t, err)
    assert.NotNil(t, provider.HttpClient())
    assert.Equal(t, "http://localhost:3500", provider.CurrentHost())
}
@@ -21,6 +21,7 @@ go_library(
    importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks",
    visibility = ["//visibility:public"],
    deps = [
        "//beacon-chain/core/gloas:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/signing:go_default_library",
        "//beacon-chain/core/time:go_default_library",
@@ -4,6 +4,7 @@ import (
    "context"
    "fmt"

    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
@@ -11,6 +12,7 @@ import (
    "github.com/OffchainLabs/prysm/v7/beacon-chain/state"
    "github.com/OffchainLabs/prysm/v7/config/params"
    ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v7/runtime/version"
    "github.com/OffchainLabs/prysm/v7/time/slots"
    "github.com/pkg/errors"
    "google.golang.org/protobuf/proto"
@@ -126,7 +128,16 @@ func processProposerSlashing(
    if exitInfo == nil {
        return nil, errors.New("exit info is required to process proposer slashing")
    }

    var err error
    // [New in Gloas:EIP7732]: remove the BuilderPendingPayment corresponding to the slashed proposer within the 2-epoch window
    if beaconState.Version() >= version.Gloas {
        err = gloas.RemoveBuilderPendingPayment(beaconState, slashing.Header_1.Header)
        if err != nil {
            return nil, err
        }
    }

    beaconState, err = validators.SlashValidator(ctx, beaconState, slashing.Header_1.Header.ProposerIndex, exitInfo)
    if err != nil {
        return nil, errors.Wrapf(err, "could not slash proposer index %d", slashing.Header_1.Header.ProposerIndex)
@@ -2,16 +2,22 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = ["bid.go"],
    srcs = [
        "bid.go",
        "pending_payment.go",
        "proposer_slashing.go",
    ],
    importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas",
    visibility = ["//visibility:public"],
    deps = [
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/signing:go_default_library",
        "//beacon-chain/core/time:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/interfaces:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//crypto/bls:go_default_library",
        "//crypto/bls/common:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
@@ -22,10 +28,16 @@ go_library(

go_test(
    name = "go_default_test",
    srcs = ["bid_test.go"],
    srcs = [
        "bid_test.go",
        "pending_payment_test.go",
        "proposer_slashing_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/signing:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/state-native:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/interfaces:go_default_library",
beacon-chain/core/gloas/pending_payment.go (new file, 76 lines)
@@ -0,0 +1,76 @@
package gloas

import (
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/state"
    "github.com/OffchainLabs/prysm/v7/config/params"
    "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
    ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
    "github.com/pkg/errors"
)

// ProcessBuilderPendingPayments processes the builder pending payments from the previous epoch.
// Spec v1.7.0-alpha.0 (pseudocode):
// def process_builder_pending_payments(state: BeaconState) -> None:
//
//     quorum = get_builder_payment_quorum_threshold(state)
//     for payment in state.builder_pending_payments[:SLOTS_PER_EPOCH]:
//         if payment.weight >= quorum:
//             state.builder_pending_withdrawals.append(payment.withdrawal)
//
//     old_payments = state.builder_pending_payments[SLOTS_PER_EPOCH:]
//     new_payments = [BuilderPendingPayment() for _ in range(SLOTS_PER_EPOCH)]
//     state.builder_pending_payments = old_payments + new_payments
func ProcessBuilderPendingPayments(state state.BeaconState) error {
    quorum, err := builderQuorumThreshold(state)
    if err != nil {
        return errors.Wrap(err, "could not compute builder payment quorum threshold")
    }

    payments, err := state.BuilderPendingPayments()
    if err != nil {
        return errors.Wrap(err, "could not get builder pending payments")
    }

    slotsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch)
    var withdrawals []*ethpb.BuilderPendingWithdrawal
    for _, payment := range payments[:slotsPerEpoch] {
        if quorum > payment.Weight {
            continue
        }
        withdrawals = append(withdrawals, payment.Withdrawal)
    }

    if err := state.AppendBuilderPendingWithdrawals(withdrawals); err != nil {
        return errors.Wrap(err, "could not append builder pending withdrawals")
    }

    if err := state.RotateBuilderPendingPayments(); err != nil {
        return errors.Wrap(err, "could not rotate builder pending payments")
    }

    return nil
}

// builderQuorumThreshold calculates the quorum threshold for builder payments.
// Spec v1.7.0-alpha.0 (pseudocode):
// def get_builder_payment_quorum_threshold(state: BeaconState) -> uint64:
//
//     per_slot_balance = get_total_active_balance(state) // SLOTS_PER_EPOCH
//     quorum = per_slot_balance * BUILDER_PAYMENT_THRESHOLD_NUMERATOR
//     return uint64(quorum // BUILDER_PAYMENT_THRESHOLD_DENOMINATOR)
func builderQuorumThreshold(state state.ReadOnlyBeaconState) (primitives.Gwei, error) {
    activeBalance, err := helpers.TotalActiveBalance(state)
    if err != nil {
        return 0, errors.Wrap(err, "could not get total active balance")
    }

    cfg := params.BeaconConfig()
    slotsPerEpoch := uint64(cfg.SlotsPerEpoch)
    numerator := cfg.BuilderPaymentThresholdNumerator
    denominator := cfg.BuilderPaymentThresholdDenominator

    activeBalancePerSlot := activeBalance / slotsPerEpoch
    quorum := (activeBalancePerSlot * numerator) / denominator
    return primitives.Gwei(quorum), nil
}
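A hedged worked example of the threshold arithmetic above (the 6/10 fraction is an illustrative assumption, not a value taken from this changeset): with 32 slots per epoch and a total active balance of 320,000 Gwei, the per-slot balance is 320,000 / 32 = 10,000 Gwei, so the quorum would be 10,000 * 6 / 10 = 6,000 Gwei. Only pending payments whose accumulated weight reaches that quorum are promoted into builder_pending_withdrawals, after which the payments buffer is rotated by one epoch.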
beacon-chain/core/gloas/pending_payment_test.go (new file, 119 lines)
@@ -0,0 +1,119 @@
package gloas

import (
    "slices"
    "testing"

    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/state"
    state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
    "github.com/OffchainLabs/prysm/v7/config/params"
    "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
    ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v7/testing/require"
)

func TestBuilderQuorumThreshold(t *testing.T) {
    helpers.ClearCache()
    cfg := params.BeaconConfig()

    validators := []*ethpb.Validator{
        {EffectiveBalance: cfg.MaxEffectiveBalance, ActivationEpoch: 0, ExitEpoch: 1},
        {EffectiveBalance: cfg.MaxEffectiveBalance, ActivationEpoch: 0, ExitEpoch: 1},
    }
    st, err := state_native.InitializeFromProtoUnsafeGloas(&ethpb.BeaconStateGloas{Validators: validators})
    require.NoError(t, err)

    got, err := builderQuorumThreshold(st)
    require.NoError(t, err)

    total := uint64(len(validators)) * cfg.MaxEffectiveBalance
    perSlot := total / uint64(cfg.SlotsPerEpoch)
    want := (perSlot * cfg.BuilderPaymentThresholdNumerator) / cfg.BuilderPaymentThresholdDenominator
    require.Equal(t, primitives.Gwei(want), got)
}

func TestProcessBuilderPendingPayments(t *testing.T) {
    helpers.ClearCache()
    cfg := params.BeaconConfig()

    buildPayments := func(weights ...primitives.Gwei) []*ethpb.BuilderPendingPayment {
        p := make([]*ethpb.BuilderPendingPayment, 2*int(cfg.SlotsPerEpoch))
        for i := range p {
            p[i] = &ethpb.BuilderPendingPayment{
                Withdrawal: &ethpb.BuilderPendingWithdrawal{FeeRecipient: make([]byte, 20)},
            }
        }
        for i, w := range weights {
            p[i].Weight = w
            p[i].Withdrawal.Amount = 1
        }
        return p
    }

    validators := []*ethpb.Validator{
        {EffectiveBalance: cfg.MaxEffectiveBalance, ActivationEpoch: 0, ExitEpoch: 1},
        {EffectiveBalance: cfg.MaxEffectiveBalance, ActivationEpoch: 0, ExitEpoch: 1},
    }
    pbSt, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Validators: validators})
    require.NoError(t, err)

    total := uint64(len(validators)) * cfg.MaxEffectiveBalance
    perSlot := total / uint64(cfg.SlotsPerEpoch)
    quorum := (perSlot * cfg.BuilderPaymentThresholdNumerator) / cfg.BuilderPaymentThresholdDenominator
    slotsPerEpoch := int(cfg.SlotsPerEpoch)

    t.Run("append qualifying withdrawals", func(t *testing.T) {
        payments := buildPayments(primitives.Gwei(quorum+1), primitives.Gwei(quorum+2))
        st := &testProcessState{BeaconState: pbSt, payments: payments}

        require.NoError(t, ProcessBuilderPendingPayments(st))
        require.Equal(t, 2, len(st.withdrawals))
        require.Equal(t, payments[0].Withdrawal, st.withdrawals[0])
        require.Equal(t, payments[1].Withdrawal, st.withdrawals[1])

        require.Equal(t, 2*slotsPerEpoch, len(st.payments))
        for i := slotsPerEpoch; i < 2*slotsPerEpoch; i++ {
            require.Equal(t, primitives.Gwei(0), st.payments[i].Weight)
            require.Equal(t, primitives.Gwei(0), st.payments[i].Withdrawal.Amount)
            require.Equal(t, 20, len(st.payments[i].Withdrawal.FeeRecipient))
        }
    })

    t.Run("no withdrawals when below quorum", func(t *testing.T) {
        payments := buildPayments(primitives.Gwei(quorum - 1))
        st := &testProcessState{BeaconState: pbSt, payments: payments}

        require.NoError(t, ProcessBuilderPendingPayments(st))
        require.Equal(t, 0, len(st.withdrawals))
    })
}

type testProcessState struct {
    state.BeaconState
    payments    []*ethpb.BuilderPendingPayment
    withdrawals []*ethpb.BuilderPendingWithdrawal
}

func (t *testProcessState) BuilderPendingPayments() ([]*ethpb.BuilderPendingPayment, error) {
    return t.payments, nil
}

func (t *testProcessState) AppendBuilderPendingWithdrawals(withdrawals []*ethpb.BuilderPendingWithdrawal) error {
    t.withdrawals = append(t.withdrawals, withdrawals...)
    return nil
}

func (t *testProcessState) RotateBuilderPendingPayments() error {
    slotsPerEpoch := int(params.BeaconConfig().SlotsPerEpoch)
    rotated := slices.Clone(t.payments[slotsPerEpoch:])
    for range slotsPerEpoch {
        rotated = append(rotated, &ethpb.BuilderPendingPayment{
            Withdrawal: &ethpb.BuilderPendingWithdrawal{
                FeeRecipient: make([]byte, 20),
            },
        })
    }
    t.payments = rotated
    return nil
}
beacon-chain/core/gloas/proposer_slashing.go (new file, 43 lines)
@@ -0,0 +1,43 @@
package gloas

import (
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/state"
    "github.com/OffchainLabs/prysm/v7/config/params"
    "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
    eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v7/time/slots"
    "github.com/pkg/errors"
)

// RemoveBuilderPendingPayment removes the pending builder payment for the proposal slot.
// Spec v1.7.0 (pseudocode):
//
//     slot = header_1.slot
//     proposal_epoch = compute_epoch_at_slot(slot)
//     if proposal_epoch == get_current_epoch(state):
//         payment_index = SLOTS_PER_EPOCH + slot % SLOTS_PER_EPOCH
//         state.builder_pending_payments[payment_index] = BuilderPendingPayment()
//     elif proposal_epoch == get_previous_epoch(state):
//         payment_index = slot % SLOTS_PER_EPOCH
//         state.builder_pending_payments[payment_index] = BuilderPendingPayment()
func RemoveBuilderPendingPayment(st state.BeaconState, header *eth.BeaconBlockHeader) error {
    proposalEpoch := slots.ToEpoch(header.Slot)
    currentEpoch := time.CurrentEpoch(st)
    slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch

    var paymentIndex primitives.Slot
    if proposalEpoch == currentEpoch {
        paymentIndex = slotsPerEpoch + header.Slot%slotsPerEpoch
    } else if proposalEpoch+1 == currentEpoch {
        paymentIndex = header.Slot % slotsPerEpoch
    } else {
        return nil
    }

    if err := st.ClearBuilderPendingPayment(paymentIndex); err != nil {
        return errors.Wrap(err, "could not clear builder pending payment")
    }

    return nil
}
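As a quick worked example of the index math above, assuming 32 slots per epoch: for a state in epoch 2 (slots 64 through 95), a slashed proposal at slot 70 falls in the current epoch, so index 32 + 70 % 32 = 38 is cleared; a proposal at slot 38 falls in the previous epoch, so index 38 % 32 = 6 is cleared; anything older than the previous epoch leaves the pending payments untouched.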
beacon-chain/core/gloas/proposer_slashing_test.go (new file, 112 lines)
@@ -0,0 +1,112 @@
package gloas

import (
    "bytes"
    "testing"

    "github.com/OffchainLabs/prysm/v7/beacon-chain/state"
    state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
    "github.com/OffchainLabs/prysm/v7/config/params"
    "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
    eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v7/testing/require"
)

func TestRemoveBuilderPendingPayment_CurrentEpoch(t *testing.T) {
    slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
    stateSlot := slotsPerEpoch*2 + 1
    headerSlot := slotsPerEpoch * 2

    st := newGloasStateWithPayments(t, stateSlot)
    paymentIndex := int(slotsPerEpoch + headerSlot%slotsPerEpoch)

    setPendingPayment(t, st, paymentIndex, 123)

    err := RemoveBuilderPendingPayment(st, &eth.BeaconBlockHeader{Slot: headerSlot})
    require.NoError(t, err)

    got := getPendingPayment(t, st, paymentIndex)
    require.NotNil(t, got.Withdrawal)
    require.DeepEqual(t, make([]byte, 20), got.Withdrawal.FeeRecipient)
    require.Equal(t, uint64(0), uint64(got.Withdrawal.Amount))
}

func TestRemoveBuilderPendingPayment_PreviousEpoch(t *testing.T) {
    slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
    stateSlot := slotsPerEpoch*2 + 1
    headerSlot := slotsPerEpoch + 7

    st := newGloasStateWithPayments(t, stateSlot)
    paymentIndex := int(headerSlot % slotsPerEpoch)

    setPendingPayment(t, st, paymentIndex, 456)

    err := RemoveBuilderPendingPayment(st, &eth.BeaconBlockHeader{Slot: headerSlot})
    require.NoError(t, err)

    got := getPendingPayment(t, st, paymentIndex)
    require.NotNil(t, got.Withdrawal)
    require.DeepEqual(t, make([]byte, 20), got.Withdrawal.FeeRecipient)
    require.Equal(t, uint64(0), uint64(got.Withdrawal.Amount))
}

func TestRemoveBuilderPendingPayment_OlderThanTwoEpoch(t *testing.T) {
    slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
    stateSlot := slotsPerEpoch*4 + 1 // current epoch far ahead
    headerSlot := slotsPerEpoch * 2  // two epochs behind

    st := newGloasStateWithPayments(t, stateSlot)
    paymentIndex := int(headerSlot % slotsPerEpoch)

    original := getPendingPayment(t, st, paymentIndex)

    err := RemoveBuilderPendingPayment(st, &eth.BeaconBlockHeader{Slot: headerSlot})
    require.NoError(t, err)

    after := getPendingPayment(t, st, paymentIndex)
    require.DeepEqual(t, original.Withdrawal.FeeRecipient, after.Withdrawal.FeeRecipient)
    require.Equal(t, original.Withdrawal.Amount, after.Withdrawal.Amount)
}

func newGloasStateWithPayments(t *testing.T, slot primitives.Slot) state.BeaconState {
    t.Helper()

    slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
    paymentCount := int(slotsPerEpoch * 2)
    payments := make([]*eth.BuilderPendingPayment, paymentCount)
    for i := range payments {
        payments[i] = &eth.BuilderPendingPayment{
            Withdrawal: &eth.BuilderPendingWithdrawal{
                FeeRecipient: bytes.Repeat([]byte{0x01}, 20),
                Amount:       1,
            },
        }
    }

    st, err := state_native.InitializeFromProtoUnsafeGloas(&eth.BeaconStateGloas{
        Slot:                   slot,
        BuilderPendingPayments: payments,
    })
    require.NoError(t, err)
    return st
}

func setPendingPayment(t *testing.T, st state.BeaconState, index int, amount uint64) {
    t.Helper()

    payment := &eth.BuilderPendingPayment{
        Withdrawal: &eth.BuilderPendingWithdrawal{
            FeeRecipient: bytes.Repeat([]byte{0x02}, 20),
            Amount:       primitives.Gwei(amount),
        },
    }
    require.NoError(t, st.SetBuilderPendingPayment(primitives.Slot(index), payment))
}

func getPendingPayment(t *testing.T, st state.BeaconState, index int) *eth.BuilderPendingPayment {
    t.Helper()

    stateProto := st.ToProtoUnsafe().(*eth.BeaconStateGloas)

    return stateProto.BuilderPendingPayments[index]
}
@@ -71,6 +71,7 @@ go_test(
        "state_test.go",
        "trailing_slot_state_cache_test.go",
        "transition_fuzz_test.go",
        "transition_gloas_test.go",
        "transition_no_verify_sig_test.go",
        "transition_test.go",
    ],
@@ -106,6 +107,7 @@ go_test(
        "//time/slots:go_default_library",
        "@com_github_google_gofuzz//:go_default_library",
        "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
        "@com_github_stretchr_testify//require:go_default_library",
        "@org_golang_google_protobuf//proto:go_default_library",
    ],
)
@@ -142,6 +142,18 @@ func ProcessSlot(ctx context.Context, state state.BeaconState) (state.BeaconStat
    ); err != nil {
        return nil, err
    }

    // Spec v1.6.1 (pseudocode):
    // # [New in Gloas:EIP7732]
    // # Unset the next payload availability
    // state.execution_payload_availability[(state.slot + 1) % SLOTS_PER_HISTORICAL_ROOT] = 0b0
    if state.Version() >= version.Gloas {
        index := uint64((state.Slot() + 1) % params.BeaconConfig().SlotsPerHistoricalRoot)
        if err := state.UpdateExecutionPayloadAvailabilityAtIndex(index, 0x0); err != nil {
            return nil, err
        }
    }

    return state, nil
}
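For illustration of the bit indexing above, using the mainnet value SLOTS_PER_HISTORICAL_ROOT = 8192: processing slot 10 clears index (10 + 1) % 8192 = 11, that is bit 3 of byte 1 of execution_payload_availability; at slot 8191 the index wraps to 0, which is why the wrap-around test below expects byte 0 to drop from 0xFF to 0xFE.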
beacon-chain/core/transition/transition_gloas_test.go (new file, 141 lines)
@@ -0,0 +1,141 @@
package transition

import (
    "bytes"
    "context"
    "fmt"
    "testing"

    "github.com/OffchainLabs/prysm/v7/beacon-chain/state"
    state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
    fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
    "github.com/OffchainLabs/prysm/v7/config/params"
    "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
    ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v7/runtime/version"
    "github.com/stretchr/testify/require"
)

func TestProcessSlot_GloasClearsNextPayloadAvailability(t *testing.T) {
    slot := primitives.Slot(10)
    cfg := params.BeaconConfig()
    nextIdx := uint64((slot + 1) % cfg.SlotsPerHistoricalRoot)
    byteIdx := nextIdx / 8
    bitMask := byte(1 << (nextIdx % 8))
    availability := bytes.Repeat([]byte{0xFF}, int(cfg.SlotsPerHistoricalRoot/8))
    st := newGloasState(t, slot, availability)

    _, err := ProcessSlot(context.Background(), st)
    require.NoError(t, err)

    post := st.ToProto().(*ethpb.BeaconStateGloas)
    require.Equal(t, byte(0xFF)&^bitMask, post.ExecutionPayloadAvailability[byteIdx])
}

func TestProcessSlot_GloasClearsNextPayloadAvailability_Wrap(t *testing.T) {
    cfg := params.BeaconConfig()
    slot := primitives.Slot(cfg.SlotsPerHistoricalRoot - 1)
    availability := bytes.Repeat([]byte{0xFF}, int(cfg.SlotsPerHistoricalRoot/8))
    st := newGloasState(t, slot, availability)

    _, err := ProcessSlot(context.Background(), st)
    require.NoError(t, err)

    post := st.ToProto().(*ethpb.BeaconStateGloas)
    require.Equal(t, byte(0xFE), post.ExecutionPayloadAvailability[0])
}

func TestProcessSlot_GloasAvailabilityUpdateError(t *testing.T) {
    slot := primitives.Slot(7)
    availability := make([]byte, 1)
    st := newGloasState(t, slot, availability)

    _, err := ProcessSlot(context.Background(), st)
    cfg := params.BeaconConfig()
    idx := uint64((slot + 1) % cfg.SlotsPerHistoricalRoot)
    byteIdx := idx / 8
    require.EqualError(t, err, fmt.Sprintf(
        "bit index %d (byte index %d) out of range for execution payload availability length %d",
        idx, byteIdx, len(availability),
    ))
}

func newGloasState(t *testing.T, slot primitives.Slot, availability []byte) state.BeaconState {
    t.Helper()

    cfg := params.BeaconConfig()
    protoState := &ethpb.BeaconStateGloas{
        Slot:                         slot,
        LatestBlockHeader:            testBeaconBlockHeader(),
        BlockRoots:                   make([][]byte, cfg.SlotsPerHistoricalRoot),
        StateRoots:                   make([][]byte, cfg.SlotsPerHistoricalRoot),
        RandaoMixes:                  make([][]byte, fieldparams.RandaoMixesLength),
        ExecutionPayloadAvailability: availability,
        BuilderPendingPayments:       make([]*ethpb.BuilderPendingPayment, int(cfg.SlotsPerEpoch*2)),
        LatestExecutionPayloadBid: &ethpb.ExecutionPayloadBid{
            ParentBlockHash:        make([]byte, 32),
            ParentBlockRoot:        make([]byte, 32),
            BlockHash:              make([]byte, 32),
            PrevRandao:             make([]byte, 32),
            FeeRecipient:           make([]byte, 20),
            BlobKzgCommitmentsRoot: make([]byte, 32),
        },
        Eth1Data: &ethpb.Eth1Data{
            DepositRoot: make([]byte, 32),
            BlockHash:   make([]byte, 32),
        },
        PreviousEpochParticipation:  []byte{},
        CurrentEpochParticipation:   []byte{},
        JustificationBits:           []byte{0},
        PreviousJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
        CurrentJustifiedCheckpoint:  &ethpb.Checkpoint{Root: make([]byte, 32)},
        FinalizedCheckpoint:         &ethpb.Checkpoint{Root: make([]byte, 32)},
        CurrentSyncCommittee:        &ethpb.SyncCommittee{},
        NextSyncCommittee:           &ethpb.SyncCommittee{},
    }

    for i := range protoState.BlockRoots {
        protoState.BlockRoots[i] = make([]byte, 32)
    }
    for i := range protoState.StateRoots {
        protoState.StateRoots[i] = make([]byte, 32)
    }
    for i := range protoState.RandaoMixes {
        protoState.RandaoMixes[i] = make([]byte, 32)
    }

    for i := range protoState.BuilderPendingPayments {
        protoState.BuilderPendingPayments[i] = &ethpb.BuilderPendingPayment{
            Withdrawal: &ethpb.BuilderPendingWithdrawal{
                FeeRecipient: make([]byte, 20),
            },
        }
    }

    pubkeys := make([][]byte, cfg.SyncCommitteeSize)
    for i := range pubkeys {
        pubkeys[i] = make([]byte, fieldparams.BLSPubkeyLength)
    }
    aggPubkey := make([]byte, fieldparams.BLSPubkeyLength)
    protoState.CurrentSyncCommittee = &ethpb.SyncCommittee{
        Pubkeys:         pubkeys,
        AggregatePubkey: aggPubkey,
    }
    protoState.NextSyncCommittee = &ethpb.SyncCommittee{
        Pubkeys:         pubkeys,
        AggregatePubkey: aggPubkey,
    }

    st, err := state_native.InitializeFromProtoGloas(protoState)
    require.NoError(t, err)
    require.Equal(t, version.Gloas, st.Version())
    return st
}

func testBeaconBlockHeader() *ethpb.BeaconBlockHeader {
    return &ethpb.BeaconBlockHeader{
        ParentRoot: make([]byte, 32),
        StateRoot:  make([]byte, 32),
        BodyRoot:   make([]byte, 32),
    }
}
@@ -512,6 +512,11 @@ func (dcs *DataColumnStorage) Get(root [fieldparams.RootLength]byte, indices []u
    if err != nil {
        return nil, errors.Wrap(err, "data column sidecars file path open")
    }
    defer func() {
        if closeErr := file.Close(); closeErr != nil {
            log.WithError(closeErr).WithField("file", filePath).Error("Error closing file during Get")
        }
    }()

    // Read file metadata.
    metadata, err := dcs.metadata(file)
@@ -8,7 +8,6 @@ import (
    "io"
    "net/http"
    "strconv"
    "strings"
    "time"

    "github.com/OffchainLabs/prysm/v7/api"
@@ -26,7 +25,6 @@ import (
    "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
    "github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
    "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
    "github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
    "github.com/OffchainLabs/prysm/v7/network/httputil"
    eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
@@ -1044,112 +1042,27 @@ func (s *Server) GetBlockRoot(w http.ResponseWriter, r *http.Request) {
    ctx, span := trace.StartSpan(r.Context(), "beacon.GetBlockRoot")
    defer span.End()

    var err error
    var root []byte
    blockID := r.PathValue("block_id")
    if blockID == "" {
        httputil.HandleError(w, "block_id is required in URL params", http.StatusBadRequest)
        return
    }
    switch blockID {
    case "head":
        root, err = s.ChainInfoFetcher.HeadRoot(ctx)
        if err != nil {
            httputil.HandleError(w, "Could not retrieve head root: "+err.Error(), http.StatusInternalServerError)
            return
        }
        if root == nil {
            httputil.HandleError(w, "No head root was found", http.StatusNotFound)
            return
        }
    case "finalized":
        finalized := s.ChainInfoFetcher.FinalizedCheckpt()
        root = finalized.Root
    case "genesis":
        blk, err := s.BeaconDB.GenesisBlock(ctx)
        if err != nil {
            httputil.HandleError(w, "Could not retrieve genesis block: "+err.Error(), http.StatusInternalServerError)
            return
        }
        if err := blocks.BeaconBlockIsNil(blk); err != nil {
            httputil.HandleError(w, "Could not find genesis block: "+err.Error(), http.StatusNotFound)
            return
        }
        blkRoot, err := blk.Block().HashTreeRoot()
        if err != nil {
            httputil.HandleError(w, "Could not hash genesis block: "+err.Error(), http.StatusInternalServerError)
            return
        }
        root = blkRoot[:]
    default:
        isHex := strings.HasPrefix(blockID, "0x")
        if isHex {
            blockIDBytes, err := hexutil.Decode(blockID)
            if err != nil {
                httputil.HandleError(w, "Could not decode block ID into bytes: "+err.Error(), http.StatusBadRequest)
                return
            }
            if len(blockIDBytes) != fieldparams.RootLength {
                httputil.HandleError(w, fmt.Sprintf("Block ID has length %d instead of %d", len(blockIDBytes), fieldparams.RootLength), http.StatusBadRequest)
                return
            }
            blockID32 := bytesutil.ToBytes32(blockIDBytes)
            blk, err := s.BeaconDB.Block(ctx, blockID32)
            if err != nil {
                httputil.HandleError(w, fmt.Sprintf("Could not retrieve block for block root %#x: %v", blockID, err), http.StatusInternalServerError)
                return
            }
            if err := blocks.BeaconBlockIsNil(blk); err != nil {
                httputil.HandleError(w, "Could not find block: "+err.Error(), http.StatusNotFound)
                return
            }
            root = blockIDBytes
        } else {
            slot, err := strconv.ParseUint(blockID, 10, 64)
            if err != nil {
                httputil.HandleError(w, "Could not parse block ID: "+err.Error(), http.StatusBadRequest)
                return
            }
            hasRoots, roots, err := s.BeaconDB.BlockRootsBySlot(ctx, primitives.Slot(slot))
            if err != nil {
                httputil.HandleError(w, fmt.Sprintf("Could not retrieve blocks for slot %d: %v", slot, err), http.StatusInternalServerError)
                return
            }

            if !hasRoots {
                httputil.HandleError(w, "Could not find any blocks with given slot", http.StatusNotFound)
                return
            }
            root = roots[0][:]
            if len(roots) == 1 {
                break
            }
            for _, blockRoot := range roots {
                canonical, err := s.ChainInfoFetcher.IsCanonical(ctx, blockRoot)
                if err != nil {
                    httputil.HandleError(w, "Could not determine if block root is canonical: "+err.Error(), http.StatusInternalServerError)
                    return
                }
                if canonical {
                    root = blockRoot[:]
                    break
                }
            }
        }
    }
    root, err := s.Blocker.BlockRoot(ctx, []byte(blockID))
    if !shared.WriteBlockRootFetchError(w, err) {
        return
    }

    b32Root := bytesutil.ToBytes32(root)
    isOptimistic, err := s.OptimisticModeFetcher.IsOptimisticForRoot(ctx, b32Root)
    isOptimistic, err := s.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
    if err != nil {
        httputil.HandleError(w, "Could not check if block is optimistic: "+err.Error(), http.StatusInternalServerError)
        return
    }
    response := &structs.BlockRootResponse{
        Data: &structs.BlockRoot{
            Root: hexutil.Encode(root),
            Root: hexutil.Encode(root[:]),
        },
        ExecutionOptimistic: isOptimistic,
        Finalized:           s.FinalizationFetcher.IsFinalized(ctx, b32Root),
        Finalized:           s.FinalizationFetcher.IsFinalized(ctx, root),
    }
    httputil.WriteJson(w, response)
}
@@ -2509,6 +2509,10 @@ func TestServer_GetBlockRoot(t *testing.T) {
|
||||
HeadFetcher: mockChainFetcher,
|
||||
OptimisticModeFetcher: mockChainFetcher,
|
||||
FinalizationFetcher: mockChainFetcher,
|
||||
Blocker: &lookup.BeaconDbBlocker{
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainFetcher,
|
||||
},
|
||||
}
|
||||
|
||||
root, err := genBlk.Block.HashTreeRoot()
|
||||
@@ -2524,7 +2528,7 @@ func TestServer_GetBlockRoot(t *testing.T) {
|
||||
{
|
||||
name: "bad formatting",
|
||||
blockID: map[string]string{"block_id": "3bad0"},
|
||||
wantErr: "Could not parse block ID",
|
||||
wantErr: "Invalid block ID",
|
||||
wantCode: http.StatusBadRequest,
|
||||
},
|
||||
{
|
||||
@@ -2572,7 +2576,7 @@ func TestServer_GetBlockRoot(t *testing.T) {
|
||||
{
|
||||
name: "non-existent root",
|
||||
blockID: map[string]string{"block_id": hexutil.Encode(bytesutil.PadTo([]byte("hi there"), 32))},
|
||||
wantErr: "Could not find block",
|
||||
wantErr: "Block not found",
|
||||
wantCode: http.StatusNotFound,
|
||||
},
|
||||
{
|
||||
@@ -2585,7 +2589,7 @@ func TestServer_GetBlockRoot(t *testing.T) {
|
||||
{
|
||||
name: "no block",
|
||||
blockID: map[string]string{"block_id": "105"},
|
||||
wantErr: "Could not find any blocks with given slot",
|
||||
wantErr: "Block not found",
|
||||
wantCode: http.StatusNotFound,
|
||||
},
|
||||
}
|
||||
@@ -2633,6 +2637,10 @@ func TestServer_GetBlockRoot(t *testing.T) {
|
||||
HeadFetcher: mockChainFetcher,
|
||||
OptimisticModeFetcher: mockChainFetcher,
|
||||
FinalizationFetcher: mockChainFetcher,
|
||||
Blocker: &lookup.BeaconDbBlocker{
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainFetcher,
|
||||
},
|
||||
}
|
||||
|
||||
request := httptest.NewRequest(http.MethodGet, url, nil)
|
||||
@@ -2668,6 +2676,10 @@ func TestServer_GetBlockRoot(t *testing.T) {
|
||||
HeadFetcher: mockChainFetcher,
|
||||
OptimisticModeFetcher: mockChainFetcher,
|
||||
FinalizationFetcher: mockChainFetcher,
|
||||
Blocker: &lookup.BeaconDbBlocker{
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainFetcher,
|
||||
},
|
||||
}
|
||||
t.Run("true", func(t *testing.T) {
|
||||
request := httptest.NewRequest(http.MethodGet, url, nil)
|
||||
|
||||
@@ -7,6 +7,7 @@ go_library(
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//api/server/structs:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//monitoring/tracing/trace:go_default_library",
|
||||
"//network/httputil:go_default_library",
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/api/server/structs"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
|
||||
"github.com/OffchainLabs/prysm/v7/network/httputil"
|
||||
@@ -181,6 +182,16 @@ func prepareConfigSpec() (map[string]any, error) {
		data[tag] = convertValueForJSON(val, tag)
	}

	// Add Fulu preset values. These are compile-time constants from fieldparams,
	// not runtime configs, but are required by the /eth/v1/config/spec API.
	data["NUMBER_OF_COLUMNS"] = convertValueForJSON(reflect.ValueOf(uint64(fieldparams.NumberOfColumns)), "NUMBER_OF_COLUMNS")
	data["CELLS_PER_EXT_BLOB"] = convertValueForJSON(reflect.ValueOf(uint64(fieldparams.NumberOfColumns)), "CELLS_PER_EXT_BLOB")
	data["FIELD_ELEMENTS_PER_CELL"] = convertValueForJSON(reflect.ValueOf(uint64(fieldparams.CellsPerBlob)), "FIELD_ELEMENTS_PER_CELL")
	data["FIELD_ELEMENTS_PER_EXT_BLOB"] = convertValueForJSON(reflect.ValueOf(config.FieldElementsPerBlob*2), "FIELD_ELEMENTS_PER_EXT_BLOB")
	data["KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH"] = convertValueForJSON(reflect.ValueOf(uint64(4)), "KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH")
	// UPDATE_TIMEOUT is derived from SLOTS_PER_EPOCH * EPOCHS_PER_SYNC_COMMITTEE_PERIOD
	data["UPDATE_TIMEOUT"] = convertValueForJSON(reflect.ValueOf(uint64(config.SlotsPerEpoch)*uint64(config.EpochsPerSyncCommitteePeriod)), "UPDATE_TIMEOUT")

	return data, nil
}

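For context, the derivation noted in the comment above can be made concrete: with the mainnet presets SLOTS_PER_EPOCH = 32 and EPOCHS_PER_SYNC_COMMITTEE_PERIOD = 256, UPDATE_TIMEOUT works out to 8192 slots, while the test further below uses smaller test-config values (27 * 66 = 1782). The helper below is purely illustrative and is not part of this diff.

```go
// updateTimeout restates the derivation used above:
// UPDATE_TIMEOUT = SLOTS_PER_EPOCH * EPOCHS_PER_SYNC_COMMITTEE_PERIOD.
// With mainnet presets (32, 256) it returns 8192; with the test values below (27, 66) it returns 1782.
func updateTimeout(slotsPerEpoch, epochsPerSyncCommitteePeriod uint64) uint64 {
	return slotsPerEpoch * epochsPerSyncCommitteePeriod
}
```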
@@ -132,7 +132,7 @@ func TestGetSpec(t *testing.T) {
|
||||
config.MinSyncCommitteeParticipants = 71
|
||||
config.ProposerReorgCutoffBPS = primitives.BP(121)
|
||||
config.AttestationDueBPS = primitives.BP(122)
|
||||
config.AggregrateDueBPS = primitives.BP(123)
|
||||
config.AggregateDueBPS = primitives.BP(123)
|
||||
config.ContributionDueBPS = primitives.BP(124)
|
||||
config.TerminalBlockHash = common.HexToHash("TerminalBlockHash")
|
||||
config.TerminalBlockHashActivationEpoch = 72
|
||||
@@ -170,6 +170,8 @@ func TestGetSpec(t *testing.T) {
|
||||
config.SyncMessageDueBPS = 103
|
||||
config.BuilderWithdrawalPrefixByte = byte('b')
|
||||
config.BuilderIndexSelfBuild = primitives.BuilderIndex(125)
|
||||
config.BuilderPaymentThresholdNumerator = 104
|
||||
config.BuilderPaymentThresholdDenominator = 105
|
||||
|
||||
var dbp [4]byte
|
||||
copy(dbp[:], []byte{'0', '0', '0', '1'})
|
||||
@@ -210,7 +212,7 @@ func TestGetSpec(t *testing.T) {
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp))
|
||||
data, ok := resp.Data.(map[string]any)
|
||||
require.Equal(t, true, ok)
|
||||
assert.Equal(t, 178, len(data))
|
||||
assert.Equal(t, 186, len(data))
|
||||
for k, v := range data {
|
||||
t.Run(k, func(t *testing.T) {
|
||||
switch k {
|
||||
@@ -468,7 +470,7 @@ func TestGetSpec(t *testing.T) {
|
||||
assert.Equal(t, "121", v)
|
||||
case "ATTESTATION_DUE_BPS":
|
||||
assert.Equal(t, "122", v)
|
||||
case "AGGREGRATE_DUE_BPS":
|
||||
case "AGGREGATE_DUE_BPS":
|
||||
assert.Equal(t, "123", v)
|
||||
case "CONTRIBUTION_DUE_BPS":
|
||||
assert.Equal(t, "124", v)
|
||||
@@ -588,10 +590,26 @@ func TestGetSpec(t *testing.T) {
|
||||
assert.Equal(t, "102", v)
|
||||
case "SYNC_MESSAGE_DUE_BPS":
|
||||
assert.Equal(t, "103", v)
|
||||
case "BUILDER_PAYMENT_THRESHOLD_NUMERATOR":
|
||||
assert.Equal(t, "104", v)
|
||||
case "BUILDER_PAYMENT_THRESHOLD_DENOMINATOR":
|
||||
assert.Equal(t, "105", v)
|
||||
case "BLOB_SCHEDULE":
|
||||
blobSchedule, ok := v.([]any)
|
||||
assert.Equal(t, true, ok)
|
||||
assert.Equal(t, 2, len(blobSchedule))
|
||||
case "FIELD_ELEMENTS_PER_CELL":
|
||||
assert.Equal(t, "64", v) // From fieldparams.CellsPerBlob
|
||||
case "FIELD_ELEMENTS_PER_EXT_BLOB":
|
||||
assert.Equal(t, "198", v) // FieldElementsPerBlob (99) * 2
|
||||
case "KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH":
|
||||
assert.Equal(t, "4", v) // Preset value
|
||||
case "CELLS_PER_EXT_BLOB":
|
||||
assert.Equal(t, "128", v) // From fieldparams.NumberOfColumns
|
||||
case "NUMBER_OF_COLUMNS":
|
||||
assert.Equal(t, "128", v) // From fieldparams.NumberOfColumns
|
||||
case "UPDATE_TIMEOUT":
|
||||
assert.Equal(t, "1782", v) // SlotsPerEpoch (27) * EpochsPerSyncCommitteePeriod (66)
|
||||
default:
|
||||
t.Errorf("Incorrect key: %s", k)
|
||||
}
|
||||
|
||||
@@ -26,21 +26,30 @@ func WriteStateFetchError(w http.ResponseWriter, err error) {
	httputil.HandleError(w, "Could not get state: "+err.Error(), http.StatusInternalServerError)
}

// WriteBlockFetchError writes an appropriate error based on the supplied argument.
// The argument error should be a result of fetching block.
func WriteBlockFetchError(w http.ResponseWriter, blk interfaces.ReadOnlySignedBeaconBlock, err error) bool {
// writeBlockIdError handles common block ID lookup errors.
// Returns true if an error was handled and written to the response, false if no error.
func writeBlockIdError(w http.ResponseWriter, err error, fallbackMsg string) bool {
	if err == nil {
		return false
	}
	var blockNotFoundErr *lookup.BlockNotFoundError
	if errors.As(err, &blockNotFoundErr) {
		httputil.HandleError(w, "Block not found: "+blockNotFoundErr.Error(), http.StatusNotFound)
		return false
		return true
	}
	var invalidBlockIdErr *lookup.BlockIdParseError
	if errors.As(err, &invalidBlockIdErr) {
		httputil.HandleError(w, "Invalid block ID: "+invalidBlockIdErr.Error(), http.StatusBadRequest)
		return false
		return true
	}
	if err != nil {
		httputil.HandleError(w, "Could not get block from block ID: "+err.Error(), http.StatusInternalServerError)
		httputil.HandleError(w, fallbackMsg+err.Error(), http.StatusInternalServerError)
		return true
	}

// WriteBlockFetchError writes an appropriate error based on the supplied argument.
// The argument error should be a result of fetching block.
func WriteBlockFetchError(w http.ResponseWriter, blk interfaces.ReadOnlySignedBeaconBlock, err error) bool {
	if writeBlockIdError(w, err, "Could not get block from block ID: ") {
		return false
	}
	if err = blocks.BeaconBlockIsNil(blk); err != nil {
@@ -49,3 +58,10 @@ func WriteBlockFetchError(w http.ResponseWriter, blk interfaces.ReadOnlySignedBe
	}
	return true
}

// WriteBlockRootFetchError writes an appropriate error based on the supplied argument.
// The argument error should be a result of fetching block root.
// Returns true if no error occurred, false otherwise.
func WriteBlockRootFetchError(w http.ResponseWriter, err error) bool {
	return !writeBlockIdError(w, err, "Could not get block root from block ID: ")
}

@@ -105,3 +105,59 @@ func TestWriteBlockFetchError(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestWriteBlockRootFetchError tests the WriteBlockRootFetchError function
|
||||
// to ensure that the correct error message and code are written to the response
|
||||
// and that the function returns the correct boolean value.
|
||||
func TestWriteBlockRootFetchError(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
err error
|
||||
expectedMessage string
|
||||
expectedCode int
|
||||
expectedReturn bool
|
||||
}{
|
||||
{
|
||||
name: "Nil error should return true",
|
||||
err: nil,
|
||||
expectedReturn: true,
|
||||
},
|
||||
{
|
||||
name: "BlockNotFoundError should return 404",
|
||||
err: lookup.NewBlockNotFoundError("block not found at slot 123"),
|
||||
expectedMessage: "Block not found",
|
||||
expectedCode: http.StatusNotFound,
|
||||
expectedReturn: false,
|
||||
},
|
||||
{
|
||||
name: "BlockIdParseError should return 400",
|
||||
err: &lookup.BlockIdParseError{},
|
||||
expectedMessage: "Invalid block ID",
|
||||
expectedCode: http.StatusBadRequest,
|
||||
expectedReturn: false,
|
||||
},
|
||||
{
|
||||
name: "Generic error should return 500",
|
||||
err: errors.New("database connection failed"),
|
||||
expectedMessage: "Could not get block root from block ID",
|
||||
expectedCode: http.StatusInternalServerError,
|
||||
expectedReturn: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
writer := httptest.NewRecorder()
|
||||
result := WriteBlockRootFetchError(writer, c.err)
|
||||
|
||||
assert.Equal(t, c.expectedReturn, result, "incorrect return value")
|
||||
if !c.expectedReturn {
|
||||
assert.Equal(t, c.expectedCode, writer.Code, "incorrect status code")
|
||||
assert.StringContains(t, c.expectedMessage, writer.Body.String(), "incorrect error message")
|
||||
|
||||
e := &httputil.DefaultJsonError{}
|
||||
assert.NoError(t, json.Unmarshal(writer.Body.Bytes(), e), "failed to unmarshal response")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -60,6 +60,7 @@ func (e BlockIdParseError) Error() string {
// Blocker is responsible for retrieving blocks.
type Blocker interface {
	Block(ctx context.Context, id []byte) (interfaces.ReadOnlySignedBeaconBlock, error)
	BlockRoot(ctx context.Context, id []byte) ([fieldparams.RootLength]byte, error)
	BlobSidecars(ctx context.Context, id string, opts ...options.BlobsOption) ([]*blocks.VerifiedROBlob, *core.RpcError)
	Blobs(ctx context.Context, id string, opts ...options.BlobsOption) ([][]byte, *core.RpcError)
	DataColumns(ctx context.Context, id string, indices []int) ([]blocks.VerifiedRODataColumn, *core.RpcError)
@@ -225,6 +226,19 @@ func (p *BeaconDbBlocker) Block(ctx context.Context, id []byte) (interfaces.Read
	return blk, nil
}

// BlockRoot returns the block root for a given identifier. The identifier can be one of:
//   - "head" (canonical head in node's view)
//   - "genesis"
//   - "finalized"
//   - "justified"
//   - <slot>
//   - <hex encoded block root with '0x' prefix>
//   - <block root>
func (p *BeaconDbBlocker) BlockRoot(ctx context.Context, id []byte) ([fieldparams.RootLength]byte, error) {
	root, _, err := p.resolveBlockID(ctx, string(id))
	return root, err
}

// blobsContext holds common information needed for blob retrieval
type blobsContext struct {
	root [fieldparams.RootLength]byte

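Taken together with the shared error helper added earlier in this diff, the new interface method lets an HTTP handler resolve any supported block identifier in a single call. The sketch below only illustrates that composition; it mirrors the refactored GetBlockRoot handler rather than adding anything new, and it assumes the same lookup, shared, structs, httputil, and hexutil packages already imported in that handler file.

```go
// Sketch only: how a handler can use Blocker.BlockRoot together with
// shared.WriteBlockRootFetchError. The 400/404/500 mapping is handled
// entirely by the shared helper.
func writeBlockRoot(w http.ResponseWriter, r *http.Request, blocker lookup.Blocker) {
	blockID := r.PathValue("block_id")
	root, err := blocker.BlockRoot(r.Context(), []byte(blockID))
	if !shared.WriteBlockRootFetchError(w, err) {
		return // an error response has already been written
	}
	httputil.WriteJson(w, &structs.BlockRootResponse{
		Data: &structs.BlockRoot{Root: hexutil.Encode(root[:])},
	})
}
```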
@@ -168,6 +168,111 @@ func TestGetBlock(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlockRoot(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
ctx := t.Context()
|
||||
|
||||
genBlk, blkContainers := testutil.FillDBWithBlocks(ctx, t, beaconDB)
|
||||
canonicalRoots := make(map[[32]byte]bool)
|
||||
|
||||
for _, bContr := range blkContainers {
|
||||
canonicalRoots[bytesutil.ToBytes32(bContr.BlockRoot)] = true
|
||||
}
|
||||
headBlock := blkContainers[len(blkContainers)-1]
|
||||
|
||||
wsb, err := blocks.NewSignedBeaconBlock(headBlock.Block.(*ethpb.BeaconBlockContainer_Phase0Block).Phase0Block)
|
||||
require.NoError(t, err)
|
||||
|
||||
fetcher := &BeaconDbBlocker{
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: &mockChain.ChainService{
|
||||
DB: beaconDB,
|
||||
Block: wsb,
|
||||
Root: headBlock.BlockRoot,
|
||||
FinalizedCheckPoint: ðpb.Checkpoint{Root: blkContainers[64].BlockRoot},
|
||||
CurrentJustifiedCheckPoint: ðpb.Checkpoint{Root: blkContainers[32].BlockRoot},
|
||||
CanonicalRoots: canonicalRoots,
|
||||
},
|
||||
}
|
||||
|
||||
genesisRoot, err := genBlk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
blockID []byte
|
||||
want [32]byte
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "slot",
|
||||
blockID: []byte("30"),
|
||||
want: bytesutil.ToBytes32(blkContainers[30].BlockRoot),
|
||||
},
|
||||
{
|
||||
name: "bad formatting",
|
||||
blockID: []byte("3bad0"),
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "head",
|
||||
blockID: []byte("head"),
|
||||
want: bytesutil.ToBytes32(headBlock.BlockRoot),
|
||||
},
|
||||
{
|
||||
name: "finalized",
|
||||
blockID: []byte("finalized"),
|
||||
want: bytesutil.ToBytes32(blkContainers[64].BlockRoot),
|
||||
},
|
||||
{
|
||||
name: "justified",
|
||||
blockID: []byte("justified"),
|
||||
want: bytesutil.ToBytes32(blkContainers[32].BlockRoot),
|
||||
},
|
||||
{
|
||||
name: "genesis",
|
||||
blockID: []byte("genesis"),
|
||||
want: genesisRoot,
|
||||
},
|
||||
{
|
||||
name: "genesis root",
|
||||
blockID: genesisRoot[:],
|
||||
want: genesisRoot,
|
||||
},
|
||||
{
|
||||
name: "root",
|
||||
blockID: blkContainers[20].BlockRoot,
|
||||
want: bytesutil.ToBytes32(blkContainers[20].BlockRoot),
|
||||
},
|
||||
{
|
||||
name: "hex root",
|
||||
blockID: []byte(hexutil.Encode(blkContainers[20].BlockRoot)),
|
||||
want: bytesutil.ToBytes32(blkContainers[20].BlockRoot),
|
||||
},
|
||||
{
|
||||
name: "non-existent root",
|
||||
blockID: bytesutil.PadTo([]byte("hi there"), 32),
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "no block at slot",
|
||||
blockID: []byte("105"),
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result, err := fetcher.BlockRoot(ctx, tt.blockID)
|
||||
if tt.wantErr {
|
||||
assert.NotEqual(t, err, nil, "no error has been returned")
|
||||
return
|
||||
}
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, tt.want, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlobsErrorHandling(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
|
||||
@@ -15,6 +15,7 @@ import (
|
||||
// MockBlocker is a fake implementation of lookup.Blocker.
|
||||
type MockBlocker struct {
|
||||
BlockToReturn interfaces.ReadOnlySignedBeaconBlock
|
||||
RootToReturn [32]byte
|
||||
ErrorToReturn error
|
||||
SlotBlockMap map[primitives.Slot]interfaces.ReadOnlySignedBeaconBlock
|
||||
RootBlockMap map[[32]byte]interfaces.ReadOnlySignedBeaconBlock
|
||||
@@ -39,6 +40,14 @@ func (m *MockBlocker) Block(_ context.Context, b []byte) (interfaces.ReadOnlySig
|
||||
return m.SlotBlockMap[primitives.Slot(slotNumber)], nil
|
||||
}
|
||||
|
||||
// BlockRoot --
|
||||
func (m *MockBlocker) BlockRoot(_ context.Context, _ []byte) ([32]byte, error) {
|
||||
if m.ErrorToReturn != nil {
|
||||
return [32]byte{}, m.ErrorToReturn
|
||||
}
|
||||
return m.RootToReturn, nil
|
||||
}
|
||||
|
||||
// BlobSidecars --
|
||||
func (*MockBlocker) BlobSidecars(_ context.Context, _ string, _ ...options.BlobsOption) ([]*blocks.VerifiedROBlob, *core.RpcError) {
|
||||
return nil, &core.RpcError{}
|
||||
|
||||
@@ -9,6 +9,10 @@ import (
|
||||
type writeOnlyGloasFields interface {
|
||||
SetExecutionPayloadBid(h interfaces.ROExecutionPayloadBid) error
|
||||
SetBuilderPendingPayment(index primitives.Slot, payment *ethpb.BuilderPendingPayment) error
|
||||
ClearBuilderPendingPayment(index primitives.Slot) error
|
||||
RotateBuilderPendingPayments() error
|
||||
AppendBuilderPendingWithdrawals([]*ethpb.BuilderPendingWithdrawal) error
|
||||
UpdateExecutionPayloadAvailabilityAtIndex(idx uint64, val byte) error
|
||||
}
|
||||
|
||||
type readOnlyGloasFields interface {
|
||||
@@ -16,4 +20,5 @@ type readOnlyGloasFields interface {
|
||||
IsActiveBuilder(primitives.BuilderIndex) (bool, error)
|
||||
CanBuilderCoverBid(primitives.BuilderIndex, primitives.Gwei) (bool, error)
|
||||
LatestBlockHash() ([32]byte, error)
|
||||
BuilderPendingPayments() ([]*ethpb.BuilderPendingPayment, error)
|
||||
}
|
||||
|
||||
@@ -135,3 +135,15 @@ func (b *BeaconState) builderPendingBalanceToWithdraw(builderIndex primitives.Bu
|
||||
}
|
||||
return total
|
||||
}
|
||||
|
||||
// BuilderPendingPayments returns a copy of the builder pending payments.
|
||||
func (b *BeaconState) BuilderPendingPayments() ([]*ethpb.BuilderPendingPayment, error) {
|
||||
if b.version < version.Gloas {
|
||||
return nil, errNotSupported("BuilderPendingPayments", b.version)
|
||||
}
|
||||
|
||||
b.lock.RLock()
|
||||
defer b.lock.RUnlock()
|
||||
|
||||
return b.builderPendingPaymentsVal(), nil
|
||||
}
|
||||
|
||||
@@ -157,3 +157,12 @@ func TestBuilderHelpers(t *testing.T) {
|
||||
require.Equal(t, false, ok)
|
||||
})
|
||||
}
|
||||
|
||||
func TestBuilderPendingPayments_UnsupportedVersion(t *testing.T) {
|
||||
stIface, err := state_native.InitializeFromProtoElectra(ðpb.BeaconStateElectra{})
|
||||
require.NoError(t, err)
|
||||
st := stIface.(*state_native.BeaconState)
|
||||
|
||||
_, err = st.BuilderPendingPayments()
|
||||
require.ErrorContains(t, "BuilderPendingPayments", err)
|
||||
}
|
||||
|
||||
@@ -725,3 +725,13 @@ func ProtobufBeaconStateFulu(s any) (*ethpb.BeaconStateFulu, error) {
|
||||
}
|
||||
return pbState, nil
|
||||
}
|
||||
|
||||
// ProtobufBeaconStateGloas transforms an input into beacon state Gloas in the form of protobuf.
|
||||
// Error is returned if the input is not type protobuf beacon state.
|
||||
func ProtobufBeaconStateGloas(s any) (*ethpb.BeaconStateGloas, error) {
|
||||
pbState, ok := s.(*ethpb.BeaconStateGloas)
|
||||
if !ok {
|
||||
return nil, errors.New("input is not type pb.BeaconStateGloas")
|
||||
}
|
||||
return pbState, nil
|
||||
}
|
||||
|
||||
@@ -4,12 +4,71 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native/types"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stateutil"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
)
|
||||
|
||||
// RotateBuilderPendingPayments rotates the queue by dropping slots per epoch payments from the
|
||||
// front and appending slots per epoch empty payments to the end.
|
||||
// This implements: state.builder_pending_payments = state.builder_pending_payments[SLOTS_PER_EPOCH:] + [BuilderPendingPayment() for _ in range(SLOTS_PER_EPOCH)]
|
||||
func (b *BeaconState) RotateBuilderPendingPayments() error {
|
||||
if b.version < version.Gloas {
|
||||
return errNotSupported("RotateBuilderPendingPayments", b.version)
|
||||
}
|
||||
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
copy(b.builderPendingPayments[:slotsPerEpoch], b.builderPendingPayments[slotsPerEpoch:2*slotsPerEpoch])
|
||||
|
||||
for i := slotsPerEpoch; i < primitives.Slot(len(b.builderPendingPayments)); i++ {
|
||||
b.builderPendingPayments[i] = emptyBuilderPendingPayment
|
||||
}
|
||||
|
||||
b.markFieldAsDirty(types.BuilderPendingPayments)
|
||||
b.rebuildTrie[types.BuilderPendingPayments] = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// emptyBuilderPendingPayment is a shared zero-value payment used to clear entries.
|
||||
var emptyBuilderPendingPayment = ðpb.BuilderPendingPayment{
|
||||
Withdrawal: ðpb.BuilderPendingWithdrawal{
|
||||
FeeRecipient: make([]byte, 20),
|
||||
},
|
||||
}
|
||||
|
||||
// AppendBuilderPendingWithdrawals appends builder pending withdrawals to the beacon state.
|
||||
// If the withdrawals slice is shared, it copies the slice first to preserve references.
|
||||
func (b *BeaconState) AppendBuilderPendingWithdrawals(withdrawals []*ethpb.BuilderPendingWithdrawal) error {
|
||||
if b.version < version.Gloas {
|
||||
return errNotSupported("AppendBuilderPendingWithdrawals", b.version)
|
||||
}
|
||||
|
||||
if len(withdrawals) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
pendingWithdrawals := b.builderPendingWithdrawals
|
||||
if b.sharedFieldReferences[types.BuilderPendingWithdrawals].Refs() > 1 {
|
||||
pendingWithdrawals = make([]*ethpb.BuilderPendingWithdrawal, 0, len(b.builderPendingWithdrawals)+len(withdrawals))
|
||||
pendingWithdrawals = append(pendingWithdrawals, b.builderPendingWithdrawals...)
|
||||
b.sharedFieldReferences[types.BuilderPendingWithdrawals].MinusRef()
|
||||
b.sharedFieldReferences[types.BuilderPendingWithdrawals] = stateutil.NewRef(1)
|
||||
}
|
||||
|
||||
b.builderPendingWithdrawals = append(pendingWithdrawals, withdrawals...)
|
||||
b.markFieldAsDirty(types.BuilderPendingWithdrawals)
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetExecutionPayloadBid sets the latest execution payload bid in the state.
|
||||
func (b *BeaconState) SetExecutionPayloadBid(h interfaces.ROExecutionPayloadBid) error {
|
||||
if b.version < version.Gloas {
|
||||
@@ -43,6 +102,25 @@ func (b *BeaconState) SetExecutionPayloadBid(h interfaces.ROExecutionPayloadBid)
|
||||
return nil
|
||||
}
|
||||
|
||||
// ClearBuilderPendingPayment clears a builder pending payment at the specified index.
|
||||
func (b *BeaconState) ClearBuilderPendingPayment(index primitives.Slot) error {
|
||||
if b.version < version.Gloas {
|
||||
return errNotSupported("ClearBuilderPendingPayment", b.version)
|
||||
}
|
||||
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
if uint64(index) >= uint64(len(b.builderPendingPayments)) {
|
||||
return fmt.Errorf("builder pending payments index %d out of range (len=%d)", index, len(b.builderPendingPayments))
|
||||
}
|
||||
|
||||
b.builderPendingPayments[index] = emptyBuilderPendingPayment
|
||||
|
||||
b.markFieldAsDirty(types.BuilderPendingPayments)
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetBuilderPendingPayment sets a builder pending payment at the specified index.
|
||||
func (b *BeaconState) SetBuilderPendingPayment(index primitives.Slot, payment *ethpb.BuilderPendingPayment) error {
|
||||
if b.version < version.Gloas {
|
||||
@@ -61,3 +139,25 @@ func (b *BeaconState) SetBuilderPendingPayment(index primitives.Slot, payment *e
|
||||
b.markFieldAsDirty(types.BuilderPendingPayments)
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateExecutionPayloadAvailabilityAtIndex updates the execution payload availability bit at a specific index.
|
||||
func (b *BeaconState) UpdateExecutionPayloadAvailabilityAtIndex(idx uint64, val byte) error {
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
byteIndex := idx / 8
|
||||
bitIndex := idx % 8
|
||||
|
||||
if byteIndex >= uint64(len(b.executionPayloadAvailability)) {
|
||||
return fmt.Errorf("bit index %d (byte index %d) out of range for execution payload availability length %d", idx, byteIndex, len(b.executionPayloadAvailability))
|
||||
}
|
||||
|
||||
if val != 0 {
|
||||
b.executionPayloadAvailability[byteIndex] |= (1 << bitIndex)
|
||||
} else {
|
||||
b.executionPayloadAvailability[byteIndex] &^= (1 << bitIndex)
|
||||
}
|
||||
|
||||
b.markFieldAsDirty(types.ExecutionPayloadAvailability)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -5,6 +5,8 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native/types"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stateutil"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
@@ -138,3 +140,191 @@ func TestSetBuilderPendingPayment(t *testing.T) {
|
||||
require.Equal(t, false, st.dirtyFields[types.BuilderPendingPayments])
|
||||
})
|
||||
}
|
||||
|
||||
func TestClearBuilderPendingPayment(t *testing.T) {
|
||||
t.Run("previous fork returns expected error", func(t *testing.T) {
|
||||
st := &BeaconState{version: version.Fulu}
|
||||
err := st.ClearBuilderPendingPayment(0)
|
||||
require.ErrorContains(t, "is not supported", err)
|
||||
})
|
||||
|
||||
t.Run("clears and marks dirty", func(t *testing.T) {
|
||||
st := &BeaconState{
|
||||
version: version.Gloas,
|
||||
dirtyFields: make(map[types.FieldIndex]bool),
|
||||
builderPendingPayments: make([]*ethpb.BuilderPendingPayment, 2),
|
||||
}
|
||||
st.builderPendingPayments[1] = ðpb.BuilderPendingPayment{
|
||||
Weight: 2,
|
||||
Withdrawal: ðpb.BuilderPendingWithdrawal{
|
||||
Amount: 99,
|
||||
BuilderIndex: 1,
|
||||
},
|
||||
}
|
||||
|
||||
require.NoError(t, st.ClearBuilderPendingPayment(1))
|
||||
require.Equal(t, emptyBuilderPendingPayment, st.builderPendingPayments[1])
|
||||
require.Equal(t, true, st.dirtyFields[types.BuilderPendingPayments])
|
||||
})
|
||||
|
||||
t.Run("returns error on out of range index", func(t *testing.T) {
|
||||
st := &BeaconState{
|
||||
version: version.Gloas,
|
||||
dirtyFields: make(map[types.FieldIndex]bool),
|
||||
builderPendingPayments: make([]*ethpb.BuilderPendingPayment, 1),
|
||||
}
|
||||
|
||||
err := st.ClearBuilderPendingPayment(2)
|
||||
|
||||
require.ErrorContains(t, "out of range", err)
|
||||
require.Equal(t, false, st.dirtyFields[types.BuilderPendingPayments])
|
||||
})
|
||||
}
|
||||
|
||||
func TestRotateBuilderPendingPayments(t *testing.T) {
|
||||
totalPayments := 2 * params.BeaconConfig().SlotsPerEpoch
|
||||
payments := make([]*ethpb.BuilderPendingPayment, totalPayments)
|
||||
for i := range payments {
|
||||
idx := uint64(i)
|
||||
payments[i] = ðpb.BuilderPendingPayment{
|
||||
Weight: primitives.Gwei(idx * 100e9),
|
||||
Withdrawal: ðpb.BuilderPendingWithdrawal{
|
||||
FeeRecipient: make([]byte, 20),
|
||||
Amount: primitives.Gwei(idx * 1e9),
|
||||
BuilderIndex: primitives.BuilderIndex(idx + 100),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
statePb, err := InitializeFromProtoUnsafeGloas(ðpb.BeaconStateGloas{
|
||||
BuilderPendingPayments: payments,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
st, ok := statePb.(*BeaconState)
|
||||
require.Equal(t, true, ok)
|
||||
|
||||
oldPayments, err := st.BuilderPendingPayments()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.RotateBuilderPendingPayments())
|
||||
|
||||
newPayments, err := st.BuilderPendingPayments()
|
||||
require.NoError(t, err)
|
||||
slotsPerEpoch := int(params.BeaconConfig().SlotsPerEpoch)
|
||||
for i := range slotsPerEpoch {
|
||||
require.DeepEqual(t, oldPayments[slotsPerEpoch+i], newPayments[i])
|
||||
}
|
||||
|
||||
for i := slotsPerEpoch; i < 2*slotsPerEpoch; i++ {
|
||||
payment := newPayments[i]
|
||||
require.Equal(t, primitives.Gwei(0), payment.Weight)
|
||||
require.Equal(t, 20, len(payment.Withdrawal.FeeRecipient))
|
||||
require.Equal(t, primitives.Gwei(0), payment.Withdrawal.Amount)
|
||||
require.Equal(t, primitives.BuilderIndex(0), payment.Withdrawal.BuilderIndex)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRotateBuilderPendingPayments_UnsupportedVersion(t *testing.T) {
|
||||
st := &BeaconState{version: version.Electra}
|
||||
err := st.RotateBuilderPendingPayments()
|
||||
require.ErrorContains(t, "RotateBuilderPendingPayments", err)
|
||||
}
|
||||
|
||||
func TestAppendBuilderPendingWithdrawal_CopyOnWrite(t *testing.T) {
|
||||
wd := ðpb.BuilderPendingWithdrawal{
|
||||
FeeRecipient: make([]byte, 20),
|
||||
Amount: 1,
|
||||
BuilderIndex: 2,
|
||||
}
|
||||
statePb, err := InitializeFromProtoUnsafeGloas(ðpb.BeaconStateGloas{
|
||||
BuilderPendingWithdrawals: []*ethpb.BuilderPendingWithdrawal{wd},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
st, ok := statePb.(*BeaconState)
|
||||
require.Equal(t, true, ok)
|
||||
|
||||
copied := st.Copy().(*BeaconState)
|
||||
require.Equal(t, uint(2), st.sharedFieldReferences[types.BuilderPendingWithdrawals].Refs())
|
||||
|
||||
appended := ðpb.BuilderPendingWithdrawal{
|
||||
FeeRecipient: make([]byte, 20),
|
||||
Amount: 4,
|
||||
BuilderIndex: 5,
|
||||
}
|
||||
require.NoError(t, copied.AppendBuilderPendingWithdrawals([]*ethpb.BuilderPendingWithdrawal{appended}))
|
||||
|
||||
require.Equal(t, 1, len(st.builderPendingWithdrawals))
|
||||
require.Equal(t, 2, len(copied.builderPendingWithdrawals))
|
||||
require.DeepEqual(t, wd, copied.builderPendingWithdrawals[0])
|
||||
require.DeepEqual(t, appended, copied.builderPendingWithdrawals[1])
|
||||
require.DeepEqual(t, wd, st.builderPendingWithdrawals[0])
|
||||
require.Equal(t, uint(1), st.sharedFieldReferences[types.BuilderPendingWithdrawals].Refs())
|
||||
require.Equal(t, uint(1), copied.sharedFieldReferences[types.BuilderPendingWithdrawals].Refs())
|
||||
}
|
||||
|
||||
func TestAppendBuilderPendingWithdrawals(t *testing.T) {
|
||||
st := &BeaconState{
|
||||
version: version.Gloas,
|
||||
dirtyFields: make(map[types.FieldIndex]bool),
|
||||
sharedFieldReferences: map[types.FieldIndex]*stateutil.Reference{
|
||||
types.BuilderPendingWithdrawals: stateutil.NewRef(1),
|
||||
},
|
||||
builderPendingWithdrawals: make([]*ethpb.BuilderPendingWithdrawal, 0),
|
||||
}
|
||||
|
||||
first := ðpb.BuilderPendingWithdrawal{Amount: 1}
|
||||
second := ðpb.BuilderPendingWithdrawal{Amount: 2}
|
||||
require.NoError(t, st.AppendBuilderPendingWithdrawals([]*ethpb.BuilderPendingWithdrawal{first, second}))
|
||||
|
||||
require.Equal(t, 2, len(st.builderPendingWithdrawals))
|
||||
require.DeepEqual(t, first, st.builderPendingWithdrawals[0])
|
||||
require.DeepEqual(t, second, st.builderPendingWithdrawals[1])
|
||||
require.Equal(t, true, st.dirtyFields[types.BuilderPendingWithdrawals])
|
||||
}
|
||||
|
||||
func TestAppendBuilderPendingWithdrawals_UnsupportedVersion(t *testing.T) {
|
||||
st := &BeaconState{version: version.Electra}
|
||||
err := st.AppendBuilderPendingWithdrawals([]*ethpb.BuilderPendingWithdrawal{{}})
|
||||
require.ErrorContains(t, "AppendBuilderPendingWithdrawals", err)
|
||||
}
|
||||
|
||||
func TestUpdateExecutionPayloadAvailabilityAtIndex_SetAndClear(t *testing.T) {
|
||||
st := newGloasStateWithAvailability(t, make([]byte, 1024))
|
||||
|
||||
otherIdx := uint64(8) // byte 1, bit 0
|
||||
idx := uint64(9) // byte 1, bit 1
|
||||
|
||||
require.NoError(t, st.UpdateExecutionPayloadAvailabilityAtIndex(otherIdx, 1))
|
||||
require.Equal(t, byte(0x01), st.executionPayloadAvailability[1])
|
||||
|
||||
require.NoError(t, st.UpdateExecutionPayloadAvailabilityAtIndex(idx, 1))
|
||||
require.Equal(t, byte(0x03), st.executionPayloadAvailability[1])
|
||||
|
||||
require.NoError(t, st.UpdateExecutionPayloadAvailabilityAtIndex(idx, 0))
|
||||
require.Equal(t, byte(0x01), st.executionPayloadAvailability[1])
|
||||
}
|
||||
|
||||
func TestUpdateExecutionPayloadAvailabilityAtIndex_OutOfRange(t *testing.T) {
|
||||
st := newGloasStateWithAvailability(t, make([]byte, 1024))
|
||||
|
||||
idx := uint64(len(st.executionPayloadAvailability)) * 8
|
||||
err := st.UpdateExecutionPayloadAvailabilityAtIndex(idx, 1)
|
||||
require.ErrorContains(t, "out of range", err)
|
||||
|
||||
for _, b := range st.executionPayloadAvailability {
|
||||
if b != 0 {
|
||||
t.Fatalf("execution payload availability mutated on error")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func newGloasStateWithAvailability(t *testing.T, availability []byte) *BeaconState {
|
||||
t.Helper()
|
||||
|
||||
st, err := InitializeFromProtoUnsafeGloas(ðpb.BeaconStateGloas{
|
||||
ExecutionPayloadAvailability: availability,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
return st.(*BeaconState)
|
||||
}
|
||||
|
||||
@@ -4,9 +4,6 @@ import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/verification"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/crypto/bls"
|
||||
"github.com/OffchainLabs/prysm/v7/monitoring/tracing"
|
||||
@@ -56,32 +53,6 @@ func (s *Service) verifierRoutine() {
|
||||
}
|
||||
}
|
||||
|
||||
// A routine that runs in the background to perform batch
|
||||
// KZG verifications by draining the channel and processing all pending requests.
|
||||
func (s *Service) kzgVerifierRoutine() {
|
||||
for {
|
||||
kzgBatch := make([]*kzgVerifier, 0, 1)
|
||||
select {
|
||||
case <-s.ctx.Done():
|
||||
return
|
||||
case kzg := <-s.kzgChan:
|
||||
kzgBatch = append(kzgBatch, kzg)
|
||||
}
|
||||
for {
|
||||
select {
|
||||
case <-s.ctx.Done():
|
||||
return
|
||||
case kzg := <-s.kzgChan:
|
||||
kzgBatch = append(kzgBatch, kzg)
|
||||
continue
|
||||
default:
|
||||
verifyKzgBatch(kzgBatch)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) validateWithBatchVerifier(ctx context.Context, message string, set *bls.SignatureBatch) (pubsub.ValidationResult, error) {
|
||||
_, span := trace.StartSpan(ctx, "sync.validateWithBatchVerifier")
|
||||
defer span.End()
|
||||
@@ -154,71 +125,3 @@ func performBatchAggregation(aggSet *bls.SignatureBatch) (*bls.SignatureBatch, e
|
||||
}
|
||||
return aggSet, nil
|
||||
}
|
||||
|
||||
func (s *Service) validateWithKzgBatchVerifier(ctx context.Context, dataColumns []blocks.RODataColumn) (pubsub.ValidationResult, error) {
|
||||
_, span := trace.StartSpan(ctx, "sync.validateWithKzgBatchVerifier")
|
||||
defer span.End()
|
||||
|
||||
timeout := time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
|
||||
|
||||
resChan := make(chan error, 1)
|
||||
verificationSet := &kzgVerifier{dataColumns: dataColumns, resChan: resChan}
|
||||
ctx, cancel := context.WithTimeout(ctx, timeout)
|
||||
defer cancel()
|
||||
|
||||
select {
|
||||
case s.kzgChan <- verificationSet:
|
||||
case <-ctx.Done():
|
||||
return pubsub.ValidationIgnore, ctx.Err()
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return pubsub.ValidationIgnore, ctx.Err() // parent context canceled, give up
|
||||
case err := <-resChan:
|
||||
if err != nil {
|
||||
log.WithError(err).Trace("Could not perform batch verification")
|
||||
tracing.AnnotateError(span, err)
|
||||
return s.validateUnbatchedColumnsKzg(ctx, dataColumns)
|
||||
}
|
||||
}
|
||||
return pubsub.ValidationAccept, nil
|
||||
}
|
||||
|
||||
func (s *Service) validateUnbatchedColumnsKzg(ctx context.Context, columns []blocks.RODataColumn) (pubsub.ValidationResult, error) {
|
||||
_, span := trace.StartSpan(ctx, "sync.validateUnbatchedColumnsKzg")
|
||||
defer span.End()
|
||||
start := time.Now()
|
||||
if err := peerdas.VerifyDataColumnsSidecarKZGProofs(columns); err != nil {
|
||||
err = errors.Wrap(err, "could not verify")
|
||||
tracing.AnnotateError(span, err)
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
verification.DataColumnBatchKZGVerificationHistogram.WithLabelValues("fallback").Observe(float64(time.Since(start).Milliseconds()))
|
||||
return pubsub.ValidationAccept, nil
|
||||
}
|
||||
|
||||
func verifyKzgBatch(kzgBatch []*kzgVerifier) {
|
||||
if len(kzgBatch) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
allDataColumns := make([]blocks.RODataColumn, 0, len(kzgBatch))
|
||||
for _, kzgVerifier := range kzgBatch {
|
||||
allDataColumns = append(allDataColumns, kzgVerifier.dataColumns...)
|
||||
}
|
||||
|
||||
var verificationErr error
|
||||
start := time.Now()
|
||||
err := peerdas.VerifyDataColumnsSidecarKZGProofs(allDataColumns)
|
||||
if err != nil {
|
||||
verificationErr = errors.Wrap(err, "batch KZG verification failed")
|
||||
} else {
|
||||
verification.DataColumnBatchKZGVerificationHistogram.WithLabelValues("batch").Observe(float64(time.Since(start).Milliseconds()))
|
||||
}
|
||||
|
||||
// Send the same result to all verifiers in the batch
|
||||
for _, verifier := range kzgBatch {
|
||||
verifier.resChan <- verificationErr
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,339 +1,14 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/util"
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
)
|
||||
|
||||
func TestValidateWithKzgBatchVerifier(t *testing.T) {
|
||||
err := kzg.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
dataColumns []blocks.RODataColumn
|
||||
expectedResult pubsub.ValidationResult
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
name: "single valid data column",
|
||||
dataColumns: createValidTestDataColumns(t, 1),
|
||||
expectedResult: pubsub.ValidationAccept,
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "multiple valid data columns",
|
||||
dataColumns: createValidTestDataColumns(t, 3),
|
||||
expectedResult: pubsub.ValidationAccept,
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "single invalid data column",
|
||||
dataColumns: createInvalidTestDataColumns(t, 1),
|
||||
expectedResult: pubsub.ValidationReject,
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
name: "empty data column slice",
|
||||
dataColumns: []blocks.RODataColumn{},
|
||||
expectedResult: pubsub.ValidationAccept,
|
||||
expectError: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
|
||||
service := &Service{
|
||||
ctx: ctx,
|
||||
kzgChan: make(chan *kzgVerifier, 100),
|
||||
}
|
||||
go service.kzgVerifierRoutine()
|
||||
|
||||
result, err := service.validateWithKzgBatchVerifier(ctx, tt.dataColumns)
|
||||
|
||||
require.Equal(t, tt.expectedResult, result)
|
||||
if tt.expectError {
|
||||
assert.NotNil(t, err)
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestVerifierRoutine(t *testing.T) {
|
||||
err := kzg.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Run("processes single request", func(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
|
||||
service := &Service{
|
||||
ctx: ctx,
|
||||
kzgChan: make(chan *kzgVerifier, 100),
|
||||
}
|
||||
go service.kzgVerifierRoutine()
|
||||
|
||||
dataColumns := createValidTestDataColumns(t, 1)
|
||||
resChan := make(chan error, 1)
|
||||
service.kzgChan <- &kzgVerifier{dataColumns: dataColumns, resChan: resChan}
|
||||
|
||||
select {
|
||||
case err := <-resChan:
|
||||
require.NoError(t, err)
|
||||
case <-time.After(time.Second):
|
||||
t.Fatal("timeout waiting for verification result")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("batches multiple requests", func(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
|
||||
service := &Service{
|
||||
ctx: ctx,
|
||||
kzgChan: make(chan *kzgVerifier, 100),
|
||||
}
|
||||
go service.kzgVerifierRoutine()
|
||||
|
||||
const numRequests = 5
|
||||
resChans := make([]chan error, numRequests)
|
||||
|
||||
for i := range numRequests {
|
||||
dataColumns := createValidTestDataColumns(t, 1)
|
||||
resChan := make(chan error, 1)
|
||||
resChans[i] = resChan
|
||||
service.kzgChan <- &kzgVerifier{dataColumns: dataColumns, resChan: resChan}
|
||||
}
|
||||
|
||||
for i := range numRequests {
|
||||
select {
|
||||
case err := <-resChans[i]:
|
||||
require.NoError(t, err)
|
||||
case <-time.After(time.Second):
|
||||
t.Fatalf("timeout waiting for verification result %d", i)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("context cancellation stops routine", func(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
service := &Service{
|
||||
ctx: ctx,
|
||||
kzgChan: make(chan *kzgVerifier, 100),
|
||||
}
|
||||
|
||||
routineDone := make(chan struct{})
|
||||
go func() {
|
||||
service.kzgVerifierRoutine()
|
||||
close(routineDone)
|
||||
}()
|
||||
|
||||
cancel()
|
||||
|
||||
select {
|
||||
case <-routineDone:
|
||||
case <-time.After(time.Second):
|
||||
t.Fatal("timeout waiting for routine to exit")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestVerifyKzgBatch(t *testing.T) {
|
||||
err := kzg.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Run("all valid data columns succeed", func(t *testing.T) {
|
||||
dataColumns := createValidTestDataColumns(t, 3)
|
||||
resChan := make(chan error, 1)
|
||||
kzgVerifiers := []*kzgVerifier{{dataColumns: dataColumns, resChan: resChan}}
|
||||
|
||||
verifyKzgBatch(kzgVerifiers)
|
||||
|
||||
select {
|
||||
case err := <-resChan:
|
||||
require.NoError(t, err)
|
||||
case <-time.After(time.Second):
|
||||
t.Fatal("timeout waiting for batch verification")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("invalid proofs fail entire batch", func(t *testing.T) {
|
||||
validColumns := createValidTestDataColumns(t, 1)
|
||||
invalidColumns := createInvalidTestDataColumns(t, 1)
|
||||
allColumns := append(validColumns, invalidColumns...)
|
||||
|
||||
resChan := make(chan error, 1)
|
||||
kzgVerifiers := []*kzgVerifier{{dataColumns: allColumns, resChan: resChan}}
|
||||
|
||||
verifyKzgBatch(kzgVerifiers)
|
||||
|
||||
select {
|
||||
case err := <-resChan:
|
||||
assert.NotNil(t, err)
|
||||
case <-time.After(time.Second):
|
||||
t.Fatal("timeout waiting for batch verification")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("empty batch handling", func(t *testing.T) {
|
||||
verifyKzgBatch([]*kzgVerifier{})
|
||||
})
|
||||
}
|
||||
|
||||
func TestKzgBatchVerifierConcurrency(t *testing.T) {
|
||||
err := kzg.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
ctx := t.Context()
|
||||
|
||||
service := &Service{
|
||||
ctx: ctx,
|
||||
kzgChan: make(chan *kzgVerifier, 100),
|
||||
}
|
||||
go service.kzgVerifierRoutine()
|
||||
|
||||
const numGoroutines = 10
|
||||
const numRequestsPerGoroutine = 5
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(numGoroutines)
|
||||
|
||||
// Multiple goroutines sending verification requests simultaneously
|
||||
for i := range numGoroutines {
|
||||
go func(goroutineID int) {
|
||||
defer wg.Done()
|
||||
|
||||
for range numRequestsPerGoroutine {
|
||||
dataColumns := createValidTestDataColumns(t, 1)
|
||||
result, err := service.validateWithKzgBatchVerifier(ctx, dataColumns)
|
||||
require.Equal(t, pubsub.ValidationAccept, result)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}(i)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func TestKzgBatchVerifierFallback(t *testing.T) {
|
||||
err := kzg.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Run("fallback handles mixed valid/invalid batch correctly", func(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
|
||||
service := &Service{
|
||||
ctx: ctx,
|
||||
kzgChan: make(chan *kzgVerifier, 100),
|
||||
}
|
||||
go service.kzgVerifierRoutine()
|
||||
|
||||
validColumns := createValidTestDataColumns(t, 1)
|
||||
invalidColumns := createInvalidTestDataColumns(t, 1)
|
||||
|
||||
result, err := service.validateWithKzgBatchVerifier(ctx, validColumns)
|
||||
require.Equal(t, pubsub.ValidationAccept, result)
|
||||
require.NoError(t, err)
|
||||
|
||||
result, err = service.validateWithKzgBatchVerifier(ctx, invalidColumns)
|
||||
require.Equal(t, pubsub.ValidationReject, result)
|
||||
assert.NotNil(t, err)
|
||||
})
|
||||
|
||||
t.Run("empty data columns fallback", func(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
|
||||
service := &Service{
|
||||
ctx: ctx,
|
||||
kzgChan: make(chan *kzgVerifier, 100),
|
||||
}
|
||||
go service.kzgVerifierRoutine()
|
||||
|
||||
result, err := service.validateWithKzgBatchVerifier(ctx, []blocks.RODataColumn{})
|
||||
require.Equal(t, pubsub.ValidationAccept, result)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestValidateWithKzgBatchVerifier_DeadlockOnTimeout(t *testing.T) {
|
||||
err := kzg.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.SecondsPerSlot = 0
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
ctx, cancel := context.WithCancel(t.Context())
|
||||
defer cancel()
|
||||
|
||||
service := &Service{
|
||||
ctx: ctx,
|
||||
kzgChan: make(chan *kzgVerifier),
|
||||
}
|
||||
go service.kzgVerifierRoutine()
|
||||
|
||||
result, err := service.validateWithKzgBatchVerifier(context.Background(), nil)
|
||||
require.Equal(t, pubsub.ValidationIgnore, result)
|
||||
require.ErrorIs(t, err, context.DeadlineExceeded)
|
||||
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
_, _ = service.validateWithKzgBatchVerifier(context.Background(), nil)
|
||||
close(done)
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-done:
|
||||
case <-time.After(500 * time.Millisecond):
|
||||
t.Fatal("validateWithKzgBatchVerifier blocked")
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateWithKzgBatchVerifier_ContextCanceledBeforeSend(t *testing.T) {
|
||||
cancelledCtx, cancel := context.WithCancel(t.Context())
|
||||
cancel()
|
||||
|
||||
service := &Service{
|
||||
ctx: context.Background(),
|
||||
kzgChan: make(chan *kzgVerifier),
|
||||
}
|
||||
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
result, err := service.validateWithKzgBatchVerifier(cancelledCtx, nil)
|
||||
require.Equal(t, pubsub.ValidationIgnore, result)
|
||||
require.ErrorIs(t, err, context.Canceled)
|
||||
close(done)
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-done:
|
||||
case <-time.After(500 * time.Millisecond):
|
||||
t.Fatal("validateWithKzgBatchVerifier did not return after context cancellation")
|
||||
}
|
||||
|
||||
select {
|
||||
case <-service.kzgChan:
|
||||
t.Fatal("verificationSet was sent to kzgChan despite canceled context")
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
func createValidTestDataColumns(t *testing.T, count int) []blocks.RODataColumn {
|
||||
_, roSidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, count)
|
||||
if len(roSidecars) >= count {
|
||||
|
||||
@@ -168,7 +168,6 @@ type Service struct {
|
||||
syncContributionBitsOverlapLock sync.RWMutex
|
||||
syncContributionBitsOverlapCache *lru.Cache
|
||||
signatureChan chan *signatureVerifier
|
||||
kzgChan chan *kzgVerifier
|
||||
clockWaiter startup.ClockWaiter
|
||||
initialSyncComplete chan struct{}
|
||||
verifierWaiter *verification.InitializerWaiter
|
||||
@@ -209,10 +208,7 @@ func NewService(ctx context.Context, opts ...Option) *Service {
|
||||
}
|
||||
// Initialize signature channel with configured limit
|
||||
r.signatureChan = make(chan *signatureVerifier, r.cfg.batchVerifierLimit)
|
||||
// Initialize KZG channel with fixed buffer size of 100.
|
||||
// This buffer size is designed to handle burst traffic of data column gossip messages:
|
||||
// - Data columns arrive less frequently than attestations (default batchVerifierLimit=1000)
|
||||
r.kzgChan = make(chan *kzgVerifier, 100)
|
||||
|
||||
// Correctly remove it from our seen pending block map.
|
||||
// The eviction method always assumes that the mutex is held.
|
||||
r.slotToPendingBlocks.OnEvicted(func(s string, i any) {
|
||||
@@ -265,7 +261,6 @@ func (s *Service) Start() {
|
||||
s.newColumnsVerifier = newDataColumnsVerifierFromInitializer(v)
|
||||
|
||||
go s.verifierRoutine()
|
||||
go s.kzgVerifierRoutine()
|
||||
go s.startDiscoveryAndSubscriptions()
|
||||
go s.processDataColumnLogs()
|
||||
|
||||
|
||||
@@ -144,12 +144,9 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs
|
||||
}
|
||||
|
||||
// [REJECT] The sidecar's column data is valid as verified by `verify_data_column_sidecar_kzg_proofs(sidecar)`.
|
||||
validationResult, err := s.validateWithKzgBatchVerifier(ctx, roDataColumns)
|
||||
if validationResult != pubsub.ValidationAccept {
|
||||
return validationResult, err
|
||||
if err := verifier.SidecarKzgProofVerified(); err != nil {
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
// Mark KZG verification as satisfied since we did it via batch verifier
|
||||
verifier.SatisfyRequirement(verification.RequireSidecarKzgProofVerified)
|
||||
|
||||
// [IGNORE] The sidecar is the first sidecar for the tuple `(block_header.slot, block_header.proposer_index, sidecar.index)`
|
||||
// with valid header signature, sidecar inclusion proof, and kzg proof.
|
||||
|
||||
@@ -71,10 +71,7 @@ func TestValidateDataColumn(t *testing.T) {
|
||||
ctx: ctx,
|
||||
newColumnsVerifier: newDataColumnsVerifier,
|
||||
seenDataColumnCache: newSlotAwareCache(seenDataColumnSize),
|
||||
kzgChan: make(chan *kzgVerifier, 100),
|
||||
}
|
||||
// Start the KZG verifier routine for batch verification
|
||||
go service.kzgVerifierRoutine()
|
||||
|
||||
// Encode a `beaconBlock` message instead of expected.
|
||||
buf := new(bytes.Buffer)
|
||||
|
||||
changelog/bastin_gloas-new-state-test.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Ignored

- Add `NewBeaconStateGloas()`.

changelog/bastin_per-package-verbosity.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Added

- Flag `--log.vmodule` to set per-package verbosity levels for logging.

changelog/eth2353_fix-aggregate-typo.md (new file, 2 lines)
@@ -0,0 +1,2 @@
### Fixed
- Fixed a typo: AggregrateDueBPS -> AggregateDueBPS.

changelog/fix-validator-web-auth-bypass.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Fixed

- Prevent authentication bypass on direct `/v2/validator/*` endpoints by enforcing auth checks for non-public routes.

changelog/james-prysm_blocker-for-block-root.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Ignored

- Optimizing the /eth/v1/beacon/blocks/{block_id}/root endpoint by reusing the blocker lookup instead of duplicating logic.

@@ -1,7 +0,0 @@
### Changed

- gRPC fallback now matches rest api implementation and will also check and connect to only synced nodes.

### Removed

- gRPC resolver for load balancing, the new implementation matches rest api's so we should remove the resolver so it's handled the same way for consistency.

changelog/james-prysm_longer-e2e-sync-evaluator.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Ignored

- Delayed the head evaluator check to mid-epoch for e2e.

@@ -0,0 +1,3 @@
### Ignored

- Updating phase 0 constants for ethspecify.

changelog/manu-kzg-batch.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Removed

- Batching of KZG verification for data column sidecars received via gossip.

changelog/manu_disable_get_blobs_v2_hidden.md (new file, 2 lines)
@@ -0,0 +1,2 @@
### Removed
- `--disable-get-blobs-v2` flag from help.

changelog/satushh-close-file.md (new file, 2 lines)
@@ -0,0 +1,2 @@
### Added
- Close the opened file in data_column.go.

changelog/satushh-fulu-beacon-config.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Added

- Added missing Fulu beacon config values so that presets are not missing from the /eth/v1/config/spec beacon API.

changelog/t_gloas-process-proposer-slashing.md (new file, 2 lines)
@@ -0,0 +1,2 @@
### Added
- Implement modified proposer slashing for Gloas.

changelog/t_gloas_process-slot.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Added

- Add slot processing with execution payload availability updates.

changelog/t_process-builder-pending-payments.md (new file, 2 lines)
@@ -0,0 +1,2 @@
### Added
- Add pending payments processing and quorum threshold, plus spectests and state hooks (rotate/append).

@@ -42,6 +42,7 @@ go_test(
        "//testing/assert:go_default_library",
        "//testing/require:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
+        "@com_github_sirupsen_logrus//:go_default_library",
        "@com_github_urfave_cli_v2//:go_default_library",
        "@org_uber_go_mock//gomock:go_default_library",
    ],
@@ -364,7 +364,8 @@ var (
    }
    // DisableGetBlobsV2 disables the engine_getBlobsV2 usage.
    DisableGetBlobsV2 = &cli.BoolFlag{
-       Name:  "disable-get-blobs-v2",
-       Usage: "Disables the engine_getBlobsV2 usage.",
+       Name:   "disable-get-blobs-v2",
+       Usage:  "Disables the engine_getBlobsV2 usage.",
+       Hidden: true,
    }
)
@@ -8,6 +8,7 @@ import (
    "os"
    "path/filepath"
    runtimeDebug "runtime/debug"
+   "strings"

    "github.com/OffchainLabs/prysm/v7/beacon-chain/builder"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/node"
@@ -113,6 +114,7 @@ var appFlags = []cli.Flag{
    cmd.PubsubQueueSize,
    cmd.DataDirFlag,
    cmd.VerbosityFlag,
+   cmd.LogVModuleFlag,
    cmd.EnableTracingFlag,
    cmd.TracingProcessNameFlag,
    cmd.TracingEndpointFlag,
@@ -171,13 +173,22 @@ func before(ctx *cli.Context) error {
        return errors.Wrap(err, "failed to load flags from config file")
    }

-   // determine log verbosity
+   // determine default log verbosity
    verbosity := ctx.String(cmd.VerbosityFlag.Name)
    verbosityLevel, err := logrus.ParseLevel(verbosity)
    if err != nil {
        return errors.Wrap(err, "failed to parse log verbosity")
    }
-   logs.SetLoggingLevel(verbosityLevel)
+
+   // determine per package verbosity. if not set, maxLevel will be 0.
+   vmoduleInput := strings.Join(ctx.StringSlice(cmd.LogVModuleFlag.Name), ",")
+   vmodule, maxLevel, err := cmd.ParseVModule(vmoduleInput)
+   if err != nil {
+       return errors.Wrap(err, "failed to parse log vmodule")
+   }
+
+   // set the global logging level to allow for the highest verbosity requested
+   logs.SetLoggingLevel(max(verbosityLevel, maxLevel))

    format := ctx.String(cmd.LogFormat.Name)
    switch format {
@@ -191,11 +202,13 @@ func before(ctx *cli.Context) error {
        formatter.FullTimestamp = true
        formatter.ForceFormatting = true
        formatter.ForceColors = true
+       formatter.VModule = vmodule
+       formatter.BaseVerbosity = verbosityLevel

        logrus.AddHook(&logs.WriterHook{
            Formatter:     formatter,
            Writer:        os.Stderr,
-           AllowedLevels: logrus.AllLevels[:verbosityLevel+1],
+           AllowedLevels: logrus.AllLevels[:max(verbosityLevel, maxLevel)+1],
        })
    case "fluentd":
        f := joonix.NewFormatter()
@@ -219,7 +232,7 @@ func before(ctx *cli.Context) error {

    logFileName := ctx.String(cmd.LogFileName.Name)
    if logFileName != "" {
-       if err := logs.ConfigurePersistentLogging(logFileName, format, verbosityLevel); err != nil {
+       if err := logs.ConfigurePersistentLogging(logFileName, format, verbosityLevel, vmodule); err != nil {
            log.WithError(err).Error("Failed to configuring logging to disk.")
        }
    }
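A detail worth spelling out in the hunk above: logrus orders its levels from Panic (0) up to Trace (6), so max(verbosityLevel, maxLevel) selects the most verbose level requested anywhere, and logrus.AllLevels[:max+1] is the slice of every level up to and including it. The WriterHook is therefore opened wide enough for the noisiest vmodule entry, and the per-package narrowing happens later in the formatter. A small standalone sketch of just that gate calculation, using plain logrus with illustrative values rather than the Prysm hook itself:

package main

import (
    "fmt"

    "github.com/sirupsen/logrus"
)

func main() {
    // Hypothetical inputs: --verbosity=info plus a --log.vmodule entry at debug.
    verbosityLevel := logrus.InfoLevel   // 4
    maxVModuleLevel := logrus.DebugLevel // 5

    // logrus levels run Panic(0) .. Trace(6), so the larger value is the more verbose one.
    gate := max(verbosityLevel, maxVModuleLevel)

    // Everything from Panic up to and including the gate, which is what the diff
    // passes as AllowedLevels to the WriterHook.
    fmt.Println(logrus.AllLevels[:gate+1]) // [panic fatal error warning info debug]
}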
@@ -169,7 +169,6 @@ var appHelpFlagGroups = []flagGroup{
            flags.ExecutionJWTSecretFlag,
            flags.JwtId,
            flags.InteropMockEth1DataVotesFlag,
-           flags.DisableGetBlobsV2,
        },
    },
    { // Flags relevant to configuring beacon chain monitoring.
@@ -201,6 +200,7 @@ var appHelpFlagGroups = []flagGroup{
            cmd.LogFileName,
            cmd.VerbosityFlag,
            flags.DisableEphemeralLogFile,
+           cmd.LogVModuleFlag,
        },
    },
    { // Feature flags.
@@ -86,7 +86,7 @@ func main() {

    logFileName := ctx.String(cmd.LogFileName.Name)
    if logFileName != "" {
-       if err := logs.ConfigurePersistentLogging(logFileName, format, level); err != nil {
+       if err := logs.ConfigurePersistentLogging(logFileName, format, level, map[string]logrus.Level{}); err != nil {
            log.WithError(err).Error("Failed to configuring logging to disk.")
        }
    }
@@ -36,6 +36,11 @@ var (
        Usage: "Logging verbosity. (trace, debug, info, warn, error, fatal, panic)",
        Value: "info",
    }
+   // LogVModuleFlag defines per-package log levels.
+   LogVModuleFlag = &cli.StringSliceFlag{
+       Name:  "log.vmodule",
+       Usage: "Per-package log verbosity. packagePath=level entries separated by commas.",
+   }
    // DataDirFlag defines a path on disk where Prysm databases are stored.
    DataDirFlag = &cli.StringFlag{
        Name: "datadir",
@@ -6,6 +6,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
@@ -237,3 +238,41 @@ func TestValidateNoArgs_SubcommandFlags(t *testing.T) {
|
||||
err = app.Run([]string{"command", "bar", "subComm2", "--barfoo100", "garbage", "subComm4"})
|
||||
require.ErrorContains(t, "unrecognized argument: garbage", err)
|
||||
}
|
||||
|
||||
func TestParseVModule(t *testing.T) {
|
||||
t.Run("valid", func(t *testing.T) {
|
||||
input := "beacon-chain/p2p=error, beacon-chain/light-client=trace"
|
||||
parsed, maxL, err := ParseVModule(input)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, logrus.ErrorLevel, parsed["beacon-chain/p2p"])
|
||||
require.Equal(t, logrus.TraceLevel, parsed["beacon-chain/light-client"])
|
||||
require.Equal(t, logrus.TraceLevel, maxL)
|
||||
})
|
||||
|
||||
t.Run("empty", func(t *testing.T) {
|
||||
parsed, maxL, err := ParseVModule(" ")
|
||||
require.NoError(t, err)
|
||||
require.IsNil(t, parsed)
|
||||
require.Equal(t, logrus.PanicLevel, maxL)
|
||||
})
|
||||
|
||||
t.Run("invalid", func(t *testing.T) {
|
||||
tests := []string{
|
||||
"beacon-chain/p2p",
|
||||
"beacon-chain/p2p=",
|
||||
"beacon-chain/p2p/=error",
|
||||
"=info",
|
||||
"beacon-chain/*=info",
|
||||
"beacon-chain/p2p=meow",
|
||||
"/beacon-chain/p2p=info",
|
||||
"beacon-chain/../p2p=info",
|
||||
"beacon-chain/p2p=info,",
|
||||
"beacon-chain/p2p=info,beacon-chain/p2p=debug",
|
||||
}
|
||||
for _, input := range tests {
|
||||
_, maxL, err := ParseVModule(input)
|
||||
require.ErrorContains(t, "invalid", err)
|
||||
require.Equal(t, logrus.PanicLevel, maxL)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
    "bufio"
    "fmt"
    "os"
+   "path"
    "runtime"
    "strings"

@@ -102,3 +103,63 @@ func ExpandSingleEndpointIfFile(ctx *cli.Context, flag *cli.StringFlag) error {
    }
    return nil
}
+
+// ParseVModule parses a comma-separated list of package=level entries.
+func ParseVModule(input string) (map[string]logrus.Level, logrus.Level, error) {
+   var l logrus.Level
+   trimmed := strings.TrimSpace(input)
+   if trimmed == "" {
+       return nil, l, nil
+   }
+
+   parts := strings.Split(trimmed, ",")
+   result := make(map[string]logrus.Level, len(parts))
+   for _, raw := range parts {
+       entry := strings.TrimSpace(raw)
+       if entry == "" {
+           return nil, l, fmt.Errorf("invalid vmodule entry: empty segment")
+       }
+       kv := strings.Split(entry, "=")
+       if len(kv) != 2 {
+           return nil, l, fmt.Errorf("invalid vmodule entry %q: expected path=level", entry)
+       }
+       pkg := strings.TrimSpace(kv[0])
+       levelText := strings.TrimSpace(kv[1])
+       if pkg == "" {
+           return nil, l, fmt.Errorf("invalid vmodule entry %q: empty package path", entry)
+       }
+       if levelText == "" {
+           return nil, l, fmt.Errorf("invalid vmodule entry %q: empty level", entry)
+       }
+       if strings.Contains(pkg, "*") {
+           return nil, l, fmt.Errorf("invalid vmodule package path %q: wildcards are not allowed", pkg)
+       }
+       if strings.ContainsAny(pkg, " \t\n") {
+           return nil, l, fmt.Errorf("invalid vmodule package path %q: whitespace is not allowed", pkg)
+       }
+       if strings.HasPrefix(pkg, "/") {
+           return nil, l, fmt.Errorf("invalid vmodule package path %q: leading slash is not allowed", pkg)
+       }
+       cleaned := path.Clean(pkg)
+       if cleaned != pkg || pkg == "." || pkg == ".." {
+           return nil, l, fmt.Errorf("invalid vmodule package path %q: must be an absolute package path. (trailing slash not allowed)", pkg)
+       }
+       if _, exists := result[pkg]; exists {
+           return nil, l, fmt.Errorf("invalid vmodule package path %q: duplicate entry", pkg)
+       }
+       level, err := logrus.ParseLevel(levelText)
+       if err != nil {
+           return nil, l, fmt.Errorf("invalid vmodule level %q: must be one of panic, fatal, error, warn, info, debug, trace", levelText)
+       }
+       result[pkg] = level
+   }
+
+   maxLevel := logrus.PanicLevel
+   for _, lvl := range result {
+       if lvl > maxLevel {
+           maxLevel = lvl
+       }
+   }
+
+   return result, maxLevel, nil
+}
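For reference, this is how the parser behaves end to end; the import path and signature are the ones used by the callers above (cmd.ParseVModule), while the package paths in the input are made up for illustration:

package main

import (
    "fmt"

    "github.com/OffchainLabs/prysm/v7/cmd"
)

func main() {
    // Valid input: comma-separated path=level entries; surrounding whitespace is trimmed.
    vmodule, maxLevel, err := cmd.ParseVModule("beacon-chain/p2p=error, beacon-chain/sync=trace")
    fmt.Println(vmodule, maxLevel, err)
    // map[beacon-chain/p2p:error beacon-chain/sync:trace] trace <nil>

    // Rejected inputs: wildcards, leading slashes, non-clean or duplicate paths,
    // unknown levels, and trailing commas all produce an "invalid ..." error.
    if _, _, err := cmd.ParseVModule("beacon-chain/*=info"); err != nil {
        fmt.Println(err) // invalid vmodule package path "beacon-chain/*": wildcards are not allowed
    }
}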
@@ -9,6 +9,7 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
runtimeDebug "runtime/debug"
|
||||
"strings"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/cmd"
|
||||
accountcommands "github.com/OffchainLabs/prysm/v7/cmd/validator/accounts"
|
||||
@@ -95,6 +96,7 @@ var appFlags = []cli.Flag{
|
||||
cmd.MinimalConfigFlag,
|
||||
cmd.E2EConfigFlag,
|
||||
cmd.VerbosityFlag,
|
||||
cmd.LogVModuleFlag,
|
||||
cmd.DataDirFlag,
|
||||
cmd.ClearDB,
|
||||
cmd.ForceClearDB,
|
||||
@@ -150,13 +152,22 @@ func main() {
|
||||
return err
|
||||
}
|
||||
|
||||
// determine log verbosity
|
||||
// determine default log verbosity
|
||||
verbosity := ctx.String(cmd.VerbosityFlag.Name)
|
||||
verbosityLevel, err := logrus.ParseLevel(verbosity)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to parse log verbosity")
|
||||
}
|
||||
logs.SetLoggingLevel(verbosityLevel)
|
||||
|
||||
// determine per package verbosity. if not set, maxLevel will be 0.
|
||||
vmoduleInput := strings.Join(ctx.StringSlice(cmd.LogVModuleFlag.Name), ",")
|
||||
vmodule, maxLevel, err := cmd.ParseVModule(vmoduleInput)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to parse log vmodule")
|
||||
}
|
||||
|
||||
// set the global logging level to allow for the highest verbosity requested
|
||||
logs.SetLoggingLevel(max(maxLevel, verbosityLevel))
|
||||
|
||||
logFileName := ctx.String(cmd.LogFileName.Name)
|
||||
|
||||
@@ -172,11 +183,13 @@ func main() {
|
||||
formatter.FullTimestamp = true
|
||||
formatter.ForceFormatting = true
|
||||
formatter.ForceColors = true
|
||||
formatter.VModule = vmodule
|
||||
formatter.BaseVerbosity = verbosityLevel
|
||||
|
||||
logrus.AddHook(&logs.WriterHook{
|
||||
Formatter: formatter,
|
||||
Writer: os.Stderr,
|
||||
AllowedLevels: logrus.AllLevels[:verbosityLevel+1],
|
||||
AllowedLevels: logrus.AllLevels[:max(verbosityLevel, maxLevel)+1],
|
||||
})
|
||||
case "fluentd":
|
||||
f := joonix.NewFormatter()
|
||||
@@ -197,7 +210,7 @@ func main() {
|
||||
}
|
||||
|
||||
if logFileName != "" {
|
||||
if err := logs.ConfigurePersistentLogging(logFileName, format, verbosityLevel); err != nil {
|
||||
if err := logs.ConfigurePersistentLogging(logFileName, format, verbosityLevel, vmodule); err != nil {
|
||||
log.WithError(err).Error("Failed to configuring logging to disk.")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -76,6 +76,7 @@ var appHelpFlagGroups = []flagGroup{
|
||||
cmd.AcceptTosFlag,
|
||||
cmd.ApiTimeoutFlag,
|
||||
flags.DisableEphemeralLogFile,
|
||||
cmd.LogVModuleFlag,
|
||||
},
|
||||
},
|
||||
{
|
||||
|
||||
@@ -88,7 +88,7 @@ type BeaconChainConfig struct {
    IntervalsPerSlot uint64 `yaml:"INTERVALS_PER_SLOT"` // IntervalsPerSlot defines the number of fork choice intervals in a slot defined in the fork choice spec.
    ProposerReorgCutoffBPS primitives.BP `yaml:"PROPOSER_REORG_CUTOFF_BPS" spec:"true"` // ProposerReorgCutoffBPS defines the proposer reorg deadline in basis points of the slot.
    AttestationDueBPS primitives.BP `yaml:"ATTESTATION_DUE_BPS" spec:"true"` // AttestationDueBPS defines the attestation due time in basis points of the slot.
-   AggregrateDueBPS primitives.BP `yaml:"AGGREGRATE_DUE_BPS" spec:"true"` // AggregrateDueBPS defines the aggregate due time in basis points of the slot.
+   AggregateDueBPS primitives.BP `yaml:"AGGREGATE_DUE_BPS" spec:"true"` // AggregateDueBPS defines the aggregate due time in basis points of the slot.
    SyncMessageDueBPS primitives.BP `yaml:"SYNC_MESSAGE_DUE_BPS" spec:"true"` // SyncMessageDueBPS defines the sync message due time in basis points of the slot.
    ContributionDueBPS primitives.BP `yaml:"CONTRIBUTION_DUE_BPS" spec:"true"` // ContributionDueBPS defines the contribution due time in basis points of the slot.
@@ -293,6 +293,10 @@ type BeaconChainConfig struct {
    ValidatorCustodyRequirement uint64 `yaml:"VALIDATOR_CUSTODY_REQUIREMENT" spec:"true"` // ValidatorCustodyRequirement is the minimum number of custody groups an honest node with validators attached custodies and serves samples from
    BalancePerAdditionalCustodyGroup uint64 `yaml:"BALANCE_PER_ADDITIONAL_CUSTODY_GROUP" spec:"true"` // BalancePerAdditionalCustodyGroup is the balance increment corresponding to one additional group to custody.

+   // Values introduced in Gloas upgrade
+   BuilderPaymentThresholdNumerator uint64 `yaml:"BUILDER_PAYMENT_THRESHOLD_NUMERATOR" spec:"true"` // BuilderPaymentThresholdNumerator is the numerator for builder payment quorum threshold calculation.
+   BuilderPaymentThresholdDenominator uint64 `yaml:"BUILDER_PAYMENT_THRESHOLD_DENOMINATOR" spec:"true"` // BuilderPaymentThresholdDenominator is the denominator for builder payment quorum threshold calculation.
+
    // Networking Specific Parameters
    MaxPayloadSize uint64 `yaml:"MAX_PAYLOAD_SIZE" spec:"true"` // MAX_PAYLOAD_SIZE is the maximum allowed size of uncompressed payload in gossip messages and rpc chunks.
    AttestationSubnetCount uint64 `yaml:"ATTESTATION_SUBNET_COUNT" spec:"true"` // AttestationSubnetCount is the number of attestation subnets used in the gossipsub protocol.
@@ -243,7 +243,7 @@ func ConfigToYaml(cfg *BeaconChainConfig) []byte {
        fmt.Sprintf("MAX_BLOBS_PER_BLOCK: %d", cfg.DeprecatedMaxBlobsPerBlock),
        fmt.Sprintf("PROPOSER_REORG_CUTOFF_BPS: %d", cfg.ProposerReorgCutoffBPS),
        fmt.Sprintf("ATTESTATION_DUE_BPS: %d", cfg.AttestationDueBPS),
-       fmt.Sprintf("AGGREGRATE_DUE_BPS: %d", cfg.AggregrateDueBPS),
+       fmt.Sprintf("AGGREGATE_DUE_BPS: %d", cfg.AggregateDueBPS),
        fmt.Sprintf("SYNC_MESSAGE_DUE_BPS: %d", cfg.SyncMessageDueBPS),
        fmt.Sprintf("CONTRIBUTION_DUE_BPS: %d", cfg.ContributionDueBPS),
    }
@@ -24,7 +24,6 @@ import (
// These are variables that we don't use in Prysm. (i.e. future hardfork, light client... etc)
// IMPORTANT: Use one field per line and sort these alphabetically to reduce conflicts.
var placeholderFields = []string{
-   "AGGREGATE_DUE_BPS",
    "AGGREGATE_DUE_BPS_GLOAS",
    "ATTESTATION_DEADLINE",
    "ATTESTATION_DUE_BPS_GLOAS",
@@ -99,7 +98,7 @@ func assertEqualConfigs(t *testing.T, name string, fields []string, expected, ac
    assert.Equal(t, expected.HysteresisDownwardMultiplier, actual.HysteresisDownwardMultiplier, "%s: HysteresisDownwardMultiplier", name)
    assert.Equal(t, expected.HysteresisUpwardMultiplier, actual.HysteresisUpwardMultiplier, "%s: HysteresisUpwardMultiplier", name)
    assert.Equal(t, expected.AttestationDueBPS, actual.AttestationDueBPS, "%s: AttestationDueBPS", name)
-   assert.Equal(t, expected.AggregrateDueBPS, actual.AggregrateDueBPS, "%s: AggregrateDueBPS", name)
+   assert.Equal(t, expected.AggregateDueBPS, actual.AggregateDueBPS, "%s: AggregateDueBPS", name)
    assert.Equal(t, expected.ContributionDueBPS, actual.ContributionDueBPS, "%s: ContributionDueBPS", name)
    assert.Equal(t, expected.ProposerReorgCutoffBPS, actual.ProposerReorgCutoffBPS, "%s: ProposerReorgCutoffBPS", name)
    assert.Equal(t, expected.SyncMessageDueBPS, actual.SyncMessageDueBPS, "%s: SyncMessageDueBPS", name)
@@ -123,7 +123,7 @@ var mainnetBeaconConfig = &BeaconChainConfig{
    // Time-based protocol parameters.
    ProposerReorgCutoffBPS: primitives.BP(1667),
    AttestationDueBPS: primitives.BP(3333),
-   AggregrateDueBPS: primitives.BP(6667),
+   AggregateDueBPS: primitives.BP(6667),
    SyncMessageDueBPS: primitives.BP(3333),
    ContributionDueBPS: primitives.BP(6667),
@@ -331,6 +331,11 @@ var mainnetBeaconConfig = &BeaconChainConfig{
    MinEpochsForDataColumnSidecarsRequest: 4096,
    ValidatorCustodyRequirement: 8,
    BalancePerAdditionalCustodyGroup: 32_000_000_000,
+
+   // Values related to gloas
+   BuilderPaymentThresholdNumerator: 6,
+   BuilderPaymentThresholdDenominator: 10,
+
    // Values related to networking parameters.
    MaxPayloadSize: 10 * 1 << 20, // 10 MiB
    AttestationSubnetCount: 64,
@@ -30,7 +30,7 @@ func addLogWriter(w io.Writer) {
}

// ConfigurePersistentLogging adds a log-to-file writer. File content is identical to stdout.
-func ConfigurePersistentLogging(logFileName string, format string, lvl logrus.Level) error {
+func ConfigurePersistentLogging(logFileName string, format string, lvl logrus.Level, vmodule map[string]logrus.Level) error {
    logrus.WithField("logFileName", logFileName).Info("Logs will be made persistent")
    if err := file.MkdirAll(filepath.Dir(logFileName)); err != nil {
        return err
@@ -47,6 +47,13 @@ func ConfigurePersistentLogging(logFileName string, format string, lvl logrus.Le
        return nil
    }

+   maxVmoduleLevel := logrus.PanicLevel
+   for _, level := range vmodule {
+       if level > maxVmoduleLevel {
+           maxVmoduleLevel = level
+       }
+   }
+
    // Create formatter and writer hook
    formatter := new(prefixed.TextFormatter)
    formatter.TimestampFormat = "2006-01-02 15:04:05.00"
@@ -54,11 +61,13 @@ func ConfigurePersistentLogging(logFileName string, format string, lvl logrus.Le
    // If persistent log files are written - we disable the log messages coloring because
    // the colors are ANSI codes and seen as gibberish in the log files.
    formatter.DisableColors = true
+   formatter.BaseVerbosity = lvl
+   formatter.VModule = vmodule

    logrus.AddHook(&WriterHook{
        Formatter:     formatter,
        Writer:        f,
-       AllowedLevels: logrus.AllLevels[:lvl+1],
+       AllowedLevels: logrus.AllLevels[:max(lvl, maxVmoduleLevel)+1],
    })

    logrus.Info("File logging initialized")
@@ -28,20 +28,20 @@ func TestMaskCredentialsLogging(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestConfigurePersistantLogging(t *testing.T) {
|
||||
func TestConfigurePersistentLogging(t *testing.T) {
|
||||
testParentDir := t.TempDir()
|
||||
|
||||
// 1. Test creation of file in an existing parent directory
|
||||
logFileName := "test.log"
|
||||
existingDirectory := "test-1-existing-testing-dir"
|
||||
|
||||
err := ConfigurePersistentLogging(fmt.Sprintf("%s/%s/%s", testParentDir, existingDirectory, logFileName), "text", logrus.InfoLevel)
|
||||
err := ConfigurePersistentLogging(fmt.Sprintf("%s/%s/%s", testParentDir, existingDirectory, logFileName), "text", logrus.InfoLevel, map[string]logrus.Level{})
|
||||
require.NoError(t, err)
|
||||
|
||||
// 2. Test creation of file along with parent directory
|
||||
nonExistingDirectory := "test-2-non-existing-testing-dir"
|
||||
|
||||
err = ConfigurePersistentLogging(fmt.Sprintf("%s/%s/%s", testParentDir, nonExistingDirectory, logFileName), "text", logrus.InfoLevel)
|
||||
err = ConfigurePersistentLogging(fmt.Sprintf("%s/%s/%s", testParentDir, nonExistingDirectory, logFileName), "text", logrus.InfoLevel, map[string]logrus.Level{})
|
||||
require.NoError(t, err)
|
||||
|
||||
// 3. Test creation of file in an existing parent directory with a non-existing sub-directory
|
||||
@@ -52,7 +52,7 @@ func TestConfigurePersistantLogging(t *testing.T) {
|
||||
return
|
||||
}
|
||||
|
||||
err = ConfigurePersistentLogging(fmt.Sprintf("%s/%s/%s/%s", testParentDir, existingDirectory, nonExistingSubDirectory, logFileName), "text", logrus.InfoLevel)
|
||||
err = ConfigurePersistentLogging(fmt.Sprintf("%s/%s/%s/%s", testParentDir, existingDirectory, nonExistingSubDirectory, logFileName), "text", logrus.InfoLevel, map[string]logrus.Level{})
|
||||
require.NoError(t, err)
|
||||
|
||||
//4. Create log file in a directory without 700 permissions
|
||||
|
||||
@@ -120,6 +120,13 @@ type TextFormatter struct {

    // Timestamp format to use for display when a full timestamp is printed.
    TimestampFormat string
+
+   // VModule overrides the allowed log level for exact package paths.
+   // When using VModule, you should also set BaseVerbosity to the default verbosity level provided by the user.
+   VModule map[string]logrus.Level
+
+   // BaseVerbosity is the default verbosity level for all packages.
+   BaseVerbosity logrus.Level
}

func getCompiledColor(main string, fallback string) func(string) string {
@@ -168,6 +175,11 @@ func (f *TextFormatter) SetColorScheme(colorScheme *ColorScheme) {
}

func (f *TextFormatter) Format(entry *logrus.Entry) ([]byte, error) {
+   // check for vmodule compatibility
+   if !f.shouldLog(entry) {
+       return []byte{}, nil
+   }
+
    var b *bytes.Buffer
    keys := make([]string, 0, len(entry.Data))
    for k := range entry.Data {
@@ -236,6 +248,39 @@ func (f *TextFormatter) Format(entry *logrus.Entry) ([]byte, error) {
    return b.Bytes(), nil
}

+func (f *TextFormatter) shouldLog(entry *logrus.Entry) bool {
+   if len(f.VModule) == 0 {
+       return true
+   }
+   packagePath, ok := entry.Data["package"]
+   if !ok {
+       return entry.Level <= f.BaseVerbosity
+   }
+   packageName, ok := packagePath.(string)
+   if !ok {
+       return entry.Level <= f.BaseVerbosity
+   }
+
+   packageLevel := f.bestMatchLevel(packageName)
+
+   return entry.Level <= packageLevel
+}
+
+// bestMatchLevel returns the level of the most specific path that matches package name.
+func (f *TextFormatter) bestMatchLevel(pkg string) logrus.Level {
+   bestLen := 0
+   bestLevel := f.BaseVerbosity
+   for k, v := range f.VModule {
+       if k == pkg || strings.HasPrefix(pkg, k+"/") {
+           if len(k) > bestLen {
+               bestLen = len(k)
+               bestLevel = v
+           }
+       }
+   }
+   return bestLevel
+}
+
func (f *TextFormatter) printColored(b *bytes.Buffer, entry *logrus.Entry, keys []string, timestampFormat string, colorScheme *compiledColorScheme) (err error) {
    var levelColor func(string) string
    var levelText string
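The formatter keys its decision off the entry's "package" field, so a call site only benefits from --log.vmodule if that field is attached; how Prysm populates it is not shown in this diff, so the WithField call below is an assumption for illustration. The longest-prefix rule is restated standalone here to make the tie-breaking explicit:

package main

import (
    "fmt"
    "strings"

    "github.com/sirupsen/logrus"
)

// matchLevel restates bestMatchLevel from the hunk above: a vmodule entry applies
// to the exact package or any sub-package (prefix followed by "/"), longest prefix wins.
func matchLevel(pkg string, base logrus.Level, vmodule map[string]logrus.Level) logrus.Level {
    bestLen, level := 0, base
    for prefix, lvl := range vmodule {
        if (prefix == pkg || strings.HasPrefix(pkg, prefix+"/")) && len(prefix) > bestLen {
            bestLen, level = len(prefix), lvl
        }
    }
    return level
}

func main() {
    vmodule := map[string]logrus.Level{
        "beacon-chain":     logrus.ErrorLevel,
        "beacon-chain/p2p": logrus.TraceLevel,
    }
    fmt.Println(matchLevel("beacon-chain/p2p/peers", logrus.InfoLevel, vmodule)) // trace
    fmt.Println(matchLevel("beacon-chain/sync", logrus.InfoLevel, vmodule))      // error
    fmt.Println(matchLevel("validator/client", logrus.InfoLevel, vmodule))       // info

    // A call site would need to attach the package field for the filter to apply
    // (assumed shape, not shown in this diff):
    logrus.WithField("package", "beacon-chain/p2p").Debug("dialing peer")
}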
@@ -13,10 +13,6 @@ specrefs:
|
||||
|
||||
exceptions:
|
||||
presets:
|
||||
# Not implemented
|
||||
- CELLS_PER_EXT_BLOB#fulu
|
||||
- UPDATE_TIMEOUT#altair
|
||||
|
||||
# Not implemented: gloas (future fork)
|
||||
- BUILDER_PENDING_WITHDRAWALS_LIMIT#gloas
|
||||
- MAX_PAYLOAD_ATTESTATIONS#gloas
|
||||
@@ -65,14 +61,6 @@ exceptions:
|
||||
- PTC_TIMELINESS_INDEX#gloas
|
||||
|
||||
configs:
|
||||
# Not implemented (placeholders)
|
||||
- AGGREGATE_DUE_BPS#phase0
|
||||
- ATTESTATION_DUE_BPS#phase0
|
||||
- CONTRIBUTION_DUE_BPS#altair
|
||||
- PROPOSER_REORG_CUTOFF_BPS#phase0
|
||||
- SLOT_DURATION_MS#phase0
|
||||
- SYNC_MESSAGE_DUE_BPS#altair
|
||||
|
||||
# Not implemented: gloas (future fork)
|
||||
- AGGREGATE_DUE_BPS_GLOAS#gloas
|
||||
- ATTESTATION_DUE_BPS_GLOAS#gloas
|
||||
@@ -423,10 +411,8 @@ exceptions:
|
||||
- update_proposer_boost_root#phase0
|
||||
|
||||
presets:
|
||||
- CELLS_PER_EXT_BLOB#fulu
|
||||
- BUILDER_PENDING_WITHDRAWALS_LIMIT#gloas
|
||||
- BUILDER_REGISTRY_LIMIT#gloas
|
||||
- MAX_BUILDERS_PER_WITHDRAWALS_SWEEP#gloas
|
||||
- MAX_PAYLOAD_ATTESTATIONS#gloas
|
||||
- PTC_SIZE#gloas
|
||||
- UPDATE_TIMEOUT#altair
|
||||
|
||||
@@ -1,5 +1,8 @@
|
||||
- name: AGGREGATE_DUE_BPS
|
||||
sources: []
|
||||
sources:
|
||||
- file: config/params/config.go
|
||||
search: AggregateDueBPS\s+primitives.BP
|
||||
regex: true
|
||||
spec: |
|
||||
<spec config_var="AGGREGATE_DUE_BPS" fork="phase0" hash="7eaa811a">
|
||||
AGGREGATE_DUE_BPS: uint64 = 6667
|
||||
@@ -26,7 +29,10 @@
|
||||
</spec>
|
||||
|
||||
- name: ATTESTATION_DUE_BPS
|
||||
sources: []
|
||||
sources:
|
||||
- file: config/params/config.go
|
||||
search: AttestationDueBPS\s+primitives.BP
|
||||
regex: true
|
||||
spec: |
|
||||
<spec config_var="ATTESTATION_DUE_BPS" fork="phase0" hash="929dd1c9">
|
||||
ATTESTATION_DUE_BPS: uint64 = 3333
|
||||
@@ -172,7 +178,10 @@
|
||||
</spec>
|
||||
|
||||
- name: CONTRIBUTION_DUE_BPS
|
||||
sources: []
|
||||
sources:
|
||||
- file: config/params/config.go
|
||||
search: ContributionDueBPS\s+primitives.BP
|
||||
regex: true
|
||||
spec: |
|
||||
<spec config_var="CONTRIBUTION_DUE_BPS" fork="altair" hash="a3808203">
|
||||
CONTRIBUTION_DUE_BPS: uint64 = 6667
|
||||
@@ -549,7 +558,10 @@
|
||||
</spec>
|
||||
|
||||
- name: PROPOSER_REORG_CUTOFF_BPS
|
||||
sources: []
|
||||
sources:
|
||||
- file: config/params/config.go
|
||||
search: ProposerReorgCutoffBPS\s+primitives.BP
|
||||
regex: true
|
||||
spec: |
|
||||
<spec config_var="PROPOSER_REORG_CUTOFF_BPS" fork="phase0" hash="a487cc43">
|
||||
PROPOSER_REORG_CUTOFF_BPS: uint64 = 1667
|
||||
@@ -636,7 +648,10 @@
|
||||
</spec>
|
||||
|
||||
- name: SLOT_DURATION_MS
|
||||
sources: []
|
||||
sources:
|
||||
- file: config/params/config.go
|
||||
search: SlotDurationMilliseconds\s+uint64
|
||||
regex: true
|
||||
spec: |
|
||||
<spec config_var="SLOT_DURATION_MS" fork="phase0" hash="b6d4ba6d">
|
||||
SLOT_DURATION_MS: uint64 = 12000
|
||||
@@ -653,7 +668,10 @@
|
||||
</spec>
|
||||
|
||||
- name: SYNC_MESSAGE_DUE_BPS
|
||||
sources: []
|
||||
sources:
|
||||
- file: config/params/config.go
|
||||
search: SyncMessageDueBPS\s+primitives.BP
|
||||
regex: true
|
||||
spec: |
|
||||
<spec config_var="SYNC_MESSAGE_DUE_BPS" fork="altair" hash="791b29d8">
|
||||
SYNC_MESSAGE_DUE_BPS: uint64 = 3333
|
||||
|
||||
@@ -19,7 +19,10 @@
|
||||
</spec>
|
||||
|
||||
- name: CELLS_PER_EXT_BLOB
|
||||
sources: []
|
||||
sources:
|
||||
- file: beacon-chain/rpc/eth/config/handlers.go
|
||||
search: data\["CELLS_PER_EXT_BLOB"\]
|
||||
regex: true
|
||||
spec: |
|
||||
<spec preset_var="CELLS_PER_EXT_BLOB" fork="fulu" hash="217075f3">
|
||||
CELLS_PER_EXT_BLOB = 128
|
||||
@@ -685,6 +688,16 @@
|
||||
TARGET_COMMITTEE_SIZE: uint64 = 128
|
||||
</spec>
|
||||
|
||||
- name: UPDATE_TIMEOUT
|
||||
sources:
|
||||
- file: beacon-chain/rpc/eth/config/handlers.go
|
||||
search: data\["UPDATE_TIMEOUT"\]
|
||||
regex: true
|
||||
spec: |
|
||||
<spec preset_var="UPDATE_TIMEOUT" fork="altair" hash="e633d57e">
|
||||
UPDATE_TIMEOUT = 8192
|
||||
</spec>
|
||||
|
||||
- name: VALIDATOR_REGISTRY_LIMIT
|
||||
sources:
|
||||
- file: config/fieldparams/mainnet.go
|
||||
|
||||
@@ -296,7 +296,7 @@ func (r *testRunner) waitForMatchingHead(ctx context.Context, timeout time.Durat
}

func (r *testRunner) testCheckpointSync(ctx context.Context, g *errgroup.Group, i int, conns []*grpc.ClientConn, bnAPI, enr, minerEnr string) error {
-   matchTimeout := 3 * time.Minute
+   matchTimeout := 5 * time.Minute
    ethNode := eth1.NewNode(i, minerEnr)
    g.Go(func() error {
        return ethNode.Start(ctx)
@@ -10,6 +10,7 @@ import (
    "net/http"
    "time"

+   "github.com/OffchainLabs/prysm/v7/config/params"
    "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
    eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
    e2e "github.com/OffchainLabs/prysm/v7/testing/endtoend/params"
@@ -128,8 +129,42 @@ func finishedSyncing(_ *e2etypes.EvaluationContext, conns ...*grpc.ClientConn) e
    return nil
}

+// waitForMidEpoch waits until we're at least halfway into the current epoch
+// and 3/4 into the current slot. This prevents race conditions at epoch
+// boundaries and slot boundaries where different nodes may report different heads.
+func waitForMidEpoch(conn *grpc.ClientConn) error {
+   beaconClient := eth.NewBeaconChainClient(conn)
+   slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
+   secondsPerSlot := params.BeaconConfig().SecondsPerSlot
+   midEpochSlot := slotsPerEpoch / 2
+
+   for {
+       chainHead, err := beaconClient.GetChainHead(context.Background(), &emptypb.Empty{})
+       if err != nil {
+           return err
+       }
+       slotInEpoch := chainHead.HeadSlot % slotsPerEpoch
+       // If we're at least halfway into the epoch, we're safe
+       if slotInEpoch >= midEpochSlot {
+           // Wait 3/4 into the slot to ensure block propagation
+           time.Sleep(time.Duration(secondsPerSlot) * time.Second * 3 / 4)
+           return nil
+       }
+       // Wait for the remaining slots until mid-epoch
+       slotsToWait := midEpochSlot - slotInEpoch
+       time.Sleep(time.Duration(slotsToWait) * time.Duration(secondsPerSlot) * time.Second)
+   }
+}
+
func allNodesHaveSameHead(_ *e2etypes.EvaluationContext, conns ...*grpc.ClientConn) error {
+   // Wait until we're at least halfway into the epoch to avoid race conditions
+   // at epoch boundaries where nodes may report different epochs.
+   if err := waitForMidEpoch(conns[0]); err != nil {
+       return errors.Wrap(err, "failed waiting for mid-epoch")
+   }
+
    headEpochs := make([]primitives.Epoch, len(conns))
+   headBlockRoots := make([][]byte, len(conns))
    justifiedRoots := make([][]byte, len(conns))
    prevJustifiedRoots := make([][]byte, len(conns))
    finalizedRoots := make([][]byte, len(conns))
@@ -146,6 +181,7 @@ func allNodesHaveSameHead(_ *e2etypes.EvaluationContext, conns ...*grpc.ClientCo
            return errors.Wrapf(err, "connection number=%d", conIdx)
        }
        headEpochs[conIdx] = chainHead.HeadEpoch
+       headBlockRoots[conIdx] = chainHead.HeadBlockRoot
        justifiedRoots[conIdx] = chainHead.JustifiedBlockRoot
        prevJustifiedRoots[conIdx] = chainHead.PreviousJustifiedBlockRoot
        finalizedRoots[conIdx] = chainHead.FinalizedBlockRoot
@@ -166,6 +202,14 @@ func allNodesHaveSameHead(_ *e2etypes.EvaluationContext, conns ...*grpc.ClientCo
                headEpochs[i],
            )
        }
+       if !bytes.Equal(headBlockRoots[0], headBlockRoots[i]) {
+           return fmt.Errorf(
+               "received conflicting head block roots on node %d, expected %#x, received %#x",
+               i,
+               headBlockRoots[0],
+               headBlockRoots[i],
+           )
+       }
        if !bytes.Equal(justifiedRoots[0], justifiedRoots[i]) {
            return fmt.Errorf(
                "received conflicting justified block roots on node %d, expected %#x, received %#x: %s and %s",
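The waiting logic above is plain slot arithmetic; a standalone sketch of the same calculation, with mainnet-style values assumed purely for illustration:

package main

import (
    "fmt"
    "time"
)

func main() {
    const (
        slotsPerEpoch  = 32                // SlotsPerEpoch on mainnet
        secondsPerSlot = 12                // SecondsPerSlot on mainnet
        midEpochSlot   = slotsPerEpoch / 2 // the same halfway point the evaluator uses
    )

    headSlot := uint64(1000)
    slotInEpoch := headSlot % slotsPerEpoch // 1000 % 32 = 8, i.e. before mid-epoch

    if slotInEpoch >= midEpochSlot {
        // Already past the halfway point: wait 3/4 of a slot for block propagation.
        fmt.Println(time.Duration(secondsPerSlot) * time.Second * 3 / 4) // 9s
    } else {
        // Otherwise sleep the remaining slots until mid-epoch, then re-check.
        slotsToWait := midEpochSlot - slotInEpoch
        fmt.Println(time.Duration(slotsToWait*secondsPerSlot) * time.Second) // 1m36s (8 slots * 12s)
    }
}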
@@ -200,7 +200,10 @@ go_test(
|
||||
"fulu__sanity__blocks_test.go",
|
||||
"fulu__sanity__slots_test.go",
|
||||
"fulu__ssz_static__ssz_static_test.go",
|
||||
"gloas__epoch_processing__process_builder_pending_payments_test.go",
|
||||
"gloas__operations__execution_payload_header_test.go",
|
||||
"gloas__operations__proposer_slashing_test.go",
|
||||
"gloas__sanity__slots_test.go",
|
||||
"gloas__ssz_static__ssz_static_test.go",
|
||||
"phase0__epoch_processing__effective_balance_updates_test.go",
|
||||
"phase0__epoch_processing__epoch_processing_test.go",
|
||||
@@ -279,7 +282,9 @@ go_test(
|
||||
"//testing/spectest/shared/fulu/rewards:go_default_library",
|
||||
"//testing/spectest/shared/fulu/sanity:go_default_library",
|
||||
"//testing/spectest/shared/fulu/ssz_static:go_default_library",
|
||||
"//testing/spectest/shared/gloas/epoch_processing:go_default_library",
|
||||
"//testing/spectest/shared/gloas/operations:go_default_library",
|
||||
"//testing/spectest/shared/gloas/sanity:go_default_library",
|
||||
"//testing/spectest/shared/gloas/ssz_static:go_default_library",
|
||||
"//testing/spectest/shared/phase0/epoch_processing:go_default_library",
|
||||
"//testing/spectest/shared/phase0/finality:go_default_library",
|
||||
|
||||
@@ -0,0 +1,11 @@
|
||||
package mainnet
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/testing/spectest/shared/gloas/epoch_processing"
|
||||
)
|
||||
|
||||
func TestMainnet_Gloas_EpochProcessing_ProcessBuilderPendingPayments(t *testing.T) {
|
||||
epoch_processing.RunBuilderPendingPaymentsTests(t, "mainnet")
|
||||
}
|
||||
@@ -0,0 +1,11 @@
|
||||
package mainnet
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/testing/spectest/shared/gloas/operations"
|
||||
)
|
||||
|
||||
func TestMainnet_Gloas_Operations_ProposerSlashing(t *testing.T) {
|
||||
operations.RunProposerSlashingTest(t, "mainnet")
|
||||
}
|
||||
11
testing/spectest/mainnet/gloas__sanity__slots_test.go
Normal file
11
testing/spectest/mainnet/gloas__sanity__slots_test.go
Normal file
@@ -0,0 +1,11 @@
|
||||
package mainnet
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/testing/spectest/shared/gloas/sanity"
|
||||
)
|
||||
|
||||
func TestMainnet_Gloas_Sanity_Slots(t *testing.T) {
|
||||
sanity.RunSlotProcessingTests(t, "mainnet")
|
||||
}
|
||||
@@ -206,7 +206,10 @@ go_test(
|
||||
"fulu__sanity__blocks_test.go",
|
||||
"fulu__sanity__slots_test.go",
|
||||
"fulu__ssz_static__ssz_static_test.go",
|
||||
"gloas__epoch_processing__process_builder_pending_payments_test.go",
|
||||
"gloas__operations__execution_payload_bid_test.go",
|
||||
"gloas__operations__proposer_slashing_test.go",
|
||||
"gloas__sanity__slots_test.go",
|
||||
"gloas__ssz_static__ssz_static_test.go",
|
||||
"phase0__epoch_processing__effective_balance_updates_test.go",
|
||||
"phase0__epoch_processing__epoch_processing_test.go",
|
||||
@@ -289,7 +292,9 @@ go_test(
|
||||
"//testing/spectest/shared/fulu/rewards:go_default_library",
|
||||
"//testing/spectest/shared/fulu/sanity:go_default_library",
|
||||
"//testing/spectest/shared/fulu/ssz_static:go_default_library",
|
||||
"//testing/spectest/shared/gloas/epoch_processing:go_default_library",
|
||||
"//testing/spectest/shared/gloas/operations:go_default_library",
|
||||
"//testing/spectest/shared/gloas/sanity:go_default_library",
|
||||
"//testing/spectest/shared/gloas/ssz_static:go_default_library",
|
||||
"//testing/spectest/shared/phase0/epoch_processing:go_default_library",
|
||||
"//testing/spectest/shared/phase0/finality:go_default_library",
|
||||
|
||||
@@ -0,0 +1,11 @@
|
||||
package minimal
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/testing/spectest/shared/gloas/epoch_processing"
|
||||
)
|
||||
|
||||
func TestMinimal_Gloas_EpochProcessing_ProcessBuilderPendingPayments(t *testing.T) {
|
||||
epoch_processing.RunBuilderPendingPaymentsTests(t, "minimal")
|
||||
}
|
||||
@@ -0,0 +1,11 @@
|
||||
package minimal
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/testing/spectest/shared/gloas/operations"
|
||||
)
|
||||
|
||||
func TestMinimal_Gloas_Operations_ProposerSlashing(t *testing.T) {
|
||||
operations.RunProposerSlashingTest(t, "minimal")
|
||||
}
|
||||
11
testing/spectest/minimal/gloas__sanity__slots_test.go
Normal file
11
testing/spectest/minimal/gloas__sanity__slots_test.go
Normal file
@@ -0,0 +1,11 @@
|
||||
package minimal
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/testing/spectest/shared/gloas/sanity"
|
||||
)
|
||||
|
||||
func TestMinimal_Gloas_Sanity_Slots(t *testing.T) {
|
||||
sanity.RunSlotProcessingTests(t, "minimal")
|
||||
}
|
||||
26
testing/spectest/shared/gloas/epoch_processing/BUILD.bazel
Normal file
26
testing/spectest/shared/gloas/epoch_processing/BUILD.bazel
Normal file
@@ -0,0 +1,26 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
testonly = True,
|
||||
srcs = [
|
||||
"builder_pending_payments.go",
|
||||
"helpers.go",
|
||||
],
|
||||
importpath = "github.com/OffchainLabs/prysm/v7/testing/spectest/shared/gloas/epoch_processing",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/core/gloas:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/spectest/utils:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"@com_github_golang_snappy//:go_default_library",
|
||||
"@com_github_google_go_cmp//cmp:go_default_library",
|
||||
"@io_bazel_rules_go//go/tools/bazel:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
"@org_golang_google_protobuf//testing/protocmp:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -0,0 +1,26 @@
|
||||
package epoch_processing
|
||||
|
||||
import (
|
||||
"path"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/spectest/utils"
|
||||
)
|
||||
|
||||
func RunBuilderPendingPaymentsTests(t *testing.T, config string) {
|
||||
require.NoError(t, utils.SetConfig(t, config))
|
||||
testFolders, testsFolderPath := utils.TestFolders(t, config, "gloas", "epoch_processing/builder_pending_payments/pyspec_tests")
|
||||
for _, folder := range testFolders {
|
||||
t.Run(folder.Name(), func(t *testing.T) {
|
||||
folderPath := path.Join(testsFolderPath, folder.Name())
|
||||
RunEpochOperationTest(t, folderPath, processBuilderPendingPayments)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func processBuilderPendingPayments(t *testing.T, st state.BeaconState) (state.BeaconState, error) {
|
||||
return st, gloas.ProcessBuilderPendingPayments(st)
|
||||
}
|
||||
76
testing/spectest/shared/gloas/epoch_processing/helpers.go
Normal file
76
testing/spectest/shared/gloas/epoch_processing/helpers.go
Normal file
@@ -0,0 +1,76 @@
|
||||
package epoch_processing
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/util"
|
||||
"github.com/bazelbuild/rules_go/go/tools/bazel"
|
||||
"github.com/golang/snappy"
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/testing/protocmp"
|
||||
)
|
||||
|
||||
type epochOperation func(*testing.T, state.BeaconState) (state.BeaconState, error)
|
||||
|
||||
// RunEpochOperationTest takes in the prestate and processes it through the
|
||||
// passed in epoch operation function and checks the post state with the expected post state.
|
||||
func RunEpochOperationTest(
|
||||
t *testing.T,
|
||||
testFolderPath string,
|
||||
operationFn epochOperation,
|
||||
) {
|
||||
preBeaconStateFile, err := util.BazelFileBytes(path.Join(testFolderPath, "pre.ssz_snappy"))
|
||||
require.NoError(t, err)
|
||||
preBeaconStateSSZ, err := snappy.Decode(nil /* dst */, preBeaconStateFile)
|
||||
require.NoError(t, err, "Failed to decompress")
|
||||
preBeaconStateBase := ðpb.BeaconStateGloas{}
|
||||
if err := preBeaconStateBase.UnmarshalSSZ(preBeaconStateSSZ); err != nil {
|
||||
t.Fatalf("Failed to unmarshal: %v", err)
|
||||
}
|
||||
preBeaconState, err := state_native.InitializeFromProtoGloas(preBeaconStateBase)
|
||||
require.NoError(t, err)
|
||||
|
||||
// If the post.ssz is not present, it means the test should fail on our end.
|
||||
postSSZFilepath, err := bazel.Runfile(path.Join(testFolderPath, "post.ssz_snappy"))
|
||||
postSSZExists := true
|
||||
if err != nil && strings.Contains(err.Error(), "could not locate file") {
|
||||
postSSZExists = false
|
||||
} else if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
beaconState, err := operationFn(t, preBeaconState)
|
||||
if postSSZExists {
|
||||
require.NoError(t, err)
|
||||
|
||||
postBeaconStateFile, err := os.ReadFile(postSSZFilepath) // #nosec G304
|
||||
require.NoError(t, err)
|
||||
postBeaconStateSSZ, err := snappy.Decode(nil /* dst */, postBeaconStateFile)
|
||||
require.NoError(t, err, "Failed to decompress")
|
||||
postBeaconState := ðpb.BeaconStateGloas{}
|
||||
if err := postBeaconState.UnmarshalSSZ(postBeaconStateSSZ); err != nil {
|
||||
t.Fatalf("Failed to unmarshal: %v", err)
|
||||
}
|
||||
|
||||
pbState, err := state_native.ProtobufBeaconStateGloas(beaconState.ToProtoUnsafe())
|
||||
require.NoError(t, err)
|
||||
if !proto.Equal(pbState, postBeaconState) {
|
||||
t.Log(cmp.Diff(postBeaconState, pbState, protocmp.Transform()))
|
||||
t.Fatal("Post state does not match expected")
|
||||
}
|
||||
} else {
|
||||
if err == nil {
|
||||
t.Fatal("Did not fail when expected")
|
||||
}
|
||||
t.Logf("Expected failure; failure reason = %v", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -6,6 +6,7 @@ go_library(
|
||||
srcs = [
|
||||
"execution_payload_bid.go",
|
||||
"helpers.go",
|
||||
"proposer_slashing.go",
|
||||
],
|
||||
importpath = "github.com/OffchainLabs/prysm/v7/testing/spectest/shared/gloas/operations",
|
||||
visibility = ["//visibility:public"],
|
||||
|
||||
@@ -3,6 +3,8 @@ package operations
|
||||
import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
@@ -13,3 +15,19 @@ func sszToState(b []byte) (state.BeaconState, error) {
|
||||
}
|
||||
return state_native.InitializeFromProtoGloas(base)
|
||||
}
|
||||
|
||||
func sszToBlock(b []byte) (interfaces.SignedBeaconBlock, error) {
|
||||
base := ðpb.BeaconBlockGloas{}
|
||||
if err := base.UnmarshalSSZ(b); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlockGloas{Block: base})
|
||||
}
|
||||
|
||||
func sszToBlockBody(b []byte) (interfaces.ReadOnlyBeaconBlockBody, error) {
|
||||
base := ðpb.BeaconBlockBodyGloas{}
|
||||
if err := base.UnmarshalSSZ(b); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return blocks.NewBeaconBlockBody(base)
|
||||
}
|
||||
|
||||
@@ -0,0 +1,28 @@
|
||||
package operations
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
common "github.com/OffchainLabs/prysm/v7/testing/spectest/shared/common/operations"
|
||||
)
|
||||
|
||||
func blockWithProposerSlashing(ssz []byte) (interfaces.SignedBeaconBlock, error) {
|
||||
ps := ðpb.ProposerSlashing{}
|
||||
if err := ps.UnmarshalSSZ(ssz); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b := ðpb.SignedBeaconBlockGloas{
|
||||
Block: ðpb.BeaconBlockGloas{
|
||||
Body: ðpb.BeaconBlockBodyGloas{ProposerSlashings: []*ethpb.ProposerSlashing{ps}},
|
||||
},
|
||||
}
|
||||
return blocks.NewSignedBeaconBlock(b)
|
||||
}
|
||||
|
||||
func RunProposerSlashingTest(t *testing.T, config string) {
|
||||
common.RunProposerSlashingTest(t, config, version.String(version.Gloas), blockWithProposerSlashing, sszToState)
|
||||
}
|
||||
19
testing/spectest/shared/gloas/sanity/BUILD.bazel
Normal file
19
testing/spectest/shared/gloas/sanity/BUILD.bazel
Normal file
@@ -0,0 +1,19 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
testonly = True,
|
||||
srcs = ["slot_processing.go"],
|
||||
importpath = "github.com/OffchainLabs/prysm/v7/testing/spectest/shared/gloas/sanity",
|
||||
visibility = ["//testing/spectest:__subpackages__"],
|
||||
deps = [
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/spectest/utils:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"@com_github_golang_snappy//:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
],
|
||||
)
|
||||
61
testing/spectest/shared/gloas/sanity/slot_processing.go
Normal file
61
testing/spectest/shared/gloas/sanity/slot_processing.go
Normal file
@@ -0,0 +1,61 @@
|
||||
package sanity
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
|
||||
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/spectest/utils"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/util"
|
||||
"github.com/golang/snappy"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
func init() {
|
||||
transition.SkipSlotCache.Disable()
|
||||
}
|
||||
|
||||
// RunSlotProcessingTests executes "sanity/slots" tests.
|
||||
func RunSlotProcessingTests(t *testing.T, config string) {
|
||||
require.NoError(t, utils.SetConfig(t, config))
|
||||
|
||||
testFolders, testsFolderPath := utils.TestFolders(t, config, "gloas", "sanity/slots/pyspec_tests")
|
||||
|
||||
for _, folder := range testFolders {
|
||||
t.Run(folder.Name(), func(t *testing.T) {
|
||||
preBeaconStateFile, err := util.BazelFileBytes(testsFolderPath, folder.Name(), "pre.ssz_snappy")
|
||||
require.NoError(t, err)
|
||||
preBeaconStateSSZ, err := snappy.Decode(nil /* dst */, preBeaconStateFile)
|
||||
require.NoError(t, err, "Failed to decompress")
|
||||
base := ðpb.BeaconStateGloas{}
|
||||
require.NoError(t, base.UnmarshalSSZ(preBeaconStateSSZ), "Failed to unmarshal")
|
||||
beaconState, err := state_native.InitializeFromProtoGloas(base)
|
||||
require.NoError(t, err)
|
||||
|
||||
file, err := util.BazelFileBytes(testsFolderPath, folder.Name(), "slots.yaml")
|
||||
require.NoError(t, err)
|
||||
fileStr := string(file)
|
||||
slotsCount, err := strconv.ParseUint(fileStr[:len(fileStr)-5], 10, 64)
|
||||
require.NoError(t, err)
|
||||
|
||||
postBeaconStateFile, err := util.BazelFileBytes(testsFolderPath, folder.Name(), "post.ssz_snappy")
|
||||
require.NoError(t, err)
|
||||
postBeaconStateSSZ, err := snappy.Decode(nil /* dst */, postBeaconStateFile)
|
||||
require.NoError(t, err, "Failed to decompress")
|
||||
postBeaconState := ðpb.BeaconStateGloas{}
|
||||
require.NoError(t, postBeaconState.UnmarshalSSZ(postBeaconStateSSZ), "Failed to unmarshal")
|
||||
postState, err := transition.ProcessSlots(context.Background(), beaconState, beaconState.Slot().Add(slotsCount))
|
||||
require.NoError(t, err)
|
||||
|
||||
pbState, err := state_native.ProtobufBeaconStateGloas(postState.ToProto())
|
||||
require.NoError(t, err)
|
||||
if !proto.Equal(pbState, postBeaconState) {
|
||||
t.Fatal("Did not receive expected post state")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -112,7 +112,7 @@ func NewBeaconState(options ...NewBeaconStateOption) (state.BeaconState, error)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return st.Copy(), nil
|
||||
return st, nil
|
||||
}
|
||||
|
||||
// NewBeaconStateAltair creates a beacon state with minimum marshalable fields.
|
||||
@@ -167,7 +167,7 @@ func NewBeaconStateAltair(options ...func(state *ethpb.BeaconStateAltair) error)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return st.Copy(), nil
|
||||
return st, nil
|
||||
}
|
||||
|
||||
// NewBeaconStateBellatrix creates a beacon state with minimum marshalable fields.
|
||||
@@ -234,7 +234,7 @@ func NewBeaconStateBellatrix(options ...func(state *ethpb.BeaconStateBellatrix)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return st.Copy(), nil
|
||||
return st, nil
|
||||
}
|
||||
|
||||
// NewBeaconStateCapella creates a beacon state with minimum marshalable fields.
|
||||
@@ -302,7 +302,7 @@ func NewBeaconStateCapella(options ...func(state *ethpb.BeaconStateCapella) erro
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return st.Copy(), nil
|
||||
return st, nil
|
||||
}
|
||||
|
||||
// NewBeaconStateDeneb creates a beacon state with minimum marshalable fields.
|
||||
@@ -370,7 +370,7 @@ func NewBeaconStateDeneb(options ...func(state *ethpb.BeaconStateDeneb) error) (
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return st.Copy(), nil
|
||||
return st, nil
|
||||
}
|
||||
|
||||
// NewBeaconStateElectra creates a beacon state with minimum marshalable fields.
|
||||
@@ -438,7 +438,7 @@ func NewBeaconStateElectra(options ...func(state *ethpb.BeaconStateElectra) erro
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return st.Copy(), nil
|
||||
return st, nil
|
||||
}
|
||||
|
||||
// NewBeaconStateFulu creates a beacon state with minimum marshalable fields.
|
||||
@@ -507,7 +507,86 @@ func NewBeaconStateFulu(options ...func(state *ethpb.BeaconStateFulu) error) (st
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return st.Copy(), nil
|
||||
return st, nil
|
||||
}
|
||||
|
||||
// NewBeaconStateGloas creates a beacon state with minimum marshalable fields.
|
||||
func NewBeaconStateGloas(options ...func(state *ethpb.BeaconStateGloas) error) (state.BeaconState, error) {
|
||||
pubkeys := make([][]byte, 512)
|
||||
for i := range pubkeys {
|
||||
pubkeys[i] = make([]byte, 48)
|
||||
}
|
||||
|
||||
builderPendingPayments := make([]*ethpb.BuilderPendingPayment, 64)
|
||||
for i := range builderPendingPayments {
|
||||
builderPendingPayments[i] = ðpb.BuilderPendingPayment{
|
||||
Withdrawal: ðpb.BuilderPendingWithdrawal{
|
||||
FeeRecipient: make([]byte, 20),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
seed := ðpb.BeaconStateGloas{
|
||||
BlockRoots: filledByteSlice2D(uint64(params.BeaconConfig().SlotsPerHistoricalRoot), 32),
|
||||
StateRoots: filledByteSlice2D(uint64(params.BeaconConfig().SlotsPerHistoricalRoot), 32),
|
||||
Slashings: make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector),
|
||||
RandaoMixes: filledByteSlice2D(uint64(params.BeaconConfig().EpochsPerHistoricalVector), 32),
|
||||
Validators: make([]*ethpb.Validator, 0),
|
||||
CurrentJustifiedCheckpoint: ðpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)},
|
||||
Eth1Data: ðpb.Eth1Data{
|
||||
DepositRoot: make([]byte, fieldparams.RootLength),
|
||||
BlockHash: make([]byte, 32),
|
||||
},
|
||||
Fork: ðpb.Fork{
|
||||
PreviousVersion: make([]byte, 4),
|
||||
CurrentVersion: make([]byte, 4),
|
||||
},
|
||||
Eth1DataVotes: make([]*ethpb.Eth1Data, 0),
|
||||
HistoricalRoots: make([][]byte, 0),
|
||||
JustificationBits: bitfield.Bitvector4{0x0},
|
||||
FinalizedCheckpoint: ðpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)},
|
||||
LatestBlockHeader: HydrateBeaconHeader(ðpb.BeaconBlockHeader{}),
|
||||
PreviousJustifiedCheckpoint: ðpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)},
|
||||
PreviousEpochParticipation: make([]byte, 0),
|
||||
CurrentEpochParticipation: make([]byte, 0),
|
||||
CurrentSyncCommittee: ðpb.SyncCommittee{
|
||||
Pubkeys: pubkeys,
|
||||
AggregatePubkey: make([]byte, 48),
|
||||
},
|
||||
NextSyncCommittee: ðpb.SyncCommittee{
|
||||
Pubkeys: pubkeys,
|
||||
AggregatePubkey: make([]byte, 48),
|
||||
},
|
||||
ProposerLookahead: make([]uint64, 64),
|
||||
LatestExecutionPayloadBid: ðpb.ExecutionPayloadBid{
|
||||
ParentBlockHash: make([]byte, 32),
|
||||
ParentBlockRoot: make([]byte, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
PrevRandao: make([]byte, 32),
|
||||
FeeRecipient: make([]byte, 20),
|
||||
BlobKzgCommitmentsRoot: make([]byte, 32),
|
||||
},
|
||||
Builders: make([]*ethpb.Builder, 0),
|
||||
ExecutionPayloadAvailability: make([]byte, 1024),
|
||||
BuilderPendingPayments: builderPendingPayments,
|
||||
BuilderPendingWithdrawals: make([]*ethpb.BuilderPendingWithdrawal, 0),
|
||||
LatestBlockHash: make([]byte, 32),
|
||||
PayloadExpectedWithdrawals: make([]*enginev1.Withdrawal, 0),
|
||||
}
|
||||
|
||||
for _, opt := range options {
|
||||
err := opt(seed)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
var st, err = state_native.InitializeFromProtoUnsafeGloas(seed)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return st, nil
|
||||
}
|
||||
|
||||
// SSZ will fill 2D byte slices with their respective values, so we must fill these in too for round
|
||||
|
||||
@@ -68,6 +68,26 @@ func TestNewBeaconStateElectra(t *testing.T) {
|
||||
assert.DeepEqual(t, st.ToProtoUnsafe(), got)
|
||||
}
|
||||
|
||||
func TestNewBeaconStateFulu(t *testing.T) {
|
||||
st, err := NewBeaconStateFulu()
|
||||
require.NoError(t, err)
|
||||
b, err := st.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
got := ðpb.BeaconStateFulu{}
|
||||
require.NoError(t, got.UnmarshalSSZ(b))
|
||||
assert.DeepEqual(t, st.ToProtoUnsafe(), got)
|
||||
}
|
||||
|
||||
func TestNewBeaconStateGloas(t *testing.T) {
|
||||
st, err := NewBeaconStateGloas()
|
||||
require.NoError(t, err)
|
||||
b, err := st.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
got := ðpb.BeaconStateGloas{}
|
||||
require.NoError(t, got.UnmarshalSSZ(b))
|
||||
assert.DeepEqual(t, st.ToProtoUnsafe(), got)
|
||||
}
|
||||
|
||||
func TestNewBeaconState_HashTreeRoot(t *testing.T) {
|
||||
st, err := NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -71,7 +71,7 @@ func main() {
|
||||
flag.Parse()
|
||||
|
||||
if *logFileName != "" {
|
||||
if err := logs.ConfigurePersistentLogging(*logFileName, "text", logrus.DebugLevel); err != nil {
|
||||
if err := logs.ConfigurePersistentLogging(*logFileName, "text", logrus.DebugLevel, map[string]logrus.Level{}); err != nil {
|
||||
log.WithError(err).Error("Failed to configuring logging to disk.")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -25,7 +25,6 @@ go_library(
|
||||
],
|
||||
deps = [
|
||||
"//api/grpc:go_default_library",
|
||||
"//api/rest:go_default_library",
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//cmd/validator/flags:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
|
||||
@@ -3,13 +3,14 @@ package accounts
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
grpcutil "github.com/OffchainLabs/prysm/v7/api/grpc"
|
||||
"github.com/OffchainLabs/prysm/v7/api/rest"
|
||||
"github.com/OffchainLabs/prysm/v7/crypto/bls"
|
||||
"github.com/OffchainLabs/prysm/v7/validator/accounts/wallet"
|
||||
beaconApi "github.com/OffchainLabs/prysm/v7/validator/client/beacon-api"
|
||||
iface "github.com/OffchainLabs/prysm/v7/validator/client/iface"
|
||||
nodeClientFactory "github.com/OffchainLabs/prysm/v7/validator/client/node-client-factory"
|
||||
validatorClientFactory "github.com/OffchainLabs/prysm/v7/validator/client/validator-client-factory"
|
||||
@@ -76,17 +77,22 @@ func (acm *CLIManager) prepareBeaconClients(ctx context.Context) (*iface.Validat
|
||||
}
|
||||
|
||||
ctx = grpcutil.AppendHeaders(ctx, acm.grpcHeaders)
|
||||
|
||||
conn, err := validatorHelpers.NewNodeConnection(
|
||||
validatorHelpers.WithGrpc(ctx, acm.beaconRPCProvider, acm.dialOpts),
|
||||
validatorHelpers.WithREST(acm.beaconApiEndpoint, rest.WithHttpTimeout(acm.beaconApiTimeout)),
|
||||
)
|
||||
grpcConn, err := grpc.DialContext(ctx, acm.beaconRPCProvider, acm.dialOpts...)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return nil, nil, errors.Wrapf(err, "could not dial endpoint %s", acm.beaconRPCProvider)
|
||||
}
|
||||
conn := validatorHelpers.NewNodeConnection(
|
||||
grpcConn,
|
||||
acm.beaconApiEndpoint,
|
||||
validatorHelpers.WithBeaconApiTimeout(acm.beaconApiTimeout),
|
||||
)
|
||||
|
||||
validatorClient := validatorClientFactory.NewValidatorClient(conn)
|
||||
nodeClient := nodeClientFactory.NewNodeClient(conn)
|
||||
restHandler := beaconApi.NewBeaconApiRestHandler(
|
||||
http.Client{Timeout: acm.beaconApiTimeout},
|
||||
acm.beaconApiEndpoint,
|
||||
)
|
||||
validatorClient := validatorClientFactory.NewValidatorClient(conn, restHandler)
|
||||
nodeClient := nodeClientFactory.NewNodeClient(conn, restHandler)
|
||||
|
||||
return &validatorClient, &nodeClient, nil
|
||||
}
|
||||
|
||||
@@ -10,6 +10,7 @@ go_library(
|
||||
"log.go",
|
||||
"log_helpers.go",
|
||||
"metrics.go",
|
||||
"multiple_endpoints_grpc_resolver.go",
|
||||
"propose.go",
|
||||
"registration.go",
|
||||
"runner.go",
|
||||
@@ -28,7 +29,6 @@ go_library(
|
||||
"//api/client:go_default_library",
|
||||
"//api/client/event:go_default_library",
|
||||
"//api/grpc:go_default_library",
|
||||
"//api/rest:go_default_library",
|
||||
"//api/server/structs:go_default_library",
|
||||
"//async:go_default_library",
|
||||
"//async/event:go_default_library",
|
||||
@@ -58,6 +58,7 @@ go_library(
|
||||
"//time/slots:go_default_library",
|
||||
"//validator/accounts/iface:go_default_library",
|
||||
"//validator/accounts/wallet:go_default_library",
|
||||
"//validator/client/beacon-api:go_default_library",
|
||||
"//validator/client/beacon-chain-client-factory:go_default_library",
|
||||
"//validator/client/iface:go_default_library",
|
||||
"//validator/client/node-client-factory:go_default_library",
|
||||
@@ -85,11 +86,13 @@ go_library(
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@io_opentelemetry_go_contrib_instrumentation_google_golang_org_grpc_otelgrpc//:go_default_library",
|
||||
"@io_opentelemetry_go_contrib_instrumentation_net_http_otelhttp//:go_default_library",
|
||||
"@io_opentelemetry_go_otel_trace//:go_default_library",
|
||||
"@org_golang_google_grpc//:go_default_library",
|
||||
"@org_golang_google_grpc//codes:go_default_library",
|
||||
"@org_golang_google_grpc//credentials:go_default_library",
|
||||
"@org_golang_google_grpc//metadata:go_default_library",
|
||||
"@org_golang_google_grpc//resolver:go_default_library",
|
||||
"@org_golang_google_grpc//status:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
|
||||
@@ -121,8 +124,6 @@ go_test(
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//api/grpc:go_default_library",
|
||||
"//api/rest:go_default_library",
|
||||
"//api/server/structs:go_default_library",
|
||||
"//async/event:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
|
||||
@@ -207,7 +207,7 @@ func (v *validator) signSlotWithSelectionProof(ctx context.Context, pubKey [fiel
// such that any attestations from this slot have time to reach the beacon node
// before creating the aggregated attestation.
func (v *validator) waitToSlotTwoThirds(ctx context.Context, slot primitives.Slot) {
-   v.waitUntilSlotComponent(ctx, slot, params.BeaconConfig().AggregrateDueBPS)
+   v.waitUntilSlotComponent(ctx, slot, params.BeaconConfig().AggregateDueBPS)
}

// This returns the signature of validator signing over aggregate and
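The *DueBPS values are basis points of the slot duration, so the rename above changes spelling only, not timing: 6667 BPS is roughly two thirds of a slot, which is where waitToSlotTwoThirds gets its name. A quick arithmetic sketch, assuming 12-second mainnet slots:

package main

import (
    "fmt"
    "time"
)

func main() {
    const slotDuration = 12 * time.Second // SECONDS_PER_SLOT on mainnet, assumed here
    const aggregateDueBPS = 6667          // AggregateDueBPS from the mainnet config above

    // Basis points: 10,000 BPS corresponds to the full slot.
    offset := slotDuration * aggregateDueBPS / 10_000
    fmt.Println(offset) // 8.0004s, i.e. about two thirds into the 12s slot
}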