Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-28 22:58:13 -05:00)
Compare commits: debug-stat… ... gRPC-fallb… (57 commits)
Commits (SHA1): e9ba7c000e, 2ef6a3c25e, 919bd5d6aa, 0476eeda57, e6e199fa0d, c8eec6ec24, 4f659b236e, d448ebef45, 1c65c8866a, 87739d92ad, c3bfc611a2, ed2e58ad6c, dcc67bb61e, 922aaa4f0f, 63cd7ac88e, a030d88d42, d30ef5a30f, 545f450f70, 9a5f5ce733, fe7b2bf20e, 7cc6ded31a, 5fd3300fdb, f74a9cb3ec, 3d903d5d75, 5f335b1b58, 1f7f7c6833, 4e952354d1, 5268da43f1, a6fc327cfb, cf04b457a6, 4586b0accf, 3a71ad2ec1, 588766e520, ecc19bc6ed, 164d2d50fd, 1355a9ff4d, 34478f30c8, 214b4428e6, db82f3cc9d, 9c874037d1, 9bb231fb3b, 6432140603, 5e0a9ff992, 0bfd661baf, b21acc0bbb, f6f65987c6, d26cdd74ee, d1905cb018, 9f828bdd88, 17413b52ed, a651e7f0ac, 3e1cb45e92, fc2dcb0e88, 888db581dd, f1d2ee72e2, 31f18b9f60, 6462c997e9
@@ -3,13 +3,16 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = [
        "grpc_connection_provider.go",
        "grpcutils.go",
        "log.go",
        "mock_grpc_provider.go",
        "parameters.go",
    ],
    importpath = "github.com/OffchainLabs/prysm/v7/api/grpc",
    visibility = ["//visibility:public"],
    deps = [
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@org_golang_google_grpc//:go_default_library",
        "@org_golang_google_grpc//metadata:go_default_library",

@@ -18,12 +21,17 @@ go_library(

go_test(
    name = "go_default_test",
    srcs = ["grpcutils_test.go"],
    srcs = [
        "grpc_connection_provider_test.go",
        "grpcutils_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        "//testing/assert:go_default_library",
        "//testing/require:go_default_library",
        "@com_github_sirupsen_logrus//hooks/test:go_default_library",
        "@org_golang_google_grpc//:go_default_library",
        "@org_golang_google_grpc//credentials/insecure:go_default_library",
        "@org_golang_google_grpc//metadata:go_default_library",
    ],
)
api/grpc/grpc_connection_provider.go (new file, 173 lines)
@@ -0,0 +1,173 @@
package grpc

import (
    "context"
    "strings"
    "sync"

    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
    "google.golang.org/grpc"
)

// GrpcConnectionProvider manages gRPC connections for failover support.
// It allows switching between different beacon node endpoints when the current one becomes unavailable.
// Only one connection is maintained at a time - when switching hosts, the old connection is closed.
type GrpcConnectionProvider interface {
    // CurrentConn returns the currently active gRPC connection.
    // The connection is created lazily on first call.
    // Returns nil if the provider has been closed.
    CurrentConn() *grpc.ClientConn
    // CurrentHost returns the address of the currently active endpoint.
    CurrentHost() string
    // Hosts returns all configured endpoint addresses.
    Hosts() []string
    // SwitchHost switches to the endpoint at the given index.
    // The new connection is created lazily on next CurrentConn() call.
    SwitchHost(index int) error
    // Close closes the current connection.
    Close()
}

type grpcConnectionProvider struct {
    // Immutable after construction - no lock needed for reads
    endpoints []string
    ctx       context.Context
    dialOpts  []grpc.DialOption

    // Current connection state (protected by mutex)
    currentIndex uint64
    conn         *grpc.ClientConn

    mu     sync.Mutex
    closed bool
}

// NewGrpcConnectionProvider creates a new connection provider that manages gRPC connections.
// The endpoint parameter can be a comma-separated list of addresses (e.g., "host1:4000,host2:4000").
// Only one connection is maintained at a time, created lazily on first use.
func NewGrpcConnectionProvider(
    ctx context.Context,
    endpoint string,
    dialOpts []grpc.DialOption,
) (GrpcConnectionProvider, error) {
    endpoints := parseEndpoints(endpoint)
    if len(endpoints) == 0 {
        return nil, errors.New("no gRPC endpoints provided")
    }

    log.WithFields(logrus.Fields{
        "endpoints": endpoints,
        "count":     len(endpoints),
    }).Info("Initialized gRPC connection provider")

    return &grpcConnectionProvider{
        endpoints: endpoints,
        ctx:       ctx,
        dialOpts:  dialOpts,
    }, nil
}

// parseEndpoints splits a comma-separated endpoint string into individual endpoints.
func parseEndpoints(endpoint string) []string {
    if endpoint == "" {
        return nil
    }
    endpoints := make([]string, 0, 1)
    for p := range strings.SplitSeq(endpoint, ",") {
        if p = strings.TrimSpace(p); p != "" {
            endpoints = append(endpoints, p)
        }
    }
    return endpoints
}

func (p *grpcConnectionProvider) CurrentConn() *grpc.ClientConn {
    p.mu.Lock()
    defer p.mu.Unlock()

    if p.closed {
        return nil
    }

    // Return existing connection if available
    if p.conn != nil {
        return p.conn
    }

    // Create connection lazily
    ep := p.endpoints[p.currentIndex]
    conn, err := grpc.DialContext(p.ctx, ep, p.dialOpts...)
    if err != nil {
        log.WithError(err).WithField("endpoint", ep).Error("Failed to create gRPC connection")
        return nil
    }

    p.conn = conn
    log.WithField("endpoint", ep).Debug("Created gRPC connection")
    return conn
}

func (p *grpcConnectionProvider) CurrentHost() string {
    p.mu.Lock()
    defer p.mu.Unlock()
    return p.endpoints[p.currentIndex]
}

func (p *grpcConnectionProvider) Hosts() []string {
    // Return a copy to maintain immutability
    hosts := make([]string, len(p.endpoints))
    copy(hosts, p.endpoints)
    return hosts
}

func (p *grpcConnectionProvider) SwitchHost(index int) error {
    if index < 0 || index >= len(p.endpoints) {
        return errors.Errorf("invalid host index %d, must be between 0 and %d", index, len(p.endpoints)-1)
    }

    p.mu.Lock()
    defer p.mu.Unlock()

    if uint64(index) == p.currentIndex {
        return nil // Already on this host
    }

    oldHost := p.endpoints[p.currentIndex]
    oldConn := p.conn

    p.conn = nil // Clear immediately - new connection created lazily
    p.currentIndex = uint64(index)

    // Close old connection asynchronously to avoid blocking the caller
    if oldConn != nil {
        go func() {
            if err := oldConn.Close(); err != nil {
                log.WithError(err).WithField("endpoint", oldHost).Debug("Failed to close previous connection")
            }
        }()
    }

    log.WithFields(logrus.Fields{
        "previousHost": oldHost,
        "newHost":      p.endpoints[index],
    }).Debug("Switched gRPC endpoint")
    return nil
}

func (p *grpcConnectionProvider) Close() {
    p.mu.Lock()
    defer p.mu.Unlock()

    if p.closed {
        return
    }
    p.closed = true

    if p.conn != nil {
        if err := p.conn.Close(); err != nil {
            log.WithError(err).WithField("endpoint", p.endpoints[p.currentIndex]).Debug("Failed to close gRPC connection")
        }
        p.conn = nil
    }
}
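For context, a minimal usage sketch of the provider API above (not part of the diff; the package alias, dial options, and main-function wiring are assumptions for illustration):

```go
package main

import (
	"context"
	"log"

	prysmgrpc "github.com/OffchainLabs/prysm/v7/api/grpc"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Comma-separated endpoint list, as accepted by NewGrpcConnectionProvider.
	endpoints := "host1:4000,host2:4000"
	dialOpts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}

	provider, err := prysmgrpc.NewGrpcConnectionProvider(context.Background(), endpoints, dialOpts)
	if err != nil {
		log.Fatal(err)
	}
	defer provider.Close()

	// The first connection is dialed lazily on this call.
	conn := provider.CurrentConn()
	_ = conn // hand to a gRPC client stub

	// When the current host misbehaves, fail over to another configured endpoint;
	// the old connection is closed in the background and a new one is dialed lazily.
	if err := provider.SwitchHost(1); err != nil {
		log.Fatal(err)
	}
	conn = provider.CurrentConn()
	_ = conn
}
```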
api/grpc/grpc_connection_provider_test.go (new file, 207 lines)
@@ -0,0 +1,207 @@
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
)
|
||||
|
||||
func TestParseEndpoints(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
expected []string
|
||||
}{
|
||||
{"single endpoint", "localhost:4000", []string{"localhost:4000"}},
|
||||
{"multiple endpoints", "host1:4000,host2:4000,host3:4000", []string{"host1:4000", "host2:4000", "host3:4000"}},
|
||||
{"endpoints with spaces", "host1:4000, host2:4000 , host3:4000", []string{"host1:4000", "host2:4000", "host3:4000"}},
|
||||
{"empty string", "", nil},
|
||||
{"only commas", ",,,", []string{}},
|
||||
{"trailing comma", "host1:4000,host2:4000,", []string{"host1:4000", "host2:4000"}},
|
||||
{"leading comma", ",host1:4000,host2:4000", []string{"host1:4000", "host2:4000"}},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := parseEndpoints(tt.input)
|
||||
if !reflect.DeepEqual(tt.expected, got) {
|
||||
t.Errorf("parseEndpoints(%q) = %v, want %v", tt.input, got, tt.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewGrpcConnectionProvider_Errors(t *testing.T) {
|
||||
t.Run("no endpoints", func(t *testing.T) {
|
||||
dialOpts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
|
||||
_, err := NewGrpcConnectionProvider(context.Background(), "", dialOpts)
|
||||
require.ErrorContains(t, "no gRPC endpoints provided", err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestGrpcConnectionProvider_LazyConnection(t *testing.T) {
|
||||
// Start only one server but configure provider with two endpoints
|
||||
lis, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
require.NoError(t, err)
|
||||
server := grpc.NewServer()
|
||||
go func() { _ = server.Serve(lis) }()
|
||||
defer server.Stop()
|
||||
|
||||
validAddr := lis.Addr().String()
|
||||
invalidAddr := "127.0.0.1:1" // Port 1 is unlikely to be listening
|
||||
|
||||
// Provider should succeed even though second endpoint is invalid (lazy connections)
|
||||
endpoint := validAddr + "," + invalidAddr
|
||||
ctx := context.Background()
|
||||
dialOpts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
|
||||
provider, err := NewGrpcConnectionProvider(ctx, endpoint, dialOpts)
|
||||
require.NoError(t, err, "Provider creation should succeed with lazy connections")
|
||||
defer func() { provider.Close() }()
|
||||
|
||||
// First endpoint should work
|
||||
conn := provider.CurrentConn()
|
||||
assert.NotNil(t, conn, "First connection should be created lazily")
|
||||
}
|
||||
|
||||
func TestGrpcConnectionProvider_SingleConnectionModel(t *testing.T) {
|
||||
// Create provider with 3 endpoints
|
||||
var addrs []string
|
||||
var servers []*grpc.Server
|
||||
|
||||
for range 3 {
|
||||
lis, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
require.NoError(t, err)
|
||||
server := grpc.NewServer()
|
||||
go func() { _ = server.Serve(lis) }()
|
||||
addrs = append(addrs, lis.Addr().String())
|
||||
servers = append(servers, server)
|
||||
}
|
||||
defer func() {
|
||||
for _, s := range servers {
|
||||
s.Stop()
|
||||
}
|
||||
}()
|
||||
|
||||
endpoint := strings.Join(addrs, ",")
|
||||
ctx := context.Background()
|
||||
dialOpts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
|
||||
provider, err := NewGrpcConnectionProvider(ctx, endpoint, dialOpts)
|
||||
require.NoError(t, err)
|
||||
defer func() { provider.Close() }()
|
||||
|
||||
// Access the internal state to verify single connection behavior
|
||||
p := provider.(*grpcConnectionProvider)
|
||||
|
||||
// Initially no connection
|
||||
p.mu.Lock()
|
||||
assert.Equal(t, (*grpc.ClientConn)(nil), p.conn, "Connection should be nil before access")
|
||||
p.mu.Unlock()
|
||||
|
||||
// Access connection - should create one
|
||||
conn0 := provider.CurrentConn()
|
||||
assert.NotNil(t, conn0)
|
||||
|
||||
p.mu.Lock()
|
||||
assert.NotNil(t, p.conn, "Connection should be created after CurrentConn()")
|
||||
firstConn := p.conn
|
||||
p.mu.Unlock()
|
||||
|
||||
// Call CurrentConn again - should return same connection
|
||||
conn0Again := provider.CurrentConn()
|
||||
assert.Equal(t, conn0, conn0Again, "Should return same connection")
|
||||
|
||||
// Switch to different host - old connection should be closed, new one created lazily
|
||||
require.NoError(t, provider.SwitchHost(1))
|
||||
|
||||
p.mu.Lock()
|
||||
assert.Equal(t, (*grpc.ClientConn)(nil), p.conn, "Connection should be nil after SwitchHost (lazy)")
|
||||
p.mu.Unlock()
|
||||
|
||||
// Get new connection
|
||||
conn1 := provider.CurrentConn()
|
||||
assert.NotNil(t, conn1)
|
||||
assert.NotEqual(t, firstConn, conn1, "Should be a different connection after switching hosts")
|
||||
}
|
||||
|
||||
// testProvider creates a provider with n test servers and returns cleanup function.
|
||||
func testProvider(t *testing.T, n int) (GrpcConnectionProvider, []string, func()) {
|
||||
var addrs []string
|
||||
var cleanups []func()
|
||||
|
||||
for range n {
|
||||
lis, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
require.NoError(t, err)
|
||||
server := grpc.NewServer()
|
||||
go func() { _ = server.Serve(lis) }()
|
||||
addrs = append(addrs, lis.Addr().String())
|
||||
cleanups = append(cleanups, server.Stop)
|
||||
}
|
||||
|
||||
endpoint := strings.Join(addrs, ",")
|
||||
|
||||
ctx := context.Background()
|
||||
dialOpts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
|
||||
provider, err := NewGrpcConnectionProvider(ctx, endpoint, dialOpts)
|
||||
require.NoError(t, err)
|
||||
|
||||
cleanup := func() {
|
||||
provider.Close()
|
||||
for _, c := range cleanups {
|
||||
c()
|
||||
}
|
||||
}
|
||||
return provider, addrs, cleanup
|
||||
}
|
||||
|
||||
func TestGrpcConnectionProvider(t *testing.T) {
|
||||
provider, addrs, cleanup := testProvider(t, 3)
|
||||
defer cleanup()
|
||||
|
||||
t.Run("initial state", func(t *testing.T) {
|
||||
assert.Equal(t, 3, len(provider.Hosts()))
|
||||
assert.Equal(t, addrs[0], provider.CurrentHost())
|
||||
assert.NotNil(t, provider.CurrentConn())
|
||||
})
|
||||
|
||||
t.Run("SwitchHost", func(t *testing.T) {
|
||||
require.NoError(t, provider.SwitchHost(1))
|
||||
assert.Equal(t, addrs[1], provider.CurrentHost())
|
||||
assert.NotNil(t, provider.CurrentConn()) // New connection created lazily
|
||||
require.NoError(t, provider.SwitchHost(0))
|
||||
assert.Equal(t, addrs[0], provider.CurrentHost())
|
||||
require.ErrorContains(t, "invalid host index", provider.SwitchHost(-1))
|
||||
require.ErrorContains(t, "invalid host index", provider.SwitchHost(3))
|
||||
})
|
||||
|
||||
t.Run("SwitchHost circular", func(t *testing.T) {
|
||||
// Test round-robin style switching using SwitchHost with manual index
|
||||
indices := []int{1, 2, 0, 1} // Simulate circular switching
|
||||
for i, idx := range indices {
|
||||
require.NoError(t, provider.SwitchHost(idx))
|
||||
assert.Equal(t, addrs[idx], provider.CurrentHost(), "iteration %d", i)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("Hosts returns copy", func(t *testing.T) {
|
||||
hosts := provider.Hosts()
|
||||
original := hosts[0]
|
||||
hosts[0] = "modified"
|
||||
assert.Equal(t, original, provider.Hosts()[0])
|
||||
})
|
||||
}
|
||||
|
||||
func TestGrpcConnectionProvider_Close(t *testing.T) {
|
||||
provider, _, cleanup := testProvider(t, 1)
|
||||
defer cleanup()
|
||||
|
||||
assert.NotNil(t, provider.CurrentConn())
|
||||
provider.Close()
|
||||
assert.Equal(t, (*grpc.ClientConn)(nil), provider.CurrentConn())
|
||||
provider.Close() // Double close is safe
|
||||
}
|
||||
api/grpc/mock_grpc_provider.go (new file, 20 lines)
@@ -0,0 +1,20 @@
package grpc

import "google.golang.org/grpc"

// MockGrpcProvider implements GrpcConnectionProvider for testing.
type MockGrpcProvider struct {
    MockConn  *grpc.ClientConn
    MockHosts []string
}

func (m *MockGrpcProvider) CurrentConn() *grpc.ClientConn { return m.MockConn }
func (m *MockGrpcProvider) CurrentHost() string {
    if len(m.MockHosts) > 0 {
        return m.MockHosts[0]
    }
    return ""
}
func (m *MockGrpcProvider) Hosts() []string      { return m.MockHosts }
func (m *MockGrpcProvider) SwitchHost(int) error { return nil }
func (m *MockGrpcProvider) Close()               {}
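A brief sketch of how the mock could be dropped in wherever a GrpcConnectionProvider is expected (the test name and assertions are illustrative, not from the diff):

```go
package grpc

import "testing"

func TestWithMockGrpcProvider(t *testing.T) {
	mock := &MockGrpcProvider{MockHosts: []string{"host1:4000", "host2:4000"}}

	// Code written against the interface accepts the mock directly.
	var provider GrpcConnectionProvider = mock

	if provider.CurrentHost() != "host1:4000" {
		t.Fatalf("unexpected host: %s", provider.CurrentHost())
	}
	if err := provider.SwitchHost(1); err != nil { // SwitchHost is a no-op in the mock
		t.Fatal(err)
	}
}
```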
api/rest/BUILD.bazel (new file, 34 lines)
@@ -0,0 +1,34 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = [
        "log.go",
        "mock_rest_provider.go",
        "rest_connection_provider.go",
        "rest_handler.go",
    ],
    importpath = "github.com/OffchainLabs/prysm/v7/api/rest",
    visibility = ["//visibility:public"],
    deps = [
        "//api:go_default_library",
        "//api/apiutil:go_default_library",
        "//api/client:go_default_library",
        "//config/params:go_default_library",
        "//network/httputil:go_default_library",
        "//runtime/version:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@io_opentelemetry_go_contrib_instrumentation_net_http_otelhttp//:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["rest_connection_provider_test.go"],
    embed = [":go_default_library"],
    deps = [
        "//testing/assert:go_default_library",
        "//testing/require:go_default_library",
    ],
)
@@ -1,9 +1,9 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package gloas
package rest

import "github.com/sirupsen/logrus"

// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "beacon-chain/core/gloas")
var log = logrus.WithField("package", "api/rest")
api/rest/mock_rest_provider.go (new file, 49 lines)
@@ -0,0 +1,49 @@
package rest

import (
    "bytes"
    "context"
    "net/http"
)

// MockRestProvider implements RestConnectionProvider for testing.
type MockRestProvider struct {
    MockClient  *http.Client
    MockHandler RestHandler
    MockHosts   []string
    HostIndex   int
}

func (m *MockRestProvider) HttpClient() *http.Client { return m.MockClient }
func (m *MockRestProvider) RestHandler() RestHandler { return m.MockHandler }
func (m *MockRestProvider) CurrentHost() string {
    if len(m.MockHosts) > 0 {
        return m.MockHosts[m.HostIndex%len(m.MockHosts)]
    }
    return ""
}
func (m *MockRestProvider) Hosts() []string            { return m.MockHosts }
func (m *MockRestProvider) SwitchHost(index int) error { m.HostIndex = index; return nil }

// MockRestHandler implements RestHandler for testing.
type MockRestHandler struct {
    MockHost   string
    MockClient *http.Client
}

func (m *MockRestHandler) Get(_ context.Context, _ string, _ any) error { return nil }
func (m *MockRestHandler) GetStatusCode(_ context.Context, _ string) (int, error) {
    return http.StatusOK, nil
}
func (m *MockRestHandler) GetSSZ(_ context.Context, _ string) ([]byte, http.Header, error) {
    return nil, nil, nil
}
func (m *MockRestHandler) Post(_ context.Context, _ string, _ map[string]string, _ *bytes.Buffer, _ any) error {
    return nil
}
func (m *MockRestHandler) PostSSZ(_ context.Context, _ string, _ map[string]string, _ *bytes.Buffer) ([]byte, http.Header, error) {
    return nil, nil, nil
}
func (m *MockRestHandler) HttpClient() *http.Client { return m.MockClient }
func (m *MockRestHandler) Host() string             { return m.MockHost }
func (m *MockRestHandler) SwitchHost(host string)   { m.MockHost = host }
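A small sketch of stubbing the handler in a unit test; the `checkHealth` helper is hypothetical, but the /eth/v1/node/health status-code convention is the one described in the handler's own comments:

```go
package rest

import (
	"context"
	"net/http"
	"testing"
)

// checkHealth is a hypothetical helper used only for this example.
func checkHealth(ctx context.Context, h RestHandler) (bool, error) {
	code, err := h.GetStatusCode(ctx, "/eth/v1/node/health")
	if err != nil {
		return false, err
	}
	return code == http.StatusOK, nil
}

func TestCheckHealthWithMock(t *testing.T) {
	handler := &MockRestHandler{MockHost: "http://localhost:3500"}
	ok, err := checkHealth(context.Background(), handler) // the mock always reports 200 OK
	if err != nil || !ok {
		t.Fatalf("expected healthy node, got ok=%v err=%v", ok, err)
	}
}
```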
api/rest/rest_connection_provider.go (new file, 158 lines)
@@ -0,0 +1,158 @@
package rest

import (
    "net/http"
    "strings"
    "sync/atomic"
    "time"

    "github.com/OffchainLabs/prysm/v7/api/client"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
    "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

// RestConnectionProvider manages HTTP client configuration for REST API with failover support.
// It allows switching between different beacon node REST endpoints when the current one becomes unavailable.
type RestConnectionProvider interface {
    // HttpClient returns the configured HTTP client with headers, timeout, and optional tracing.
    HttpClient() *http.Client
    // RestHandler returns the REST handler for making API requests.
    RestHandler() RestHandler
    // CurrentHost returns the current REST API endpoint URL.
    CurrentHost() string
    // Hosts returns all configured REST API endpoint URLs.
    Hosts() []string
    // SwitchHost switches to the endpoint at the given index.
    SwitchHost(index int) error
}

// RestConnectionProviderOption is a functional option for configuring the REST connection provider.
type RestConnectionProviderOption func(*restConnectionProvider)

// WithHttpTimeout sets the HTTP client timeout.
func WithHttpTimeout(timeout time.Duration) RestConnectionProviderOption {
    return func(p *restConnectionProvider) {
        p.timeout = timeout
    }
}

// WithHttpHeaders sets custom HTTP headers to include in all requests.
func WithHttpHeaders(headers map[string][]string) RestConnectionProviderOption {
    return func(p *restConnectionProvider) {
        p.headers = headers
    }
}

// WithTracing enables OpenTelemetry tracing for HTTP requests.
func WithTracing() RestConnectionProviderOption {
    return func(p *restConnectionProvider) {
        p.enableTracing = true
    }
}

type restConnectionProvider struct {
    endpoints     []string
    httpClient    *http.Client
    restHandler   RestHandler
    currentIndex  atomic.Uint64
    timeout       time.Duration
    headers       map[string][]string
    enableTracing bool
}

// NewRestConnectionProvider creates a new REST connection provider that manages HTTP client configuration.
// The endpoint parameter can be a comma-separated list of URLs (e.g., "http://host1:3500,http://host2:3500").
func NewRestConnectionProvider(endpoint string, opts ...RestConnectionProviderOption) (RestConnectionProvider, error) {
    endpoints := parseEndpoints(endpoint)
    if len(endpoints) == 0 {
        return nil, errors.New("no REST API endpoints provided")
    }

    p := &restConnectionProvider{
        endpoints: endpoints,
    }

    for _, opt := range opts {
        opt(p)
    }

    // Build the HTTP transport chain
    var transport http.RoundTripper = http.DefaultTransport

    // Add custom headers if configured
    if len(p.headers) > 0 {
        transport = client.NewCustomHeadersTransport(transport, p.headers)
    }

    // Add tracing if enabled
    if p.enableTracing {
        transport = otelhttp.NewTransport(transport)
    }

    p.httpClient = &http.Client{
        Timeout:   p.timeout,
        Transport: transport,
    }

    // Create the REST handler with the HTTP client and initial host
    p.restHandler = newRestHandler(*p.httpClient, endpoints[0])

    log.WithFields(logrus.Fields{
        "endpoints": endpoints,
        "count":     len(endpoints),
    }).Info("Initialized REST connection provider")

    return p, nil
}

// parseEndpoints splits a comma-separated endpoint string into individual endpoints.
func parseEndpoints(endpoint string) []string {
    if endpoint == "" {
        return nil
    }
    endpoints := make([]string, 0, 1)
    for p := range strings.SplitSeq(endpoint, ",") {
        if p = strings.TrimSpace(p); p != "" {
            endpoints = append(endpoints, p)
        }
    }
    return endpoints
}

func (p *restConnectionProvider) HttpClient() *http.Client {
    return p.httpClient
}

func (p *restConnectionProvider) RestHandler() RestHandler {
    return p.restHandler
}

func (p *restConnectionProvider) CurrentHost() string {
    return p.endpoints[p.currentIndex.Load()]
}

func (p *restConnectionProvider) Hosts() []string {
    // Return a copy to maintain immutability
    hosts := make([]string, len(p.endpoints))
    copy(hosts, p.endpoints)
    return hosts
}

func (p *restConnectionProvider) SwitchHost(index int) error {
    if index < 0 || index >= len(p.endpoints) {
        return errors.Errorf("invalid host index %d, must be between 0 and %d", index, len(p.endpoints)-1)
    }

    oldIdx := p.currentIndex.Load()
    p.currentIndex.Store(uint64(index))

    // Update the rest handler's host
    p.restHandler.SwitchHost(p.endpoints[index])

    log.WithFields(logrus.Fields{
        "previousHost": p.endpoints[oldIdx],
        "newHost":      p.endpoints[index],
    }).Debug("Switched REST endpoint")
    return nil
}
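For context, a minimal sketch of the functional-options API above (not part of the diff; the header value and main-function wiring are illustrative):

```go
package main

import (
	"log"
	"time"

	"github.com/OffchainLabs/prysm/v7/api/rest"
)

func main() {
	provider, err := rest.NewRestConnectionProvider(
		"http://host1:3500,http://host2:3500",
		rest.WithHttpTimeout(30*time.Second),
		rest.WithHttpHeaders(map[string][]string{"Authorization": {"Bearer token"}}),
		rest.WithTracing(),
	)
	if err != nil {
		log.Fatal(err)
	}

	handler := provider.RestHandler() // shares the configured *http.Client
	_ = handler

	// Fail over to the second endpoint; the handler's host is switched in place.
	if err := provider.SwitchHost(1); err != nil {
		log.Fatal(err)
	}
	log.Println("now talking to", provider.CurrentHost())
}
```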
api/rest/rest_connection_provider_test.go (new file, 80 lines)
@@ -0,0 +1,80 @@
|
||||
package rest
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
)
|
||||
|
||||
func TestParseEndpoints(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
expected []string
|
||||
}{
|
||||
{"single endpoint", "http://localhost:3500", []string{"http://localhost:3500"}},
|
||||
{"multiple endpoints", "http://host1:3500,http://host2:3500,http://host3:3500", []string{"http://host1:3500", "http://host2:3500", "http://host3:3500"}},
|
||||
{"endpoints with spaces", "http://host1:3500, http://host2:3500 , http://host3:3500", []string{"http://host1:3500", "http://host2:3500", "http://host3:3500"}},
|
||||
{"empty string", "", nil},
|
||||
{"only commas", ",,,", []string{}},
|
||||
{"trailing comma", "http://host1:3500,http://host2:3500,", []string{"http://host1:3500", "http://host2:3500"}},
|
||||
{"leading comma", ",http://host1:3500,http://host2:3500", []string{"http://host1:3500", "http://host2:3500"}},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := parseEndpoints(tt.input)
|
||||
if !reflect.DeepEqual(tt.expected, got) {
|
||||
t.Errorf("parseEndpoints(%q) = %v, want %v", tt.input, got, tt.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewRestConnectionProvider_Errors(t *testing.T) {
|
||||
t.Run("no endpoints", func(t *testing.T) {
|
||||
_, err := NewRestConnectionProvider("")
|
||||
require.ErrorContains(t, "no REST API endpoints provided", err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestRestConnectionProvider(t *testing.T) {
|
||||
provider, err := NewRestConnectionProvider("http://host1:3500,http://host2:3500,http://host3:3500")
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Run("initial state", func(t *testing.T) {
|
||||
assert.Equal(t, 3, len(provider.Hosts()))
|
||||
assert.Equal(t, "http://host1:3500", provider.CurrentHost())
|
||||
assert.NotNil(t, provider.HttpClient())
|
||||
})
|
||||
|
||||
t.Run("SwitchHost", func(t *testing.T) {
|
||||
require.NoError(t, provider.SwitchHost(1))
|
||||
assert.Equal(t, "http://host2:3500", provider.CurrentHost())
|
||||
require.NoError(t, provider.SwitchHost(0))
|
||||
assert.Equal(t, "http://host1:3500", provider.CurrentHost())
|
||||
require.ErrorContains(t, "invalid host index", provider.SwitchHost(-1))
|
||||
require.ErrorContains(t, "invalid host index", provider.SwitchHost(3))
|
||||
})
|
||||
|
||||
t.Run("Hosts returns copy", func(t *testing.T) {
|
||||
hosts := provider.Hosts()
|
||||
original := hosts[0]
|
||||
hosts[0] = "modified"
|
||||
assert.Equal(t, original, provider.Hosts()[0])
|
||||
})
|
||||
}
|
||||
|
||||
func TestRestConnectionProvider_WithOptions(t *testing.T) {
|
||||
headers := map[string][]string{"Authorization": {"Bearer token"}}
|
||||
provider, err := NewRestConnectionProvider(
|
||||
"http://localhost:3500",
|
||||
WithHttpHeaders(headers),
|
||||
WithHttpTimeout(30000000000), // 30 seconds in nanoseconds
|
||||
WithTracing(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, provider.HttpClient())
|
||||
assert.Equal(t, "http://localhost:3500", provider.CurrentHost())
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
package beacon_api
|
||||
package rest
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
@@ -21,6 +21,7 @@ import (
|
||||
|
||||
type reqOption func(*http.Request)
|
||||
|
||||
// RestHandler defines the interface for making REST API requests.
|
||||
type RestHandler interface {
|
||||
Get(ctx context.Context, endpoint string, resp any) error
|
||||
GetStatusCode(ctx context.Context, endpoint string) (int, error)
|
||||
@@ -29,29 +30,34 @@ type RestHandler interface {
|
||||
PostSSZ(ctx context.Context, endpoint string, headers map[string]string, data *bytes.Buffer) ([]byte, http.Header, error)
|
||||
HttpClient() *http.Client
|
||||
Host() string
|
||||
SetHost(host string)
|
||||
SwitchHost(host string)
|
||||
}
|
||||
|
||||
type BeaconApiRestHandler struct {
|
||||
type restHandler struct {
|
||||
client http.Client
|
||||
host string
|
||||
reqOverrides []reqOption
|
||||
}
|
||||
|
||||
// NewBeaconApiRestHandler returns a RestHandler
|
||||
func NewBeaconApiRestHandler(client http.Client, host string) RestHandler {
|
||||
brh := &BeaconApiRestHandler{
|
||||
// newRestHandler returns a RestHandler (internal use)
|
||||
func newRestHandler(client http.Client, host string) RestHandler {
|
||||
return NewRestHandler(client, host)
|
||||
}
|
||||
|
||||
// NewRestHandler returns a RestHandler
|
||||
func NewRestHandler(client http.Client, host string) RestHandler {
|
||||
rh := &restHandler{
|
||||
client: client,
|
||||
host: host,
|
||||
}
|
||||
brh.appendAcceptOverride()
|
||||
return brh
|
||||
rh.appendAcceptOverride()
|
||||
return rh
|
||||
}
|
||||
|
||||
// appendAcceptOverride enables the Accept header to be customized at runtime via an environment variable.
|
||||
// This is specified as an env var because it is a niche option that prysm may use for performance testing or debugging
|
||||
// bug which users are unlikely to need. Using an env var keeps the set of user-facing flags cleaner.
|
||||
func (c *BeaconApiRestHandler) appendAcceptOverride() {
|
||||
func (c *restHandler) appendAcceptOverride() {
|
||||
if accept := os.Getenv(params.EnvNameOverrideAccept); accept != "" {
|
||||
c.reqOverrides = append(c.reqOverrides, func(req *http.Request) {
|
||||
req.Header.Set("Accept", accept)
|
||||
@@ -60,18 +66,18 @@ func (c *BeaconApiRestHandler) appendAcceptOverride() {
|
||||
}
|
||||
|
||||
// HttpClient returns the underlying HTTP client of the handler
|
||||
func (c *BeaconApiRestHandler) HttpClient() *http.Client {
|
||||
func (c *restHandler) HttpClient() *http.Client {
|
||||
return &c.client
|
||||
}
|
||||
|
||||
// Host returns the underlying HTTP host
|
||||
func (c *BeaconApiRestHandler) Host() string {
|
||||
func (c *restHandler) Host() string {
|
||||
return c.host
|
||||
}
|
||||
|
||||
// Get sends a GET request and decodes the response body as a JSON object into the passed in object.
|
||||
// If an HTTP error is returned, the body is decoded as a DefaultJsonError JSON object and returned as the first return value.
|
||||
func (c *BeaconApiRestHandler) Get(ctx context.Context, endpoint string, resp any) error {
|
||||
func (c *restHandler) Get(ctx context.Context, endpoint string, resp any) error {
|
||||
url := c.host + endpoint
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
|
||||
if err != nil {
|
||||
@@ -94,7 +100,7 @@ func (c *BeaconApiRestHandler) Get(ctx context.Context, endpoint string, resp an
|
||||
// GetStatusCode sends a GET request and returns only the HTTP status code.
|
||||
// This is useful for endpoints like /eth/v1/node/health that communicate status via HTTP codes
|
||||
// (200 = ready, 206 = syncing, 503 = unavailable) rather than response bodies.
|
||||
func (c *BeaconApiRestHandler) GetStatusCode(ctx context.Context, endpoint string) (int, error) {
|
||||
func (c *restHandler) GetStatusCode(ctx context.Context, endpoint string) (int, error) {
|
||||
url := c.host + endpoint
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
|
||||
if err != nil {
|
||||
@@ -113,7 +119,7 @@ func (c *BeaconApiRestHandler) GetStatusCode(ctx context.Context, endpoint strin
|
||||
return httpResp.StatusCode, nil
|
||||
}
|
||||
|
||||
func (c *BeaconApiRestHandler) GetSSZ(ctx context.Context, endpoint string) ([]byte, http.Header, error) {
|
||||
func (c *restHandler) GetSSZ(ctx context.Context, endpoint string) ([]byte, http.Header, error) {
|
||||
url := c.host + endpoint
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
|
||||
if err != nil {
|
||||
@@ -168,7 +174,7 @@ func (c *BeaconApiRestHandler) GetSSZ(ctx context.Context, endpoint string) ([]b
|
||||
|
||||
// Post sends a POST request and decodes the response body as a JSON object into the passed in object.
|
||||
// If an HTTP error is returned, the body is decoded as a DefaultJsonError JSON object and returned as the first return value.
|
||||
func (c *BeaconApiRestHandler) Post(
|
||||
func (c *restHandler) Post(
|
||||
ctx context.Context,
|
||||
apiEndpoint string,
|
||||
headers map[string]string,
|
||||
@@ -204,7 +210,7 @@ func (c *BeaconApiRestHandler) Post(
|
||||
}
|
||||
|
||||
// PostSSZ sends a POST request and prefers an SSZ (application/octet-stream) response body.
|
||||
func (c *BeaconApiRestHandler) PostSSZ(
|
||||
func (c *restHandler) PostSSZ(
|
||||
ctx context.Context,
|
||||
apiEndpoint string,
|
||||
headers map[string]string,
|
||||
@@ -305,6 +311,6 @@ func decodeResp(httpResp *http.Response, resp any) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *BeaconApiRestHandler) SetHost(host string) {
|
||||
func (c *restHandler) SwitchHost(host string) {
|
||||
c.host = host
|
||||
}
|
||||
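For reference, a sketch of calling the renamed handler directly. NewRestHandler, Get, and GetStatusCode come from the diff above; the /eth/v1/node/version endpoint, its response shape, and the timeout are assumptions for illustration:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"net/http"
	"time"

	"github.com/OffchainLabs/prysm/v7/api/rest"
)

func main() {
	handler := rest.NewRestHandler(http.Client{Timeout: 10 * time.Second}, "http://localhost:3500")

	// Status-code-only endpoints such as /eth/v1/node/health.
	code, err := handler.GetStatusCode(context.Background(), "/eth/v1/node/health")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("health status:", code)

	// JSON endpoints decode into a caller-supplied value.
	var version struct {
		Data struct {
			Version string `json:"version"`
		} `json:"data"`
	}
	if err := handler.Get(context.Background(), "/eth/v1/node/version", &version); err != nil {
		log.Fatal(err)
	}
	fmt.Println("node version:", version.Data.Version)
}
```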
@@ -9,7 +9,6 @@ go_library(
|
||||
"conversions_blob.go",
|
||||
"conversions_block.go",
|
||||
"conversions_block_execution.go",
|
||||
"conversions_gloas.go",
|
||||
"conversions_lightclient.go",
|
||||
"conversions_state.go",
|
||||
"endpoints_beacon.go",
|
||||
@@ -58,12 +57,10 @@ go_test(
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
],
|
||||
|
||||
@@ -1,81 +0,0 @@
|
||||
package structs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
)
|
||||
|
||||
func ROExecutionPayloadBidFromConsensus(b interfaces.ROExecutionPayloadBid) *ExecutionPayloadBid {
|
||||
pbh := b.ParentBlockHash()
|
||||
pbr := b.ParentBlockRoot()
|
||||
bh := b.BlockHash()
|
||||
pr := b.PrevRandao()
|
||||
fr := b.FeeRecipient()
|
||||
bcr := b.BlobKzgCommitmentsRoot()
|
||||
return &ExecutionPayloadBid{
|
||||
ParentBlockHash: hexutil.Encode(pbh[:]),
|
||||
ParentBlockRoot: hexutil.Encode(pbr[:]),
|
||||
BlockHash: hexutil.Encode(bh[:]),
|
||||
PrevRandao: hexutil.Encode(pr[:]),
|
||||
FeeRecipient: hexutil.Encode(fr[:]),
|
||||
GasLimit: fmt.Sprintf("%d", b.GasLimit()),
|
||||
BuilderIndex: fmt.Sprintf("%d", b.BuilderIndex()),
|
||||
Slot: fmt.Sprintf("%d", b.Slot()),
|
||||
Value: fmt.Sprintf("%d", b.Value()),
|
||||
ExecutionPayment: fmt.Sprintf("%d", b.ExecutionPayment()),
|
||||
BlobKzgCommitmentsRoot: hexutil.Encode(bcr[:]),
|
||||
}
|
||||
}
|
||||
|
||||
func BuildersFromConsensus(builders []*ethpb.Builder) []*Builder {
|
||||
newBuilders := make([]*Builder, len(builders))
|
||||
for i, b := range builders {
|
||||
newBuilders[i] = BuilderFromConsensus(b)
|
||||
}
|
||||
return newBuilders
|
||||
}
|
||||
|
||||
func BuilderFromConsensus(b *ethpb.Builder) *Builder {
|
||||
return &Builder{
|
||||
Pubkey: hexutil.Encode(b.Pubkey),
|
||||
Version: hexutil.Encode(b.Version),
|
||||
ExecutionAddress: hexutil.Encode(b.ExecutionAddress),
|
||||
Balance: fmt.Sprintf("%d", b.Balance),
|
||||
DepositEpoch: fmt.Sprintf("%d", b.DepositEpoch),
|
||||
WithdrawableEpoch: fmt.Sprintf("%d", b.WithdrawableEpoch),
|
||||
}
|
||||
}
|
||||
|
||||
func BuilderPendingPaymentsFromConsensus(payments []*ethpb.BuilderPendingPayment) []*BuilderPendingPayment {
|
||||
newPayments := make([]*BuilderPendingPayment, len(payments))
|
||||
for i, p := range payments {
|
||||
newPayments[i] = BuilderPendingPaymentFromConsensus(p)
|
||||
}
|
||||
return newPayments
|
||||
}
|
||||
|
||||
func BuilderPendingPaymentFromConsensus(p *ethpb.BuilderPendingPayment) *BuilderPendingPayment {
|
||||
return &BuilderPendingPayment{
|
||||
Weight: fmt.Sprintf("%d", p.Weight),
|
||||
Withdrawal: BuilderPendingWithdrawalFromConsensus(p.Withdrawal),
|
||||
}
|
||||
}
|
||||
|
||||
func BuilderPendingWithdrawalsFromConsensus(withdrawals []*ethpb.BuilderPendingWithdrawal) []*BuilderPendingWithdrawal {
|
||||
newWithdrawals := make([]*BuilderPendingWithdrawal, len(withdrawals))
|
||||
for i, w := range withdrawals {
|
||||
newWithdrawals[i] = BuilderPendingWithdrawalFromConsensus(w)
|
||||
}
|
||||
return newWithdrawals
|
||||
}
|
||||
|
||||
func BuilderPendingWithdrawalFromConsensus(w *ethpb.BuilderPendingWithdrawal) *BuilderPendingWithdrawal {
|
||||
return &BuilderPendingWithdrawal{
|
||||
FeeRecipient: hexutil.Encode(w.FeeRecipient),
|
||||
Amount: fmt.Sprintf("%d", w.Amount),
|
||||
BuilderIndex: fmt.Sprintf("%d", w.BuilderIndex),
|
||||
}
|
||||
}
|
||||
@@ -972,223 +972,3 @@ func BeaconStateFuluFromConsensus(st beaconState.BeaconState) (*BeaconStateFulu,
|
||||
ProposerLookahead: lookahead,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Gloas
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
func BeaconStateGloasFromConsensus(st beaconState.BeaconState) (*BeaconStateGloas, error) {
|
||||
srcBr := st.BlockRoots()
|
||||
br := make([]string, len(srcBr))
|
||||
for i, r := range srcBr {
|
||||
br[i] = hexutil.Encode(r)
|
||||
}
|
||||
srcSr := st.StateRoots()
|
||||
sr := make([]string, len(srcSr))
|
||||
for i, r := range srcSr {
|
||||
sr[i] = hexutil.Encode(r)
|
||||
}
|
||||
srcHr := st.HistoricalRoots()
|
||||
hr := make([]string, len(srcHr))
|
||||
for i, r := range srcHr {
|
||||
hr[i] = hexutil.Encode(r)
|
||||
}
|
||||
srcVotes := st.Eth1DataVotes()
|
||||
votes := make([]*Eth1Data, len(srcVotes))
|
||||
for i, e := range srcVotes {
|
||||
votes[i] = Eth1DataFromConsensus(e)
|
||||
}
|
||||
srcVals := st.Validators()
|
||||
vals := make([]*Validator, len(srcVals))
|
||||
for i, v := range srcVals {
|
||||
vals[i] = ValidatorFromConsensus(v)
|
||||
}
|
||||
srcBals := st.Balances()
|
||||
bals := make([]string, len(srcBals))
|
||||
for i, b := range srcBals {
|
||||
bals[i] = fmt.Sprintf("%d", b)
|
||||
}
|
||||
srcRm := st.RandaoMixes()
|
||||
rm := make([]string, len(srcRm))
|
||||
for i, m := range srcRm {
|
||||
rm[i] = hexutil.Encode(m)
|
||||
}
|
||||
srcSlashings := st.Slashings()
|
||||
slashings := make([]string, len(srcSlashings))
|
||||
for i, s := range srcSlashings {
|
||||
slashings[i] = fmt.Sprintf("%d", s)
|
||||
}
|
||||
srcPrevPart, err := st.PreviousEpochParticipation()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
prevPart := make([]string, len(srcPrevPart))
|
||||
for i, p := range srcPrevPart {
|
||||
prevPart[i] = fmt.Sprintf("%d", p)
|
||||
}
|
||||
srcCurrPart, err := st.CurrentEpochParticipation()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
currPart := make([]string, len(srcCurrPart))
|
||||
for i, p := range srcCurrPart {
|
||||
currPart[i] = fmt.Sprintf("%d", p)
|
||||
}
|
||||
srcIs, err := st.InactivityScores()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
is := make([]string, len(srcIs))
|
||||
for i, s := range srcIs {
|
||||
is[i] = fmt.Sprintf("%d", s)
|
||||
}
|
||||
currSc, err := st.CurrentSyncCommittee()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
nextSc, err := st.NextSyncCommittee()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
srcHs, err := st.HistoricalSummaries()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hs := make([]*HistoricalSummary, len(srcHs))
|
||||
for i, s := range srcHs {
|
||||
hs[i] = HistoricalSummaryFromConsensus(s)
|
||||
}
|
||||
nwi, err := st.NextWithdrawalIndex()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
nwvi, err := st.NextWithdrawalValidatorIndex()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
drsi, err := st.DepositRequestsStartIndex()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dbtc, err := st.DepositBalanceToConsume()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ebtc, err := st.ExitBalanceToConsume()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
eee, err := st.EarliestExitEpoch()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cbtc, err := st.ConsolidationBalanceToConsume()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ece, err := st.EarliestConsolidationEpoch()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pbd, err := st.PendingDeposits()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ppw, err := st.PendingPartialWithdrawals()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pc, err := st.PendingConsolidations()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
srcLookahead, err := st.ProposerLookahead()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
lookahead := make([]string, len(srcLookahead))
|
||||
for i, v := range srcLookahead {
|
||||
lookahead[i] = fmt.Sprintf("%d", uint64(v))
|
||||
}
|
||||
// Gloas-specific fields
|
||||
lepb, err := st.LatestExecutionPayloadBid()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
builders, err := st.Builders()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
nwbi, err := st.NextWithdrawalBuilderIndex()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
epa, err := st.ExecutionPayloadAvailability()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
bpp, err := st.BuilderPendingPayments()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
bpw, err := st.BuilderPendingWithdrawals()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
lbh, err := st.LatestBlockHash()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pew, err := st.PayloadExpectedWithdrawals()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &BeaconStateGloas{
|
||||
GenesisTime: fmt.Sprintf("%d", st.GenesisTime().Unix()),
|
||||
GenesisValidatorsRoot: hexutil.Encode(st.GenesisValidatorsRoot()),
|
||||
Slot: fmt.Sprintf("%d", st.Slot()),
|
||||
Fork: ForkFromConsensus(st.Fork()),
|
||||
LatestBlockHeader: BeaconBlockHeaderFromConsensus(st.LatestBlockHeader()),
|
||||
BlockRoots: br,
|
||||
StateRoots: sr,
|
||||
HistoricalRoots: hr,
|
||||
Eth1Data: Eth1DataFromConsensus(st.Eth1Data()),
|
||||
Eth1DataVotes: votes,
|
||||
Eth1DepositIndex: fmt.Sprintf("%d", st.Eth1DepositIndex()),
|
||||
Validators: vals,
|
||||
Balances: bals,
|
||||
RandaoMixes: rm,
|
||||
Slashings: slashings,
|
||||
PreviousEpochParticipation: prevPart,
|
||||
CurrentEpochParticipation: currPart,
|
||||
JustificationBits: hexutil.Encode(st.JustificationBits()),
|
||||
PreviousJustifiedCheckpoint: CheckpointFromConsensus(st.PreviousJustifiedCheckpoint()),
|
||||
CurrentJustifiedCheckpoint: CheckpointFromConsensus(st.CurrentJustifiedCheckpoint()),
|
||||
FinalizedCheckpoint: CheckpointFromConsensus(st.FinalizedCheckpoint()),
|
||||
InactivityScores: is,
|
||||
CurrentSyncCommittee: SyncCommitteeFromConsensus(currSc),
|
||||
NextSyncCommittee: SyncCommitteeFromConsensus(nextSc),
|
||||
NextWithdrawalIndex: fmt.Sprintf("%d", nwi),
|
||||
NextWithdrawalValidatorIndex: fmt.Sprintf("%d", nwvi),
|
||||
HistoricalSummaries: hs,
|
||||
DepositRequestsStartIndex: fmt.Sprintf("%d", drsi),
|
||||
DepositBalanceToConsume: fmt.Sprintf("%d", dbtc),
|
||||
ExitBalanceToConsume: fmt.Sprintf("%d", ebtc),
|
||||
EarliestExitEpoch: fmt.Sprintf("%d", eee),
|
||||
ConsolidationBalanceToConsume: fmt.Sprintf("%d", cbtc),
|
||||
EarliestConsolidationEpoch: fmt.Sprintf("%d", ece),
|
||||
PendingDeposits: PendingDepositsFromConsensus(pbd),
|
||||
PendingPartialWithdrawals: PendingPartialWithdrawalsFromConsensus(ppw),
|
||||
PendingConsolidations: PendingConsolidationsFromConsensus(pc),
|
||||
ProposerLookahead: lookahead,
|
||||
LatestExecutionPayloadBid: ROExecutionPayloadBidFromConsensus(lepb),
|
||||
Builders: BuildersFromConsensus(builders),
|
||||
NextWithdrawalBuilderIndex: fmt.Sprintf("%d", nwbi),
|
||||
ExecutionPayloadAvailability: hexutil.Encode(epa),
|
||||
BuilderPendingPayments: BuilderPendingPaymentsFromConsensus(bpp),
|
||||
BuilderPendingWithdrawals: BuilderPendingWithdrawalsFromConsensus(bpw),
|
||||
LatestBlockHash: hexutil.Encode(lbh[:]),
|
||||
PayloadExpectedWithdrawals: WithdrawalsFromConsensus(pew),
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -1,15 +1,11 @@
|
||||
package structs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
|
||||
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/util"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
)
|
||||
|
||||
@@ -350,176 +346,6 @@ func TestAttesterSlashing_FromConsensus(t *testing.T) {
|
||||
assert.DeepEqual(t, expectedResult, result)
|
||||
}
|
||||
|
||||
func TestROExecutionPayloadBidFromConsensus(t *testing.T) {
|
||||
bid := ð.ExecutionPayloadBid{
|
||||
ParentBlockHash: bytes.Repeat([]byte{0x01}, 32),
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0x02}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0x03}, 32),
|
||||
PrevRandao: bytes.Repeat([]byte{0x04}, 32),
|
||||
FeeRecipient: bytes.Repeat([]byte{0x05}, 20),
|
||||
GasLimit: 100,
|
||||
BuilderIndex: 7,
|
||||
Slot: 9,
|
||||
Value: 11,
|
||||
ExecutionPayment: 22,
|
||||
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x06}, 32),
|
||||
}
|
||||
roBid, err := blocks.WrappedROExecutionPayloadBid(bid)
|
||||
require.NoError(t, err)
|
||||
|
||||
got := ROExecutionPayloadBidFromConsensus(roBid)
|
||||
want := &ExecutionPayloadBid{
|
||||
ParentBlockHash: hexutil.Encode(bid.ParentBlockHash),
|
||||
ParentBlockRoot: hexutil.Encode(bid.ParentBlockRoot),
|
||||
BlockHash: hexutil.Encode(bid.BlockHash),
|
||||
PrevRandao: hexutil.Encode(bid.PrevRandao),
|
||||
FeeRecipient: hexutil.Encode(bid.FeeRecipient),
|
||||
GasLimit: "100",
|
||||
BuilderIndex: "7",
|
||||
Slot: "9",
|
||||
Value: "11",
|
||||
ExecutionPayment: "22",
|
||||
BlobKzgCommitmentsRoot: hexutil.Encode(bid.BlobKzgCommitmentsRoot),
|
||||
}
|
||||
assert.DeepEqual(t, want, got)
|
||||
}
|
||||
|
||||
func TestBuilderConversionsFromConsensus(t *testing.T) {
|
||||
builder := ð.Builder{
|
||||
Pubkey: bytes.Repeat([]byte{0xAA}, 48),
|
||||
Version: bytes.Repeat([]byte{0x01}, 4),
|
||||
ExecutionAddress: bytes.Repeat([]byte{0xBB}, 20),
|
||||
Balance: 42,
|
||||
DepositEpoch: 3,
|
||||
WithdrawableEpoch: 4,
|
||||
}
|
||||
wantBuilder := &Builder{
|
||||
Pubkey: hexutil.Encode(builder.Pubkey),
|
||||
Version: hexutil.Encode(builder.Version),
|
||||
ExecutionAddress: hexutil.Encode(builder.ExecutionAddress),
|
||||
Balance: "42",
|
||||
DepositEpoch: "3",
|
||||
WithdrawableEpoch: "4",
|
||||
}
|
||||
|
||||
assert.DeepEqual(t, wantBuilder, BuilderFromConsensus(builder))
|
||||
assert.DeepEqual(t, []*Builder{wantBuilder}, BuildersFromConsensus([]*eth.Builder{builder}))
|
||||
}
|
||||
|
||||
func TestBuilderPendingPaymentConversionsFromConsensus(t *testing.T) {
|
||||
withdrawal := ð.BuilderPendingWithdrawal{
|
||||
FeeRecipient: bytes.Repeat([]byte{0x10}, 20),
|
||||
Amount: 15,
|
||||
BuilderIndex: 2,
|
||||
}
|
||||
payment := ð.BuilderPendingPayment{
|
||||
Weight: 5,
|
||||
Withdrawal: withdrawal,
|
||||
}
|
||||
wantWithdrawal := &BuilderPendingWithdrawal{
|
||||
FeeRecipient: hexutil.Encode(withdrawal.FeeRecipient),
|
||||
Amount: "15",
|
||||
BuilderIndex: "2",
|
||||
}
|
||||
wantPayment := &BuilderPendingPayment{
|
||||
Weight: "5",
|
||||
Withdrawal: wantWithdrawal,
|
||||
}
|
||||
|
||||
assert.DeepEqual(t, wantPayment, BuilderPendingPaymentFromConsensus(payment))
|
||||
assert.DeepEqual(t, []*BuilderPendingPayment{wantPayment}, BuilderPendingPaymentsFromConsensus([]*eth.BuilderPendingPayment{payment}))
|
||||
assert.DeepEqual(t, wantWithdrawal, BuilderPendingWithdrawalFromConsensus(withdrawal))
|
||||
assert.DeepEqual(t, []*BuilderPendingWithdrawal{wantWithdrawal}, BuilderPendingWithdrawalsFromConsensus([]*eth.BuilderPendingWithdrawal{withdrawal}))
|
||||
}
|
||||
|
||||
func TestBeaconStateGloasFromConsensus(t *testing.T) {
|
||||
st, err := util.NewBeaconStateGloas(func(state *eth.BeaconStateGloas) error {
|
||||
state.GenesisTime = 123
|
||||
state.GenesisValidatorsRoot = bytes.Repeat([]byte{0x10}, 32)
|
||||
state.Slot = 5
|
||||
state.ProposerLookahead = []uint64{1, 2}
|
||||
state.LatestExecutionPayloadBid = ð.ExecutionPayloadBid{
|
||||
ParentBlockHash: bytes.Repeat([]byte{0x11}, 32),
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0x12}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0x13}, 32),
|
||||
PrevRandao: bytes.Repeat([]byte{0x14}, 32),
|
||||
FeeRecipient: bytes.Repeat([]byte{0x15}, 20),
|
||||
GasLimit: 64,
|
||||
BuilderIndex: 3,
|
||||
Slot: 5,
|
||||
Value: 99,
|
||||
ExecutionPayment: 7,
|
||||
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x16}, 32),
|
||||
}
|
||||
state.Builders = []*eth.Builder{
|
||||
{
|
||||
Pubkey: bytes.Repeat([]byte{0x20}, 48),
|
||||
Version: bytes.Repeat([]byte{0x21}, 4),
|
||||
ExecutionAddress: bytes.Repeat([]byte{0x22}, 20),
|
||||
Balance: 88,
|
||||
DepositEpoch: 1,
|
||||
WithdrawableEpoch: 2,
|
||||
},
|
||||
}
|
||||
state.NextWithdrawalBuilderIndex = 9
|
||||
state.ExecutionPayloadAvailability = []byte{0x01, 0x02}
|
||||
state.BuilderPendingPayments = []*eth.BuilderPendingPayment{
|
||||
{
|
||||
Weight: 3,
|
||||
Withdrawal: ð.BuilderPendingWithdrawal{
|
||||
FeeRecipient: bytes.Repeat([]byte{0x23}, 20),
|
||||
Amount: 4,
|
||||
BuilderIndex: 5,
|
||||
},
|
||||
},
|
||||
}
|
||||
state.BuilderPendingWithdrawals = []*eth.BuilderPendingWithdrawal{
|
||||
{
|
||||
FeeRecipient: bytes.Repeat([]byte{0x24}, 20),
|
||||
Amount: 6,
|
||||
BuilderIndex: 7,
|
||||
},
|
||||
}
|
||||
state.LatestBlockHash = bytes.Repeat([]byte{0x25}, 32)
|
||||
state.PayloadExpectedWithdrawals = []*enginev1.Withdrawal{
|
||||
{Index: 1, ValidatorIndex: 2, Address: bytes.Repeat([]byte{0x26}, 20), Amount: 10},
|
||||
}
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
got, err := BeaconStateGloasFromConsensus(st)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, "123", got.GenesisTime)
|
||||
require.Equal(t, hexutil.Encode(bytes.Repeat([]byte{0x10}, 32)), got.GenesisValidatorsRoot)
|
||||
require.Equal(t, "5", got.Slot)
|
||||
require.DeepEqual(t, []string{"1", "2"}, got.ProposerLookahead)
|
||||
require.Equal(t, "9", got.NextWithdrawalBuilderIndex)
|
||||
require.Equal(t, hexutil.Encode([]byte{0x01, 0x02}), got.ExecutionPayloadAvailability)
|
||||
require.Equal(t, hexutil.Encode(bytes.Repeat([]byte{0x25}, 32)), got.LatestBlockHash)
|
||||
|
||||
require.NotNil(t, got.LatestExecutionPayloadBid)
|
||||
require.Equal(t, "64", got.LatestExecutionPayloadBid.GasLimit)
|
||||
require.Equal(t, hexutil.Encode(bytes.Repeat([]byte{0x11}, 32)), got.LatestExecutionPayloadBid.ParentBlockHash)
|
||||
|
||||
require.NotNil(t, got.Builders)
|
||||
require.Equal(t, hexutil.Encode(bytes.Repeat([]byte{0x20}, 48)), got.Builders[0].Pubkey)
|
||||
require.Equal(t, "88", got.Builders[0].Balance)
|
||||
|
||||
require.Equal(t, "3", got.BuilderPendingPayments[0].Weight)
|
||||
require.Equal(t, "4", got.BuilderPendingPayments[0].Withdrawal.Amount)
|
||||
require.Equal(t, hexutil.Encode(bytes.Repeat([]byte{0x23}, 20)), got.BuilderPendingPayments[0].Withdrawal.FeeRecipient)
|
||||
|
||||
require.Equal(t, "6", got.BuilderPendingWithdrawals[0].Amount)
|
||||
require.Equal(t, hexutil.Encode(bytes.Repeat([]byte{0x24}, 20)), got.BuilderPendingWithdrawals[0].FeeRecipient)
|
||||
|
||||
require.Equal(t, "1", got.PayloadExpectedWithdrawals[0].WithdrawalIndex)
|
||||
require.Equal(t, "2", got.PayloadExpectedWithdrawals[0].ValidatorIndex)
|
||||
require.Equal(t, hexutil.Encode(bytes.Repeat([]byte{0x26}, 20)), got.PayloadExpectedWithdrawals[0].ExecutionAddress)
|
||||
require.Equal(t, "10", got.PayloadExpectedWithdrawals[0].Amount)
|
||||
}
|
||||
|
||||
func TestIndexedAttestation_ToConsensus(t *testing.T) {
|
||||
a := &IndexedAttestation{
|
||||
AttestingIndices: []string{"1"},
|
||||
|
||||
@@ -262,23 +262,3 @@ type PendingConsolidation struct {
|
||||
SourceIndex string `json:"source_index"`
|
||||
TargetIndex string `json:"target_index"`
|
||||
}
|
||||
|
||||
type Builder struct {
|
||||
Pubkey string `json:"pubkey"`
|
||||
Version string `json:"version"`
|
||||
ExecutionAddress string `json:"execution_address"`
|
||||
Balance string `json:"balance"`
|
||||
DepositEpoch string `json:"deposit_epoch"`
|
||||
WithdrawableEpoch string `json:"withdrawable_epoch"`
|
||||
}
|
||||
|
||||
type BuilderPendingPayment struct {
|
||||
Weight string `json:"weight"`
|
||||
Withdrawal *BuilderPendingWithdrawal `json:"withdrawal"`
|
||||
}
|
||||
|
||||
type BuilderPendingWithdrawal struct {
|
||||
FeeRecipient string `json:"fee_recipient"`
|
||||
Amount string `json:"amount"`
|
||||
BuilderIndex string `json:"builder_index"`
|
||||
}
|
||||
|
||||
@@ -221,51 +221,3 @@ type BeaconStateFulu struct {
|
||||
PendingConsolidations []*PendingConsolidation `json:"pending_consolidations"`
|
||||
ProposerLookahead []string `json:"proposer_lookahead"`
|
||||
}
|
||||
|
||||
type BeaconStateGloas struct {
|
||||
GenesisTime string `json:"genesis_time"`
|
||||
GenesisValidatorsRoot string `json:"genesis_validators_root"`
|
||||
Slot string `json:"slot"`
|
||||
Fork *Fork `json:"fork"`
|
||||
LatestBlockHeader *BeaconBlockHeader `json:"latest_block_header"`
|
||||
BlockRoots []string `json:"block_roots"`
|
||||
StateRoots []string `json:"state_roots"`
|
||||
HistoricalRoots []string `json:"historical_roots"`
|
||||
Eth1Data *Eth1Data `json:"eth1_data"`
|
||||
Eth1DataVotes []*Eth1Data `json:"eth1_data_votes"`
|
||||
Eth1DepositIndex string `json:"eth1_deposit_index"`
|
||||
Validators []*Validator `json:"validators"`
|
||||
Balances []string `json:"balances"`
|
||||
RandaoMixes []string `json:"randao_mixes"`
|
||||
Slashings []string `json:"slashings"`
|
||||
PreviousEpochParticipation []string `json:"previous_epoch_participation"`
|
||||
CurrentEpochParticipation []string `json:"current_epoch_participation"`
|
||||
JustificationBits string `json:"justification_bits"`
|
||||
PreviousJustifiedCheckpoint *Checkpoint `json:"previous_justified_checkpoint"`
|
||||
CurrentJustifiedCheckpoint *Checkpoint `json:"current_justified_checkpoint"`
|
||||
FinalizedCheckpoint *Checkpoint `json:"finalized_checkpoint"`
|
||||
InactivityScores []string `json:"inactivity_scores"`
|
||||
CurrentSyncCommittee *SyncCommittee `json:"current_sync_committee"`
|
||||
NextSyncCommittee *SyncCommittee `json:"next_sync_committee"`
|
||||
NextWithdrawalIndex string `json:"next_withdrawal_index"`
|
||||
NextWithdrawalValidatorIndex string `json:"next_withdrawal_validator_index"`
|
||||
HistoricalSummaries []*HistoricalSummary `json:"historical_summaries"`
|
||||
DepositRequestsStartIndex string `json:"deposit_requests_start_index"`
|
||||
DepositBalanceToConsume string `json:"deposit_balance_to_consume"`
|
||||
ExitBalanceToConsume string `json:"exit_balance_to_consume"`
|
||||
EarliestExitEpoch string `json:"earliest_exit_epoch"`
|
||||
ConsolidationBalanceToConsume string `json:"consolidation_balance_to_consume"`
|
||||
EarliestConsolidationEpoch string `json:"earliest_consolidation_epoch"`
|
||||
PendingDeposits []*PendingDeposit `json:"pending_deposits"`
|
||||
PendingPartialWithdrawals []*PendingPartialWithdrawal `json:"pending_partial_withdrawals"`
|
||||
PendingConsolidations []*PendingConsolidation `json:"pending_consolidations"`
|
||||
ProposerLookahead []string `json:"proposer_lookahead"`
|
||||
LatestExecutionPayloadBid *ExecutionPayloadBid `json:"latest_execution_payload_bid"`
|
||||
Builders []*Builder `json:"builders"`
|
||||
NextWithdrawalBuilderIndex string `json:"next_withdrawal_builder_index"`
|
||||
ExecutionPayloadAvailability string `json:"execution_payload_availability"`
|
||||
BuilderPendingPayments []*BuilderPendingPayment `json:"builder_pending_payments"`
|
||||
BuilderPendingWithdrawals []*BuilderPendingWithdrawal `json:"builder_pending_withdrawals"`
|
||||
LatestBlockHash string `json:"latest_block_hash"`
|
||||
PayloadExpectedWithdrawals []*Withdrawal `json:"payload_expected_withdrawals"`
|
||||
}
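The ProposerLookahead field carries (MIN_SEED_LOOKAHEAD + 1) * SLOTS_PER_EPOCH entries, which is what the debug-endpoint tests further down assert. A quick sketch of that sizing under assumed mainnet parameters (MIN_SEED_LOOKAHEAD = 1, SLOTS_PER_EPOCH = 32; these values are not read from this diff):

package main

import "fmt"

func main() {
	const minSeedLookahead = 1 // assumed mainnet value
	const slotsPerEpoch = 32   // assumed mainnet value
	// Number of proposer lookahead entries in the state.
	fmt.Println((minSeedLookahead + 1) * slotsPerEpoch) // 64
}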
@@ -4,9 +4,6 @@ go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"bid.go",
|
||||
"deposit_request.go",
|
||||
"log.go",
|
||||
"payload.go",
|
||||
"payload_attestation.go",
|
||||
"pending_payment.go",
|
||||
"proposer_slashing.go",
|
||||
@@ -15,7 +12,6 @@ go_library(
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/requests:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
@@ -29,13 +25,9 @@ go_library(
|
||||
"//crypto/bls/common:go_default_library",
|
||||
"//crypto/hash:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//encoding/ssz:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -43,9 +35,7 @@ go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"bid_test.go",
|
||||
"deposit_request_test.go",
|
||||
"payload_attestation_test.go",
|
||||
"payload_test.go",
|
||||
"pending_payment_test.go",
|
||||
"proposer_slashing_test.go",
|
||||
],
|
||||
@@ -55,7 +45,6 @@ go_test(
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//beacon-chain/state/testing:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
@@ -63,7 +52,6 @@ go_test(
|
||||
"//crypto/bls:go_default_library",
|
||||
"//crypto/bls/common:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//encoding/ssz:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/validator-client:go_default_library",
|
||||
|
||||
@@ -1,160 +0,0 @@
|
||||
package gloas
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func processDepositRequests(ctx context.Context, beaconState state.BeaconState, requests []*enginev1.DepositRequest) error {
|
||||
if len(requests) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, receipt := range requests {
|
||||
if err := processDepositRequest(beaconState, receipt); err != nil {
|
||||
return errors.Wrap(err, "could not apply deposit request")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// processDepositRequest processes a single deposit request.
|
||||
// Spec v1.7.0-alpha.0 (pseudocode):
|
||||
// def process_deposit_request(state: BeaconState, deposit_request: DepositRequest) -> None:
|
||||
//
|
||||
// # [New in Gloas:EIP7732]
|
||||
// builder_pubkeys = [b.pubkey for b in state.builders]
|
||||
// validator_pubkeys = [v.pubkey for v in state.validators]
|
||||
//
|
||||
// # [New in Gloas:EIP7732]
|
||||
// # Regardless of the withdrawal credentials prefix, if a builder/validator
|
||||
// # already exists with this pubkey, apply the deposit to their balance
|
||||
// is_builder = deposit_request.pubkey in builder_pubkeys
|
||||
// is_validator = deposit_request.pubkey in validator_pubkeys
|
||||
// is_builder_prefix = is_builder_withdrawal_credential(deposit_request.withdrawal_credentials)
|
||||
// if is_builder or (is_builder_prefix and not is_validator):
|
||||
//
|
||||
// # Apply builder deposits immediately
|
||||
// apply_deposit_for_builder(
|
||||
// state,
|
||||
// deposit_request.pubkey,
|
||||
// deposit_request.withdrawal_credentials,
|
||||
// deposit_request.amount,
|
||||
// deposit_request.signature,
|
||||
// )
|
||||
// return
|
||||
//
|
||||
// # Add validator deposits to the queue
|
||||
// state.pending_deposits.append(
|
||||
// PendingDeposit(
|
||||
// pubkey=deposit_request.pubkey,
|
||||
// withdrawal_credentials=deposit_request.withdrawal_credentials,
|
||||
// amount=deposit_request.amount,
|
||||
// signature=deposit_request.signature,
|
||||
// slot=state.slot,
|
||||
// )
|
||||
// )
|
||||
func processDepositRequest(beaconState state.BeaconState, request *enginev1.DepositRequest) error {
|
||||
if request == nil {
|
||||
return errors.New("nil deposit request")
|
||||
}
|
||||
|
||||
if beaconState.Version() >= version.Gloas {
|
||||
pubkey := bytesutil.ToBytes48(request.Pubkey)
|
||||
_, isValidator := beaconState.ValidatorIndexByPubkey(pubkey)
|
||||
_, isBuilder := beaconState.BuilderIndexByPubkey(pubkey)
|
||||
isBuilderPrefix := IsBuilderWithdrawalCredential(request.WithdrawalCredentials)
|
||||
if isBuilder || (isBuilderPrefix && !isValidator) {
|
||||
if err := ApplyDepositForBuilder(
|
||||
beaconState,
|
||||
request.Pubkey,
|
||||
request.WithdrawalCredentials,
|
||||
request.Amount,
|
||||
request.Signature,
|
||||
); err != nil {
|
||||
return errors.Wrap(err, "could not apply builder deposit")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
if err := beaconState.AppendPendingDeposit(ðpb.PendingDeposit{
|
||||
PublicKey: request.Pubkey,
|
||||
WithdrawalCredentials: request.WithdrawalCredentials,
|
||||
Amount: request.Amount,
|
||||
Signature: request.Signature,
|
||||
Slot: beaconState.Slot(),
|
||||
}); err != nil {
|
||||
return errors.Wrap(err, "could not append deposit request")
|
||||
}
|
||||
return nil
|
||||
}
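The routing rule implemented above reduces to one predicate. A sketch, for illustration only, that restates the branch as a pure function:

// routeToBuilder restates the branch in processDepositRequest above: a deposit
// is applied to the builder registry when the pubkey already belongs to a
// builder, or when it carries the builder withdrawal prefix and does not
// belong to an existing validator; every other deposit is queued as a pending
// validator deposit.
func routeToBuilder(isBuilder, isValidator, hasBuilderPrefix bool) bool {
	return isBuilder || (hasBuilderPrefix && !isValidator)
}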
// ApplyDepositForBuilder processes an execution-layer deposit for a builder.
|
||||
// Spec v1.7.0-alpha.0 (pseudocode):
|
||||
// def apply_deposit_for_builder(
|
||||
//
|
||||
// state: BeaconState,
|
||||
// pubkey: BLSPubkey,
|
||||
// withdrawal_credentials: Bytes32,
|
||||
// amount: uint64,
|
||||
// signature: BLSSignature,
|
||||
//
|
||||
// ) -> None:
|
||||
//
|
||||
// builder_pubkeys = [b.pubkey for b in state.builders]
|
||||
// if pubkey not in builder_pubkeys:
|
||||
// # Verify the deposit signature (proof of possession) which is not checked by the deposit contract
|
||||
// if is_valid_deposit_signature(pubkey, withdrawal_credentials, amount, signature):
|
||||
// add_builder_to_registry(state, pubkey, withdrawal_credentials, amount)
|
||||
// else:
|
||||
// # Increase balance by deposit amount
|
||||
// builder_index = builder_pubkeys.index(pubkey)
|
||||
// state.builders[builder_index].balance += amount
|
||||
func ApplyDepositForBuilder(
|
||||
beaconState state.BeaconState,
|
||||
pubkey []byte,
|
||||
withdrawalCredentials []byte,
|
||||
amount uint64,
|
||||
signature []byte,
|
||||
) error {
|
||||
pubkeyBytes := bytesutil.ToBytes48(pubkey)
|
||||
if idx, exists := beaconState.BuilderIndexByPubkey(pubkeyBytes); exists {
|
||||
return beaconState.IncreaseBuilderBalance(idx, amount)
|
||||
}
|
||||
|
||||
valid, err := helpers.IsValidDepositSignature(ðpb.Deposit_Data{
|
||||
PublicKey: pubkey,
|
||||
WithdrawalCredentials: withdrawalCredentials,
|
||||
Amount: amount,
|
||||
Signature: signature,
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not verify deposit signature")
|
||||
}
|
||||
if !valid {
|
||||
log.WithFields(logrus.Fields{
|
||||
"pubkey": fmt.Sprintf("%x", pubkey),
|
||||
}).Warn("ignoring builder deposit: invalid signature")
|
||||
return nil
|
||||
}
|
||||
|
||||
withdrawalCredBytes := bytesutil.ToBytes32(withdrawalCredentials)
|
||||
return beaconState.AddBuilderFromDeposit(pubkeyBytes, withdrawalCredBytes, amount)
|
||||
}
|
||||
|
||||
// IsBuilderWithdrawalCredential reports whether the withdrawal credentials are
// 32 bytes long and start with the configured builder withdrawal prefix byte.
func IsBuilderWithdrawalCredential(withdrawalCredentials []byte) bool {
	return len(withdrawalCredentials) == fieldparams.RootLength &&
		withdrawalCredentials[0] == params.BeaconConfig().BuilderWithdrawalPrefixByte
}
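A sketch of the credential layout this predicate checks and that the test helpers below construct: 32 bytes, prefix byte first, eleven zero bytes, then a 20-byte execution address. The prefix value 0x0b is a placeholder, not the configured value:

// exampleBuilderCredential builds an illustrative builder withdrawal
// credential. The real prefix comes from
// params.BeaconConfig().BuilderWithdrawalPrefixByte.
func exampleBuilderCredential(executionAddress [20]byte) [32]byte {
	var cred [32]byte
	cred[0] = 0x0b // placeholder for the configured builder withdrawal prefix
	copy(cred[12:], executionAddress[:])
	return cred
}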
@@ -1,150 +0,0 @@
|
||||
package gloas
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
|
||||
stateTesting "github.com/OffchainLabs/prysm/v7/beacon-chain/state/testing"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/crypto/bls"
|
||||
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
)
|
||||
|
||||
func TestProcessDepositRequests_EmptyAndNil(t *testing.T) {
|
||||
st := newGloasState(t, nil, nil)
|
||||
|
||||
t.Run("empty requests continues", func(t *testing.T) {
|
||||
err := processDepositRequests(t.Context(), st, []*enginev1.DepositRequest{})
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("nil request errors", func(t *testing.T) {
|
||||
err := processDepositRequests(t.Context(), st, []*enginev1.DepositRequest{nil})
|
||||
require.ErrorContains(t, "nil deposit request", err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestProcessDepositRequest_BuilderDepositAddsBuilder(t *testing.T) {
|
||||
sk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
|
||||
cred := builderWithdrawalCredentials()
|
||||
pd := stateTesting.GeneratePendingDeposit(t, sk, 1234, cred, 0)
|
||||
req := depositRequestFromPending(pd, 1)
|
||||
|
||||
st := newGloasState(t, nil, nil)
|
||||
err = processDepositRequest(st, req)
|
||||
require.NoError(t, err)
|
||||
|
||||
idx, ok := st.BuilderIndexByPubkey(toBytes48(req.Pubkey))
|
||||
require.Equal(t, true, ok)
|
||||
|
||||
builder, err := st.Builder(idx)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, builder)
|
||||
require.DeepEqual(t, req.Pubkey, builder.Pubkey)
|
||||
require.DeepEqual(t, []byte{cred[0]}, builder.Version)
|
||||
require.DeepEqual(t, cred[12:], builder.ExecutionAddress)
|
||||
require.Equal(t, uint64(1234), uint64(builder.Balance))
|
||||
require.Equal(t, params.BeaconConfig().FarFutureEpoch, builder.WithdrawableEpoch)
|
||||
|
||||
pending, err := st.PendingDeposits()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(pending))
|
||||
}
|
||||
|
||||
func TestProcessDepositRequest_ExistingBuilderIncreasesBalance(t *testing.T) {
|
||||
sk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
|
||||
pubkey := sk.PublicKey().Marshal()
|
||||
builders := []*ethpb.Builder{
|
||||
{
|
||||
Pubkey: pubkey,
|
||||
Version: []byte{0},
|
||||
ExecutionAddress: bytes.Repeat([]byte{0x11}, 20),
|
||||
Balance: 5,
|
||||
WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
},
|
||||
}
|
||||
st := newGloasState(t, nil, builders)
|
||||
|
||||
cred := validatorWithdrawalCredentials()
|
||||
pd := stateTesting.GeneratePendingDeposit(t, sk, 200, cred, 0)
|
||||
req := depositRequestFromPending(pd, 9)
|
||||
|
||||
err = processDepositRequest(st, req)
|
||||
require.NoError(t, err)
|
||||
|
||||
idx, ok := st.BuilderIndexByPubkey(toBytes48(pubkey))
|
||||
require.Equal(t, true, ok)
|
||||
builder, err := st.Builder(idx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(205), uint64(builder.Balance))
|
||||
|
||||
pending, err := st.PendingDeposits()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(pending))
|
||||
}
|
||||
|
||||
func TestApplyDepositForBuilder_InvalidSignatureIgnoresDeposit(t *testing.T) {
|
||||
sk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
|
||||
cred := builderWithdrawalCredentials()
|
||||
st := newGloasState(t, nil, nil)
|
||||
err = ApplyDepositForBuilder(st, sk.PublicKey().Marshal(), cred[:], 100, make([]byte, 96))
|
||||
require.NoError(t, err)
|
||||
|
||||
_, ok := st.BuilderIndexByPubkey(toBytes48(sk.PublicKey().Marshal()))
|
||||
require.Equal(t, false, ok)
|
||||
}
|
||||
|
||||
func newGloasState(t *testing.T, validators []*ethpb.Validator, builders []*ethpb.Builder) state.BeaconState {
|
||||
t.Helper()
|
||||
|
||||
st, err := state_native.InitializeFromProtoGloas(ðpb.BeaconStateGloas{
|
||||
DepositRequestsStartIndex: params.BeaconConfig().UnsetDepositRequestsStartIndex,
|
||||
Validators: validators,
|
||||
Balances: make([]uint64, len(validators)),
|
||||
PendingDeposits: []*ethpb.PendingDeposit{},
|
||||
Builders: builders,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
return st
|
||||
}
|
||||
|
||||
func depositRequestFromPending(pd *ethpb.PendingDeposit, index uint64) *enginev1.DepositRequest {
|
||||
return &enginev1.DepositRequest{
|
||||
Pubkey: pd.PublicKey,
|
||||
WithdrawalCredentials: pd.WithdrawalCredentials,
|
||||
Amount: pd.Amount,
|
||||
Signature: pd.Signature,
|
||||
Index: index,
|
||||
}
|
||||
}
|
||||
|
||||
func builderWithdrawalCredentials() [32]byte {
|
||||
var cred [32]byte
|
||||
cred[0] = params.BeaconConfig().BuilderWithdrawalPrefixByte
|
||||
copy(cred[12:], bytes.Repeat([]byte{0x22}, 20))
|
||||
return cred
|
||||
}
|
||||
|
||||
func validatorWithdrawalCredentials() [32]byte {
|
||||
var cred [32]byte
|
||||
cred[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
|
||||
copy(cred[12:], bytes.Repeat([]byte{0x33}, 20))
|
||||
return cred
|
||||
}
|
||||
|
||||
func toBytes48(b []byte) [48]byte {
|
||||
var out [48]byte
|
||||
copy(out[:], b)
|
||||
return out
|
||||
}
|
||||
@@ -1,330 +0,0 @@
|
||||
package gloas
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
requests "github.com/OffchainLabs/prysm/v7/beacon-chain/core/requests"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v7/crypto/bls"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/ssz"
|
||||
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// ProcessExecutionPayload processes the signed execution payload envelope for the Gloas fork.
|
||||
// Spec v1.7.0-alpha.0 (pseudocode):
|
||||
// def process_execution_payload(
|
||||
//
|
||||
// state: BeaconState,
|
||||
// signed_envelope: SignedExecutionPayloadEnvelope,
|
||||
// execution_engine: ExecutionEngine,
|
||||
// verify: bool = True,
|
||||
//
|
||||
// ) -> None:
|
||||
//
|
||||
// envelope = signed_envelope.message
|
||||
// payload = envelope.payload
|
||||
//
|
||||
// if verify:
|
||||
// assert verify_execution_payload_envelope_signature(state, signed_envelope)
|
||||
//
|
||||
// previous_state_root = hash_tree_root(state)
|
||||
// if state.latest_block_header.state_root == Root():
|
||||
// state.latest_block_header.state_root = previous_state_root
|
||||
//
|
||||
// assert envelope.beacon_block_root == hash_tree_root(state.latest_block_header)
|
||||
// assert envelope.slot == state.slot
|
||||
//
|
||||
// committed_bid = state.latest_execution_payload_bid
|
||||
// assert envelope.builder_index == committed_bid.builder_index
|
||||
// assert committed_bid.blob_kzg_commitments_root == hash_tree_root(envelope.blob_kzg_commitments)
|
||||
// assert committed_bid.prev_randao == payload.prev_randao
|
||||
//
|
||||
// assert hash_tree_root(payload.withdrawals) == hash_tree_root(state.payload_expected_withdrawals)
|
||||
//
|
||||
// assert committed_bid.gas_limit == payload.gas_limit
|
||||
// assert committed_bid.block_hash == payload.block_hash
|
||||
// assert payload.parent_hash == state.latest_block_hash
|
||||
// assert payload.timestamp == compute_time_at_slot(state, state.slot)
|
||||
// assert (
|
||||
// len(envelope.blob_kzg_commitments)
|
||||
// <= get_blob_parameters(get_current_epoch(state)).max_blobs_per_block
|
||||
// )
|
||||
// versioned_hashes = [
|
||||
// kzg_commitment_to_versioned_hash(commitment) for commitment in envelope.blob_kzg_commitments
|
||||
// ]
|
||||
// requests = envelope.execution_requests
|
||||
// assert execution_engine.verify_and_notify_new_payload(
|
||||
// NewPayloadRequest(
|
||||
// execution_payload=payload,
|
||||
// versioned_hashes=versioned_hashes,
|
||||
// parent_beacon_block_root=state.latest_block_header.parent_root,
|
||||
// execution_requests=requests,
|
||||
// )
|
||||
// )
|
||||
//
|
||||
// for op in requests.deposits: process_deposit_request(state, op)
|
||||
// for op in requests.withdrawals: process_withdrawal_request(state, op)
|
||||
// for op in requests.consolidations: process_consolidation_request(state, op)
|
||||
//
|
||||
// payment = state.builder_pending_payments[SLOTS_PER_EPOCH + state.slot % SLOTS_PER_EPOCH]
|
||||
// amount = payment.withdrawal.amount
|
||||
// if amount > 0:
|
||||
// state.builder_pending_withdrawals.append(payment.withdrawal)
|
||||
// state.builder_pending_payments[SLOTS_PER_EPOCH + state.slot % SLOTS_PER_EPOCH] = (
|
||||
// BuilderPendingPayment()
|
||||
// )
|
||||
//
|
||||
// state.execution_payload_availability[state.slot % SLOTS_PER_HISTORICAL_ROOT] = 0b1
|
||||
// state.latest_block_hash = payload.block_hash
|
||||
//
|
||||
// if verify:
|
||||
// assert envelope.state_root == hash_tree_root(state)
|
||||
func ProcessExecutionPayload(
|
||||
ctx context.Context,
|
||||
st state.BeaconState,
|
||||
signedEnvelope interfaces.ROSignedExecutionPayloadEnvelope,
|
||||
) error {
|
||||
if err := verifyExecutionPayloadEnvelopeSignature(st, signedEnvelope); err != nil {
|
||||
return errors.Wrap(err, "signature verification failed")
|
||||
}
|
||||
|
||||
latestHeader := st.LatestBlockHeader()
|
||||
if len(latestHeader.StateRoot) == 0 || bytes.Equal(latestHeader.StateRoot, make([]byte, 32)) {
|
||||
previousStateRoot, err := st.HashTreeRoot(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not compute state root")
|
||||
}
|
||||
latestHeader.StateRoot = previousStateRoot[:]
|
||||
if err := st.SetLatestBlockHeader(latestHeader); err != nil {
|
||||
return errors.Wrap(err, "could not set latest block header")
|
||||
}
|
||||
}
|
||||
|
||||
blockHeaderRoot, err := latestHeader.HashTreeRoot()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not compute block header root")
|
||||
}
|
||||
envelope, err := signedEnvelope.Envelope()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get envelope from signed envelope")
|
||||
}
|
||||
beaconBlockRoot := envelope.BeaconBlockRoot()
|
||||
if !bytes.Equal(beaconBlockRoot[:], blockHeaderRoot[:]) {
|
||||
return errors.Errorf("envelope beacon block root does not match state latest block header root: envelope=%#x, header=%#x", beaconBlockRoot, blockHeaderRoot)
|
||||
}
|
||||
|
||||
if envelope.Slot() != st.Slot() {
|
||||
return errors.Errorf("envelope slot does not match state slot: envelope=%d, state=%d", envelope.Slot(), st.Slot())
|
||||
}
|
||||
|
||||
latestBid, err := st.LatestExecutionPayloadBid()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get latest execution payload bid")
|
||||
}
|
||||
if latestBid == nil {
|
||||
return errors.New("latest execution payload bid is nil")
|
||||
}
|
||||
if envelope.BuilderIndex() != latestBid.BuilderIndex() {
|
||||
return errors.Errorf("envelope builder index does not match committed bid builder index: envelope=%d, bid=%d", envelope.BuilderIndex(), latestBid.BuilderIndex())
|
||||
}
|
||||
|
||||
envelopeBlobCommitments := envelope.BlobKzgCommitments()
|
||||
envelopeBlobRoot, err := ssz.KzgCommitmentsRoot(envelopeBlobCommitments)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not compute envelope blob KZG commitments root")
|
||||
}
|
||||
committedBlobRoot := latestBid.BlobKzgCommitmentsRoot()
|
||||
if !bytes.Equal(committedBlobRoot[:], envelopeBlobRoot[:]) {
|
||||
return errors.Errorf("committed bid blob KZG commitments root does not match envelope: bid=%#x, envelope=%#x", committedBlobRoot, envelopeBlobRoot)
|
||||
}
|
||||
|
||||
payload, err := envelope.Execution()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get execution payload from envelope")
|
||||
}
|
||||
withdrawals, err := payload.Withdrawals()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get withdrawals from payload")
|
||||
}
|
||||
|
||||
ok, err := st.WithdrawalsMatchPayloadExpected(withdrawals)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not validate payload withdrawals")
|
||||
}
|
||||
if !ok {
|
||||
return errors.New("payload withdrawals do not match expected withdrawals")
|
||||
}
|
||||
|
||||
if latestBid.GasLimit() != payload.GasLimit() {
|
||||
return errors.Errorf("committed bid gas limit does not match payload gas limit: bid=%d, payload=%d", latestBid.GasLimit(), payload.GasLimit())
|
||||
}
|
||||
|
||||
latestBidPrevRandao := latestBid.PrevRandao()
|
||||
if !bytes.Equal(payload.PrevRandao(), latestBidPrevRandao[:]) {
|
||||
return errors.Errorf("payload prev randao does not match committed bid prev randao: payload=%#x, bid=%#x", payload.PrevRandao(), latestBidPrevRandao)
|
||||
}
|
||||
|
||||
bidBlockHash := latestBid.BlockHash()
|
||||
payloadBlockHash := payload.BlockHash()
|
||||
if !bytes.Equal(bidBlockHash[:], payloadBlockHash) {
|
||||
return errors.Errorf("committed bid block hash does not match payload block hash: bid=%#x, payload=%#x", bidBlockHash, payloadBlockHash)
|
||||
}
|
||||
|
||||
latestBlockHash, err := st.LatestBlockHash()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get latest block hash")
|
||||
}
|
||||
if !bytes.Equal(payload.ParentHash(), latestBlockHash[:]) {
|
||||
return errors.Errorf("payload parent hash does not match state latest block hash: payload=%#x, state=%#x", payload.ParentHash(), latestBlockHash)
|
||||
}
|
||||
|
||||
t, err := slots.StartTime(st.GenesisTime(), st.Slot())
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not compute timestamp")
|
||||
}
|
||||
if payload.Timestamp() != uint64(t.Unix()) {
|
||||
return errors.Errorf("payload timestamp does not match expected timestamp: payload=%d, expected=%d", payload.Timestamp(), uint64(t.Unix()))
|
||||
}
|
||||
|
||||
cfg := params.BeaconConfig()
|
||||
maxBlobsPerBlock := cfg.MaxBlobsPerBlock(envelope.Slot())
|
||||
if len(envelopeBlobCommitments) > maxBlobsPerBlock {
|
||||
return errors.Errorf("too many blob KZG commitments: got=%d, max=%d", len(envelopeBlobCommitments), maxBlobsPerBlock)
|
||||
}
|
||||
|
||||
if err := processExecutionRequests(ctx, st, envelope.ExecutionRequests()); err != nil {
|
||||
return errors.Wrap(err, "could not process execution requests")
|
||||
}
|
||||
|
||||
if err := st.QueueBuilderPayment(); err != nil {
|
||||
return errors.Wrap(err, "could not queue builder payment")
|
||||
}
|
||||
|
||||
if err := st.SetExecutionPayloadAvailability(st.Slot(), true); err != nil {
|
||||
return errors.Wrap(err, "could not set execution payload availability")
|
||||
}
|
||||
|
||||
if err := st.SetLatestBlockHash([32]byte(payload.BlockHash())); err != nil {
|
||||
return errors.Wrap(err, "could not set latest block hash")
|
||||
}
|
||||
|
||||
r, err := st.HashTreeRoot(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get hash tree root")
|
||||
}
|
||||
if r != envelope.StateRoot() {
|
||||
return fmt.Errorf("state root mismatch: expected %#x, got %#x", envelope.StateRoot(), r)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
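One of the checks above compares the payload timestamp against compute_time_at_slot, which reduces to genesis_time + slot * SECONDS_PER_SLOT; the test fixture below derives its genesis time the same way, in reverse. A sketch only; the production path goes through slots.StartTime:

// expectedPayloadTimestamp spells out the timestamp assertion in
// ProcessExecutionPayload above.
func expectedPayloadTimestamp(genesisTime, slot, secondsPerSlot uint64) uint64 {
	return genesisTime + slot*secondsPerSlot
}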
// processExecutionRequests processes deposits, withdrawals, and consolidations from execution requests.
// Spec v1.7.0-alpha.0 (pseudocode):
// for op in requests.deposits: process_deposit_request(state, op)
// for op in requests.withdrawals: process_withdrawal_request(state, op)
// for op in requests.consolidations: process_consolidation_request(state, op)
func processExecutionRequests(ctx context.Context, st state.BeaconState, rqs *enginev1.ExecutionRequests) error {
	if err := processDepositRequests(ctx, st, rqs.Deposits); err != nil {
		return errors.Wrap(err, "could not process deposit requests")
	}

	// The state returned by ProcessWithdrawalRequests is reused for the
	// consolidation requests that follow.
	var err error
	st, err = requests.ProcessWithdrawalRequests(ctx, st, rqs.Withdrawals)
	if err != nil {
		return errors.Wrap(err, "could not process withdrawal requests")
	}
	err = requests.ProcessConsolidationRequests(ctx, st, rqs.Consolidations)
	if err != nil {
		return errors.Wrap(err, "could not process consolidation requests")
	}
	return nil
}
|
||||
|
||||
// verifyExecutionPayloadEnvelopeSignature verifies the BLS signature on a signed execution payload envelope.
|
||||
// Spec v1.7.0-alpha.0 (pseudocode):
|
||||
// builder_index = signed_envelope.message.builder_index
|
||||
// if builder_index == BUILDER_INDEX_SELF_BUILD:
|
||||
//
|
||||
// validator_index = state.latest_block_header.proposer_index
|
||||
// pubkey = state.validators[validator_index].pubkey
|
||||
//
|
||||
// else:
|
||||
//
|
||||
// pubkey = state.builders[builder_index].pubkey
|
||||
//
|
||||
// signing_root = compute_signing_root(
|
||||
//
|
||||
// signed_envelope.message, get_domain(state, DOMAIN_BEACON_BUILDER)
|
||||
//
|
||||
// )
|
||||
// return bls.Verify(pubkey, signing_root, signed_envelope.signature)
|
||||
func verifyExecutionPayloadEnvelopeSignature(st state.BeaconState, signedEnvelope interfaces.ROSignedExecutionPayloadEnvelope) error {
|
||||
envelope, err := signedEnvelope.Envelope()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get envelope: %w", err)
|
||||
}
|
||||
|
||||
builderIdx := envelope.BuilderIndex()
|
||||
var publicKey bls.PublicKey
|
||||
if builderIdx == params.BeaconConfig().BuilderIndexSelfBuild {
|
||||
header := st.LatestBlockHeader()
|
||||
if header == nil {
|
||||
return fmt.Errorf("latest block header is nil")
|
||||
}
|
||||
proposerPubkey := st.PubkeyAtIndex(header.ProposerIndex)
|
||||
key, err := bls.PublicKeyFromBytes(proposerPubkey[:])
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid proposer public key: %w", err)
|
||||
}
|
||||
publicKey = key
|
||||
} else {
|
||||
builder, err := st.Builder(builderIdx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get builder: %w", err)
|
||||
}
|
||||
if builder == nil {
|
||||
return fmt.Errorf("builder at index %d not found", builderIdx)
|
||||
}
|
||||
key, err := bls.PublicKeyFromBytes(builder.Pubkey)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid builder public key: %w", err)
|
||||
}
|
||||
publicKey = key
|
||||
}
|
||||
|
||||
signatureBytes := signedEnvelope.Signature()
|
||||
signature, err := bls.SignatureFromBytes(signatureBytes[:])
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid signature format: %w", err)
|
||||
}
|
||||
|
||||
currentEpoch := slots.ToEpoch(envelope.Slot())
|
||||
domain, err := signing.Domain(
|
||||
st.Fork(),
|
||||
currentEpoch,
|
||||
params.BeaconConfig().DomainBeaconBuilder,
|
||||
st.GenesisValidatorsRoot(),
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to compute signing domain: %w", err)
|
||||
}
|
||||
|
||||
signingRoot, err := signedEnvelope.SigningRoot(domain)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to compute signing root: %w", err)
|
||||
}
|
||||
|
||||
if !signature.Verify(publicKey, signingRoot[:]) {
|
||||
return fmt.Errorf("signature verification failed: %w", signing.ErrSigFailedToVerify)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
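The signer selection above has only two cases. A condensed sketch, for illustration: a self-build envelope is verified against the proposer's validator key, anything else against the registered builder's key. BLS deserialization and error handling are left out, and plain uint64 indices stand in for the concrete index types:

// selectEnvelopeSignerPubkey condenses the key-selection branch of
// verifyExecutionPayloadEnvelopeSignature above.
func selectEnvelopeSignerPubkey(builderIdx, selfBuildIdx uint64, proposerPubkey, builderPubkey []byte) []byte {
	if builderIdx == selfBuildIdx {
		return proposerPubkey
	}
	return builderPubkey
}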
@@ -1,360 +0,0 @@
|
||||
package gloas
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/crypto/bls"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/ssz"
|
||||
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
type payloadFixture struct {
|
||||
state state.BeaconState
|
||||
signed interfaces.ROSignedExecutionPayloadEnvelope
|
||||
signedProto *ethpb.SignedExecutionPayloadEnvelope
|
||||
envelope *ethpb.ExecutionPayloadEnvelope
|
||||
payload *enginev1.ExecutionPayloadDeneb
|
||||
slot primitives.Slot
|
||||
}
|
||||
|
||||
func buildPayloadFixture(t *testing.T, mutate func(payload *enginev1.ExecutionPayloadDeneb, bid *ethpb.ExecutionPayloadBid, envelope *ethpb.ExecutionPayloadEnvelope)) payloadFixture {
|
||||
t.Helper()
|
||||
|
||||
cfg := params.BeaconConfig()
|
||||
slot := primitives.Slot(5)
|
||||
builderIdx := primitives.BuilderIndex(0)
|
||||
|
||||
sk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
pk := sk.PublicKey().Marshal()
|
||||
|
||||
randao := bytes.Repeat([]byte{0xAA}, 32)
|
||||
parentHash := bytes.Repeat([]byte{0xBB}, 32)
|
||||
blockHash := bytes.Repeat([]byte{0xCC}, 32)
|
||||
|
||||
withdrawals := []*enginev1.Withdrawal{
|
||||
{Index: 0, ValidatorIndex: 1, Address: bytes.Repeat([]byte{0x01}, 20), Amount: 0},
|
||||
}
|
||||
|
||||
blobCommitments := [][]byte{}
|
||||
blobRoot, err := ssz.KzgCommitmentsRoot(blobCommitments)
|
||||
require.NoError(t, err)
|
||||
|
||||
payload := &enginev1.ExecutionPayloadDeneb{
|
||||
ParentHash: parentHash,
|
||||
FeeRecipient: bytes.Repeat([]byte{0x01}, 20),
|
||||
StateRoot: bytes.Repeat([]byte{0x02}, 32),
|
||||
ReceiptsRoot: bytes.Repeat([]byte{0x03}, 32),
|
||||
LogsBloom: bytes.Repeat([]byte{0x04}, 256),
|
||||
PrevRandao: randao,
|
||||
BlockNumber: 1,
|
||||
GasLimit: 1,
|
||||
GasUsed: 0,
|
||||
Timestamp: 100,
|
||||
ExtraData: []byte{},
|
||||
BaseFeePerGas: bytes.Repeat([]byte{0x05}, 32),
|
||||
BlockHash: blockHash,
|
||||
Transactions: [][]byte{},
|
||||
Withdrawals: withdrawals,
|
||||
BlobGasUsed: 0,
|
||||
ExcessBlobGas: 0,
|
||||
}
|
||||
|
||||
bid := ðpb.ExecutionPayloadBid{
|
||||
ParentBlockHash: parentHash,
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0xDD}, 32),
|
||||
BlockHash: blockHash,
|
||||
PrevRandao: randao,
|
||||
GasLimit: 1,
|
||||
BuilderIndex: builderIdx,
|
||||
Slot: slot,
|
||||
Value: 0,
|
||||
ExecutionPayment: 0,
|
||||
BlobKzgCommitmentsRoot: blobRoot[:],
|
||||
FeeRecipient: bytes.Repeat([]byte{0xEE}, 20),
|
||||
}
|
||||
|
||||
header := ðpb.BeaconBlockHeader{
|
||||
Slot: slot,
|
||||
ParentRoot: bytes.Repeat([]byte{0x11}, 32),
|
||||
StateRoot: bytes.Repeat([]byte{0x22}, 32),
|
||||
BodyRoot: bytes.Repeat([]byte{0x33}, 32),
|
||||
}
|
||||
headerRoot, err := header.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
envelope := ðpb.ExecutionPayloadEnvelope{
|
||||
Slot: slot,
|
||||
BuilderIndex: builderIdx,
|
||||
BeaconBlockRoot: headerRoot[:],
|
||||
Payload: payload,
|
||||
BlobKzgCommitments: blobCommitments,
|
||||
ExecutionRequests: &enginev1.ExecutionRequests{},
|
||||
}
|
||||
|
||||
if mutate != nil {
|
||||
mutate(payload, bid, envelope)
|
||||
}
|
||||
|
||||
genesisRoot := bytes.Repeat([]byte{0xAB}, 32)
|
||||
blockRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
|
||||
stateRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
|
||||
for i := range blockRoots {
|
||||
blockRoots[i] = bytes.Repeat([]byte{0x44}, 32)
|
||||
stateRoots[i] = bytes.Repeat([]byte{0x55}, 32)
|
||||
}
|
||||
randaoMixes := make([][]byte, cfg.EpochsPerHistoricalVector)
|
||||
for i := range randaoMixes {
|
||||
randaoMixes[i] = randao
|
||||
}
|
||||
|
||||
withdrawalCreds := make([]byte, 32)
|
||||
withdrawalCreds[0] = cfg.ETH1AddressWithdrawalPrefixByte
|
||||
|
||||
eth1Data := ðpb.Eth1Data{
|
||||
DepositRoot: bytes.Repeat([]byte{0x66}, 32),
|
||||
DepositCount: 0,
|
||||
BlockHash: bytes.Repeat([]byte{0x77}, 32),
|
||||
}
|
||||
|
||||
vals := []*ethpb.Validator{
|
||||
{
|
||||
PublicKey: pk,
|
||||
WithdrawalCredentials: withdrawalCreds,
|
||||
EffectiveBalance: cfg.MinActivationBalance + 1_000,
|
||||
},
|
||||
}
|
||||
balances := []uint64{cfg.MinActivationBalance + 1_000}
|
||||
|
||||
payments := make([]*ethpb.BuilderPendingPayment, cfg.SlotsPerEpoch*2)
|
||||
for i := range payments {
|
||||
payments[i] = ðpb.BuilderPendingPayment{
|
||||
Withdrawal: ðpb.BuilderPendingWithdrawal{
|
||||
FeeRecipient: make([]byte, 20),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
executionPayloadAvailability := make([]byte, cfg.SlotsPerHistoricalRoot/8)
|
||||
|
||||
builders := make([]*ethpb.Builder, builderIdx+1)
|
||||
builders[builderIdx] = ðpb.Builder{
|
||||
Pubkey: pk,
|
||||
Version: []byte{0},
|
||||
ExecutionAddress: bytes.Repeat([]byte{0x09}, 20),
|
||||
Balance: 0,
|
||||
DepositEpoch: 0,
|
||||
WithdrawableEpoch: 0,
|
||||
}
|
||||
|
||||
genesisTime := uint64(0)
|
||||
slotSeconds := cfg.SecondsPerSlot * uint64(slot)
|
||||
if payload.Timestamp > slotSeconds {
|
||||
genesisTime = payload.Timestamp - slotSeconds
|
||||
}
|
||||
|
||||
stProto := ðpb.BeaconStateGloas{
|
||||
Slot: slot,
|
||||
GenesisTime: genesisTime,
|
||||
GenesisValidatorsRoot: genesisRoot,
|
||||
Fork: ðpb.Fork{
|
||||
CurrentVersion: bytes.Repeat([]byte{0x01}, 4),
|
||||
PreviousVersion: bytes.Repeat([]byte{0x01}, 4),
|
||||
Epoch: 0,
|
||||
},
|
||||
LatestBlockHeader: header,
|
||||
BlockRoots: blockRoots,
|
||||
StateRoots: stateRoots,
|
||||
RandaoMixes: randaoMixes,
|
||||
Eth1Data: eth1Data,
|
||||
Validators: vals,
|
||||
Balances: balances,
|
||||
LatestBlockHash: payload.ParentHash,
|
||||
LatestExecutionPayloadBid: bid,
|
||||
BuilderPendingPayments: payments,
|
||||
ExecutionPayloadAvailability: executionPayloadAvailability,
|
||||
BuilderPendingWithdrawals: []*ethpb.BuilderPendingWithdrawal{},
|
||||
PayloadExpectedWithdrawals: payload.Withdrawals,
|
||||
Builders: builders,
|
||||
}
|
||||
|
||||
st, err := state_native.InitializeFromProtoGloas(stProto)
|
||||
require.NoError(t, err)
|
||||
|
||||
expected := st.Copy()
|
||||
ctx := context.Background()
|
||||
require.NoError(t, processExecutionRequests(ctx, expected, envelope.ExecutionRequests))
|
||||
require.NoError(t, expected.QueueBuilderPayment())
|
||||
require.NoError(t, expected.SetExecutionPayloadAvailability(slot, true))
|
||||
var blockHashArr [32]byte
|
||||
copy(blockHashArr[:], payload.BlockHash)
|
||||
require.NoError(t, expected.SetLatestBlockHash(blockHashArr))
|
||||
expectedRoot, err := expected.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
envelope.StateRoot = expectedRoot[:]
|
||||
|
||||
epoch := slots.ToEpoch(slot)
|
||||
domain, err := signing.Domain(st.Fork(), epoch, cfg.DomainBeaconBuilder, st.GenesisValidatorsRoot())
|
||||
require.NoError(t, err)
|
||||
signingRoot, err := signing.ComputeSigningRoot(envelope, domain)
|
||||
require.NoError(t, err)
|
||||
signature := sk.Sign(signingRoot[:]).Marshal()
|
||||
|
||||
signedProto := ðpb.SignedExecutionPayloadEnvelope{
|
||||
Message: envelope,
|
||||
Signature: signature,
|
||||
}
|
||||
signed, err := blocks.WrappedROSignedExecutionPayloadEnvelope(signedProto)
|
||||
require.NoError(t, err)
|
||||
|
||||
return payloadFixture{
|
||||
state: st,
|
||||
signed: signed,
|
||||
signedProto: signedProto,
|
||||
envelope: envelope,
|
||||
payload: payload,
|
||||
slot: slot,
|
||||
}
|
||||
}
|
||||
|
||||
func TestProcessExecutionPayload_Success(t *testing.T) {
|
||||
fixture := buildPayloadFixture(t, nil)
|
||||
require.NoError(t, ProcessExecutionPayload(t.Context(), fixture.state, fixture.signed))
|
||||
|
||||
latestHash, err := fixture.state.LatestBlockHash()
|
||||
require.NoError(t, err)
|
||||
var expectedHash [32]byte
|
||||
copy(expectedHash[:], fixture.payload.BlockHash)
|
||||
require.Equal(t, expectedHash, latestHash)
|
||||
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
paymentIndex := slotsPerEpoch + (fixture.slot % slotsPerEpoch)
|
||||
payments, err := fixture.state.BuilderPendingPayments()
|
||||
require.NoError(t, err)
|
||||
payment := payments[paymentIndex]
|
||||
require.NotNil(t, payment)
|
||||
require.Equal(t, primitives.Gwei(0), payment.Withdrawal.Amount)
|
||||
}
|
||||
|
||||
func TestProcessExecutionPayload_PrevRandaoMismatch(t *testing.T) {
|
||||
fixture := buildPayloadFixture(t, func(_ *enginev1.ExecutionPayloadDeneb, bid *ethpb.ExecutionPayloadBid, _ *ethpb.ExecutionPayloadEnvelope) {
|
||||
bid.PrevRandao = bytes.Repeat([]byte{0xFF}, 32)
|
||||
})
|
||||
|
||||
err := ProcessExecutionPayload(t.Context(), fixture.state, fixture.signed)
|
||||
require.ErrorContains(t, "prev randao", err)
|
||||
}
|
||||
|
||||
func TestQueueBuilderPayment_ZeroAmountClearsSlot(t *testing.T) {
|
||||
fixture := buildPayloadFixture(t, nil)
|
||||
|
||||
require.NoError(t, fixture.state.QueueBuilderPayment())
|
||||
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
paymentIndex := slotsPerEpoch + (fixture.slot % slotsPerEpoch)
|
||||
payments, err := fixture.state.BuilderPendingPayments()
|
||||
require.NoError(t, err)
|
||||
payment := payments[paymentIndex]
|
||||
require.NotNil(t, payment)
|
||||
require.Equal(t, primitives.Gwei(0), payment.Withdrawal.Amount)
|
||||
}
|
||||
|
||||
func TestVerifyExecutionPayloadEnvelopeSignature(t *testing.T) {
|
||||
fixture := buildPayloadFixture(t, nil)
|
||||
|
||||
t.Run("self build", func(t *testing.T) {
|
||||
proposerSk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
proposerPk := proposerSk.PublicKey().Marshal()
|
||||
|
||||
stPb, ok := fixture.state.ToProtoUnsafe().(*ethpb.BeaconStateGloas)
|
||||
require.Equal(t, true, ok)
|
||||
stPb = proto.Clone(stPb).(*ethpb.BeaconStateGloas)
|
||||
stPb.Validators[0].PublicKey = proposerPk
|
||||
st, err := state_native.InitializeFromProtoUnsafeGloas(stPb)
|
||||
require.NoError(t, err)
|
||||
|
||||
msg := proto.Clone(fixture.signedProto.Message).(*ethpb.ExecutionPayloadEnvelope)
|
||||
msg.BuilderIndex = params.BeaconConfig().BuilderIndexSelfBuild
|
||||
msg.BlobKzgCommitments = [][]byte{}
|
||||
|
||||
epoch := slots.ToEpoch(msg.Slot)
|
||||
domain, err := signing.Domain(st.Fork(), epoch, params.BeaconConfig().DomainBeaconBuilder, st.GenesisValidatorsRoot())
|
||||
require.NoError(t, err)
|
||||
signingRoot, err := signing.ComputeSigningRoot(msg, domain)
|
||||
require.NoError(t, err)
|
||||
signature := proposerSk.Sign(signingRoot[:]).Marshal()
|
||||
|
||||
signedProto := ðpb.SignedExecutionPayloadEnvelope{
|
||||
Message: msg,
|
||||
Signature: signature,
|
||||
}
|
||||
signed, err := blocks.WrappedROSignedExecutionPayloadEnvelope(signedProto)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, verifyExecutionPayloadEnvelopeSignature(st, signed))
|
||||
})
|
||||
|
||||
t.Run("builder", func(t *testing.T) {
|
||||
signed, err := blocks.WrappedROSignedExecutionPayloadEnvelope(fixture.signedProto)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, verifyExecutionPayloadEnvelopeSignature(fixture.state, signed))
|
||||
})
|
||||
|
||||
t.Run("invalid signature", func(t *testing.T) {
|
||||
t.Run("self build", func(t *testing.T) {
|
||||
proposerSk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
proposerPk := proposerSk.PublicKey().Marshal()
|
||||
|
||||
stPb, ok := fixture.state.ToProtoUnsafe().(*ethpb.BeaconStateGloas)
|
||||
require.Equal(t, true, ok)
|
||||
stPb = proto.Clone(stPb).(*ethpb.BeaconStateGloas)
|
||||
stPb.Validators[0].PublicKey = proposerPk
|
||||
st, err := state_native.InitializeFromProtoUnsafeGloas(stPb)
|
||||
require.NoError(t, err)
|
||||
|
||||
msg := proto.Clone(fixture.signedProto.Message).(*ethpb.ExecutionPayloadEnvelope)
|
||||
msg.BuilderIndex = params.BeaconConfig().BuilderIndexSelfBuild
|
||||
if msg.BlobKzgCommitments == nil {
|
||||
msg.BlobKzgCommitments = [][]byte{}
|
||||
}
|
||||
|
||||
signedProto := ðpb.SignedExecutionPayloadEnvelope{
|
||||
Message: msg,
|
||||
Signature: bytes.Repeat([]byte{0xFF}, 96),
|
||||
}
|
||||
badSigned, err := blocks.WrappedROSignedExecutionPayloadEnvelope(signedProto)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = verifyExecutionPayloadEnvelopeSignature(st, badSigned)
|
||||
require.ErrorContains(t, "invalid signature format", err)
|
||||
})
|
||||
|
||||
t.Run("builder", func(t *testing.T) {
|
||||
signedProto := ðpb.SignedExecutionPayloadEnvelope{
|
||||
Message: fixture.signedProto.Message,
|
||||
Signature: bytes.Repeat([]byte{0xFF}, 96),
|
||||
}
|
||||
badSigned, err := blocks.WrappedROSignedExecutionPayloadEnvelope(signedProto)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = verifyExecutionPayloadEnvelopeSignature(fixture.state, badSigned)
|
||||
require.ErrorContains(t, "invalid signature format", err)
|
||||
})
|
||||
})
|
||||
}
|
||||
@@ -86,7 +86,6 @@ func TestGetSpec(t *testing.T) {
|
||||
config.GloasForkEpoch = 110
|
||||
config.BLSWithdrawalPrefixByte = byte('b')
|
||||
config.ETH1AddressWithdrawalPrefixByte = byte('c')
|
||||
config.BuilderWithdrawalPrefixByte = byte('e')
|
||||
config.GenesisDelay = 24
|
||||
config.SecondsPerSlot = 25
|
||||
config.SlotDurationMilliseconds = 120
|
||||
|
||||
@@ -113,12 +113,6 @@ func (s *Server) getBeaconStateV2(ctx context.Context, w http.ResponseWriter, id
|
||||
httputil.HandleError(w, errMsgStateFromConsensus+": "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
case version.Gloas:
|
||||
respSt, err = structs.BeaconStateGloasFromConsensus(st)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, errMsgStateFromConsensus+": "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
default:
|
||||
httputil.HandleError(w, "Unsupported state version", http.StatusInternalServerError)
|
||||
return
|
||||
|
||||
@@ -232,35 +232,6 @@ func TestGetBeaconStateV2(t *testing.T) {
|
||||
assert.Equal(t, "123", st.Slot)
|
||||
assert.Equal(t, int(params.BeaconConfig().MinSeedLookahead+1)*int(params.BeaconConfig().SlotsPerEpoch), len(st.ProposerLookahead))
|
||||
})
|
||||
t.Run("Gloas", func(t *testing.T) {
|
||||
fakeState, err := util.NewBeaconStateGloas()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fakeState.SetSlot(123))
|
||||
chainService := &blockchainmock.ChainService{}
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: fakeState,
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
FinalizationFetcher: chainService,
|
||||
}
|
||||
|
||||
request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v2/debug/beacon/states/{state_id}", nil)
|
||||
request.SetPathValue("state_id", "head")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetBeaconStateV2(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &structs.GetBeaconStateV2Response{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
assert.Equal(t, version.String(version.Gloas), resp.Version)
|
||||
st := &structs.BeaconStateGloas{}
|
||||
require.NoError(t, json.Unmarshal(resp.Data, st))
|
||||
assert.Equal(t, "123", st.Slot)
|
||||
assert.Equal(t, int(params.BeaconConfig().MinSeedLookahead+1)*int(params.BeaconConfig().SlotsPerEpoch), len(st.ProposerLookahead))
|
||||
})
|
||||
t.Run("execution optimistic", func(t *testing.T) {
|
||||
parentRoot := [32]byte{'a'}
|
||||
blk := util.NewBeaconBlock()
|
||||
@@ -456,78 +427,6 @@ func TestGetBeaconStateSSZV2(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, sszExpected, writer.Body.Bytes())
|
||||
})
|
||||
t.Run("Electra", func(t *testing.T) {
|
||||
fakeState, err := util.NewBeaconStateElectra()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fakeState.SetSlot(123))
|
||||
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: fakeState,
|
||||
},
|
||||
}
|
||||
|
||||
request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v2/debug/beacon/states/{state_id}", nil)
|
||||
request.SetPathValue("state_id", "head")
|
||||
request.Header.Set("Accept", api.OctetStreamMediaType)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetBeaconStateV2(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code)
|
||||
assert.Equal(t, version.String(version.Electra), writer.Header().Get(api.VersionHeader))
|
||||
sszExpected, err := fakeState.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, sszExpected, writer.Body.Bytes())
|
||||
})
|
||||
t.Run("Fulu", func(t *testing.T) {
|
||||
fakeState, err := util.NewBeaconStateFulu()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fakeState.SetSlot(123))
|
||||
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: fakeState,
|
||||
},
|
||||
}
|
||||
|
||||
request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v2/debug/beacon/states/{state_id}", nil)
|
||||
request.SetPathValue("state_id", "head")
|
||||
request.Header.Set("Accept", api.OctetStreamMediaType)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetBeaconStateV2(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code)
|
||||
assert.Equal(t, version.String(version.Fulu), writer.Header().Get(api.VersionHeader))
|
||||
sszExpected, err := fakeState.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, sszExpected, writer.Body.Bytes())
|
||||
})
|
||||
t.Run("Gloas", func(t *testing.T) {
|
||||
fakeState, err := util.NewBeaconStateGloas()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fakeState.SetSlot(123))
|
||||
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: fakeState,
|
||||
},
|
||||
}
|
||||
|
||||
request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v2/debug/beacon/states/{state_id}", nil)
|
||||
request.SetPathValue("state_id", "head")
|
||||
request.Header.Set("Accept", api.OctetStreamMediaType)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetBeaconStateV2(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code)
|
||||
assert.Equal(t, version.String(version.Gloas), writer.Header().Get(api.VersionHeader))
|
||||
sszExpected, err := fakeState.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, sszExpected, writer.Body.Bytes())
|
||||
})
|
||||
}
|
||||
|
||||
func TestGetForkChoiceHeadsV2(t *testing.T) {
|
||||
|
||||
@@ -48,6 +48,7 @@ go_test(
|
||||
"@com_github_ethereum_go_ethereum//crypto:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
"@org_golang_google_grpc//:go_default_library",
|
||||
"@org_golang_google_grpc//metadata:go_default_library",
|
||||
"@org_golang_google_grpc//reflection:go_default_library",
|
||||
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
|
||||
"@org_golang_google_protobuf//types/known/timestamppb:go_default_library",
|
||||
|
||||
@@ -35,18 +35,19 @@ import (
|
||||
// providing RPC endpoints for verifying a beacon node's sync status, genesis and
|
||||
// version information, and services the node implements and runs.
|
||||
type Server struct {
|
||||
LogsStreamer logs.Streamer
|
||||
StreamLogsBufferSize int
|
||||
SyncChecker sync.Checker
|
||||
Server *grpc.Server
|
||||
BeaconDB db.ReadOnlyDatabase
|
||||
PeersFetcher p2p.PeersProvider
|
||||
PeerManager p2p.PeerManager
|
||||
GenesisTimeFetcher blockchain.TimeFetcher
|
||||
GenesisFetcher blockchain.GenesisFetcher
|
||||
POWChainInfoFetcher execution.ChainInfoFetcher
|
||||
BeaconMonitoringHost string
|
||||
BeaconMonitoringPort int
|
||||
LogsStreamer logs.Streamer
|
||||
StreamLogsBufferSize int
|
||||
SyncChecker sync.Checker
|
||||
Server *grpc.Server
|
||||
BeaconDB db.ReadOnlyDatabase
|
||||
PeersFetcher p2p.PeersProvider
|
||||
PeerManager p2p.PeerManager
|
||||
GenesisTimeFetcher blockchain.TimeFetcher
|
||||
GenesisFetcher blockchain.GenesisFetcher
|
||||
POWChainInfoFetcher execution.ChainInfoFetcher
|
||||
BeaconMonitoringHost string
|
||||
BeaconMonitoringPort int
|
||||
OptimisticModeFetcher blockchain.OptimisticModeFetcher
|
||||
}
|
||||
|
||||
// Deprecated: The gRPC API will remain the default and fully supported through v8 (expected in 2026) but will eventually be removed in favor of the REST API.
|
||||
@@ -61,21 +62,28 @@ func (ns *Server) GetHealth(ctx context.Context, request *ethpb.HealthRequest) (
|
||||
ctx, cancel := context.WithTimeout(ctx, timeoutDuration)
|
||||
defer cancel() // Important to avoid a context leak
|
||||
|
||||
if ns.SyncChecker.Synced() {
|
||||
// Check optimistic status - validators should not participate when optimistic
|
||||
isOptimistic, err := ns.OptimisticModeFetcher.IsOptimistic(ctx)
|
||||
if err != nil {
|
||||
return &empty.Empty{}, status.Errorf(codes.Internal, "Could not check optimistic status: %v", err)
|
||||
}
|
||||
|
||||
if ns.SyncChecker.Synced() && !isOptimistic {
|
||||
return &empty.Empty{}, nil
|
||||
}
|
||||
if ns.SyncChecker.Syncing() || ns.SyncChecker.Initialized() {
|
||||
if request.SyncingStatus != 0 {
|
||||
// override the 200 success with the provided request status
|
||||
if err := grpc.SetHeader(ctx, metadata.Pairs("x-http-code", strconv.FormatUint(request.SyncingStatus, 10))); err != nil {
|
||||
return &empty.Empty{}, status.Errorf(codes.Internal, "Could not set custom success code header: %v", err)
|
||||
}
|
||||
return &empty.Empty{}, nil
|
||||
}
|
||||
// Set header for REST API clients (via gRPC-gateway)
|
||||
if err := grpc.SetHeader(ctx, metadata.Pairs("x-http-code", strconv.FormatUint(http.StatusPartialContent, 10))); err != nil {
|
||||
return &empty.Empty{}, status.Errorf(codes.Internal, "Could not set custom success code header: %v", err)
|
||||
return &empty.Empty{}, status.Errorf(codes.Internal, "Could not set status code header: %v", err)
|
||||
}
|
||||
return &empty.Empty{}, nil
|
||||
return &empty.Empty{}, status.Error(codes.Unavailable, "node is syncing")
|
||||
}
|
||||
if isOptimistic {
|
||||
// Set header for REST API clients (via gRPC-gateway)
|
||||
if err := grpc.SetHeader(ctx, metadata.Pairs("x-http-code", strconv.FormatUint(http.StatusPartialContent, 10))); err != nil {
|
||||
return &empty.Empty{}, status.Errorf(codes.Internal, "Could not set status code header: %v", err)
|
||||
}
|
||||
return &empty.Empty{}, status.Error(codes.Unavailable, "node is optimistic")
|
||||
}
|
||||
return &empty.Empty{}, status.Errorf(codes.Unavailable, "service unavailable")
|
||||
}
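After this change, health depends on both sync and optimistic status. The following sketch summarizes the resulting decision; the function name and the plain int/string return values are illustrative, the optional SyncingStatus override is left out, and the 503 for the last case is only the gateway's default mapping for an Unavailable error:

// healthStatus sketches the decision GetHealth makes after this change: the
// node is healthy only when it is both synced and not optimistic; a syncing or
// optimistic node reports 206 Partial Content to REST clients together with an
// Unavailable gRPC error; anything else is plain "service unavailable".
func healthStatus(synced, syncing, initialized, optimistic bool) (httpCode int, problem string) {
	switch {
	case synced && !optimistic:
		return 200, ""
	case syncing || initialized:
		return 206, "node is syncing"
	case optimistic:
		return 206, "node is optimistic"
	default:
		return 503, "service unavailable" // no explicit x-http-code header is set here
	}
}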
@@ -2,6 +2,7 @@ package node
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"maps"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -21,6 +22,7 @@ import (
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/reflection"
|
||||
"google.golang.org/protobuf/types/known/emptypb"
|
||||
"google.golang.org/protobuf/types/known/timestamppb"
|
||||
@@ -187,32 +189,71 @@ func TestNodeServer_GetETH1ConnectionStatus(t *testing.T) {
|
||||
assert.Equal(t, errStr, res.CurrentConnectionError)
|
||||
}
|
||||
|
||||
// mockServerTransportStream implements grpc.ServerTransportStream for testing
|
||||
type mockServerTransportStream struct {
|
||||
headers map[string][]string
|
||||
}
|
||||
|
||||
func (m *mockServerTransportStream) Method() string { return "" }
|
||||
func (m *mockServerTransportStream) SetHeader(md metadata.MD) error {
|
||||
maps.Copy(m.headers, md)
|
||||
return nil
|
||||
}
|
||||
func (m *mockServerTransportStream) SendHeader(metadata.MD) error { return nil }
|
||||
func (m *mockServerTransportStream) SetTrailer(metadata.MD) error { return nil }
|
|
||||
tests := []struct {
|
||||
name string
|
||||
input *mockSync.Sync
|
||||
customStatus uint64
|
||||
isOptimistic bool
|
||||
wantedErr string
|
||||
}{
|
||||
{
|
||||
name: "happy path",
|
||||
input: &mockSync.Sync{IsSyncing: false, IsSynced: true},
|
||||
name: "happy path - synced and not optimistic",
|
||||
input: &mockSync.Sync{IsSyncing: false, IsSynced: true},
|
||||
isOptimistic: false,
|
||||
},
|
||||
{
|
||||
name: "syncing",
|
||||
input: &mockSync.Sync{IsSyncing: false},
|
||||
wantedErr: "service unavailable",
|
||||
name: "returns error when not synced and not syncing",
|
||||
input: &mockSync.Sync{IsSyncing: false, IsSynced: false},
|
||||
isOptimistic: false,
|
||||
wantedErr: "service unavailable",
|
||||
},
|
||||
{
|
||||
name: "returns error when syncing",
|
||||
input: &mockSync.Sync{IsSyncing: true, IsSynced: false},
|
||||
isOptimistic: false,
|
||||
wantedErr: "node is syncing",
|
||||
},
|
||||
{
|
||||
name: "returns error when synced but optimistic",
|
||||
input: &mockSync.Sync{IsSyncing: false, IsSynced: true},
|
||||
isOptimistic: true,
|
||||
wantedErr: "node is optimistic",
|
||||
},
|
||||
{
|
||||
name: "returns error when syncing and optimistic",
|
||||
input: &mockSync.Sync{IsSyncing: true, IsSynced: false},
|
||||
isOptimistic: true,
|
||||
wantedErr: "node is syncing",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
server := grpc.NewServer()
|
||||
ns := &Server{
|
||||
SyncChecker: tt.input,
|
||||
SyncChecker: tt.input,
|
||||
OptimisticModeFetcher: &mock.ChainService{Optimistic: tt.isOptimistic},
|
||||
}
|
||||
ethpb.RegisterNodeServer(server, ns)
|
||||
reflection.Register(server)
|
||||
_, err := ns.GetHealth(t.Context(), ðpb.HealthRequest{SyncingStatus: tt.customStatus})
|
||||
|
||||
// Create context with mock transport stream so grpc.SetHeader works
|
||||
stream := &mockServerTransportStream{headers: make(map[string][]string)}
|
||||
ctx := grpc.NewContextWithServerTransportStream(t.Context(), stream)
|
||||
|
||||
_, err := ns.GetHealth(ctx, ðpb.HealthRequest{})
|
||||
if tt.wantedErr == "" {
|
||||
require.NoError(t, err)
|
||||
return
|
||||
|
||||
@@ -259,18 +259,19 @@ func NewService(ctx context.Context, cfg *Config) *Service {
|
||||
}
|
||||
s.validatorServer = validatorServer
|
||||
nodeServer := &nodev1alpha1.Server{
|
||||
LogsStreamer: logs.NewStreamServer(),
|
||||
StreamLogsBufferSize: 1000, // Enough to handle bursts of beacon node logs for gRPC streaming.
|
||||
BeaconDB: s.cfg.BeaconDB,
|
||||
Server: s.grpcServer,
|
||||
SyncChecker: s.cfg.SyncService,
|
||||
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
|
||||
PeersFetcher: s.cfg.PeersFetcher,
|
||||
PeerManager: s.cfg.PeerManager,
|
||||
GenesisFetcher: s.cfg.GenesisFetcher,
|
||||
POWChainInfoFetcher: s.cfg.ExecutionChainInfoFetcher,
|
||||
BeaconMonitoringHost: s.cfg.BeaconMonitoringHost,
|
||||
BeaconMonitoringPort: s.cfg.BeaconMonitoringPort,
|
||||
LogsStreamer: logs.NewStreamServer(),
|
||||
StreamLogsBufferSize: 1000, // Enough to handle bursts of beacon node logs for gRPC streaming.
|
||||
BeaconDB: s.cfg.BeaconDB,
|
||||
Server: s.grpcServer,
|
||||
SyncChecker: s.cfg.SyncService,
|
||||
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
|
||||
PeersFetcher: s.cfg.PeersFetcher,
|
||||
PeerManager: s.cfg.PeerManager,
|
||||
GenesisFetcher: s.cfg.GenesisFetcher,
|
||||
POWChainInfoFetcher: s.cfg.ExecutionChainInfoFetcher,
|
||||
BeaconMonitoringHost: s.cfg.BeaconMonitoringHost,
|
||||
BeaconMonitoringPort: s.cfg.BeaconMonitoringPort,
|
||||
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
|
||||
}
|
||||
beaconChainServer := &beaconv1alpha1.Server{
|
||||
Ctx: s.ctx,
|
||||
|
||||
@@ -1,56 +1,24 @@
|
||||
package state
|
||||
|
||||
import (
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
type writeOnlyGloasFields interface {
|
||||
// Bids.
|
||||
SetExecutionPayloadBid(h interfaces.ROExecutionPayloadBid) error
|
||||
|
||||
// Builder pending payments / withdrawals.
|
||||
SetBuilderPendingPayment(index primitives.Slot, payment *ethpb.BuilderPendingPayment) error
|
||||
ClearBuilderPendingPayment(index primitives.Slot) error
|
||||
QueueBuilderPayment() error
|
||||
RotateBuilderPendingPayments() error
|
||||
AppendBuilderPendingWithdrawals([]*ethpb.BuilderPendingWithdrawal) error
|
||||
|
||||
// Execution payload availability.
|
||||
UpdateExecutionPayloadAvailabilityAtIndex(idx uint64, val byte) error
|
||||
|
||||
// Misc.
|
||||
SetLatestBlockHash(hash [32]byte) error
|
||||
SetExecutionPayloadAvailability(index primitives.Slot, available bool) error
|
||||
|
||||
// Builders.
|
||||
IncreaseBuilderBalance(index primitives.BuilderIndex, amount uint64) error
|
||||
AddBuilderFromDeposit(pubkey [fieldparams.BLSPubkeyLength]byte, withdrawalCredentials [fieldparams.RootLength]byte, amount uint64) error
|
||||
}
|
||||
|
||||
type readOnlyGloasFields interface {
|
||||
// Bids.
|
||||
LatestExecutionPayloadBid() (interfaces.ROExecutionPayloadBid, error)
|
||||
|
||||
// Builder pending payments / withdrawals.
|
||||
BuilderPendingPayments() ([]*ethpb.BuilderPendingPayment, error)
|
||||
WithdrawalsMatchPayloadExpected(withdrawals []*enginev1.Withdrawal) (bool, error)
|
||||
BuilderPendingWithdrawals() ([]*ethpb.BuilderPendingWithdrawal, error)
|
||||
PayloadExpectedWithdrawals() ([]*enginev1.Withdrawal, error)
|
||||
|
||||
// Misc.
|
||||
LatestBlockHash() ([32]byte, error)
|
||||
ExecutionPayloadAvailability() ([]byte, error)
|
||||
|
||||
// Builders.
|
||||
Builder(index primitives.BuilderIndex) (*ethpb.Builder, error)
|
||||
Builders() ([]*ethpb.Builder, error)
|
||||
BuilderPubkey(primitives.BuilderIndex) ([48]byte, error)
|
||||
BuilderIndexByPubkey(pubkey [fieldparams.BLSPubkeyLength]byte) (primitives.BuilderIndex, bool)
|
||||
IsActiveBuilder(primitives.BuilderIndex) (bool, error)
|
||||
CanBuilderCoverBid(primitives.BuilderIndex, primitives.Gwei) (bool, error)
|
||||
NextWithdrawalBuilderIndex() (primitives.BuilderIndex, error)
|
||||
LatestBlockHash() ([32]byte, error)
|
||||
BuilderPendingPayments() ([]*ethpb.BuilderPendingPayment, error)
|
||||
}
|
||||
|
||||
@@ -1,16 +1,11 @@
|
||||
package state_native
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/ssz"
|
||||
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
)
|
||||
@@ -152,138 +147,3 @@ func (b *BeaconState) BuilderPendingPayments() ([]*ethpb.BuilderPendingPayment,
|
||||
|
||||
return b.builderPendingPaymentsVal(), nil
|
||||
}
|
||||
|
||||
// LatestExecutionPayloadBid returns the cached latest execution payload bid for Gloas.
|
||||
func (b *BeaconState) LatestExecutionPayloadBid() (interfaces.ROExecutionPayloadBid, error) {
|
||||
b.lock.RLock()
|
||||
defer b.lock.RUnlock()
|
||||
|
||||
if b.latestExecutionPayloadBid == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return blocks.WrappedROExecutionPayloadBid(b.latestExecutionPayloadBid.Copy())
|
||||
}
|
||||
|
||||
// WithdrawalsMatchPayloadExpected returns true if the given withdrawals root matches the state's
|
||||
// payload_expected_withdrawals root.
|
||||
func (b *BeaconState) WithdrawalsMatchPayloadExpected(withdrawals []*enginev1.Withdrawal) (bool, error) {
|
||||
if b.version < version.Gloas {
|
||||
return false, errNotSupported("WithdrawalsMatchPayloadExpected", b.version)
|
||||
}
|
||||
|
||||
b.lock.RLock()
|
||||
defer b.lock.RUnlock()
|
||||
|
||||
cfg := params.BeaconConfig()
|
||||
|
||||
withdrawalsRoot, err := ssz.WithdrawalSliceRoot(withdrawals, cfg.MaxWithdrawalsPerPayload)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("could not compute withdrawals root: %w", err)
|
||||
}
|
||||
|
||||
expected := b.payloadExpectedWithdrawals
|
||||
if expected == nil {
|
||||
expected = []*enginev1.Withdrawal{}
|
||||
}
|
||||
expectedRoot, err := ssz.WithdrawalSliceRoot(expected, cfg.MaxWithdrawalsPerPayload)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("could not compute expected withdrawals root: %w", err)
|
||||
}
|
||||
|
||||
return withdrawalsRoot == expectedRoot, nil
|
||||
}
|
||||
|
||||
// Builder returns the builder at the given index.
|
||||
func (b *BeaconState) Builder(index primitives.BuilderIndex) (*ethpb.Builder, error) {
|
||||
b.lock.RLock()
|
||||
defer b.lock.RUnlock()
|
||||
|
||||
if b.builders == nil {
|
||||
return nil, nil
|
||||
}
|
||||
if uint64(index) >= uint64(len(b.builders)) {
|
||||
return nil, fmt.Errorf("builder index %d out of bounds", index)
|
||||
}
|
||||
if b.builders[index] == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return ethpb.CopyBuilder(b.builders[index]), nil
|
||||
}
|
||||
|
||||
// BuilderIndexByPubkey returns the builder index for the given pubkey, if present.
|
||||
func (b *BeaconState) BuilderIndexByPubkey(pubkey [fieldparams.BLSPubkeyLength]byte) (primitives.BuilderIndex, bool) {
|
||||
b.lock.RLock()
|
||||
defer b.lock.RUnlock()
|
||||
|
||||
for i, builder := range b.builders {
|
||||
if builder == nil {
|
||||
continue
|
||||
}
|
||||
if bytes.Equal(builder.Pubkey, pubkey[:]) {
|
||||
return primitives.BuilderIndex(i), true
|
||||
}
|
||||
}
|
||||
return 0, false
|
||||
}
|
||||
|
||||
// BuilderPendingWithdrawals returns a copy of the builder pending withdrawals.
|
||||
func (b *BeaconState) BuilderPendingWithdrawals() ([]*ethpb.BuilderPendingWithdrawal, error) {
|
||||
if b.version < version.Gloas {
|
||||
return nil, errNotSupported("BuilderPendingWithdrawals", b.version)
|
||||
}
|
||||
|
||||
b.lock.RLock()
|
||||
defer b.lock.RUnlock()
|
||||
|
||||
return b.builderPendingWithdrawalsVal(), nil
|
||||
}
|
||||
|
||||
// Builders returns a copy of the builders registry.
|
||||
func (b *BeaconState) Builders() ([]*ethpb.Builder, error) {
|
||||
if b.version < version.Gloas {
|
||||
return nil, errNotSupported("Builders", b.version)
|
||||
}
|
||||
|
||||
b.lock.RLock()
|
||||
defer b.lock.RUnlock()
|
||||
|
||||
return b.buildersVal(), nil
|
||||
}
|
||||
|
||||
// NextWithdrawalBuilderIndex returns the next withdrawal builder index.
|
||||
func (b *BeaconState) NextWithdrawalBuilderIndex() (primitives.BuilderIndex, error) {
|
||||
if b.version < version.Gloas {
|
||||
return 0, errNotSupported("NextWithdrawalBuilderIndex", b.version)
|
||||
}
|
||||
|
||||
b.lock.RLock()
|
||||
defer b.lock.RUnlock()
|
||||
|
||||
return b.nextWithdrawalBuilderIndex, nil
|
||||
}
|
||||
|
||||
// ExecutionPayloadAvailability returns a copy of the execution payload availability.
|
||||
func (b *BeaconState) ExecutionPayloadAvailability() ([]byte, error) {
|
||||
if b.version < version.Gloas {
|
||||
return nil, errNotSupported("ExecutionPayloadAvailability", b.version)
|
||||
}
|
||||
|
||||
b.lock.RLock()
|
||||
defer b.lock.RUnlock()
|
||||
|
||||
return b.executionPayloadAvailabilityVal(), nil
|
||||
}
|
||||
|
||||
// PayloadExpectedWithdrawals returns a copy of the payload expected withdrawals.
|
||||
func (b *BeaconState) PayloadExpectedWithdrawals() ([]*enginev1.Withdrawal, error) {
|
||||
if b.version < version.Gloas {
|
||||
return nil, errNotSupported("PayloadExpectedWithdrawals", b.version)
|
||||
}
|
||||
|
||||
b.lock.RLock()
|
||||
defer b.lock.RUnlock()
|
||||
|
||||
return b.payloadExpectedWithdrawalsVal(), nil
|
||||
}
|
||||
|
||||
@@ -5,10 +5,8 @@ import (
|
||||
"testing"
|
||||
|
||||
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/util"
|
||||
@@ -168,279 +166,3 @@ func TestBuilderPendingPayments_UnsupportedVersion(t *testing.T) {
|
||||
_, err = st.BuilderPendingPayments()
|
||||
require.ErrorContains(t, "BuilderPendingPayments", err)
|
||||
}
|
||||
|
||||
func TestBuilderPendingWithdrawals(t *testing.T) {
|
||||
t.Run("returns error before gloas", func(t *testing.T) {
|
||||
stIface, err := state_native.InitializeFromProtoElectra(ðpb.BeaconStateElectra{})
|
||||
require.NoError(t, err)
|
||||
st := stIface.(*state_native.BeaconState)
|
||||
|
||||
_, err = st.BuilderPendingWithdrawals()
|
||||
require.ErrorContains(t, "BuilderPendingWithdrawals", err)
|
||||
})
|
||||
|
||||
t.Run("returns copy", func(t *testing.T) {
|
||||
original := []*ethpb.BuilderPendingWithdrawal{
|
||||
{Amount: 10, BuilderIndex: 1},
|
||||
}
|
||||
st, err := state_native.InitializeFromProtoGloas(ðpb.BeaconStateGloas{
|
||||
BuilderPendingWithdrawals: original,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
got1, err := st.BuilderPendingWithdrawals()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, original, got1)
|
||||
|
||||
got1[0].Amount = 99
|
||||
got2, err := st.BuilderPendingWithdrawals()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, original, got2)
|
||||
})
|
||||
}
|
||||
|
||||
func TestBuildersGetter(t *testing.T) {
|
||||
t.Run("returns error before gloas", func(t *testing.T) {
|
||||
stIface, err := state_native.InitializeFromProtoElectra(ðpb.BeaconStateElectra{})
|
||||
require.NoError(t, err)
|
||||
st := stIface.(*state_native.BeaconState)
|
||||
|
||||
_, err = st.Builders()
|
||||
require.ErrorContains(t, "Builders", err)
|
||||
})
|
||||
|
||||
t.Run("returns copy", func(t *testing.T) {
|
||||
pubkey := bytes.Repeat([]byte{0xAB}, fieldparams.BLSPubkeyLength)
|
||||
buildr := ðpb.Builder{
|
||||
Pubkey: pubkey,
|
||||
Balance: 42,
|
||||
DepositEpoch: 3,
|
||||
WithdrawableEpoch: 4,
|
||||
}
|
||||
st, err := state_native.InitializeFromProtoGloas(ðpb.BeaconStateGloas{
|
||||
Builders: []*ethpb.Builder{buildr},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
got1, err := st.Builders()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, buildr, got1[0])
|
||||
|
||||
got1[0].Pubkey[0] = 0xFF
|
||||
got2, err := st.Builders()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, buildr, got2[0])
|
||||
})
|
||||
}
|
||||
|
||||
func TestNextWithdrawalBuilderIndex(t *testing.T) {
|
||||
t.Run("returns error before gloas", func(t *testing.T) {
|
||||
stIface, err := state_native.InitializeFromProtoElectra(ðpb.BeaconStateElectra{})
|
||||
require.NoError(t, err)
|
||||
st := stIface.(*state_native.BeaconState)
|
||||
|
||||
_, err = st.NextWithdrawalBuilderIndex()
|
||||
require.ErrorContains(t, "NextWithdrawalBuilderIndex", err)
|
||||
})
|
||||
|
||||
t.Run("returns configured value", func(t *testing.T) {
|
||||
st, err := state_native.InitializeFromProtoGloas(ðpb.BeaconStateGloas{
|
||||
NextWithdrawalBuilderIndex: 2,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
got, err := st.NextWithdrawalBuilderIndex()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, primitives.BuilderIndex(2), got)
|
||||
})
|
||||
}
|
||||
|
||||
func TestExecutionPayloadAvailability(t *testing.T) {
|
||||
t.Run("returns error before gloas", func(t *testing.T) {
|
||||
stIface, err := state_native.InitializeFromProtoElectra(ðpb.BeaconStateElectra{})
|
||||
require.NoError(t, err)
|
||||
st := stIface.(*state_native.BeaconState)
|
||||
|
||||
_, err = st.ExecutionPayloadAvailability()
|
||||
require.ErrorContains(t, "ExecutionPayloadAvailability", err)
|
||||
})
|
||||
|
||||
t.Run("returns copy", func(t *testing.T) {
|
||||
availability := []byte{0x01, 0x00, 0x01}
|
||||
st, err := state_native.InitializeFromProtoGloas(ðpb.BeaconStateGloas{
|
||||
ExecutionPayloadAvailability: availability,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
got1, err := st.ExecutionPayloadAvailability()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, availability, got1)
|
||||
|
||||
got1[0] = 0xFF
|
||||
got2, err := st.ExecutionPayloadAvailability()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, availability, got2)
|
||||
})
|
||||
}
|
||||
|
||||
func TestPayloadExpectedWithdrawals(t *testing.T) {
|
||||
t.Run("returns error before gloas", func(t *testing.T) {
|
||||
stIface, err := state_native.InitializeFromProtoElectra(ðpb.BeaconStateElectra{})
|
||||
require.NoError(t, err)
|
||||
st := stIface.(*state_native.BeaconState)
|
||||
|
||||
_, err = st.PayloadExpectedWithdrawals()
|
||||
require.ErrorContains(t, "PayloadExpectedWithdrawals", err)
|
||||
})
|
||||
|
||||
t.Run("returns copy", func(t *testing.T) {
|
||||
original := enginev1.Withdrawal{
|
||||
Index: 1,
|
||||
ValidatorIndex: 2,
|
||||
Address: bytes.Repeat([]byte{0x01}, 20),
|
||||
Amount: 10,
|
||||
}
|
||||
st, err := state_native.InitializeFromProtoGloas(ðpb.BeaconStateGloas{
|
||||
PayloadExpectedWithdrawals: []*enginev1.Withdrawal{&original},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
got1, err := st.PayloadExpectedWithdrawals()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, &original, got1[0])
|
||||
|
||||
got1[0].Amount = 99
|
||||
got2, err := st.PayloadExpectedWithdrawals()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, &original, got2[0])
|
||||
})
|
||||
}
|
||||
|
||||
func TestWithdrawalsMatchPayloadExpected(t *testing.T) {
|
||||
t.Run("returns error before gloas", func(t *testing.T) {
|
||||
stIface, _ := util.DeterministicGenesisState(t, 1)
|
||||
native, ok := stIface.(*state_native.BeaconState)
|
||||
require.Equal(t, true, ok)
|
||||
|
||||
_, err := native.WithdrawalsMatchPayloadExpected(nil)
|
||||
require.ErrorContains(t, "is not supported", err)
|
||||
})
|
||||
|
||||
t.Run("returns true when roots match", func(t *testing.T) {
|
||||
withdrawals := []*enginev1.Withdrawal{
|
||||
{Index: 0, ValidatorIndex: 1, Address: bytes.Repeat([]byte{0x01}, 20), Amount: 10},
|
||||
}
|
||||
st, err := state_native.InitializeFromProtoGloas(ðpb.BeaconStateGloas{
|
||||
PayloadExpectedWithdrawals: withdrawals,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
ok, err := st.WithdrawalsMatchPayloadExpected(withdrawals)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, ok)
|
||||
})
|
||||
|
||||
t.Run("returns false when roots do not match", func(t *testing.T) {
|
||||
expected := []*enginev1.Withdrawal{
|
||||
{Index: 0, ValidatorIndex: 1, Address: bytes.Repeat([]byte{0x01}, 20), Amount: 10},
|
||||
}
|
||||
actual := []*enginev1.Withdrawal{
|
||||
{Index: 0, ValidatorIndex: 1, Address: bytes.Repeat([]byte{0x01}, 20), Amount: 11},
|
||||
}
|
||||
|
||||
st, err := state_native.InitializeFromProtoGloas(ðpb.BeaconStateGloas{
|
||||
PayloadExpectedWithdrawals: expected,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
ok, err := st.WithdrawalsMatchPayloadExpected(actual)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, ok)
|
||||
})
|
||||
}
|
||||
|
||||
func TestBuilder(t *testing.T) {
|
||||
t.Run("nil builders returns nil", func(t *testing.T) {
|
||||
st, err := state_native.InitializeFromProtoGloas(ðpb.BeaconStateGloas{
|
||||
Builders: nil,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
got, err := st.Builder(0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, (*ethpb.Builder)(nil), got)
|
||||
})
|
||||
|
||||
t.Run("out of bounds returns error", func(t *testing.T) {
|
||||
st, err := state_native.InitializeFromProtoGloas(ðpb.BeaconStateGloas{
|
||||
Builders: []*ethpb.Builder{{}},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = st.Builder(1)
|
||||
require.ErrorContains(t, "out of bounds", err)
|
||||
})
|
||||
|
||||
t.Run("returns copy", func(t *testing.T) {
|
||||
pubkey := bytes.Repeat([]byte{0xAA}, fieldparams.BLSPubkeyLength)
|
||||
st, err := state_native.InitializeFromProtoGloas(ðpb.BeaconStateGloas{
|
||||
Builders: []*ethpb.Builder{
|
||||
{
|
||||
Pubkey: pubkey,
|
||||
Balance: 42,
|
||||
DepositEpoch: 3,
|
||||
WithdrawableEpoch: 4,
|
||||
},
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
got1, err := st.Builder(0)
|
||||
require.NoError(t, err)
|
||||
require.NotEqual(t, (*ethpb.Builder)(nil), got1)
|
||||
require.Equal(t, primitives.Gwei(42), got1.Balance)
|
||||
require.DeepEqual(t, pubkey, got1.Pubkey)
|
||||
|
||||
// Mutate returned builder; state should be unchanged.
|
||||
got1.Pubkey[0] = 0xFF
|
||||
got2, err := st.Builder(0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, byte(0xAA), got2.Pubkey[0])
|
||||
})
|
||||
}
|
||||
|
||||
func TestBuilderIndexByPubkey(t *testing.T) {
|
||||
t.Run("not found returns false", func(t *testing.T) {
|
||||
st, err := state_native.InitializeFromProtoGloas(ðpb.BeaconStateGloas{
|
||||
Builders: []*ethpb.Builder{
|
||||
{Pubkey: bytes.Repeat([]byte{0x11}, fieldparams.BLSPubkeyLength)},
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
var pk [fieldparams.BLSPubkeyLength]byte
|
||||
copy(pk[:], bytes.Repeat([]byte{0x22}, fieldparams.BLSPubkeyLength))
|
||||
idx, ok := st.BuilderIndexByPubkey(pk)
|
||||
require.Equal(t, false, ok)
|
||||
require.Equal(t, primitives.BuilderIndex(0), idx)
|
||||
})
|
||||
|
||||
t.Run("skips nil entries and finds match", func(t *testing.T) {
|
||||
wantIdx := primitives.BuilderIndex(1)
|
||||
wantPkBytes := bytes.Repeat([]byte{0xAB}, fieldparams.BLSPubkeyLength)
|
||||
|
||||
st, err := state_native.InitializeFromProtoGloas(ðpb.BeaconStateGloas{
|
||||
Builders: []*ethpb.Builder{
|
||||
nil,
|
||||
{Pubkey: wantPkBytes},
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
var pk [fieldparams.BLSPubkeyLength]byte
|
||||
copy(pk[:], wantPkBytes)
|
||||
idx, ok := st.BuilderIndexByPubkey(pk)
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, wantIdx, idx)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -5,14 +5,11 @@ import (
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native/types"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stateutil"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
)
|
||||
|
||||
// RotateBuilderPendingPayments rotates the queue by dropping slots per epoch payments from the
|
||||
@@ -124,41 +121,6 @@ func (b *BeaconState) ClearBuilderPendingPayment(index primitives.Slot) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// QueueBuilderPayment implements the builder payment queuing logic for Gloas.
// Spec v1.7.0-alpha.0 (pseudocode):
//
//	payment = state.builder_pending_payments[SLOTS_PER_EPOCH + state.slot % SLOTS_PER_EPOCH]
//	amount = payment.withdrawal.amount
//	if amount > 0:
//	    state.builder_pending_withdrawals.append(payment.withdrawal)
//	state.builder_pending_payments[SLOTS_PER_EPOCH + state.slot % SLOTS_PER_EPOCH] = BuilderPendingPayment()
|
||||
func (b *BeaconState) QueueBuilderPayment() error {
|
||||
if b.version < version.Gloas {
|
||||
return errNotSupported("QueueBuilderPayment", b.version)
|
||||
}
|
||||
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
slot := b.slot
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
paymentIndex := slotsPerEpoch + (slot % slotsPerEpoch)
|
||||
if uint64(paymentIndex) >= uint64(len(b.builderPendingPayments)) {
|
||||
return fmt.Errorf("builder pending payments index %d out of range (len=%d)", paymentIndex, len(b.builderPendingPayments))
|
||||
}
|
||||
|
||||
payment := b.builderPendingPayments[paymentIndex]
|
||||
if payment != nil && payment.Withdrawal != nil && payment.Withdrawal.Amount > 0 {
|
||||
b.builderPendingWithdrawals = append(b.builderPendingWithdrawals, ethpb.CopyBuilderPendingWithdrawal(payment.Withdrawal))
|
||||
b.markFieldAsDirty(types.BuilderPendingWithdrawals)
|
||||
}
|
||||
|
||||
b.builderPendingPayments[paymentIndex] = emptyBuilderPendingPayment
|
||||
b.markFieldAsDirty(types.BuilderPendingPayments)
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetBuilderPendingPayment sets a builder pending payment at the specified index.
|
||||
func (b *BeaconState) SetBuilderPendingPayment(index primitives.Slot, payment *ethpb.BuilderPendingPayment) error {
|
||||
if b.version < version.Gloas {
|
||||
@@ -199,91 +161,3 @@ func (b *BeaconState) UpdateExecutionPayloadAvailabilityAtIndex(idx uint64, val
|
||||
b.markFieldAsDirty(types.ExecutionPayloadAvailability)
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetLatestBlockHash sets the latest execution block hash.
|
||||
func (b *BeaconState) SetLatestBlockHash(hash [32]byte) error {
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
b.latestBlockHash = hash[:]
|
||||
b.markFieldAsDirty(types.LatestBlockHash)
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetExecutionPayloadAvailability sets the execution payload availability bit for a specific slot.
|
||||
func (b *BeaconState) SetExecutionPayloadAvailability(index primitives.Slot, available bool) error {
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
bitIndex := index % params.BeaconConfig().SlotsPerHistoricalRoot
|
||||
byteIndex := bitIndex / 8
|
||||
bitPosition := bitIndex % 8
|
||||
|
||||
// Set or clear the bit
|
||||
if available {
|
||||
b.executionPayloadAvailability[byteIndex] |= 1 << bitPosition
|
||||
} else {
|
||||
b.executionPayloadAvailability[byteIndex] &^= 1 << bitPosition
|
||||
}
|
||||
|
||||
b.markFieldAsDirty(types.ExecutionPayloadAvailability)
|
||||
return nil
|
||||
}
|
||||
|
||||
// IncreaseBuilderBalance increases the balance of the builder at the given index.
|
||||
func (b *BeaconState) IncreaseBuilderBalance(index primitives.BuilderIndex, amount uint64) error {
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
if b.builders == nil || uint64(index) >= uint64(len(b.builders)) {
|
||||
return fmt.Errorf("builder index %d out of bounds", index)
|
||||
}
|
||||
if b.builders[index] == nil {
|
||||
return fmt.Errorf("builder at index %d is nil", index)
|
||||
}
|
||||
|
||||
builder := ethpb.CopyBuilder(b.builders[index])
|
||||
builder.Balance += primitives.Gwei(amount)
|
||||
b.builders[index] = builder
|
||||
|
||||
b.markFieldAsDirty(types.Builders)
|
||||
return nil
|
||||
}
|
||||
|
||||
// AddBuilderFromDeposit creates or replaces a builder entry derived from a deposit.
|
||||
func (b *BeaconState) AddBuilderFromDeposit(pubkey [fieldparams.BLSPubkeyLength]byte, withdrawalCredentials [fieldparams.RootLength]byte, amount uint64) error {
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
currentEpoch := slots.ToEpoch(b.slot)
|
||||
index := b.builderInsertionIndex(currentEpoch)
|
||||
|
||||
builder := ðpb.Builder{
|
||||
Pubkey: bytesutil.SafeCopyBytes(pubkey[:]),
|
||||
Version: []byte{withdrawalCredentials[0]},
|
||||
ExecutionAddress: bytesutil.SafeCopyBytes(withdrawalCredentials[12:]),
|
||||
Balance: primitives.Gwei(amount),
|
||||
DepositEpoch: currentEpoch,
|
||||
WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
|
||||
if index < primitives.BuilderIndex(len(b.builders)) {
|
||||
b.builders[index] = builder
|
||||
} else {
|
||||
gap := index - primitives.BuilderIndex(len(b.builders)) + 1
|
||||
b.builders = append(b.builders, make([]*ethpb.Builder, gap)...)
|
||||
b.builders[index] = builder
|
||||
}
|
||||
|
||||
b.markFieldAsDirty(types.Builders)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *BeaconState) builderInsertionIndex(currentEpoch primitives.Epoch) primitives.BuilderIndex {
|
||||
for i, builder := range b.builders {
|
||||
if builder.WithdrawableEpoch <= currentEpoch && builder.Balance == 0 {
|
||||
return primitives.BuilderIndex(i)
|
||||
}
|
||||
}
|
||||
return primitives.BuilderIndex(len(b.builders))
|
||||
}
|
||||
|
||||
@@ -181,80 +181,6 @@ func TestClearBuilderPendingPayment(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestQueueBuilderPayment(t *testing.T) {
|
||||
t.Run("previous fork returns expected error", func(t *testing.T) {
|
||||
st := &BeaconState{version: version.Fulu}
|
||||
err := st.QueueBuilderPayment()
|
||||
require.ErrorContains(t, "is not supported", err)
|
||||
})
|
||||
|
||||
t.Run("appends withdrawal, clears payment, and marks dirty", func(t *testing.T) {
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
slot := primitives.Slot(3)
|
||||
paymentIndex := slotsPerEpoch + (slot % slotsPerEpoch)
|
||||
|
||||
st := &BeaconState{
|
||||
version: version.Gloas,
|
||||
slot: slot,
|
||||
dirtyFields: make(map[types.FieldIndex]bool),
|
||||
rebuildTrie: make(map[types.FieldIndex]bool),
|
||||
sharedFieldReferences: make(map[types.FieldIndex]*stateutil.Reference),
|
||||
builderPendingPayments: make([]*ethpb.BuilderPendingPayment, slotsPerEpoch*2),
|
||||
builderPendingWithdrawals: []*ethpb.BuilderPendingWithdrawal{},
|
||||
}
|
||||
st.builderPendingPayments[paymentIndex] = ðpb.BuilderPendingPayment{
|
||||
Weight: 1,
|
||||
Withdrawal: ðpb.BuilderPendingWithdrawal{
|
||||
FeeRecipient: bytes.Repeat([]byte{0xAB}, 20),
|
||||
Amount: 99,
|
||||
BuilderIndex: 1,
|
||||
},
|
||||
}
|
||||
|
||||
require.NoError(t, st.QueueBuilderPayment())
|
||||
require.Equal(t, emptyBuilderPendingPayment, st.builderPendingPayments[paymentIndex])
|
||||
require.Equal(t, true, st.dirtyFields[types.BuilderPendingPayments])
|
||||
require.Equal(t, true, st.dirtyFields[types.BuilderPendingWithdrawals])
|
||||
require.Equal(t, 1, len(st.builderPendingWithdrawals))
|
||||
require.DeepEqual(t, bytes.Repeat([]byte{0xAB}, 20), st.builderPendingWithdrawals[0].FeeRecipient)
|
||||
require.Equal(t, primitives.Gwei(99), st.builderPendingWithdrawals[0].Amount)
|
||||
|
||||
// Ensure copied withdrawal is not aliased.
|
||||
st.builderPendingPayments[paymentIndex].Withdrawal.FeeRecipient[0] = 0x01
|
||||
require.Equal(t, byte(0xAB), st.builderPendingWithdrawals[0].FeeRecipient[0])
|
||||
})
|
||||
|
||||
t.Run("zero amount does not append withdrawal", func(t *testing.T) {
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
slot := primitives.Slot(3)
|
||||
paymentIndex := slotsPerEpoch + (slot % slotsPerEpoch)
|
||||
|
||||
st := &BeaconState{
|
||||
version: version.Gloas,
|
||||
slot: slot,
|
||||
dirtyFields: make(map[types.FieldIndex]bool),
|
||||
rebuildTrie: make(map[types.FieldIndex]bool),
|
||||
sharedFieldReferences: make(map[types.FieldIndex]*stateutil.Reference),
|
||||
builderPendingPayments: make([]*ethpb.BuilderPendingPayment, slotsPerEpoch*2),
|
||||
builderPendingWithdrawals: []*ethpb.BuilderPendingWithdrawal{},
|
||||
}
|
||||
st.builderPendingPayments[paymentIndex] = ðpb.BuilderPendingPayment{
|
||||
Weight: 1,
|
||||
Withdrawal: ðpb.BuilderPendingWithdrawal{
|
||||
FeeRecipient: bytes.Repeat([]byte{0xAB}, 20),
|
||||
Amount: 0,
|
||||
BuilderIndex: 1,
|
||||
},
|
||||
}
|
||||
|
||||
require.NoError(t, st.QueueBuilderPayment())
|
||||
require.Equal(t, emptyBuilderPendingPayment, st.builderPendingPayments[paymentIndex])
|
||||
require.Equal(t, true, st.dirtyFields[types.BuilderPendingPayments])
|
||||
require.Equal(t, false, st.dirtyFields[types.BuilderPendingWithdrawals])
|
||||
require.Equal(t, 0, len(st.builderPendingWithdrawals))
|
||||
})
|
||||
}
|
||||
|
||||
func TestRotateBuilderPendingPayments(t *testing.T) {
|
||||
totalPayments := 2 * params.BeaconConfig().SlotsPerEpoch
|
||||
payments := make([]*ethpb.BuilderPendingPayment, totalPayments)
|
||||
@@ -402,134 +328,3 @@ func newGloasStateWithAvailability(t *testing.T, availability []byte) *BeaconSta
|
||||
|
||||
return st.(*BeaconState)
|
||||
}
|
||||
|
||||
func TestSetLatestBlockHash(t *testing.T) {
|
||||
var hash [32]byte
|
||||
copy(hash[:], []byte("latest-block-hash"))
|
||||
|
||||
state := &BeaconState{
|
||||
dirtyFields: make(map[types.FieldIndex]bool),
|
||||
}
|
||||
|
||||
require.NoError(t, state.SetLatestBlockHash(hash))
|
||||
require.Equal(t, true, state.dirtyFields[types.LatestBlockHash])
|
||||
require.DeepEqual(t, hash[:], state.latestBlockHash)
|
||||
}
|
||||
|
||||
func TestSetExecutionPayloadAvailability(t *testing.T) {
|
||||
state := &BeaconState{
|
||||
executionPayloadAvailability: make([]byte, params.BeaconConfig().SlotsPerHistoricalRoot/8),
|
||||
dirtyFields: make(map[types.FieldIndex]bool),
|
||||
}
|
||||
|
||||
slot := primitives.Slot(10)
|
||||
bitIndex := slot % params.BeaconConfig().SlotsPerHistoricalRoot
|
||||
byteIndex := bitIndex / 8
|
||||
bitPosition := bitIndex % 8
|
||||
|
||||
require.NoError(t, state.SetExecutionPayloadAvailability(slot, true))
|
||||
require.Equal(t, true, state.dirtyFields[types.ExecutionPayloadAvailability])
|
||||
require.Equal(t, byte(1<<bitPosition), state.executionPayloadAvailability[byteIndex]&(1<<bitPosition))
|
||||
|
||||
require.NoError(t, state.SetExecutionPayloadAvailability(slot, false))
|
||||
require.Equal(t, byte(0), state.executionPayloadAvailability[byteIndex]&(1<<bitPosition))
|
||||
}
|
||||
|
||||
func TestIncreaseBuilderBalance(t *testing.T) {
|
||||
t.Run("out of bounds returns error", func(t *testing.T) {
|
||||
st := &BeaconState{
|
||||
version: version.Gloas,
|
||||
dirtyFields: make(map[types.FieldIndex]bool),
|
||||
builders: []*ethpb.Builder{},
|
||||
}
|
||||
|
||||
err := st.IncreaseBuilderBalance(0, 1)
|
||||
require.ErrorContains(t, "out of bounds", err)
|
||||
require.Equal(t, false, st.dirtyFields[types.Builders])
|
||||
})
|
||||
|
||||
t.Run("nil builder returns error", func(t *testing.T) {
|
||||
st := &BeaconState{
|
||||
version: version.Gloas,
|
||||
dirtyFields: make(map[types.FieldIndex]bool),
|
||||
builders: []*ethpb.Builder{nil},
|
||||
}
|
||||
|
||||
err := st.IncreaseBuilderBalance(0, 1)
|
||||
require.ErrorContains(t, "is nil", err)
|
||||
require.Equal(t, false, st.dirtyFields[types.Builders])
|
||||
})
|
||||
|
||||
t.Run("increments and marks dirty", func(t *testing.T) {
|
||||
orig := ðpb.Builder{Balance: 10}
|
||||
st := &BeaconState{
|
||||
version: version.Gloas,
|
||||
dirtyFields: make(map[types.FieldIndex]bool),
|
||||
builders: []*ethpb.Builder{orig},
|
||||
}
|
||||
|
||||
require.NoError(t, st.IncreaseBuilderBalance(0, 5))
|
||||
require.Equal(t, primitives.Gwei(15), st.builders[0].Balance)
|
||||
require.Equal(t, true, st.dirtyFields[types.Builders])
|
||||
// Copy-on-write semantics: builder pointer replaced.
|
||||
require.NotEqual(t, orig, st.builders[0])
|
||||
})
|
||||
}
|
||||
|
||||
func TestAddBuilderFromDeposit(t *testing.T) {
|
||||
t.Run("reuses empty withdrawable slot", func(t *testing.T) {
|
||||
var pubkey [48]byte
|
||||
copy(pubkey[:], bytes.Repeat([]byte{0xAA}, 48))
|
||||
var wc [32]byte
|
||||
copy(wc[:], bytes.Repeat([]byte{0xBB}, 32))
|
||||
wc[0] = 0x42 // version byte
|
||||
|
||||
st := &BeaconState{
|
||||
version: version.Gloas,
|
||||
slot: 0, // epoch 0
|
||||
dirtyFields: make(map[types.FieldIndex]bool),
|
||||
builders: []*ethpb.Builder{
|
||||
{
|
||||
WithdrawableEpoch: 0,
|
||||
Balance: 0,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
require.NoError(t, st.AddBuilderFromDeposit(pubkey, wc, 123))
|
||||
require.Equal(t, 1, len(st.builders))
|
||||
got := st.builders[0]
|
||||
require.NotNil(t, got)
|
||||
require.DeepEqual(t, pubkey[:], got.Pubkey)
|
||||
require.DeepEqual(t, []byte{0x42}, got.Version)
|
||||
require.DeepEqual(t, wc[12:], got.ExecutionAddress)
|
||||
require.Equal(t, primitives.Gwei(123), got.Balance)
|
||||
require.Equal(t, primitives.Epoch(0), got.DepositEpoch)
|
||||
require.Equal(t, params.BeaconConfig().FarFutureEpoch, got.WithdrawableEpoch)
|
||||
require.Equal(t, true, st.dirtyFields[types.Builders])
|
||||
})
|
||||
|
||||
t.Run("appends new builder when no reusable slot", func(t *testing.T) {
|
||||
var pubkey [48]byte
|
||||
copy(pubkey[:], bytes.Repeat([]byte{0xAA}, 48))
|
||||
var wc [32]byte
|
||||
copy(wc[:], bytes.Repeat([]byte{0xBB}, 32))
|
||||
|
||||
st := &BeaconState{
|
||||
version: version.Gloas,
|
||||
slot: 0,
|
||||
dirtyFields: make(map[types.FieldIndex]bool),
|
||||
builders: []*ethpb.Builder{
|
||||
{
|
||||
WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
Balance: 1,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
require.NoError(t, st.AddBuilderFromDeposit(pubkey, wc, 5))
|
||||
require.Equal(t, 2, len(st.builders))
|
||||
require.NotNil(t, st.builders[1])
|
||||
require.Equal(t, primitives.Gwei(5), st.builders[1].Balance)
|
||||
})
|
||||
}
|
||||
|
||||
3
changelog/farazdagi_fix-hashtree-darwin-amd64.md
Normal file
@@ -0,0 +1,3 @@
### Fixed

- Fix Bazel build failure on macOS x86_64 (darwin_amd64) by adding the missing assembly stub to the hashtree patch.
@@ -0,0 +1,6 @@
### Added

- Added a new proofCollector type to ssz-query.

### Ignored

- Added tests covering Merkle proof production from a Phase0 beacon state, benchmarked against a real Hoodi beacon state (Fulu version).
7
changelog/james-prysm_grpc-fallback.md
Normal file
@@ -0,0 +1,7 @@
### Changed

- The gRPC fallback now matches the REST API implementation and only checks and connects to synced nodes (a minimal sketch of the selection idea follows below).

### Removed

- Removed the gRPC resolver used for load balancing; the new implementation matches the REST API's, so the resolver is dropped and fallback is handled the same way for consistency.
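Below is a minimal, hedged sketch of the "connect only to synced nodes" idea described in the entry above. The function name pickSyncedHost and the isSynced callback are illustrative placeholders, not Prysm APIs; the actual fallback logic lives in the gRPC connection provider.

package main

import (
	"context"
	"errors"
)

// pickSyncedHost returns the first candidate host whose syncing check passes.
// isSynced stands in for a per-host health query (for example, the gRPC
// health endpoint described in the next changelog entry).
func pickSyncedHost(ctx context.Context, hosts []string, isSynced func(context.Context, string) bool) (string, error) {
	for _, h := range hosts {
		if isSynced(ctx, h) {
			return h, nil
		}
	}
	return "", errors.New("no synced gRPC endpoint available")
}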
3
changelog/james-prysm_update-health-endpoint.md
Normal file
@@ -0,0 +1,3 @@
### Changed

- The gRPC health endpoint now returns an error when the node is syncing or optimistic, signaling that it is unavailable (client-side handling is sketched below).
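A hedged sketch of how a caller might interpret the new behavior: the handler returns codes.Unavailable while the node is syncing or optimistic, so a client can branch on the status code. This assumes a generated ethpb.NodeClient whose GetHealth method mirrors the server handler shown earlier in this diff; treat it as an illustration, not the canonical client code.

import (
	"context"

	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// nodeIsHealthy reports whether the beacon node considers itself available.
// It returns false without an error when the server answers Unavailable,
// which per the change above means the node is syncing or optimistic.
func nodeIsHealthy(ctx context.Context, client ethpb.NodeClient) (bool, error) {
	_, err := client.GetHealth(ctx, &ethpb.HealthRequest{})
	if err == nil {
		return true, nil
	}
	if s, ok := status.FromError(err); ok && s.Code() == codes.Unavailable {
		return false, nil
	}
	return false, err
}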
@@ -1,2 +0,0 @@
|
||||
### Added
|
||||
- Add process execution payload for gloas
|
||||
@@ -98,7 +98,6 @@ func compareConfigs(t *testing.T, expected, actual *BeaconChainConfig) {
|
||||
require.DeepEqual(t, expected.EjectionBalance, actual.EjectionBalance)
|
||||
require.DeepEqual(t, expected.EffectiveBalanceIncrement, actual.EffectiveBalanceIncrement)
|
||||
require.DeepEqual(t, expected.BLSWithdrawalPrefixByte, actual.BLSWithdrawalPrefixByte)
|
||||
require.DeepEqual(t, expected.BuilderWithdrawalPrefixByte, actual.BuilderWithdrawalPrefixByte)
|
||||
require.DeepEqual(t, expected.ZeroHash, actual.ZeroHash)
|
||||
require.DeepEqual(t, expected.GenesisDelay, actual.GenesisDelay)
|
||||
require.DeepEqual(t, expected.MinAttestationInclusionDelay, actual.MinAttestationInclusionDelay)
|
||||
|
||||
@@ -119,7 +119,6 @@ func assertEqualConfigs(t *testing.T, name string, fields []string, expected, ac
|
||||
// Initial values.
|
||||
assert.DeepEqual(t, expected.GenesisForkVersion, actual.GenesisForkVersion, "%s: GenesisForkVersion", name)
|
||||
assert.DeepEqual(t, expected.BLSWithdrawalPrefixByte, actual.BLSWithdrawalPrefixByte, "%s: BLSWithdrawalPrefixByte", name)
|
||||
assert.DeepEqual(t, expected.BuilderWithdrawalPrefixByte, actual.BuilderWithdrawalPrefixByte, "%s: BuilderWithdrawalPrefixByte", name)
|
||||
assert.DeepEqual(t, expected.ETH1AddressWithdrawalPrefixByte, actual.ETH1AddressWithdrawalPrefixByte, "%s: ETH1AddressWithdrawalPrefixByte", name)
|
||||
|
||||
// Time parameters.
|
||||
|
||||
@@ -31,7 +31,6 @@ func MinimalSpecConfig() *BeaconChainConfig {
|
||||
// Initial values
|
||||
minimalConfig.BLSWithdrawalPrefixByte = byte(0)
|
||||
minimalConfig.ETH1AddressWithdrawalPrefixByte = byte(1)
|
||||
minimalConfig.BuilderWithdrawalPrefixByte = byte(3)
|
||||
|
||||
// Time parameters
|
||||
minimalConfig.SecondsPerSlot = 6
|
||||
|
||||
@@ -54,7 +54,6 @@ func compareConfigs(t *testing.T, expected, actual *params.BeaconChainConfig) {
|
||||
require.DeepEqual(t, expected.EjectionBalance, actual.EjectionBalance)
|
||||
require.DeepEqual(t, expected.EffectiveBalanceIncrement, actual.EffectiveBalanceIncrement)
|
||||
require.DeepEqual(t, expected.BLSWithdrawalPrefixByte, actual.BLSWithdrawalPrefixByte)
|
||||
require.DeepEqual(t, expected.BuilderWithdrawalPrefixByte, actual.BuilderWithdrawalPrefixByte)
|
||||
require.DeepEqual(t, expected.ZeroHash, actual.ZeroHash)
|
||||
require.DeepEqual(t, expected.GenesisDelay, actual.GenesisDelay)
|
||||
require.DeepEqual(t, expected.MinAttestationInclusionDelay, actual.MinAttestationInclusionDelay)
|
||||
|
||||
@@ -4,7 +4,6 @@ go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"execution.go",
|
||||
"execution_payload_envelope.go",
|
||||
"factory.go",
|
||||
"get_payload.go",
|
||||
"getters.go",
|
||||
@@ -37,7 +36,6 @@ go_library(
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/validator-client:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_fastssz//:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
@@ -47,7 +45,6 @@ go_library(
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"execution_payload_envelope_test.go",
|
||||
"execution_test.go",
|
||||
"factory_test.go",
|
||||
"getters_test.go",
|
||||
|
||||
@@ -1,153 +0,0 @@
|
||||
package blocks
|
||||
|
||||
import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
|
||||
field_params "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
consensus_types "github.com/OffchainLabs/prysm/v7/consensus-types"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/ssz"
|
||||
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
type signedExecutionPayloadEnvelope struct {
|
||||
s *ethpb.SignedExecutionPayloadEnvelope
|
||||
}
|
||||
|
||||
type executionPayloadEnvelope struct {
|
||||
p *ethpb.ExecutionPayloadEnvelope
|
||||
}
|
||||
|
||||
// WrappedROSignedExecutionPayloadEnvelope wraps a signed execution payload envelope proto in a read-only interface.
|
||||
func WrappedROSignedExecutionPayloadEnvelope(s *ethpb.SignedExecutionPayloadEnvelope) (interfaces.ROSignedExecutionPayloadEnvelope, error) {
|
||||
w := signedExecutionPayloadEnvelope{s: s}
|
||||
if w.IsNil() {
|
||||
return nil, consensus_types.ErrNilObjectWrapped
|
||||
}
|
||||
return w, nil
|
||||
}
|
||||
|
||||
// WrappedROExecutionPayloadEnvelope wraps an execution payload envelope proto in a read-only interface.
|
||||
func WrappedROExecutionPayloadEnvelope(p *ethpb.ExecutionPayloadEnvelope) (interfaces.ROExecutionPayloadEnvelope, error) {
|
||||
w := &executionPayloadEnvelope{p: p}
|
||||
if w.IsNil() {
|
||||
return nil, consensus_types.ErrNilObjectWrapped
|
||||
}
|
||||
return w, nil
|
||||
}
|
||||
|
||||
// Envelope returns the execution payload envelope as a read-only interface.
|
||||
func (s signedExecutionPayloadEnvelope) Envelope() (interfaces.ROExecutionPayloadEnvelope, error) {
|
||||
return WrappedROExecutionPayloadEnvelope(s.s.Message)
|
||||
}
|
||||
|
||||
// Signature returns the BLS signature as a 96-byte array.
|
||||
func (s signedExecutionPayloadEnvelope) Signature() [field_params.BLSSignatureLength]byte {
|
||||
return [field_params.BLSSignatureLength]byte(s.s.Signature)
|
||||
}
|
||||
|
||||
// IsNil reports whether the signed envelope or its contents are invalid.
|
||||
func (s signedExecutionPayloadEnvelope) IsNil() bool {
|
||||
if s.s == nil {
|
||||
return true
|
||||
}
|
||||
if len(s.s.Signature) != field_params.BLSSignatureLength {
|
||||
return true
|
||||
}
|
||||
w := executionPayloadEnvelope{p: s.s.Message}
|
||||
return w.IsNil()
|
||||
}
|
||||
|
||||
// SigningRoot computes the signing root for the signed envelope with the provided domain.
|
||||
func (s signedExecutionPayloadEnvelope) SigningRoot(domain []byte) (root [32]byte, err error) {
|
||||
return signing.ComputeSigningRoot(s.s.Message, domain)
|
||||
}
|
||||
|
||||
// Proto returns the underlying protobuf message.
|
||||
func (s signedExecutionPayloadEnvelope) Proto() proto.Message {
|
||||
return s.s
|
||||
}
|
||||
|
||||
// IsNil reports whether the envelope or its required fields are invalid.
|
||||
func (p *executionPayloadEnvelope) IsNil() bool {
|
||||
if p.p == nil {
|
||||
return true
|
||||
}
|
||||
if p.p.Payload == nil {
|
||||
return true
|
||||
}
|
||||
if len(p.p.BeaconBlockRoot) != field_params.RootLength {
|
||||
return true
|
||||
}
|
||||
if p.p.BlobKzgCommitments == nil {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// IsBlinded reports whether the envelope contains a blinded payload.
|
||||
func (p *executionPayloadEnvelope) IsBlinded() bool {
|
||||
return !p.IsNil() && p.p.Payload == nil
|
||||
}
|
||||
|
||||
// Execution returns the execution payload as a read-only interface.
|
||||
func (p *executionPayloadEnvelope) Execution() (interfaces.ExecutionData, error) {
|
||||
return WrappedExecutionPayloadDeneb(p.p.Payload)
|
||||
}
|
||||
|
||||
// ExecutionRequests returns the execution requests attached to the envelope.
|
||||
func (p *executionPayloadEnvelope) ExecutionRequests() *enginev1.ExecutionRequests {
|
||||
return p.p.ExecutionRequests
|
||||
}
|
||||
|
||||
// BuilderIndex returns the proposer/builder index for the envelope.
|
||||
func (p *executionPayloadEnvelope) BuilderIndex() primitives.BuilderIndex {
|
||||
return p.p.BuilderIndex
|
||||
}
|
||||
|
||||
// BeaconBlockRoot returns the beacon block root referenced by the envelope.
|
||||
func (p *executionPayloadEnvelope) BeaconBlockRoot() [field_params.RootLength]byte {
|
||||
return [field_params.RootLength]byte(p.p.BeaconBlockRoot)
|
||||
}
|
||||
|
||||
// BlobKzgCommitments returns a copy of the envelope's KZG commitments.
|
||||
func (p *executionPayloadEnvelope) BlobKzgCommitments() [][]byte {
|
||||
commitments := make([][]byte, len(p.p.BlobKzgCommitments))
|
||||
for i, commit := range p.p.BlobKzgCommitments {
|
||||
commitments[i] = make([]byte, len(commit))
|
||||
copy(commitments[i], commit)
|
||||
}
|
||||
return commitments
|
||||
}
|
||||
|
||||
// VersionedHashes returns versioned hashes derived from the KZG commitments.
|
||||
func (p *executionPayloadEnvelope) VersionedHashes() []common.Hash {
|
||||
commitments := p.p.BlobKzgCommitments
|
||||
versionedHashes := make([]common.Hash, len(commitments))
|
||||
for i, commitment := range commitments {
|
||||
versionedHashes[i] = primitives.ConvertKzgCommitmentToVersionedHash(commitment)
|
||||
}
|
||||
return versionedHashes
|
||||
}
|
||||
|
||||
// BlobKzgCommitmentsRoot returns the SSZ root of the envelope's KZG commitments.
|
||||
func (p *executionPayloadEnvelope) BlobKzgCommitmentsRoot() ([field_params.RootLength]byte, error) {
|
||||
if p.IsNil() || p.p.BlobKzgCommitments == nil {
|
||||
return [field_params.RootLength]byte{}, consensus_types.ErrNilObjectWrapped
|
||||
}
|
||||
|
||||
return ssz.KzgCommitmentsRoot(p.p.BlobKzgCommitments)
|
||||
}
|
||||
|
||||
// Slot returns the slot of the envelope.
|
||||
func (p *executionPayloadEnvelope) Slot() primitives.Slot {
|
||||
return p.p.Slot
|
||||
}
|
||||
|
||||
// StateRoot returns the state root carried by the envelope.
|
||||
func (p *executionPayloadEnvelope) StateRoot() [field_params.RootLength]byte {
|
||||
return [field_params.RootLength]byte(p.p.StateRoot)
|
||||
}
|
||||
@@ -1,115 +0,0 @@
|
||||
package blocks_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
|
||||
consensus_types "github.com/OffchainLabs/prysm/v7/consensus-types"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
)
|
||||
|
||||
func validExecutionPayloadEnvelope() *ethpb.ExecutionPayloadEnvelope {
|
||||
payload := &enginev1.ExecutionPayloadDeneb{
|
||||
ParentHash: bytes.Repeat([]byte{0x01}, 32),
|
||||
FeeRecipient: bytes.Repeat([]byte{0x02}, 20),
|
||||
StateRoot: bytes.Repeat([]byte{0x03}, 32),
|
||||
ReceiptsRoot: bytes.Repeat([]byte{0x04}, 32),
|
||||
LogsBloom: bytes.Repeat([]byte{0x05}, 256),
|
||||
PrevRandao: bytes.Repeat([]byte{0x06}, 32),
|
||||
BlockNumber: 1,
|
||||
GasLimit: 2,
|
||||
GasUsed: 3,
|
||||
Timestamp: 4,
|
||||
BaseFeePerGas: bytes.Repeat([]byte{0x07}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0x08}, 32),
|
||||
Transactions: [][]byte{},
|
||||
Withdrawals: []*enginev1.Withdrawal{},
|
||||
BlobGasUsed: 0,
|
||||
ExcessBlobGas: 0,
|
||||
}
|
||||
|
||||
return ðpb.ExecutionPayloadEnvelope{
|
||||
Payload: payload,
|
||||
ExecutionRequests: &enginev1.ExecutionRequests{},
|
||||
BuilderIndex: 10,
|
||||
BeaconBlockRoot: bytes.Repeat([]byte{0xAA}, 32),
|
||||
Slot: 9,
|
||||
BlobKzgCommitments: [][]byte{bytes.Repeat([]byte{0x0C}, 48)},
|
||||
StateRoot: bytes.Repeat([]byte{0xBB}, 32),
|
||||
}
|
||||
}
|
||||
|
||||
func TestWrappedROExecutionPayloadEnvelope(t *testing.T) {
|
||||
t.Run("returns error on invalid beacon root length", func(t *testing.T) {
|
||||
invalid := validExecutionPayloadEnvelope()
|
||||
invalid.BeaconBlockRoot = []byte{0x01}
|
||||
_, err := blocks.WrappedROExecutionPayloadEnvelope(invalid)
|
||||
require.Equal(t, consensus_types.ErrNilObjectWrapped, err)
|
||||
})
|
||||
|
||||
t.Run("wraps and exposes fields", func(t *testing.T) {
|
||||
env := validExecutionPayloadEnvelope()
|
||||
wrapped, err := blocks.WrappedROExecutionPayloadEnvelope(env)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, primitives.BuilderIndex(10), wrapped.BuilderIndex())
|
||||
require.Equal(t, primitives.Slot(9), wrapped.Slot())
|
||||
assert.DeepEqual(t, [32]byte(bytes.Repeat([]byte{0xAA}, 32)), wrapped.BeaconBlockRoot())
|
||||
assert.DeepEqual(t, [32]byte(bytes.Repeat([]byte{0xBB}, 32)), wrapped.StateRoot())
|
||||
|
||||
commitments := wrapped.BlobKzgCommitments()
|
||||
assert.DeepEqual(t, env.BlobKzgCommitments, commitments)
|
||||
|
||||
versioned := wrapped.VersionedHashes()
|
||||
require.Equal(t, 1, len(versioned))
|
||||
|
||||
reqs := wrapped.ExecutionRequests()
|
||||
require.NotNil(t, reqs)
|
||||
|
||||
exec, err := wrapped.Execution()
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, env.Payload.ParentHash, exec.ParentHash())
|
||||
})
|
||||
}
|
||||
|
||||
func TestWrappedROSignedExecutionPayloadEnvelope(t *testing.T) {
|
||||
t.Run("returns error for invalid signature length", func(t *testing.T) {
|
||||
signed := ðpb.SignedExecutionPayloadEnvelope{
|
||||
Message: validExecutionPayloadEnvelope(),
|
||||
Signature: bytes.Repeat([]byte{0xAA}, 95),
|
||||
}
|
||||
_, err := blocks.WrappedROSignedExecutionPayloadEnvelope(signed)
|
||||
require.Equal(t, consensus_types.ErrNilObjectWrapped, err)
|
||||
})
|
||||
|
||||
t.Run("wraps and provides envelope/signing data", func(t *testing.T) {
|
||||
sig := bytes.Repeat([]byte{0xAB}, 96)
|
||||
signed := ðpb.SignedExecutionPayloadEnvelope{
|
||||
Message: validExecutionPayloadEnvelope(),
|
||||
Signature: sig,
|
||||
}
|
||||
|
||||
wrapped, err := blocks.WrappedROSignedExecutionPayloadEnvelope(signed)
|
||||
require.NoError(t, err)
|
||||
|
||||
gotSig := wrapped.Signature()
|
||||
assert.DeepEqual(t, [96]byte(sig), gotSig)
|
||||
|
||||
env, err := wrapped.Envelope()
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, [32]byte(bytes.Repeat([]byte{0xAA}, 32)), env.BeaconBlockRoot())
|
||||
|
||||
domain := bytes.Repeat([]byte{0xCC}, 32)
|
||||
wantRoot, err := signing.ComputeSigningRoot(signed.Message, domain)
|
||||
require.NoError(t, err)
|
||||
gotRoot, err := wrapped.SigningRoot(domain)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, wantRoot, gotRoot)
|
||||
})
|
||||
}
|
||||
@@ -111,7 +111,7 @@ func (h executionPayloadBidGloas) GasLimit() uint64 {
|
||||
return h.payload.GasLimit
|
||||
}
|
||||
|
||||
// BuilderIndex returns the builder index of the builder who created this bid.
|
||||
// BuilderIndex returns the validator index of the builder who created this bid.
|
||||
func (h executionPayloadBidGloas) BuilderIndex() primitives.BuilderIndex {
|
||||
return h.payload.BuilderIndex
|
||||
}
|
||||
|
||||
@@ -5,7 +5,6 @@ go_library(
|
||||
srcs = [
|
||||
"beacon_block.go",
|
||||
"error.go",
|
||||
"execution_payload_envelope.go",
|
||||
"light_client.go",
|
||||
"signed_execution_payload_bid.go",
|
||||
"utils.go",
|
||||
@@ -20,7 +19,6 @@ go_library(
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/validator-client:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_fastssz//:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
|
||||
@@ -1,31 +0,0 @@
|
||||
package interfaces
|
||||
|
||||
import (
|
||||
field_params "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
type ROSignedExecutionPayloadEnvelope interface {
|
||||
Envelope() (ROExecutionPayloadEnvelope, error)
|
||||
Signature() [field_params.BLSSignatureLength]byte
|
||||
SigningRoot([]byte) ([32]byte, error)
|
||||
IsNil() bool
|
||||
Proto() proto.Message
|
||||
}
|
||||
|
||||
type ROExecutionPayloadEnvelope interface {
|
||||
Execution() (ExecutionData, error)
|
||||
ExecutionRequests() *enginev1.ExecutionRequests
|
||||
BuilderIndex() primitives.BuilderIndex
|
||||
BeaconBlockRoot() [field_params.RootLength]byte
|
||||
BlobKzgCommitments() [][]byte
|
||||
BlobKzgCommitmentsRoot() ([field_params.RootLength]byte, error)
|
||||
VersionedHashes() []common.Hash
|
||||
Slot() primitives.Slot
|
||||
StateRoot() [field_params.RootLength]byte
|
||||
IsBlinded() bool
|
||||
IsNil() bool
|
||||
}
|
||||
@@ -163,3 +163,18 @@ func Uint256ToSSZBytes(num string) ([]byte, error) {
|
||||
}
|
||||
return PadTo(ReverseByteOrder(uint256.Bytes()), 32), nil
|
||||
}
|
||||
|
||||
// PutLittleEndian writes an unsigned integer value in little-endian format.
|
||||
// Supports sizes 1, 2, 4, or 8 bytes for uint8/16/32/64 respectively.
|
||||
func PutLittleEndian(dst []byte, val uint64, size int) {
|
||||
switch size {
|
||||
case 1:
|
||||
dst[0] = byte(val)
|
||||
case 2:
|
||||
binary.LittleEndian.PutUint16(dst, uint16(val))
|
||||
case 4:
|
||||
binary.LittleEndian.PutUint32(dst, uint32(val))
|
||||
case 8:
|
||||
binary.LittleEndian.PutUint64(dst, val)
|
||||
}
|
||||
}
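A quick usage note for the helper above: the size parameter selects the integer width, and the switch silently ignores any other size. The snippet below is illustrative only and assumes the helper lands in the bytesutil package alongside Uint256ToSSZBytes, as the surrounding hunk suggests.

// exampleLittleEndian encodes 0x1234 as a 2-byte little-endian value.
func exampleLittleEndian() []byte {
	buf := make([]byte, 2)
	bytesutil.PutLittleEndian(buf, 0x1234, 2) // buf becomes []byte{0x34, 0x12}
	// Sizes 1, 4, and 8 behave analogously; any other size leaves buf untouched.
	return buf
}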
|
||||
|
||||
@@ -5,7 +5,6 @@ import (
|
||||
"encoding/binary"
|
||||
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/crypto/hash/htr"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
@@ -142,24 +141,3 @@ func withdrawalRoot(w *enginev1.Withdrawal) ([32]byte, error) {
|
||||
}
|
||||
return w.HashTreeRoot()
|
||||
}
|
||||
|
||||
// KzgCommitmentsRoot computes the HTR for a list of KZG commitments
|
||||
func KzgCommitmentsRoot(commitments [][]byte) ([32]byte, error) {
|
||||
roots := make([][32]byte, len(commitments))
|
||||
for i, commitment := range commitments {
|
||||
chunks, err := PackByChunk([][]byte{commitment})
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
roots[i] = htr.VectorizedSha256(chunks)[0]
|
||||
}
|
||||
|
||||
commitmentsRoot, err := BitwiseMerkleize(roots, uint64(len(roots)), fieldparams.MaxBlobCommitmentsPerBlock)
|
||||
if err != nil {
|
||||
return [32]byte{}, errors.Wrap(err, "could not compute merkleization")
|
||||
}
|
||||
|
||||
length := make([]byte, 32)
|
||||
binary.LittleEndian.PutUint64(length[:8], uint64(len(roots)))
|
||||
return MixInLength(commitmentsRoot, length), nil
|
||||
}
|
||||
|
||||
@@ -9,7 +9,9 @@ go_library(
|
||||
"container.go",
|
||||
"generalized_index.go",
|
||||
"list.go",
|
||||
"merkle_proof.go",
|
||||
"path.go",
|
||||
"proof_collector.go",
|
||||
"query.go",
|
||||
"ssz_info.go",
|
||||
"ssz_object.go",
|
||||
@@ -20,7 +22,12 @@ go_library(
|
||||
importpath = "github.com/OffchainLabs/prysm/v7/encoding/ssz/query",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//container/trie:go_default_library",
|
||||
"//crypto/hash/htr:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//encoding/ssz:go_default_library",
|
||||
"//math:go_default_library",
|
||||
"@com_github_prysmaticlabs_fastssz//:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -29,15 +36,24 @@ go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"generalized_index_test.go",
|
||||
"merkle_proof_test.go",
|
||||
"path_test.go",
|
||||
"proof_collector_test.go",
|
||||
"query_test.go",
|
||||
"tag_parser_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
":go_default_library",
|
||||
"//beacon-chain/state/stateutil:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/ssz:go_default_library",
|
||||
"//encoding/ssz/query/testutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/ssz_query/testing:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"@com_github_prysmaticlabs_fastssz//:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
34
encoding/ssz/query/merkle_proof.go
Normal file
@@ -0,0 +1,34 @@
|
||||
package query
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
fastssz "github.com/prysmaticlabs/fastssz"
|
||||
)
|
||||
|
||||
// Prove is the entrypoint to generate an SSZ Merkle proof for the given generalized index.
|
||||
// Parameters:
|
||||
// - gindex: the generalized index of the node to prove inclusion for.
|
||||
// Returns:
|
||||
// - fastssz.Proof: the Merkle proof containing the leaf, index, and sibling hashes.
|
||||
// - error: any error encountered during proof generation.
|
||||
func (info *SszInfo) Prove(gindex uint64) (*fastssz.Proof, error) {
|
||||
if info == nil {
|
||||
return nil, fmt.Errorf("nil SszInfo")
|
||||
}
|
||||
|
||||
collector := newProofCollector()
|
||||
collector.addTarget(gindex)
|
||||
|
||||
// info.source is guaranteed to be valid and dereferenced by AnalyzeObject
|
||||
v := reflect.ValueOf(info.source).Elem()
|
||||
|
||||
// Start the merkleization and proof collection process.
|
||||
// In SSZ generalized indices, the root is always at index 1.
|
||||
if _, err := collector.merkleize(info, v, 1); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return collector.toProof()
|
||||
}
|
||||
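A hedged usage sketch of the Prove entrypoint, following the same flow as the proveAndVerify helper in the test file below; the path string and function name are illustrative, and error handling is kept minimal.

// proveSlot is a hypothetical caller: analyze the object, resolve a path to a gindex, then prove it.
func proveSlot(obj query.SSZObject) (*fastssz.Proof, error) {
	info, err := query.AnalyzeObject(obj)
	if err != nil {
		return nil, err
	}
	path, err := query.ParsePath(".slot") // illustrative path
	if err != nil {
		return nil, err
	}
	gi, err := query.GetGeneralizedIndexFromPath(info, path)
	if err != nil {
		return nil, err
	}
	return info.Prove(gi)
}

The resulting proof can then be checked with fastssz.VerifyProof against obj.HashTreeRoot(), as proveAndVerify does below.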
163
encoding/ssz/query/merkle_proof_test.go
Normal file
@@ -0,0 +1,163 @@
|
||||
package query_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/go-bitfield"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/ssz/query"
|
||||
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/util"
|
||||
ssz "github.com/prysmaticlabs/fastssz"
|
||||
)
|
||||
|
||||
func TestProve_FixedTestContainer(t *testing.T) {
|
||||
obj := createFixedTestContainer()
|
||||
|
||||
tests := []string{
|
||||
".field_uint32",
|
||||
".nested.value2",
|
||||
".vector_field[3]",
|
||||
".bitvector64_field",
|
||||
".trailing_field",
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc, func(t *testing.T) {
|
||||
proveAndVerify(t, obj, tc)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestProve_VariableTestContainer(t *testing.T) {
|
||||
obj := createVariableTestContainer()
|
||||
|
||||
tests := []string{
|
||||
".leading_field",
|
||||
".field_list_uint64[2]",
|
||||
"len(field_list_uint64)",
|
||||
".nested.nested_list_field[1]",
|
||||
".variable_container_list[0].inner_1.field_list_uint64[1]",
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc, func(t *testing.T) {
|
||||
proveAndVerify(t, obj, tc)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestProve_BeaconBlock(t *testing.T) {
|
||||
randaoReveal := make([]byte, 96)
|
||||
for i := range randaoReveal {
|
||||
randaoReveal[i] = 0x42
|
||||
}
|
||||
root32 := make([]byte, 32)
|
||||
for i := range root32 {
|
||||
root32[i] = 0x24
|
||||
}
|
||||
sig := make([]byte, 96)
|
||||
for i := range sig {
|
||||
sig[i] = 0x99
|
||||
}
|
||||
|
||||
att := ð.Attestation{
|
||||
AggregationBits: bitfield.Bitlist{0x01},
|
||||
Data: ð.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 1,
|
||||
BeaconBlockRoot: root32,
|
||||
Source: ð.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: root32,
|
||||
},
|
||||
Target: ð.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: root32,
|
||||
},
|
||||
},
|
||||
Signature: sig,
|
||||
}
|
||||
|
||||
b := util.NewBeaconBlock()
|
||||
b.Block.Slot = 123
|
||||
b.Block.Body.RandaoReveal = randaoReveal
|
||||
b.Block.Body.Attestations = []*eth.Attestation{att}
|
||||
|
||||
sb, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
|
||||
protoBlock, err := sb.Block().Proto()
|
||||
require.NoError(t, err)
|
||||
|
||||
obj, ok := protoBlock.(query.SSZObject)
|
||||
require.Equal(t, true, ok, "block proto does not implement query.SSZObject")
|
||||
|
||||
tests := []string{
|
||||
".slot",
|
||||
".body.randao_reveal",
|
||||
".body.attestations[0].data.slot",
|
||||
"len(body.attestations)",
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc, func(t *testing.T) {
|
||||
proveAndVerify(t, obj, tc)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestProve_BeaconState(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisState(t, 16)
|
||||
require.NoError(t, st.SetSlot(primitives.Slot(42)))
|
||||
|
||||
sszObj, ok := st.ToProtoUnsafe().(query.SSZObject)
|
||||
require.Equal(t, true, ok, "state proto does not implement query.SSZObject")
|
||||
|
||||
tests := []string{
|
||||
".slot",
|
||||
".latest_block_header",
|
||||
".validators[0].effective_balance",
|
||||
"len(validators)",
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc, func(t *testing.T) {
|
||||
proveAndVerify(t, sszObj, tc)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// proveAndVerify is a test helper that analyzes an object, generates a merkle proof for the given path,
// and verifies the proof against the object's root.
|
||||
func proveAndVerify(t *testing.T, obj query.SSZObject, pathStr string) {
|
||||
t.Helper()
|
||||
|
||||
info, err := query.AnalyzeObject(obj)
|
||||
require.NoError(t, err)
|
||||
|
||||
path, err := query.ParsePath(pathStr)
|
||||
require.NoError(t, err)
|
||||
|
||||
gi, err := query.GetGeneralizedIndexFromPath(info, path)
|
||||
require.NoError(t, err)
|
||||
|
||||
proof, err := info.Prove(gi)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int(gi), proof.Index)
|
||||
|
||||
root, err := obj.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
ok, err := ssz.VerifyProof(root[:], proof)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, ok, "merkle proof verification failed")
|
||||
|
||||
require.Equal(t, 32, len(proof.Leaf))
|
||||
for i, h := range proof.Hashes {
|
||||
require.Equal(t, 32, len(h), "proof hash %d is not 32 bytes", i)
|
||||
}
|
||||
|
||||
}
|
||||
672
encoding/ssz/query/proof_collector.go
Normal file
@@ -0,0 +1,672 @@
|
||||
package query
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/bits"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"slices"
|
||||
"sync"
|
||||
|
||||
"github.com/OffchainLabs/go-bitfield"
|
||||
"github.com/OffchainLabs/prysm/v7/container/trie"
|
||||
"github.com/OffchainLabs/prysm/v7/crypto/hash/htr"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
ssz "github.com/OffchainLabs/prysm/v7/encoding/ssz"
|
||||
"github.com/OffchainLabs/prysm/v7/math"
|
||||
fastssz "github.com/prysmaticlabs/fastssz"
|
||||
)
|
||||
|
||||
// proofCollector collects sibling hashes and leaves needed for Merkle proofs.
|
||||
//
|
||||
// Multiproof-ready design:
|
||||
// - requiredSiblings/requiredLeaves store which gindices we want to collect (registered before merkleization).
|
||||
// - siblings/leaves store the actual collected hashes.
|
||||
//
|
||||
// Concurrency:
|
||||
// - required* maps are read-only during merkleization.
|
||||
// - siblings/leaves writes are protected by mutex.
|
||||
type proofCollector struct {
|
||||
sync.Mutex
|
||||
|
||||
// Required gindices (registered before merkleization)
|
||||
requiredSiblings map[uint64]struct{}
|
||||
requiredLeaves map[uint64]struct{}
|
||||
|
||||
// Collected hashes
|
||||
siblings map[uint64][32]byte
|
||||
leaves map[uint64][32]byte
|
||||
}
|
||||
|
||||
func newProofCollector() *proofCollector {
|
||||
return &proofCollector{
|
||||
requiredSiblings: make(map[uint64]struct{}),
|
||||
requiredLeaves: make(map[uint64]struct{}),
|
||||
siblings: make(map[uint64][32]byte),
|
||||
leaves: make(map[uint64][32]byte),
|
||||
}
|
||||
}
|
||||
|
||||
func (pc *proofCollector) reset() {
|
||||
pc.Lock()
|
||||
defer pc.Unlock()
|
||||
|
||||
pc.requiredSiblings = make(map[uint64]struct{})
|
||||
pc.requiredLeaves = make(map[uint64]struct{})
|
||||
pc.siblings = make(map[uint64][32]byte)
|
||||
pc.leaves = make(map[uint64][32]byte)
|
||||
}
|
||||
|
||||
// addTarget registers the target leaf and its required sibling nodes for proof construction.
|
||||
// Registration should happen before merkleization begins.
|
||||
func (pc *proofCollector) addTarget(gindex uint64) {
|
||||
pc.Lock()
|
||||
defer pc.Unlock()
|
||||
|
||||
pc.requiredLeaves[gindex] = struct{}{}
|
||||
|
||||
// Walk from the target leaf up to (but not including) the root (gindex=1).
|
||||
// At each step, register the sibling node required to prove inclusion.
|
||||
nodeGindex := gindex
|
||||
for nodeGindex > 1 {
|
||||
siblingGindex := nodeGindex ^ 1 // flip the last bit: left<->right sibling
|
||||
pc.requiredSiblings[siblingGindex] = struct{}{}
|
||||
|
||||
// Move to parent
|
||||
nodeGindex /= 2
|
||||
}
|
||||
}
|
||||
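A worked example of the sibling walk, mirroring TestProofCollector_AddTarget further below: registering gindex 5 records siblings 4 and 3 and stops before the root.

pc := newProofCollector()
pc.addTarget(5)
// requiredLeaves:   {5}
// requiredSiblings: {4, 3}  // 5^1 = 4; parent is 2; 2^1 = 3; the loop stops at the root (gindex 1)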
|
||||
// toProof converts the collected siblings and leaves into a fastssz.Proof structure.
|
||||
// Current behavior expects a single target leaf (single proof).
|
||||
func (pc *proofCollector) toProof() (*fastssz.Proof, error) {
|
||||
pc.Lock()
|
||||
defer pc.Unlock()
|
||||
|
||||
proof := &fastssz.Proof{}
|
||||
if len(pc.leaves) == 0 {
|
||||
return nil, errors.New("no leaves collected: add target leaves before merkleization")
|
||||
}
|
||||
|
||||
leafGindices := make([]uint64, 0, len(pc.leaves))
|
||||
for g := range pc.leaves {
|
||||
leafGindices = append(leafGindices, g)
|
||||
}
|
||||
slices.Sort(leafGindices)
|
||||
|
||||
// single proof resides in leafGindices[0]
|
||||
targetGindex := leafGindices[0]
|
||||
proofIndex, err := math.Int(targetGindex)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("gindex %d overflows int: %w", targetGindex, err)
|
||||
}
|
||||
proof.Index = proofIndex
|
||||
|
||||
// store the leaf
|
||||
leaf := pc.leaves[targetGindex]
|
||||
leafBuf := make([]byte, 32)
|
||||
copy(leafBuf, leaf[:])
|
||||
proof.Leaf = leafBuf
|
||||
|
||||
// Walk from target up to root, collecting siblings.
|
||||
steps := bits.Len64(targetGindex) - 1
|
||||
proof.Hashes = make([][]byte, 0, steps)
|
||||
|
||||
for targetGindex > 1 {
|
||||
sib := targetGindex ^ 1
|
||||
h, ok := pc.siblings[sib]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("missing sibling hash for gindex %d", sib)
|
||||
}
|
||||
proof.Hashes = append(proof.Hashes, h[:])
|
||||
targetGindex /= 2
|
||||
}
|
||||
|
||||
return proof, nil
|
||||
}
|
||||
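A worked example of toProof, mirroring TestProofCollector_ToProof further below; the byte values are illustrative.

pc := newProofCollector()
pc.addTarget(5)
pc.collectLeaf(5, [32]byte{9})
pc.collectSibling(4, [32]byte{4})
pc.collectSibling(3, [32]byte{3})
proof, _ := pc.toProof()
// proof.Index == 5, proof.Leaf == {9, 0, ...},
// proof.Hashes == [{4, ...}, {3, ...}], ordered from the leaf's sibling up toward the root.
_ = proof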
|
||||
// collectLeaf checks if the given gindex is a required leaf for the proof,
|
||||
// and if so, stores the provided leaf hash in the collector.
|
||||
func (pc *proofCollector) collectLeaf(gindex uint64, leaf [32]byte) {
|
||||
if _, ok := pc.requiredLeaves[gindex]; !ok {
|
||||
return
|
||||
}
|
||||
pc.Lock()
|
||||
pc.leaves[gindex] = leaf
|
||||
pc.Unlock()
|
||||
}
|
||||
|
||||
// collectSibling stores the hash for a sibling node identified by gindex.
|
||||
// It only stores the hash if gindex was pre-registered via addTarget (present in requiredSiblings).
|
||||
// Writes to the collected siblings map are protected by the collector mutex.
|
||||
func (pc *proofCollector) collectSibling(gindex uint64, hash [32]byte) {
|
||||
if _, ok := pc.requiredSiblings[gindex]; !ok {
|
||||
return
|
||||
}
|
||||
pc.Lock()
|
||||
pc.siblings[gindex] = hash
|
||||
pc.Unlock()
|
||||
}
|
||||
|
||||
// Merkleizers and proof collection methods
|
||||
|
||||
// merkleize recursively traverses an SszInfo and its reflected value, computing the Merkle root of the subtree.
|
||||
//
|
||||
// Proof collection:
|
||||
// - During traversal it calls collectLeaf/collectSibling with the SSZ generalized indices (gindices)
|
||||
// of visited nodes.
|
||||
// - The collector only stores hashes for gindices that were pre-registered via addTarget
|
||||
// (requiredLeaves/requiredSiblings). This makes the traversal multiproof-ready: you can register
|
||||
// multiple targets before calling merkleize.
|
||||
//
|
||||
// SSZ types handled: basic types, containers, lists, vectors, bitlists, and bitvectors.
|
||||
//
|
||||
// Parameters:
|
||||
// - info: SSZ type metadata for the current value.
|
||||
// - v: reflect.Value of the current value.
|
||||
// - currentGindex: generalized index of the current subtree root.
|
||||
//
|
||||
// Returns:
|
||||
// - [32]byte: Merkle root of the current subtree.
|
||||
// - error: any error encountered during traversal/merkleization.
|
||||
func (pc *proofCollector) merkleize(info *SszInfo, v reflect.Value, currentGindex uint64) ([32]byte, error) {
|
||||
if info.sszType.isBasic() {
|
||||
return pc.merkleizeBasicType(info.sszType, v, currentGindex)
|
||||
}
|
||||
switch info.sszType {
|
||||
case Container:
|
||||
return pc.merkleizeContainer(info, v, currentGindex)
|
||||
case List:
|
||||
return pc.merkleizeList(info, v, currentGindex)
|
||||
case Vector:
|
||||
return pc.merkleizeVector(info, v, currentGindex)
|
||||
case Bitlist:
|
||||
return pc.merkleizeBitlist(info, v, currentGindex)
|
||||
case Bitvector:
|
||||
return pc.merkleizeBitvector(info, v, currentGindex)
|
||||
default:
|
||||
return [32]byte{}, fmt.Errorf("unsupported SSZ type: %v", info.sszType)
|
||||
}
|
||||
}
|
||||
|
||||
// merkleizeBasicType serializes a basic SSZ value into a 32-byte leaf chunk (little-endian, zero-padded).
|
||||
//
|
||||
// Proof collection:
|
||||
// - It calls collectLeaf(currentGindex, leaf) and stores the leaf if currentGindex was pre-registered via addTarget.
|
||||
//
|
||||
// Parameters:
|
||||
// - t: the SSZType (basic).
|
||||
// - v: the reflect.Value of the basic value.
|
||||
// - currentGindex: the generalized index (gindex) of this leaf.
|
||||
//
|
||||
// Returns:
|
||||
// - [32]byte: the 32-byte SSZ leaf chunk.
|
||||
// - error: if the SSZType is not a supported basic type.
|
||||
func (pc *proofCollector) merkleizeBasicType(t SSZType, v reflect.Value, currentGindex uint64) ([32]byte, error) {
|
||||
var leaf [32]byte
|
||||
|
||||
// Serialize the value into a 32-byte chunk (little-endian, zero-padded)
|
||||
switch t {
|
||||
case Uint8:
|
||||
leaf[0] = uint8(v.Uint())
|
||||
case Uint16:
|
||||
binary.LittleEndian.PutUint16(leaf[:2], uint16(v.Uint()))
|
||||
case Uint32:
|
||||
binary.LittleEndian.PutUint32(leaf[:4], uint32(v.Uint()))
|
||||
case Uint64:
|
||||
binary.LittleEndian.PutUint64(leaf[:8], v.Uint())
|
||||
case Boolean:
|
||||
if v.Bool() {
|
||||
leaf[0] = 1
|
||||
}
|
||||
default:
|
||||
return [32]byte{}, fmt.Errorf("unexpected basic type: %v", t)
|
||||
}
|
||||
|
||||
pc.collectLeaf(currentGindex, leaf)
|
||||
|
||||
return leaf, nil
|
||||
}
|
||||
|
||||
// merkleizeContainer computes the Merkle root of an SSZ container by:
|
||||
// 1. Merkleizing each field into a 32-byte subtree root
|
||||
// 2. Merkleizing the field roots into the container root (padding to the next power-of-2)
|
||||
//
|
||||
// Generalized indices (gindices): depth = ssz.Depth(uint64(N)) and field i has gindex = (currentGindex << depth) + uint64(i).
|
||||
// Proof collection: merkleize() computes each field root, merkleizeVectorAndCollect collects required siblings, and collectLeaf stores the container root if registered.
|
||||
//
|
||||
// Parameters:
|
||||
// - info: SSZ type metadata for the container.
|
||||
// - v: reflect.Value of the container value.
|
||||
// - currentGindex: generalized index (gindex) of the container root.
|
||||
//
|
||||
// Returns:
|
||||
// - [32]byte: Merkle root of the container.
|
||||
// - error: any error encountered while merkleizing fields.
|
||||
func (pc *proofCollector) merkleizeContainer(info *SszInfo, v reflect.Value, currentGindex uint64) ([32]byte, error) {
|
||||
// If the container root itself is the target, compute directly and return early.
|
||||
// This avoids full subtree merkleization when we only need the root.
|
||||
if _, ok := pc.requiredLeaves[currentGindex]; ok {
|
||||
root, err := info.HashTreeRoot()
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
pc.collectLeaf(currentGindex, root)
|
||||
return root, nil
|
||||
}
|
||||
|
||||
ci, err := info.ContainerInfo()
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
|
||||
v = dereferencePointer(v)
|
||||
|
||||
// Calculate depth: how many levels from container root to field leaves
|
||||
numFields := len(ci.order)
|
||||
depth := ssz.Depth(uint64(numFields))
|
||||
|
||||
// Step 1: Compute HTR for each subtree (field)
|
||||
fieldRoots := make([][32]byte, numFields)
|
||||
|
||||
for i, name := range ci.order {
|
||||
fieldInfo := ci.fields[name]
|
||||
fieldVal := v.FieldByName(fieldInfo.goFieldName)
|
||||
|
||||
// Field i's gindex: shift currentGindex left by depth, then add the field index (i < 2^depth, so this is equivalent to OR)
|
||||
fieldGindex := currentGindex<<depth + uint64(i)
|
||||
|
||||
htr, err := pc.merkleize(fieldInfo.sszInfo, fieldVal, fieldGindex)
|
||||
if err != nil {
|
||||
return [32]byte{}, fmt.Errorf("field %s: %w", name, err)
|
||||
}
|
||||
fieldRoots[i] = htr
|
||||
}
|
||||
|
||||
// Step 2: Merkleize the field hashes into the container root,
|
||||
// collecting sibling hashes if target is within this subtree
|
||||
root := pc.merkleizeVectorAndCollect(fieldRoots, currentGindex, uint64(depth))
|
||||
|
||||
return root, nil
|
||||
}
|
||||
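A worked gindex computation for container fields; the field count is hypothetical, and the value assumes ssz.Depth(n) returns the ceiling of log2(n).

// Hypothetical field count: a container with 10 fields pads its field roots to 16 leaves.
depth := ssz.Depth(uint64(10))      // 4, assuming Depth(n) == ceil(log2(n))
fieldGindex := uint64(1)<<depth + 3 // field i = 3 under container gindex 1 -> (1 << 4) + 3 = 19
_ = fieldGindex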
|
||||
// merkleizeVectorBody computes the Merkle root of the "data" subtree for vector-like SSZ types
|
||||
// (vectors and the data-part of lists/bitlists).
|
||||
//
|
||||
// Generalized indices (gindices): depth = ssz.Depth(limit); leafBase = subtreeRootGindex << depth; element/chunk i gindex = leafBase + uint64(i).
|
||||
// Proof collection: merkleize() is called for composite elements; merkleizeVectorAndCollect collects required siblings at this layer.
|
||||
// Padding: merkleizeVectorAndCollect uses trie.ZeroHashes as needed.
|
||||
//
|
||||
// Parameters:
|
||||
// - elemInfo: SSZ type metadata for the element.
|
||||
// - v: reflect.Value of the vector/list data.
|
||||
// - length: number of actual elements present.
|
||||
// - limit: virtual leaf capacity used for padding/Depth (fixed length for vectors, limit for lists).
|
||||
// - subtreeRootGindex: gindex of the data subtree root.
|
||||
//
|
||||
// Returns:
|
||||
// - [32]byte: Merkle root of the data subtree.
|
||||
// - error: any error encountered while merkleizing composite elements.
|
||||
func (pc *proofCollector) merkleizeVectorBody(elemInfo *SszInfo, v reflect.Value, length int, limit uint64, subtreeRootGindex uint64) ([32]byte, error) {
|
||||
depth := uint64(ssz.Depth(limit))
|
||||
|
||||
var chunks [][32]byte
|
||||
if elemInfo.sszType.isBasic() {
|
||||
// Serialize basic elements and pack into 32-byte chunks using ssz.PackByChunk.
|
||||
elemSize, err := math.Int(itemLength(elemInfo))
|
||||
if err != nil {
|
||||
return [32]byte{}, fmt.Errorf("element size %d overflows int: %w", itemLength(elemInfo), err)
|
||||
}
|
||||
serialized := make([][]byte, length)
|
||||
// Single contiguous allocation for all element data
|
||||
allData := make([]byte, length*elemSize)
|
||||
for i := range length {
|
||||
buf := allData[i*elemSize : (i+1)*elemSize]
|
||||
elem := v.Index(i)
|
||||
if elemInfo.sszType == Boolean && elem.Bool() {
|
||||
buf[0] = 1
|
||||
} else {
|
||||
bytesutil.PutLittleEndian(buf, elem.Uint(), elemSize)
|
||||
}
|
||||
serialized[i] = buf
|
||||
}
|
||||
chunks, err = ssz.PackByChunk(serialized)
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
} else {
|
||||
// Composite elements: compute each element root (no padding here; merkleizeVectorAndCollect pads).
|
||||
chunks = make([][32]byte, length)
|
||||
|
||||
// Fall back to per-element merkleization with proper gindices for proof collection.
|
||||
// Parallel execution
|
||||
workerCount := min(runtime.GOMAXPROCS(0), length)
|
||||
|
||||
jobs := make(chan int, workerCount*16)
|
||||
errCh := make(chan error, 1) // only need the first error
|
||||
stopCh := make(chan struct{})
|
||||
var stopOnce sync.Once
|
||||
var wg sync.WaitGroup
|
||||
|
||||
worker := func() {
|
||||
defer wg.Done()
|
||||
for idx := range jobs {
|
||||
select {
|
||||
case <-stopCh:
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
elemGindex := subtreeRootGindex<<depth + uint64(idx)
|
||||
htr, err := pc.merkleize(elemInfo, v.Index(idx), elemGindex)
|
||||
if err != nil {
|
||||
stopOnce.Do(func() { close(stopCh) })
|
||||
select {
|
||||
case errCh <- fmt.Errorf("index %d: %w", idx, err):
|
||||
default:
|
||||
}
|
||||
return
|
||||
}
|
||||
chunks[idx] = htr
|
||||
}
|
||||
}
|
||||
|
||||
wg.Add(workerCount)
|
||||
for range workerCount {
|
||||
go worker()
|
||||
}
|
||||
|
||||
// Enqueue jobs; stop early if any worker reports an error.
|
||||
enqueue:
|
||||
for i := range length {
|
||||
select {
|
||||
case <-stopCh:
|
||||
break enqueue
|
||||
case jobs <- i:
|
||||
}
|
||||
}
|
||||
close(jobs)
|
||||
|
||||
wg.Wait()
|
||||
|
||||
select {
|
||||
case err := <-errCh:
|
||||
return [32]byte{}, err
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
root := pc.merkleizeVectorAndCollect(chunks, subtreeRootGindex, depth)
|
||||
return root, nil
|
||||
}
|
||||
|
||||
// merkleizeVector computes the Merkle root of an SSZ vector (fixed-length).
|
||||
//
|
||||
// Generalized indices (gindices): currentGindex is the gindex of the vector root; element/chunk gindices are derived
|
||||
// inside merkleizeVectorBody using leafBase = currentGindex << ssz.Depth(leaves).
|
||||
//
|
||||
// Proof collection: merkleizeVectorBody performs element/chunk merkleization and collects required siblings at the
|
||||
// vector layer; collectLeaf stores the vector root if currentGindex was registered via addTarget.
|
||||
//
|
||||
// Parameters:
|
||||
// - info: SSZ type metadata for the vector.
|
||||
// - v: reflect.Value of the vector value.
|
||||
// - currentGindex: generalized index (gindex) of the vector root.
|
||||
//
|
||||
// Returns:
|
||||
// - [32]byte: Merkle root of the vector.
|
||||
// - error: any error encountered while merkleizing composite elements.
|
||||
func (pc *proofCollector) merkleizeVector(info *SszInfo, v reflect.Value, currentGindex uint64) ([32]byte, error) {
|
||||
vi, err := info.VectorInfo()
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
|
||||
length, err := math.Int(vi.Length())
|
||||
if err != nil {
|
||||
return [32]byte{}, fmt.Errorf("vector length %d overflows int: %w", vi.Length(), err)
|
||||
}
|
||||
elemInfo := vi.element
|
||||
|
||||
// Determine the virtual leaf capacity for the vector.
|
||||
leaves, err := getChunkCount(info)
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
|
||||
root, err := pc.merkleizeVectorBody(elemInfo, v, length, leaves, currentGindex)
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
|
||||
// If the vector root itself is the target
|
||||
pc.collectLeaf(currentGindex, root)
|
||||
|
||||
return root, nil
|
||||
}
|
||||
|
||||
// merkleizeList computes the Merkle root of an SSZ list by merkleizing its data subtree and mixing in the length.
|
||||
//
|
||||
// Generalized indices (gindices): dataRoot is the left child of the list root (dataRootGindex = currentGindex*2); the length mixin is the right child (currentGindex*2+1).
|
||||
// Proof collection: merkleizeVectorBody computes the data root (collecting required siblings in the data subtree), and mixinLengthAndCollect collects required siblings at the length-mixin level; collectLeaf stores the list root if registered.
|
||||
//
|
||||
// Parameters:
|
||||
// - info: SSZ type metadata for the list.
|
||||
// - v: reflect.Value of the list value.
|
||||
// - currentGindex: generalized index (gindex) of the list root.
|
||||
//
|
||||
// Returns:
|
||||
// - [32]byte: Merkle root of the list.
|
||||
// - error: any error encountered while merkleizing the data subtree.
|
||||
func (pc *proofCollector) merkleizeList(info *SszInfo, v reflect.Value, currentGindex uint64) ([32]byte, error) {
|
||||
li, err := info.ListInfo()
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
|
||||
length := v.Len()
|
||||
elemInfo := li.element
|
||||
|
||||
chunks := make([][32]byte, 2)
|
||||
// Compute the length hash (little-endian uint256)
|
||||
binary.LittleEndian.PutUint64(chunks[1][:8], uint64(length))
|
||||
|
||||
// Data subtree root is the left child of the list root.
|
||||
dataRootGindex := currentGindex * 2
|
||||
|
||||
// Compute virtual leaf capacity for the data subtree.
|
||||
leaves, err := getChunkCount(info)
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
|
||||
chunks[0], err = pc.merkleizeVectorBody(elemInfo, v, length, leaves, dataRootGindex)
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
|
||||
// Handle the length mixin level (and proof bookkeeping at this level).
|
||||
// Compute the final list root: hash(dataRoot || lengthHash)
|
||||
root := pc.mixinLengthAndCollect(currentGindex, chunks)
|
||||
|
||||
// If the list root itself is the target
|
||||
pc.collectLeaf(currentGindex, root)
|
||||
|
||||
return root, nil
|
||||
}
|
||||
|
||||
// merkleizeBitvectorBody computes the Merkle root of a bitvector-like byte sequence by packing it into 32-byte chunks
|
||||
// and merkleizing those chunks as a fixed-capacity vector (padding with trie.ZeroHashes as needed).
|
||||
//
|
||||
// Generalized indices (gindices): depth = ssz.Depth(chunkLimit); leafBase = subtreeRootGindex << depth; chunk i uses gindex = leafBase + uint64(i).
|
||||
// Proof collection: merkleizeVectorAndCollect collects required sibling hashes at the chunk-merkleization layer.
|
||||
//
|
||||
// Parameters:
|
||||
// - data: raw byte sequence representing the bitvector payload.
|
||||
// - chunkLimit: fixed/limit number of 32-byte chunks (used for padding/Depth).
|
||||
// - subtreeRootGindex: gindex of the bitvector data subtree root.
|
||||
//
|
||||
// Returns:
|
||||
// - [32]byte: Merkle root of the bitvector data subtree.
|
||||
// - error: any error encountered while packing data into chunks.
|
||||
func (pc *proofCollector) merkleizeBitvectorBody(data []byte, chunkLimit uint64, subtreeRootGindex uint64) ([32]byte, error) {
|
||||
depth := ssz.Depth(chunkLimit)
|
||||
chunks, err := ssz.PackByChunk([][]byte{data})
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
root := pc.merkleizeVectorAndCollect(chunks, subtreeRootGindex, uint64(depth))
|
||||
return root, nil
|
||||
}
|
||||
|
||||
// merkleizeBitvector computes the Merkle root of a fixed-length SSZ bitvector and collects proof nodes for targets.
|
||||
//
|
||||
// Parameters:
|
||||
// - info: SSZ type metadata for the bitvector.
|
||||
// - v: reflect.Value of the bitvector value.
|
||||
// - currentGindex: generalized index (gindex) of the bitvector root.
|
||||
//
|
||||
// Returns:
|
||||
// - [32]byte: Merkle root of the bitvector.
|
||||
// - error: any error encountered during packing or merkleization.
|
||||
func (pc *proofCollector) merkleizeBitvector(info *SszInfo, v reflect.Value, currentGindex uint64) ([32]byte, error) {
|
||||
bitvectorBytes := v.Bytes()
|
||||
if len(bitvectorBytes) == 0 {
|
||||
return [32]byte{}, fmt.Errorf("bitvector field is uninitialized (nil or empty slice)")
|
||||
}
|
||||
|
||||
// Compute virtual leaf capacity for the bitvector.
|
||||
numChunks, err := getChunkCount(info)
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
|
||||
root, err := pc.merkleizeBitvectorBody(bitvectorBytes, numChunks, currentGindex)
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
|
||||
pc.collectLeaf(currentGindex, root)
|
||||
|
||||
return root, nil
|
||||
}
|
||||
|
||||
// merkleizeBitlist computes the Merkle root of an SSZ bitlist by merkleizing its data chunks and mixing in the bit length.
|
||||
//
|
||||
// Generalized indices (gindices): dataRoot is the left child (dataRootGindex = currentGindex*2) and the length mixin is the right child (currentGindex*2+1).
|
||||
// Proof collection: merkleizeBitvectorBody computes the data root (collecting required siblings under dataRootGindex), and mixinLengthAndCollect collects required siblings at the length-mixin level; collectLeaf stores the bitlist root if registered.
|
||||
//
|
||||
// Parameters:
|
||||
// - info: SSZ type metadata for the bitlist.
|
||||
// - v: reflect.Value of the bitlist value.
|
||||
// - currentGindex: generalized index (gindex) of the bitlist root.
|
||||
//
|
||||
// Returns:
|
||||
// - [32]byte: Merkle root of the bitlist.
|
||||
// - error: any error encountered while merkleizing the data subtree.
|
||||
func (pc *proofCollector) merkleizeBitlist(info *SszInfo, v reflect.Value, currentGindex uint64) ([32]byte, error) {
|
||||
bi, err := info.BitlistInfo()
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
|
||||
bitlistBytes := v.Bytes()
|
||||
|
||||
// Use go-bitfield to get bytes with termination bit cleared
|
||||
bl := bitfield.Bitlist(bitlistBytes)
|
||||
data := bl.BytesNoTrim()
|
||||
|
||||
// Get the bit length from bitlistInfo
|
||||
bitLength := bi.Length()
|
||||
|
||||
// Get the chunk limit from getChunkCount
|
||||
limitChunks, err := getChunkCount(info)
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
|
||||
chunks := make([][32]byte, 2)
|
||||
// Compute the length hash (little-endian uint256)
|
||||
binary.LittleEndian.PutUint64(chunks[1][:8], uint64(bitLength))
|
||||
|
||||
dataRootGindex := currentGindex * 2
|
||||
chunks[0], err = pc.merkleizeBitvectorBody(data, limitChunks, dataRootGindex)
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
|
||||
// Handle the length mixin level (and proof bookkeeping at this level).
|
||||
root := pc.mixinLengthAndCollect(currentGindex, chunks)
|
||||
|
||||
pc.collectLeaf(currentGindex, root)
|
||||
|
||||
return root, nil
|
||||
}
|
||||
|
||||
// merkleizeVectorAndCollect merkleizes a slice of 32-byte leaf nodes into a subtree root, padding to a virtual size of 2^depth.
|
||||
//
|
||||
// Generalized indices (gindices): at layer i (0-based), nodes have gindices levelBase = subtreeGeneralizedIndex << (depth-i) and node gindex = levelBase + idx.
|
||||
// Proof collection: for each layer it calls collectSibling(nodeGindex, nodeHash) and stores only those gindices registered via addTarget.
|
||||
//
|
||||
// Parameters:
|
||||
// - elements: leaf-level hashes (may be shorter than 2^depth; padding is applied with trie.ZeroHashes).
|
||||
// - subtreeGeneralizedIndex: gindex of the subtree root.
|
||||
// - depth: number of merkleization layers from subtree root to leaves.
|
||||
//
|
||||
// Returns:
|
||||
// - [32]byte: Merkle root of the subtree.
|
||||
func (pc *proofCollector) merkleizeVectorAndCollect(elements [][32]byte, subtreeGeneralizedIndex uint64, depth uint64) [32]byte {
|
||||
// Return zerohash at depth
|
||||
if len(elements) == 0 {
|
||||
return trie.ZeroHashes[depth]
|
||||
}
|
||||
for i := range depth {
|
||||
layerLen := len(elements)
|
||||
oddNodeLength := layerLen%2 == 1
|
||||
if oddNodeLength {
|
||||
zerohash := trie.ZeroHashes[i]
|
||||
elements = append(elements, zerohash)
|
||||
}
|
||||
|
||||
levelBaseGindex := subtreeGeneralizedIndex << (depth - i)
|
||||
for idx := range elements {
|
||||
gindex := levelBaseGindex + uint64(idx)
|
||||
pc.collectSibling(gindex, elements[idx])
|
||||
pc.collectLeaf(gindex, elements[idx])
|
||||
}
|
||||
|
||||
elements = htr.VectorizedSha256(elements)
|
||||
}
|
||||
return elements[0]
|
||||
}
|
||||
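A worked example of the layer/gindex bookkeeping, mirroring TestProofCollector_MerkleizeVectorAndCollect further below: two leaves under subtree gindex 3 at depth 1 sit at gindices 6 and 7.

pc := newProofCollector()
pc.addTarget(6)
elements := [][32]byte{{1}, {2}}
root := pc.merkleizeVectorAndCollect(elements, 3, 1)
// pc.leaves[6] holds {1, ...} and pc.siblings[7] holds {2, ...};
// root equals ssz.MerkleizeVector over a copy of elements with limit 2.
_ = root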
|
||||
// mixinLengthAndCollect computes the final mix-in root for list/bitlist values:
|
||||
//
|
||||
// root = hash(dataRoot, lengthHash)
|
||||
//
|
||||
// where chunks[0] is dataRoot and chunks[1] is the 32-byte length hash.
|
||||
//
|
||||
// Generalized indices (gindices): dataRoot is the left child (dataRootGindex = currentGindex*2) and lengthHash is the right child (lengthHashGindex = currentGindex*2+1).
|
||||
// Proof collection: it calls collectSibling/collectLeaf for both child gindices; the collector stores them only if they were registered via addTarget.
|
||||
//
|
||||
// Parameters:
|
||||
// - currentGindex: gindex of the parent node (list/bitlist root).
|
||||
// - chunks: two 32-byte nodes: [dataRoot, lengthHash].
|
||||
//
|
||||
// Returns:
// - [32]byte: the mixed-in Merkle root.
|
||||
func (pc *proofCollector) mixinLengthAndCollect(currentGindex uint64, chunks [][32]byte) [32]byte {
|
||||
dataRoot, lengthHash := chunks[0], chunks[1]
|
||||
dataRootGindex, lengthHashGindex := currentGindex*2, currentGindex*2+1
|
||||
|
||||
pc.collectSibling(dataRootGindex, dataRoot)
|
||||
pc.collectSibling(lengthHashGindex, lengthHash)
|
||||
|
||||
pc.collectLeaf(dataRootGindex, dataRoot)
|
||||
pc.collectLeaf(lengthHashGindex, lengthHash)
|
||||
|
||||
return ssz.MixInLength(dataRoot, lengthHash[:])
|
||||
}
|
||||
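For reference, a sketch of how the mix-in can be checked by hand, as TestProofCollector_MixinLengthAndCollect below does; dataRoot and listLen are illustrative placeholders.

var lengthHash [32]byte
binary.LittleEndian.PutUint64(lengthHash[:8], uint64(listLen)) // listLen: hypothetical list length
root := sha256.Sum256(append(dataRoot[:], lengthHash[:]...))   // same result as ssz.MixInLength(dataRoot, lengthHash[:])
_ = root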
531
encoding/ssz/query/proof_collector_test.go
Normal file
@@ -0,0 +1,531 @@
|
||||
package query
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/binary"
|
||||
"reflect"
|
||||
"slices"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/go-bitfield"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stateutil"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
ssz "github.com/OffchainLabs/prysm/v7/encoding/ssz"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
sszquerypb "github.com/OffchainLabs/prysm/v7/proto/ssz_query/testing"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
)
|
||||
|
||||
func TestProofCollector_New(t *testing.T) {
|
||||
pc := newProofCollector()
|
||||
|
||||
require.NotNil(t, pc)
|
||||
require.Equal(t, 0, len(pc.requiredSiblings))
|
||||
require.Equal(t, 0, len(pc.requiredLeaves))
|
||||
require.Equal(t, 0, len(pc.siblings))
|
||||
require.Equal(t, 0, len(pc.leaves))
|
||||
}
|
||||
|
||||
func TestProofCollector_Reset(t *testing.T) {
|
||||
pc := newProofCollector()
|
||||
pc.requiredSiblings[3] = struct{}{}
|
||||
pc.requiredLeaves[5] = struct{}{}
|
||||
pc.siblings[3] = [32]byte{1}
|
||||
pc.leaves[5] = [32]byte{2}
|
||||
|
||||
pc.reset()
|
||||
|
||||
require.Equal(t, 0, len(pc.requiredSiblings))
|
||||
require.Equal(t, 0, len(pc.requiredLeaves))
|
||||
require.Equal(t, 0, len(pc.siblings))
|
||||
require.Equal(t, 0, len(pc.leaves))
|
||||
}
|
||||
|
||||
func TestProofCollector_AddTarget(t *testing.T) {
|
||||
pc := newProofCollector()
|
||||
pc.addTarget(5)
|
||||
|
||||
_, hasLeaf := pc.requiredLeaves[5]
|
||||
_, hasSibling4 := pc.requiredSiblings[4]
|
||||
_, hasSibling3 := pc.requiredSiblings[3]
|
||||
_, hasSibling1 := pc.requiredSiblings[1] // GI 1 is the root
|
||||
|
||||
require.Equal(t, true, hasLeaf)
|
||||
require.Equal(t, true, hasSibling4)
|
||||
require.Equal(t, true, hasSibling3)
|
||||
require.Equal(t, false, hasSibling1)
|
||||
}
|
||||
|
||||
func TestProofCollector_ToProof(t *testing.T) {
|
||||
pc := newProofCollector()
|
||||
pc.addTarget(5)
|
||||
|
||||
leaf := [32]byte{9}
|
||||
sibling4 := [32]byte{4}
|
||||
sibling3 := [32]byte{3}
|
||||
|
||||
pc.collectLeaf(5, leaf)
|
||||
pc.collectSibling(4, sibling4)
|
||||
pc.collectSibling(3, sibling3)
|
||||
|
||||
proof, err := pc.toProof()
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, 5, proof.Index)
|
||||
require.DeepEqual(t, leaf[:], proof.Leaf)
|
||||
require.Equal(t, 2, len(proof.Hashes))
|
||||
require.DeepEqual(t, sibling4[:], proof.Hashes[0])
|
||||
require.DeepEqual(t, sibling3[:], proof.Hashes[1])
|
||||
}
|
||||
|
||||
func TestProofCollector_ToProof_NoLeaves(t *testing.T) {
|
||||
pc := newProofCollector()
|
||||
_, err := pc.toProof()
|
||||
require.NotNil(t, err)
|
||||
}
|
||||
|
||||
func TestProofCollector_CollectLeaf(t *testing.T) {
|
||||
pc := newProofCollector()
|
||||
leaf := [32]byte{7}
|
||||
|
||||
pc.collectLeaf(10, leaf)
|
||||
require.Equal(t, 0, len(pc.leaves))
|
||||
|
||||
pc.addTarget(10)
|
||||
pc.collectLeaf(10, leaf)
|
||||
stored, ok := pc.leaves[10]
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, leaf, stored)
|
||||
}
|
||||
|
||||
func TestProofCollector_CollectSibling(t *testing.T) {
|
||||
pc := newProofCollector()
|
||||
hash := [32]byte{5}
|
||||
|
||||
pc.collectSibling(4, hash)
|
||||
require.Equal(t, 0, len(pc.siblings))
|
||||
|
||||
pc.addTarget(5)
|
||||
pc.collectSibling(4, hash)
|
||||
stored, ok := pc.siblings[4]
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, hash, stored)
|
||||
}
|
||||
|
||||
func TestProofCollector_Merkleize_BasicTypes(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
sszType SSZType
|
||||
value any
|
||||
expected [32]byte
|
||||
}{
|
||||
{
|
||||
name: "uint8",
|
||||
sszType: Uint8,
|
||||
value: uint8(0x11),
|
||||
expected: func() [32]byte {
|
||||
var leaf [32]byte
|
||||
leaf[0] = 0x11
|
||||
return leaf
|
||||
}(),
|
||||
},
|
||||
{
|
||||
name: "uint16",
|
||||
sszType: Uint16,
|
||||
value: uint16(0x2211),
|
||||
expected: func() [32]byte {
|
||||
var leaf [32]byte
|
||||
binary.LittleEndian.PutUint16(leaf[:2], 0x2211)
|
||||
return leaf
|
||||
}(),
|
||||
},
|
||||
{
|
||||
name: "uint32",
|
||||
sszType: Uint32,
|
||||
value: uint32(0x44332211),
|
||||
expected: func() [32]byte {
|
||||
var leaf [32]byte
|
||||
binary.LittleEndian.PutUint32(leaf[:4], 0x44332211)
|
||||
return leaf
|
||||
}(),
|
||||
},
|
||||
{
|
||||
name: "uint64",
|
||||
sszType: Uint64,
|
||||
value: uint64(0x8877665544332211),
|
||||
expected: func() [32]byte {
|
||||
var leaf [32]byte
|
||||
binary.LittleEndian.PutUint64(leaf[:8], 0x8877665544332211)
|
||||
return leaf
|
||||
}(),
|
||||
},
|
||||
{
|
||||
name: "bool",
|
||||
sszType: Boolean,
|
||||
value: true,
|
||||
expected: func() [32]byte {
|
||||
var leaf [32]byte
|
||||
leaf[0] = 1
|
||||
return leaf
|
||||
}(),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
pc := newProofCollector()
|
||||
gindex := uint64(3)
|
||||
pc.addTarget(gindex)
|
||||
|
||||
leaf, err := pc.merkleizeBasicType(tc.sszType, reflect.ValueOf(tc.value), gindex)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tc.expected, leaf)
|
||||
|
||||
stored, ok := pc.leaves[gindex]
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, tc.expected, stored)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestProofCollector_Merkleize_Container(t *testing.T) {
|
||||
container := makeFixedTestContainer()
|
||||
|
||||
info, err := AnalyzeObject(container)
|
||||
require.NoError(t, err)
|
||||
|
||||
pc := newProofCollector()
|
||||
pc.addTarget(1)
|
||||
|
||||
root, err := pc.merkleize(info, reflect.ValueOf(container), 1)
|
||||
require.NoError(t, err)
|
||||
|
||||
expected, err := container.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expected, root)
|
||||
|
||||
stored, ok := pc.leaves[1]
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, expected, stored)
|
||||
}
|
||||
|
||||
func TestProofCollector_Merkleize_Vector(t *testing.T) {
|
||||
container := makeFixedTestContainer()
|
||||
info, err := AnalyzeObject(container)
|
||||
require.NoError(t, err)
|
||||
|
||||
ci, err := info.ContainerInfo()
|
||||
require.NoError(t, err)
|
||||
field := ci.fields["vector_field"]
|
||||
|
||||
pc := newProofCollector()
|
||||
root, err := pc.merkleizeVector(field.sszInfo, reflect.ValueOf(container.VectorField), 1)
|
||||
require.NoError(t, err)
|
||||
|
||||
serialized := make([][]byte, len(container.VectorField))
|
||||
for i, v := range container.VectorField {
|
||||
buf := make([]byte, 8)
|
||||
binary.LittleEndian.PutUint64(buf, v)
|
||||
serialized[i] = buf
|
||||
}
|
||||
chunks, err := ssz.PackByChunk(serialized)
|
||||
require.NoError(t, err)
|
||||
limit, err := getChunkCount(field.sszInfo)
|
||||
require.NoError(t, err)
|
||||
expected := ssz.MerkleizeVector(chunks, limit)
|
||||
|
||||
require.Equal(t, expected, root)
|
||||
}
|
||||
|
||||
func TestProofCollector_Merkleize_List(t *testing.T) {
|
||||
list := []*sszquerypb.FixedNestedContainer{
|
||||
makeFixedNestedContainer(1),
|
||||
makeFixedNestedContainer(2),
|
||||
}
|
||||
container := makeVariableTestContainer(list, bitfield.NewBitlist(1))
|
||||
info, err := AnalyzeObject(container)
|
||||
require.NoError(t, err)
|
||||
|
||||
ci, err := info.ContainerInfo()
|
||||
require.NoError(t, err)
|
||||
field := ci.fields["field_list_container"]
|
||||
|
||||
pc := newProofCollector()
|
||||
root, err := pc.merkleizeList(field.sszInfo, reflect.ValueOf(list), 1)
|
||||
require.NoError(t, err)
|
||||
|
||||
listInfo, err := field.sszInfo.ListInfo()
|
||||
require.NoError(t, err)
|
||||
expected, err := ssz.MerkleizeListSSZ(list, listInfo.Limit())
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, expected, root)
|
||||
}
|
||||
|
||||
func TestProofCollector_Merkleize_Bitvector(t *testing.T) {
|
||||
container := makeFixedTestContainer()
|
||||
info, err := AnalyzeObject(container)
|
||||
require.NoError(t, err)
|
||||
|
||||
ci, err := info.ContainerInfo()
|
||||
require.NoError(t, err)
|
||||
field := ci.fields["bitvector64_field"]
|
||||
|
||||
pc := newProofCollector()
|
||||
root, err := pc.merkleizeBitvector(field.sszInfo, reflect.ValueOf(container.Bitvector64Field), 1)
|
||||
require.NoError(t, err)
|
||||
|
||||
expected, err := ssz.MerkleizeByteSliceSSZ([]byte(container.Bitvector64Field))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expected, root)
|
||||
}
|
||||
|
||||
func TestProofCollector_Merkleize_Bitlist(t *testing.T) {
|
||||
bitlist := bitfield.NewBitlist(16)
|
||||
bitlist.SetBitAt(3, true)
|
||||
bitlist.SetBitAt(8, true)
|
||||
|
||||
container := makeVariableTestContainer(nil, bitlist)
|
||||
info, err := AnalyzeObject(container)
|
||||
require.NoError(t, err)
|
||||
|
||||
ci, err := info.ContainerInfo()
|
||||
require.NoError(t, err)
|
||||
field := ci.fields["bitlist_field"]
|
||||
|
||||
pc := newProofCollector()
|
||||
root, err := pc.merkleizeBitlist(field.sszInfo, reflect.ValueOf(container.BitlistField), 1)
|
||||
require.NoError(t, err)
|
||||
|
||||
bitlistInfo, err := field.sszInfo.BitlistInfo()
|
||||
require.NoError(t, err)
|
||||
expected, err := ssz.BitlistRoot(bitfield.Bitlist(bitlist), bitlistInfo.Limit())
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expected, root)
|
||||
}
|
||||
|
||||
func TestProofCollector_MerkleizeVectorBody_Basic(t *testing.T) {
|
||||
container := makeFixedTestContainer()
|
||||
info, err := AnalyzeObject(container)
|
||||
require.NoError(t, err)
|
||||
|
||||
ci, err := info.ContainerInfo()
|
||||
require.NoError(t, err)
|
||||
field := ci.fields["vector_field"]
|
||||
vectorInfo, err := field.sszInfo.VectorInfo()
|
||||
require.NoError(t, err)
|
||||
length := len(container.VectorField)
|
||||
limit, err := getChunkCount(field.sszInfo)
|
||||
require.NoError(t, err)
|
||||
|
||||
pc := newProofCollector()
|
||||
root, err := pc.merkleizeVectorBody(vectorInfo.element, reflect.ValueOf(container.VectorField), length, limit, 2)
|
||||
require.NoError(t, err)
|
||||
|
||||
serialized := make([][]byte, len(container.VectorField))
|
||||
for i, v := range container.VectorField {
|
||||
buf := make([]byte, 8)
|
||||
binary.LittleEndian.PutUint64(buf, v)
|
||||
serialized[i] = buf
|
||||
}
|
||||
chunks, err := ssz.PackByChunk(serialized)
|
||||
require.NoError(t, err)
|
||||
expected := ssz.MerkleizeVector(chunks, limit)
|
||||
|
||||
require.Equal(t, expected, root)
|
||||
}
|
||||
|
||||
func TestProofCollector_MerkleizeVectorAndCollect(t *testing.T) {
|
||||
pc := newProofCollector()
|
||||
pc.addTarget(6)
|
||||
|
||||
elements := [][32]byte{{1}, {2}}
|
||||
expected := ssz.MerkleizeVector(slices.Clone(elements), 2)
|
||||
root := pc.merkleizeVectorAndCollect(elements, 3, 1)
|
||||
|
||||
storedLeaf, hasLeaf := pc.leaves[6]
|
||||
storedSibling, hasSibling := pc.siblings[7]
|
||||
|
||||
require.Equal(t, true, hasLeaf)
|
||||
require.Equal(t, true, hasSibling)
|
||||
require.Equal(t, elements[0], storedLeaf)
|
||||
require.Equal(t, elements[1], storedSibling)
|
||||
|
||||
require.Equal(t, expected, root)
|
||||
}
|
||||
|
||||
func TestProofCollector_MixinLengthAndCollect(t *testing.T) {
|
||||
list := []*sszquerypb.FixedNestedContainer{
|
||||
makeFixedNestedContainer(1),
|
||||
makeFixedNestedContainer(2),
|
||||
}
|
||||
container := makeVariableTestContainer(list, bitfield.NewBitlist(1))
|
||||
info, err := AnalyzeObject(container)
|
||||
require.NoError(t, err)
|
||||
|
||||
ci, err := info.ContainerInfo()
|
||||
require.NoError(t, err)
|
||||
field := ci.fields["field_list_container"]
|
||||
|
||||
// Target gindex 2 (data root) - sibling at gindex 3 (length hash) should be collected
|
||||
pc := newProofCollector()
|
||||
pc.addTarget(2)
|
||||
root, err := pc.merkleizeList(field.sszInfo, reflect.ValueOf(list), 1)
|
||||
require.NoError(t, err)
|
||||
|
||||
listInfo, err := field.sszInfo.ListInfo()
|
||||
require.NoError(t, err)
|
||||
expected, err := ssz.MerkleizeListSSZ(list, listInfo.Limit())
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expected, root)
|
||||
|
||||
// Verify data root is collected as leaf at gindex 2
|
||||
storedLeaf, hasLeaf := pc.leaves[2]
|
||||
require.Equal(t, true, hasLeaf)
|
||||
|
||||
// Verify length hash is collected as sibling at gindex 3
|
||||
storedSibling, hasSibling := pc.siblings[3]
|
||||
require.Equal(t, true, hasSibling)
|
||||
|
||||
// Verify the root is hash(dataRoot || lengthHash)
|
||||
expectedBuf := append(storedLeaf[:], storedSibling[:]...)
|
||||
expectedRoot := sha256.Sum256(expectedBuf)
|
||||
require.Equal(t, expectedRoot, root)
|
||||
}
|
||||
|
||||
func BenchmarkOptimizedValidatorRoots(b *testing.B) {
|
||||
validators := make([]*ethpb.Validator, 1000)
|
||||
for i := range validators {
|
||||
validators[i] = makeTestValidator(i)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for b.Loop() {
|
||||
_, err := stateutil.OptimizedValidatorRoots(validators)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkProofCollectorMerkleize(b *testing.B) {
|
||||
validators := make([]*ethpb.Validator, 1000)
|
||||
for i := range validators {
|
||||
validators[i] = makeTestValidator(i)
|
||||
}
|
||||
|
||||
info, err := AnalyzeObject(validators[0])
|
||||
require.NoError(b, err)
|
||||
|
||||
b.ResetTimer()
|
||||
for b.Loop() {
|
||||
for _, val := range validators {
|
||||
pc := newProofCollector()
|
||||
v := reflect.ValueOf(val)
|
||||
_, err := pc.merkleize(info, v, 1)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func makeTestValidator(i int) *ethpb.Validator {
|
||||
pubkey := make([]byte, 48)
|
||||
for j := range pubkey {
|
||||
pubkey[j] = byte(i + j)
|
||||
}
|
||||
|
||||
withdrawalCredentials := make([]byte, 32)
|
||||
for j := range withdrawalCredentials {
|
||||
withdrawalCredentials[j] = byte(255 - ((i + j) % 256))
|
||||
}
|
||||
|
||||
return ðpb.Validator{
|
||||
PublicKey: pubkey,
|
||||
WithdrawalCredentials: withdrawalCredentials,
|
||||
EffectiveBalance: uint64(32000000000 + i),
|
||||
Slashed: i%2 == 0,
|
||||
ActivationEligibilityEpoch: primitives.Epoch(i),
|
||||
ActivationEpoch: primitives.Epoch(i + 1),
|
||||
ExitEpoch: primitives.Epoch(i + 2),
|
||||
WithdrawableEpoch: primitives.Epoch(i + 3),
|
||||
}
|
||||
}
|
||||
|
||||
func makeFixedNestedContainer(value uint64) *sszquerypb.FixedNestedContainer {
|
||||
value2 := make([]byte, 32)
|
||||
for i := range value2 {
|
||||
value2[i] = byte(i)
|
||||
}
|
||||
return &sszquerypb.FixedNestedContainer{
|
||||
Value1: value,
|
||||
Value2: value2,
|
||||
}
|
||||
}
|
||||
|
||||
func makeFixedTestContainer() *sszquerypb.FixedTestContainer {
|
||||
fieldBytes32 := make([]byte, 32)
|
||||
for i := range fieldBytes32 {
|
||||
fieldBytes32[i] = byte(i)
|
||||
}
|
||||
|
||||
vectorField := make([]uint64, 24)
|
||||
for i := range vectorField {
|
||||
vectorField[i] = uint64(i)
|
||||
}
|
||||
|
||||
rows := make([][]byte, 5)
|
||||
for i := range rows {
|
||||
row := make([]byte, 32)
|
||||
for j := range row {
|
||||
row[j] = byte(i) + byte(j)
|
||||
}
|
||||
rows[i] = row
|
||||
}
|
||||
|
||||
bitvector64 := bitfield.NewBitvector64()
|
||||
bitvector64.SetBitAt(1, true)
|
||||
bitvector512 := bitfield.NewBitvector512()
|
||||
bitvector512.SetBitAt(10, true)
|
||||
|
||||
trailing := make([]byte, 56)
|
||||
for i := range trailing {
|
||||
trailing[i] = byte(i)
|
||||
}
|
||||
|
||||
return &sszquerypb.FixedTestContainer{
|
||||
FieldUint32: 1,
|
||||
FieldUint64: 2,
|
||||
FieldBool: true,
|
||||
FieldBytes32: fieldBytes32,
|
||||
Nested: makeFixedNestedContainer(3),
|
||||
VectorField: vectorField,
|
||||
TwoDimensionBytesField: rows,
|
||||
Bitvector64Field: bitvector64,
|
||||
Bitvector512Field: bitvector512,
|
||||
TrailingField: trailing,
|
||||
}
|
||||
}
|
||||
|
||||
func makeVariableTestContainer(list []*sszquerypb.FixedNestedContainer, bitlist bitfield.Bitlist) *sszquerypb.VariableTestContainer {
|
||||
leading := make([]byte, 32)
|
||||
for i := range leading {
|
||||
leading[i] = byte(i)
|
||||
}
|
||||
trailing := make([]byte, 56)
|
||||
for i := range trailing {
|
||||
trailing[i] = byte(255 - i)
|
||||
}
|
||||
|
||||
if bitlist == nil {
|
||||
bitlist = bitfield.NewBitlist(0)
|
||||
}
|
||||
|
||||
return &sszquerypb.VariableTestContainer{
|
||||
LeadingField: leading,
|
||||
FieldListContainer: list,
|
||||
BitlistField: bitlist,
|
||||
TrailingField: trailing,
|
||||
}
|
||||
}
|
||||
@@ -389,6 +389,7 @@ func TestHashTreeRoot(t *testing.T) {
|
||||
require.NoError(t, err, "HashTreeRoot should not return an error")
|
||||
expectedHashTreeRoot, err := tt.obj.HashTreeRoot()
|
||||
require.NoError(t, err, "HashTreeRoot on original object should not return an error")
|
||||
// Verify the Merkle tree root matches with the SSZ generated HashTreeRoot
|
||||
require.Equal(t, expectedHashTreeRoot, hashTreeRoot, "HashTreeRoot from sszInfo should match original object's HashTreeRoot")
|
||||
})
|
||||
}
|
||||
|
||||
@@ -202,7 +202,6 @@ go_test(
|
||||
"fulu__ssz_static__ssz_static_test.go",
|
||||
"gloas__epoch_processing__process_builder_pending_payments_test.go",
|
||||
"gloas__operations__execution_payload_header_test.go",
|
||||
"gloas__operations__execution_payload_test.go",
|
||||
"gloas__operations__payload_attestation_test.go",
|
||||
"gloas__operations__proposer_slashing_test.go",
|
||||
"gloas__sanity__slots_test.go",
|
||||
|
||||
@@ -1,11 +0,0 @@
|
||||
package mainnet
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/testing/spectest/shared/gloas/operations"
|
||||
)
|
||||
|
||||
func TestMainnet_Gloas_Operations_ExecutionPayloadEnvelope(t *testing.T) {
|
||||
operations.RunExecutionPayloadTest(t, "mainnet")
|
||||
}
|
||||
@@ -208,7 +208,6 @@ go_test(
|
||||
"fulu__ssz_static__ssz_static_test.go",
|
||||
"gloas__epoch_processing__process_builder_pending_payments_test.go",
|
||||
"gloas__operations__execution_payload_bid_test.go",
|
||||
"gloas__operations__execution_payload_test.go",
|
||||
"gloas__operations__payload_attestation_test.go",
|
||||
"gloas__operations__proposer_slashing_test.go",
|
||||
"gloas__sanity__slots_test.go",
|
||||
|
||||
@@ -1,11 +0,0 @@
|
||||
package minimal
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/testing/spectest/shared/gloas/operations"
|
||||
)
|
||||
|
||||
func TestMinimal_Gloas_Operations_ExecutionPayloadEnvelope(t *testing.T) {
|
||||
operations.RunExecutionPayloadTest(t, "minimal")
|
||||
}
|
||||
@@ -4,7 +4,6 @@ go_library(
|
||||
name = "go_default_library",
|
||||
testonly = True,
|
||||
srcs = [
|
||||
"execution_payload.go",
|
||||
"execution_payload_bid.go",
|
||||
"helpers.go",
|
||||
"payload_attestation.go",
|
||||
@@ -13,23 +12,12 @@ go_library(
|
||||
importpath = "github.com/OffchainLabs/prysm/v7/testing/spectest/shared/gloas/operations",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/core/gloas:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/spectest/shared/common/operations:go_default_library",
|
||||
"//testing/spectest/utils:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"@com_github_golang_snappy//:go_default_library",
|
||||
"@com_github_google_go_cmp//cmp:go_default_library",
|
||||
"@io_bazel_rules_go//go/tools/bazel:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
"@org_golang_google_protobuf//testing/protocmp:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -1,123 +0,0 @@
|
||||
package operations
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/spectest/utils"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/util"
|
||||
"github.com/bazelbuild/rules_go/go/tools/bazel"
|
||||
"github.com/golang/snappy"
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/testing/protocmp"
|
||||
)
|
||||
|
||||
type ExecutionConfig struct {
|
||||
Valid bool `json:"execution_valid"`
|
||||
}
|
||||
|
||||
func sszToSignedExecutionPayloadEnvelope(b []byte) (interfaces.ROSignedExecutionPayloadEnvelope, error) {
|
||||
envelope := ðpb.SignedExecutionPayloadEnvelope{}
|
||||
if err := envelope.UnmarshalSSZ(b); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return blocks.WrappedROSignedExecutionPayloadEnvelope(envelope)
|
||||
}
|
||||
|
||||
func RunExecutionPayloadTest(t *testing.T, config string) {
|
||||
require.NoError(t, utils.SetConfig(t, config))
|
||||
cfg := params.BeaconConfig()
|
||||
params.SetGenesisFork(t, cfg, version.Fulu)
|
||||
testFolders, testsFolderPath := utils.TestFolders(t, config, "gloas", "operations/execution_payload/pyspec_tests")
|
||||
if len(testFolders) == 0 {
|
||||
t.Fatalf("No test folders found for %s/%s/%s", config, "gloas", "operations/execution_payload/pyspec_tests")
|
||||
}
|
||||
for _, folder := range testFolders {
|
||||
t.Run(folder.Name(), func(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
|
||||
// Check if signed_envelope.ssz_snappy exists, skip if not
|
||||
_, err := bazel.Runfile(path.Join(testsFolderPath, folder.Name(), "signed_envelope.ssz_snappy"))
|
||||
if err != nil && strings.Contains(err.Error(), "could not locate file") {
|
||||
t.Skipf("Skipping test %s: signed_envelope.ssz_snappy not found", folder.Name())
|
||||
return
|
||||
}
|
||||
|
||||
// Read the signed execution payload envelope
|
||||
envelopeFile, err := util.BazelFileBytes(testsFolderPath, folder.Name(), "signed_envelope.ssz_snappy")
|
||||
require.NoError(t, err)
|
||||
envelopeSSZ, err := snappy.Decode(nil /* dst */, envelopeFile)
|
||||
require.NoError(t, err, "Failed to decompress envelope")
|
||||
signedEnvelope, err := sszToSignedExecutionPayloadEnvelope(envelopeSSZ)
|
||||
require.NoError(t, err, "Failed to unmarshal signed envelope")
|
||||
|
||||
preBeaconStateFile, err := util.BazelFileBytes(testsFolderPath, folder.Name(), "pre.ssz_snappy")
|
||||
require.NoError(t, err)
|
||||
preBeaconStateSSZ, err := snappy.Decode(nil /* dst */, preBeaconStateFile)
|
||||
require.NoError(t, err, "Failed to decompress")
|
||||
preBeaconState, err := sszToState(preBeaconStateSSZ)
|
||||
require.NoError(t, err)
|
||||
|
||||
postSSZFilepath, err := bazel.Runfile(path.Join(testsFolderPath, folder.Name(), "post.ssz_snappy"))
|
||||
postSSZExists := true
|
||||
if err != nil && strings.Contains(err.Error(), "could not locate file") {
|
||||
postSSZExists = false
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
file, err := util.BazelFileBytes(testsFolderPath, folder.Name(), "execution.yaml")
|
||||
require.NoError(t, err)
|
||||
config := &ExecutionConfig{}
|
||||
require.NoError(t, utils.UnmarshalYaml(file, config), "Failed to Unmarshal")
|
||||
if !config.Valid {
|
||||
t.Skip("Skipping invalid execution engine test as it's never supported")
|
||||
}
|
||||
|
||||
err = gloas.ProcessExecutionPayload(context.Background(), preBeaconState, signedEnvelope)
|
||||
if postSSZExists {
|
||||
require.NoError(t, err)
|
||||
comparePostState(t, postSSZFilepath, preBeaconState)
|
||||
} else if config.Valid {
|
||||
// Note: This doesn't test anything worthwhile. It essentially tests
|
||||
// that *any* error has occurred, not any specific error.
|
||||
if err == nil {
|
||||
t.Fatal("Did not fail when expected")
|
||||
}
|
||||
t.Logf("Expected failure; failure reason = %v", err)
|
||||
return
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func comparePostState(t *testing.T, postSSZFilepath string, want state.BeaconState) {
|
||||
postBeaconStateFile, err := os.ReadFile(postSSZFilepath) // #nosec G304
|
||||
require.NoError(t, err)
|
||||
postBeaconStateSSZ, err := snappy.Decode(nil /* dst */, postBeaconStateFile)
|
||||
require.NoError(t, err, "Failed to decompress")
|
||||
postBeaconState, err := sszToState(postBeaconStateSSZ)
|
||||
require.NoError(t, err)
|
||||
postBeaconStatePb, ok := postBeaconState.ToProtoUnsafe().(proto.Message)
|
||||
require.Equal(t, true, ok, "post beacon state did not return a proto.Message")
|
||||
pbState, ok := want.ToProtoUnsafe().(proto.Message)
|
||||
require.Equal(t, true, ok, "beacon state did not return a proto.Message")
|
||||
|
||||
if !proto.Equal(postBeaconStatePb, pbState) {
|
||||
diff := cmp.Diff(pbState, postBeaconStatePb, protocmp.Transform())
|
||||
t.Fatalf("Post state does not match expected state, diff: %s", diff)
|
||||
}
|
||||
}
|
||||
12  testing/validator-mock/validator_client_mock.go  generated
@@ -283,16 +283,16 @@ func (mr *MockValidatorClientMockRecorder) ProposeExit(ctx, in any) *gomock.Call
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ProposeExit", reflect.TypeOf((*MockValidatorClient)(nil).ProposeExit), ctx, in)
}

// SetHost mocks base method.
func (m *MockValidatorClient) SetHost(host string) {
// SwitchHost mocks base method.
func (m *MockValidatorClient) SwitchHost(host string) {
	m.ctrl.T.Helper()
	m.ctrl.Call(m, "SetHost", host)
	m.ctrl.Call(m, "SwitchHost", host)
}

// SetHost indicates an expected call of SetHost.
func (mr *MockValidatorClientMockRecorder) SetHost(host any) *gomock.Call {
// SwitchHost indicates an expected call of SwitchHost.
func (mr *MockValidatorClientMockRecorder) SwitchHost(host any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHost", reflect.TypeOf((*MockValidatorClient)(nil).SetHost), host)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SwitchHost", reflect.TypeOf((*MockValidatorClient)(nil).SwitchHost), host)
}

// StartEventStream mocks base method.
@@ -1,7 +1,7 @@
|
||||
diff -urN a/BUILD.bazel b/BUILD.bazel
|
||||
--- a/BUILD.bazel 1969-12-31 18:00:00.000000000 -0600
|
||||
+++ b/BUILD.bazel 2025-01-05 12:00:00.000000000 -0600
|
||||
@@ -0,0 +1,89 @@
|
||||
@@ -0,0 +1,90 @@
|
||||
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
+
|
||||
+go_library(
|
||||
@@ -32,6 +32,7 @@ diff -urN a/BUILD.bazel b/BUILD.bazel
|
||||
+ ],
|
||||
+ "@io_bazel_rules_go//go/platform:darwin_amd64": [
|
||||
+ "bindings_darwin_amd64.go",
|
||||
+ "wrapper_darwin_amd64.s",
|
||||
+ ],
|
||||
+ "//conditions:default": [],
|
||||
+ }),
|
||||
|
||||
@@ -25,6 +25,7 @@ go_library(
|
||||
],
|
||||
deps = [
|
||||
"//api/grpc:go_default_library",
|
||||
"//api/rest:go_default_library",
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//cmd/validator/flags:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
|
||||
@@ -3,14 +3,13 @@ package accounts
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
grpcutil "github.com/OffchainLabs/prysm/v7/api/grpc"
|
||||
"github.com/OffchainLabs/prysm/v7/api/rest"
|
||||
"github.com/OffchainLabs/prysm/v7/crypto/bls"
|
||||
"github.com/OffchainLabs/prysm/v7/validator/accounts/wallet"
|
||||
beaconApi "github.com/OffchainLabs/prysm/v7/validator/client/beacon-api"
|
||||
iface "github.com/OffchainLabs/prysm/v7/validator/client/iface"
|
||||
nodeClientFactory "github.com/OffchainLabs/prysm/v7/validator/client/node-client-factory"
|
||||
validatorClientFactory "github.com/OffchainLabs/prysm/v7/validator/client/validator-client-factory"
|
||||
@@ -77,22 +76,17 @@ func (acm *CLIManager) prepareBeaconClients(ctx context.Context) (*iface.Validat
	}

	ctx = grpcutil.AppendHeaders(ctx, acm.grpcHeaders)
	grpcConn, err := grpc.DialContext(ctx, acm.beaconRPCProvider, acm.dialOpts...)
	if err != nil {
		return nil, nil, errors.Wrapf(err, "could not dial endpoint %s", acm.beaconRPCProvider)
	}
	conn := validatorHelpers.NewNodeConnection(
		grpcConn,
		acm.beaconApiEndpoint,
		validatorHelpers.WithBeaconApiTimeout(acm.beaconApiTimeout),
	)

	restHandler := beaconApi.NewBeaconApiRestHandler(
		http.Client{Timeout: acm.beaconApiTimeout},
		acm.beaconApiEndpoint,
	conn, err := validatorHelpers.NewNodeConnection(
		validatorHelpers.WithGRPC(ctx, acm.beaconRPCProvider, acm.dialOpts),
		validatorHelpers.WithREST(acm.beaconApiEndpoint, rest.WithHttpTimeout(acm.beaconApiTimeout)),
	)
	validatorClient := validatorClientFactory.NewValidatorClient(conn, restHandler)
	nodeClient := nodeClientFactory.NewNodeClient(conn, restHandler)
	if err != nil {
		return nil, nil, err
	}

	validatorClient := validatorClientFactory.NewValidatorClient(conn)
	nodeClient := nodeClientFactory.NewNodeClient(conn)

	return &validatorClient, &nodeClient, nil
}
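Side note (not part of the diff): the hunk above replaces a hand-rolled grpc.DialContext plus a separate REST handler with a single option-based constructor. A minimal hypothetical sketch of the same pattern, with placeholder values where this diff uses CLIManager fields; the option and factory names are taken from the hunk above:

	// Sketch only; ctx, grpcDialOpts and restTimeout are placeholders.
	conn, err := validatorHelpers.NewNodeConnection(
		validatorHelpers.WithGRPC(ctx, "127.0.0.1:4000", grpcDialOpts),
		validatorHelpers.WithREST("http://127.0.0.1:3500", rest.WithHttpTimeout(restTimeout)),
	)
	if err != nil {
		return err
	}
	// Clients are derived from the single NodeConnection instead of a raw
	// *grpc.ClientConn plus a separately constructed REST handler.
	validatorClient := validatorClientFactory.NewValidatorClient(conn)
	nodeClient := nodeClientFactory.NewNodeClient(conn)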
@@ -10,7 +10,6 @@ go_library(
|
||||
"log.go",
|
||||
"log_helpers.go",
|
||||
"metrics.go",
|
||||
"multiple_endpoints_grpc_resolver.go",
|
||||
"propose.go",
|
||||
"registration.go",
|
||||
"runner.go",
|
||||
@@ -29,6 +28,7 @@ go_library(
|
||||
"//api/client:go_default_library",
|
||||
"//api/client/event:go_default_library",
|
||||
"//api/grpc:go_default_library",
|
||||
"//api/rest:go_default_library",
|
||||
"//api/server/structs:go_default_library",
|
||||
"//async:go_default_library",
|
||||
"//async/event:go_default_library",
|
||||
@@ -58,7 +58,6 @@ go_library(
|
||||
"//time/slots:go_default_library",
|
||||
"//validator/accounts/iface:go_default_library",
|
||||
"//validator/accounts/wallet:go_default_library",
|
||||
"//validator/client/beacon-api:go_default_library",
|
||||
"//validator/client/beacon-chain-client-factory:go_default_library",
|
||||
"//validator/client/iface:go_default_library",
|
||||
"//validator/client/node-client-factory:go_default_library",
|
||||
@@ -86,13 +85,11 @@ go_library(
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@io_opentelemetry_go_contrib_instrumentation_google_golang_org_grpc_otelgrpc//:go_default_library",
|
||||
"@io_opentelemetry_go_contrib_instrumentation_net_http_otelhttp//:go_default_library",
|
||||
"@io_opentelemetry_go_otel_trace//:go_default_library",
|
||||
"@org_golang_google_grpc//:go_default_library",
|
||||
"@org_golang_google_grpc//codes:go_default_library",
|
||||
"@org_golang_google_grpc//credentials:go_default_library",
|
||||
"@org_golang_google_grpc//metadata:go_default_library",
|
||||
"@org_golang_google_grpc//resolver:go_default_library",
|
||||
"@org_golang_google_grpc//status:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
|
||||
@@ -124,6 +121,8 @@ go_test(
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//api/grpc:go_default_library",
|
||||
"//api/rest:go_default_library",
|
||||
"//api/server/structs:go_default_library",
|
||||
"//async/event:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
|
||||
@@ -26,7 +26,6 @@ go_library(
|
||||
"propose_exit.go",
|
||||
"prysm_beacon_chain_client.go",
|
||||
"registration.go",
|
||||
"rest_handler_client.go",
|
||||
"state_validators.go",
|
||||
"status.go",
|
||||
"stream_blocks.go",
|
||||
@@ -43,6 +42,7 @@ go_library(
|
||||
"//api:go_default_library",
|
||||
"//api/apiutil:go_default_library",
|
||||
"//api/client/event:go_default_library",
|
||||
"//api/rest:go_default_library",
|
||||
"//api/server/structs:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
@@ -111,6 +111,7 @@ go_test(
|
||||
deps = [
|
||||
"//api:go_default_library",
|
||||
"//api/apiutil:go_default_library",
|
||||
"//api/rest:go_default_library",
|
||||
"//api/server/structs:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/rpc/eth/shared/testing:go_default_library",
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"reflect"
|
||||
"strconv"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/api/rest"
|
||||
"github.com/OffchainLabs/prysm/v7/api/server/structs"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
@@ -17,7 +18,7 @@ import (
|
||||
|
||||
type beaconApiChainClient struct {
|
||||
fallbackClient iface.ChainClient
|
||||
jsonRestHandler RestHandler
|
||||
jsonRestHandler rest.RestHandler
|
||||
stateValidatorsProvider StateValidatorsProvider
|
||||
}
|
||||
|
||||
@@ -327,7 +328,7 @@ func (c beaconApiChainClient) ValidatorParticipation(ctx context.Context, in *et
|
||||
return nil, errors.New("beaconApiChainClient.ValidatorParticipation is not implemented. To use a fallback client, pass a fallback client as the last argument of NewBeaconApiChainClientWithFallback.")
|
||||
}
|
||||
|
||||
func NewBeaconApiChainClientWithFallback(jsonRestHandler RestHandler, fallbackClient iface.ChainClient) iface.ChainClient {
|
||||
func NewBeaconApiChainClientWithFallback(jsonRestHandler rest.RestHandler, fallbackClient iface.ChainClient) iface.ChainClient {
|
||||
return &beaconApiChainClient{
|
||||
jsonRestHandler: jsonRestHandler,
|
||||
fallbackClient: fallbackClient,
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"net/http"
|
||||
"strconv"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/api/rest"
|
||||
"github.com/OffchainLabs/prysm/v7/api/server/structs"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/validator/client/iface"
|
||||
@@ -20,7 +21,7 @@ var (
|
||||
|
||||
type beaconApiNodeClient struct {
|
||||
fallbackClient iface.NodeClient
|
||||
jsonRestHandler RestHandler
|
||||
jsonRestHandler rest.RestHandler
|
||||
genesisProvider GenesisProvider
|
||||
}
|
||||
|
||||
@@ -115,7 +116,7 @@ func (c *beaconApiNodeClient) IsReady(ctx context.Context) bool {
|
||||
return statusCode == http.StatusOK
|
||||
}
|
||||
|
||||
func NewNodeClientWithFallback(jsonRestHandler RestHandler, fallbackClient iface.NodeClient) iface.NodeClient {
|
||||
func NewNodeClientWithFallback(jsonRestHandler rest.RestHandler, fallbackClient iface.NodeClient) iface.NodeClient {
|
||||
b := &beaconApiNodeClient{
|
||||
jsonRestHandler: jsonRestHandler,
|
||||
fallbackClient: fallbackClient,
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/api/client/event"
|
||||
"github.com/OffchainLabs/prysm/v7/api/rest"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
|
||||
@@ -22,13 +23,13 @@ type beaconApiValidatorClient struct {
|
||||
genesisProvider GenesisProvider
|
||||
dutiesProvider dutiesProvider
|
||||
stateValidatorsProvider StateValidatorsProvider
|
||||
jsonRestHandler RestHandler
|
||||
jsonRestHandler rest.RestHandler
|
||||
beaconBlockConverter BeaconBlockConverter
|
||||
prysmChainClient iface.PrysmChainClient
|
||||
isEventStreamRunning bool
|
||||
}
|
||||
|
||||
func NewBeaconApiValidatorClient(jsonRestHandler RestHandler, opts ...ValidatorClientOpt) iface.ValidatorClient {
|
||||
func NewBeaconApiValidatorClient(jsonRestHandler rest.RestHandler, opts ...ValidatorClientOpt) iface.ValidatorClient {
|
||||
c := &beaconApiValidatorClient{
|
||||
genesisProvider: &beaconApiGenesisProvider{jsonRestHandler: jsonRestHandler},
|
||||
dutiesProvider: beaconApiDutiesProvider{jsonRestHandler: jsonRestHandler},
|
||||
@@ -331,6 +332,6 @@ func (c *beaconApiValidatorClient) Host() string {
	return c.jsonRestHandler.Host()
}

func (c *beaconApiValidatorClient) SetHost(host string) {
	c.jsonRestHandler.SetHost(host)
func (c *beaconApiValidatorClient) SwitchHost(host string) {
	c.jsonRestHandler.SwitchHost(host)
}
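With SetHost renamed to SwitchHost, calling code can rotate between configured beacon node endpoints. A hedged sketch of such a loop (hosts, doRequest and the retry policy are illustrative assumptions; SwitchHost and Host come from this diff):

	// Sketch only: hypothetical failover around a beacon-API validator client.
	for attempt := 0; attempt < len(hosts); attempt++ {
		if err := doRequest(ctx, validatorClient); err == nil {
			break // current host answered
		}
		next := hosts[(attempt+1)%len(hosts)]
		log.WithField("host", next).Warn("Request failed, switching beacon node host")
		validatorClient.SwitchHost(next)
	}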
@@ -549,7 +549,7 @@ func TestBeaconApiValidatorClient_Host(t *testing.T) {
|
||||
|
||||
hosts := []string{"http://localhost:8080", "http://localhost:8081"}
|
||||
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
|
||||
jsonRestHandler.EXPECT().SetHost(
|
||||
jsonRestHandler.EXPECT().SwitchHost(
|
||||
hosts[0],
|
||||
).Times(1)
|
||||
jsonRestHandler.EXPECT().Host().Return(
|
||||
@@ -557,17 +557,17 @@ func TestBeaconApiValidatorClient_Host(t *testing.T) {
|
||||
).Times(1)
|
||||
|
||||
validatorClient := beaconApiValidatorClient{jsonRestHandler: jsonRestHandler}
|
||||
validatorClient.SetHost(hosts[0])
|
||||
validatorClient.SwitchHost(hosts[0])
|
||||
host := validatorClient.Host()
|
||||
require.Equal(t, hosts[0], host)
|
||||
|
||||
jsonRestHandler.EXPECT().SetHost(
|
||||
jsonRestHandler.EXPECT().SwitchHost(
|
||||
hosts[1],
|
||||
).Times(1)
|
||||
jsonRestHandler.EXPECT().Host().Return(
|
||||
hosts[1],
|
||||
).Times(1)
|
||||
validatorClient.SetHost(hosts[1])
|
||||
validatorClient.SwitchHost(hosts[1])
|
||||
host = validatorClient.Host()
|
||||
require.Equal(t, hosts[1], host)
|
||||
}
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"strconv"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/api/apiutil"
|
||||
"github.com/OffchainLabs/prysm/v7/api/rest"
|
||||
"github.com/OffchainLabs/prysm/v7/api/server/structs"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
@@ -27,7 +28,7 @@ type dutiesProvider interface {
|
||||
}
|
||||
|
||||
type beaconApiDutiesProvider struct {
|
||||
jsonRestHandler RestHandler
|
||||
jsonRestHandler rest.RestHandler
|
||||
}
|
||||
|
||||
type attesterDuty struct {
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/api/rest"
|
||||
"github.com/OffchainLabs/prysm/v7/api/server/structs"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
@@ -20,7 +21,7 @@ type GenesisProvider interface {
|
||||
}
|
||||
|
||||
type beaconApiGenesisProvider struct {
|
||||
jsonRestHandler RestHandler
|
||||
jsonRestHandler rest.RestHandler
|
||||
genesis *structs.Genesis
|
||||
once sync.Once
|
||||
}
|
||||
|
||||
@@ -150,14 +150,14 @@ func (mr *MockRestHandlerMockRecorder) PostSSZ(ctx, endpoint, headers, data any)
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PostSSZ", reflect.TypeOf((*MockRestHandler)(nil).PostSSZ), ctx, endpoint, headers, data)
|
||||
}
|
||||
|
||||
// SetHost mocks base method.
|
||||
func (m *MockRestHandler) SetHost(host string) {
|
||||
// SwitchHost mocks base method.
|
||||
func (m *MockRestHandler) SwitchHost(host string) {
|
||||
m.ctrl.T.Helper()
|
||||
m.ctrl.Call(m, "SetHost", host)
|
||||
m.ctrl.Call(m, "SwitchHost", host)
|
||||
}
|
||||
|
||||
// SetHost indicates an expected call of SetHost.
|
||||
func (mr *MockRestHandlerMockRecorder) SetHost(host any) *gomock.Call {
|
||||
// SwitchHost indicates an expected call of SwitchHost.
|
||||
func (mr *MockRestHandlerMockRecorder) SwitchHost(host any) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHost", reflect.TypeOf((*MockRestHandler)(nil).SetHost), host)
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SwitchHost", reflect.TypeOf((*MockRestHandler)(nil).SwitchHost), host)
|
||||
}
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/api/apiutil"
|
||||
"github.com/OffchainLabs/prysm/v7/api/rest"
|
||||
"github.com/OffchainLabs/prysm/v7/api/server/structs"
|
||||
validator2 "github.com/OffchainLabs/prysm/v7/consensus-types/validator"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
@@ -18,7 +19,7 @@ import (
|
||||
)
|
||||
|
||||
// NewPrysmChainClient returns implementation of iface.PrysmChainClient.
|
||||
func NewPrysmChainClient(jsonRestHandler RestHandler, nodeClient iface.NodeClient) iface.PrysmChainClient {
|
||||
func NewPrysmChainClient(jsonRestHandler rest.RestHandler, nodeClient iface.NodeClient) iface.PrysmChainClient {
|
||||
return prysmChainClient{
|
||||
jsonRestHandler: jsonRestHandler,
|
||||
nodeClient: nodeClient,
|
||||
@@ -26,7 +27,7 @@ func NewPrysmChainClient(jsonRestHandler RestHandler, nodeClient iface.NodeClien
|
||||
}
|
||||
|
||||
type prysmChainClient struct {
|
||||
jsonRestHandler RestHandler
|
||||
jsonRestHandler rest.RestHandler
|
||||
nodeClient iface.NodeClient
|
||||
}
|
||||
|
||||
|
||||
@@ -12,13 +12,12 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/api"
|
||||
"github.com/OffchainLabs/prysm/v7/api/rest"
|
||||
"github.com/OffchainLabs/prysm/v7/api/server/structs"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/network/httputil"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
@@ -45,10 +44,7 @@ func TestGet(t *testing.T) {
|
||||
server := httptest.NewServer(mux)
|
||||
defer server.Close()
|
||||
|
||||
jsonRestHandler := BeaconApiRestHandler{
|
||||
client: http.Client{Timeout: time.Second * 5},
|
||||
host: server.URL,
|
||||
}
|
||||
jsonRestHandler := rest.NewRestHandler(http.Client{Timeout: time.Second * 5}, server.URL)
|
||||
resp := &structs.GetGenesisResponse{}
|
||||
require.NoError(t, jsonRestHandler.Get(ctx, endpoint+"?arg1=abc&arg2=def", resp))
|
||||
assert.DeepEqual(t, genesisJson, resp)
|
||||
@@ -79,10 +75,7 @@ func TestGetSSZ(t *testing.T) {
|
||||
server := httptest.NewServer(mux)
|
||||
defer server.Close()
|
||||
|
||||
jsonRestHandler := BeaconApiRestHandler{
|
||||
client: http.Client{Timeout: time.Second * 5},
|
||||
host: server.URL,
|
||||
}
|
||||
jsonRestHandler := rest.NewRestHandler(http.Client{Timeout: time.Second * 5}, server.URL)
|
||||
|
||||
body, header, err := jsonRestHandler.GetSSZ(ctx, endpoint)
|
||||
require.NoError(t, err)
|
||||
@@ -108,10 +101,7 @@ func TestGetSSZ(t *testing.T) {
|
||||
server := httptest.NewServer(mux)
|
||||
defer server.Close()
|
||||
|
||||
jsonRestHandler := BeaconApiRestHandler{
|
||||
client: http.Client{Timeout: time.Second * 5},
|
||||
host: server.URL,
|
||||
}
|
||||
jsonRestHandler := rest.NewRestHandler(http.Client{Timeout: time.Second * 5}, server.URL)
|
||||
|
||||
body, header, err := jsonRestHandler.GetSSZ(ctx, endpoint)
|
||||
require.NoError(t, err)
|
||||
@@ -136,10 +126,7 @@ func TestGetSSZ(t *testing.T) {
|
||||
server := httptest.NewServer(mux)
|
||||
defer server.Close()
|
||||
|
||||
jsonRestHandler := BeaconApiRestHandler{
|
||||
client: http.Client{Timeout: time.Second * 5},
|
||||
host: server.URL,
|
||||
}
|
||||
jsonRestHandler := rest.NewRestHandler(http.Client{Timeout: time.Second * 5}, server.URL)
|
||||
|
||||
_, _, err := jsonRestHandler.GetSSZ(ctx, endpoint)
|
||||
require.NoError(t, err)
|
||||
@@ -161,7 +148,7 @@ func TestAcceptOverrideSSZ(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
}))
|
||||
defer srv.Close()
|
||||
c := NewBeaconApiRestHandler(http.Client{Timeout: time.Second * 5}, srv.URL)
|
||||
c := rest.NewRestHandler(http.Client{Timeout: time.Second * 5}, srv.URL)
|
||||
_, _, err := c.GetSSZ(t.Context(), "/test")
|
||||
require.NoError(t, err)
|
||||
}
|
||||
@@ -204,162 +191,12 @@ func TestPost(t *testing.T) {
|
||||
server := httptest.NewServer(mux)
|
||||
defer server.Close()
|
||||
|
||||
jsonRestHandler := BeaconApiRestHandler{
|
||||
client: http.Client{Timeout: time.Second * 5},
|
||||
host: server.URL,
|
||||
}
|
||||
jsonRestHandler := rest.NewRestHandler(http.Client{Timeout: time.Second * 5}, server.URL)
|
||||
resp := &structs.GetGenesisResponse{}
|
||||
require.NoError(t, jsonRestHandler.Post(ctx, endpoint, headers, bytes.NewBuffer(dataBytes), resp))
|
||||
assert.DeepEqual(t, genesisJson, resp)
|
||||
}
|
||||
|
||||
func Test_decodeResp(t *testing.T) {
|
||||
type j struct {
|
||||
Foo string `json:"foo"`
|
||||
}
|
||||
t.Run("200 JSON with charset", func(t *testing.T) {
|
||||
body := bytes.Buffer{}
|
||||
r := &http.Response{
|
||||
Status: "200",
|
||||
StatusCode: http.StatusOK,
|
||||
Body: io.NopCloser(&body),
|
||||
Header: map[string][]string{"Content-Type": {"application/json; charset=utf-8"}},
|
||||
}
|
||||
require.NoError(t, decodeResp(r, nil))
|
||||
})
|
||||
t.Run("200 non-JSON", func(t *testing.T) {
|
||||
body := bytes.Buffer{}
|
||||
r := &http.Response{
|
||||
Status: "200",
|
||||
StatusCode: http.StatusOK,
|
||||
Body: io.NopCloser(&body),
|
||||
Header: map[string][]string{"Content-Type": {api.OctetStreamMediaType}},
|
||||
}
|
||||
require.NoError(t, decodeResp(r, nil))
|
||||
})
|
||||
t.Run("204 non-JSON", func(t *testing.T) {
|
||||
body := bytes.Buffer{}
|
||||
r := &http.Response{
|
||||
Status: "204",
|
||||
StatusCode: http.StatusNoContent,
|
||||
Body: io.NopCloser(&body),
|
||||
Header: map[string][]string{"Content-Type": {api.OctetStreamMediaType}},
|
||||
}
|
||||
require.NoError(t, decodeResp(r, nil))
|
||||
})
|
||||
t.Run("500 non-JSON", func(t *testing.T) {
|
||||
body := bytes.Buffer{}
|
||||
_, err := body.WriteString("foo")
|
||||
require.NoError(t, err)
|
||||
r := &http.Response{
|
||||
Status: "500",
|
||||
StatusCode: http.StatusInternalServerError,
|
||||
Body: io.NopCloser(&body),
|
||||
Header: map[string][]string{"Content-Type": {api.OctetStreamMediaType}},
|
||||
}
|
||||
err = decodeResp(r, nil)
|
||||
errJson := &httputil.DefaultJsonError{}
|
||||
require.Equal(t, true, errors.As(err, &errJson))
|
||||
assert.Equal(t, http.StatusInternalServerError, errJson.Code)
|
||||
assert.Equal(t, "foo", errJson.Message)
|
||||
})
|
||||
t.Run("200 JSON with resp", func(t *testing.T) {
|
||||
body := bytes.Buffer{}
|
||||
b, err := json.Marshal(&j{Foo: "foo"})
|
||||
require.NoError(t, err)
|
||||
body.Write(b)
|
||||
r := &http.Response{
|
||||
Status: "200",
|
||||
StatusCode: http.StatusOK,
|
||||
Body: io.NopCloser(&body),
|
||||
Header: map[string][]string{"Content-Type": {api.JsonMediaType}},
|
||||
}
|
||||
resp := &j{}
|
||||
require.NoError(t, decodeResp(r, resp))
|
||||
assert.Equal(t, "foo", resp.Foo)
|
||||
})
|
||||
t.Run("200 JSON without resp", func(t *testing.T) {
|
||||
body := bytes.Buffer{}
|
||||
r := &http.Response{
|
||||
Status: "200",
|
||||
StatusCode: http.StatusOK,
|
||||
Body: io.NopCloser(&body),
|
||||
Header: map[string][]string{"Content-Type": {api.JsonMediaType}},
|
||||
}
|
||||
require.NoError(t, decodeResp(r, nil))
|
||||
})
|
||||
t.Run("204 JSON", func(t *testing.T) {
|
||||
body := bytes.Buffer{}
|
||||
r := &http.Response{
|
||||
Status: "204",
|
||||
StatusCode: http.StatusNoContent,
|
||||
Body: io.NopCloser(&body),
|
||||
Header: map[string][]string{"Content-Type": {api.JsonMediaType}},
|
||||
}
|
||||
require.NoError(t, decodeResp(r, nil))
|
||||
})
|
||||
t.Run("500 JSON", func(t *testing.T) {
|
||||
body := bytes.Buffer{}
|
||||
b, err := json.Marshal(&httputil.DefaultJsonError{Code: http.StatusInternalServerError, Message: "error"})
|
||||
require.NoError(t, err)
|
||||
body.Write(b)
|
||||
r := &http.Response{
|
||||
Status: "500",
|
||||
StatusCode: http.StatusInternalServerError,
|
||||
Body: io.NopCloser(&body),
|
||||
Header: map[string][]string{"Content-Type": {api.JsonMediaType}},
|
||||
}
|
||||
err = decodeResp(r, nil)
|
||||
errJson := &httputil.DefaultJsonError{}
|
||||
require.Equal(t, true, errors.As(err, &errJson))
|
||||
assert.Equal(t, http.StatusInternalServerError, errJson.Code)
|
||||
assert.Equal(t, "error", errJson.Message)
|
||||
})
|
||||
t.Run("200 JSON cannot decode", func(t *testing.T) {
|
||||
body := bytes.Buffer{}
|
||||
_, err := body.WriteString("foo")
|
||||
require.NoError(t, err)
|
||||
r := &http.Response{
|
||||
Status: "200",
|
||||
StatusCode: http.StatusOK,
|
||||
Body: io.NopCloser(&body),
|
||||
Header: map[string][]string{"Content-Type": {api.JsonMediaType}},
|
||||
Request: &http.Request{},
|
||||
}
|
||||
resp := &j{}
|
||||
err = decodeResp(r, resp)
|
||||
assert.ErrorContains(t, "failed to decode response body into json", err)
|
||||
})
|
||||
t.Run("500 JSON cannot decode", func(t *testing.T) {
|
||||
body := bytes.Buffer{}
|
||||
_, err := body.WriteString("foo")
|
||||
require.NoError(t, err)
|
||||
r := &http.Response{
|
||||
Status: "500",
|
||||
StatusCode: http.StatusInternalServerError,
|
||||
Body: io.NopCloser(&body),
|
||||
Header: map[string][]string{"Content-Type": {api.JsonMediaType}},
|
||||
Request: &http.Request{},
|
||||
}
|
||||
err = decodeResp(r, nil)
|
||||
assert.ErrorContains(t, "failed to decode response body into error json", err)
|
||||
})
|
||||
t.Run("500 not JSON", func(t *testing.T) {
|
||||
body := bytes.Buffer{}
|
||||
_, err := body.WriteString("foo")
|
||||
require.NoError(t, err)
|
||||
r := &http.Response{
|
||||
Status: "500",
|
||||
StatusCode: http.StatusInternalServerError,
|
||||
Body: io.NopCloser(&body),
|
||||
Header: map[string][]string{"Content-Type": {"text/plain"}},
|
||||
Request: &http.Request{},
|
||||
}
|
||||
err = decodeResp(r, nil)
|
||||
assert.ErrorContains(t, "HTTP request unsuccessful (500: foo)", err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestGetStatusCode(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
const endpoint = "/eth/v1/node/health"
|
||||
@@ -401,10 +238,7 @@ func TestGetStatusCode(t *testing.T) {
|
||||
server := httptest.NewServer(mux)
|
||||
defer server.Close()
|
||||
|
||||
jsonRestHandler := BeaconApiRestHandler{
|
||||
client: http.Client{Timeout: time.Second * 5},
|
||||
host: server.URL,
|
||||
}
|
||||
jsonRestHandler := rest.NewRestHandler(http.Client{Timeout: time.Second * 5}, server.URL)
|
||||
|
||||
statusCode, err := jsonRestHandler.GetStatusCode(ctx, endpoint)
|
||||
require.NoError(t, err)
|
||||
@@ -413,10 +247,7 @@ func TestGetStatusCode(t *testing.T) {
|
||||
}
|
||||
|
||||
t.Run("returns error on connection failure", func(t *testing.T) {
|
||||
jsonRestHandler := BeaconApiRestHandler{
|
||||
client: http.Client{Timeout: time.Millisecond * 100},
|
||||
host: "http://localhost:99999", // Invalid port
|
||||
}
|
||||
jsonRestHandler := rest.NewRestHandler(http.Client{Timeout: time.Millisecond * 100}, "http://localhost:99999")
|
||||
|
||||
_, err := jsonRestHandler.GetStatusCode(ctx, endpoint)
|
||||
require.ErrorContains(t, "failed to perform request", err)
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"strconv"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/api/apiutil"
|
||||
"github.com/OffchainLabs/prysm/v7/api/rest"
|
||||
"github.com/OffchainLabs/prysm/v7/api/server/structs"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/pkg/errors"
|
||||
@@ -21,7 +22,7 @@ type StateValidatorsProvider interface {
|
||||
}
|
||||
|
||||
type beaconApiStateValidatorsProvider struct {
|
||||
jsonRestHandler RestHandler
|
||||
jsonRestHandler rest.RestHandler
|
||||
}
|
||||
|
||||
func (c beaconApiStateValidatorsProvider) StateValidators(
|
||||
|
||||
@@ -9,19 +9,17 @@ import (
	validatorHelpers "github.com/OffchainLabs/prysm/v7/validator/helpers"
)

func NewChainClient(validatorConn validatorHelpers.NodeConnection, jsonRestHandler beaconApi.RestHandler) iface.ChainClient {
	grpcClient := grpcApi.NewGrpcChainClient(validatorConn.GetGrpcClientConn())
func NewChainClient(validatorConn validatorHelpers.NodeConnection) iface.ChainClient {
	grpcClient := grpcApi.NewGrpcChainClient(validatorConn)
	if features.Get().EnableBeaconRESTApi {
		return beaconApi.NewBeaconApiChainClientWithFallback(jsonRestHandler, grpcClient)
	} else {
		return grpcClient
		return beaconApi.NewBeaconApiChainClientWithFallback(validatorConn.GetRestHandler(), grpcClient)
	}
	return grpcClient
}

func NewPrysmChainClient(validatorConn validatorHelpers.NodeConnection, jsonRestHandler beaconApi.RestHandler) iface.PrysmChainClient {
func NewPrysmChainClient(validatorConn validatorHelpers.NodeConnection) iface.PrysmChainClient {
	if features.Get().EnableBeaconRESTApi {
		return beaconApi.NewPrysmChainClient(jsonRestHandler, nodeClientFactory.NewNodeClient(validatorConn, jsonRestHandler))
	} else {
		return grpcApi.NewGrpcPrysmChainClient(validatorConn.GetGrpcClientConn())
		return beaconApi.NewPrysmChainClient(validatorConn.GetRestHandler(), nodeClientFactory.NewNodeClient(validatorConn))
	}
	return grpcApi.NewGrpcPrysmChainClient(validatorConn)
}
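For orientation, a short hypothetical caller of these factories; the clientFactory import alias is assumed, and the REST-with-gRPC-fallback versus pure-gRPC selection is exactly the EnableBeaconRESTApi branch above:

	// Sketch only: one NodeConnection feeds both factories.
	chainClient := clientFactory.NewChainClient(conn)           // REST client with gRPC fallback when EnableBeaconRESTApi is set
	prysmChainClient := clientFactory.NewPrysmChainClient(conn) // same feature-flag selection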
@@ -4,6 +4,7 @@ go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"grpc_beacon_chain_client.go",
|
||||
"grpc_client_manager.go",
|
||||
"grpc_node_client.go",
|
||||
"grpc_prysm_beacon_chain_client.go",
|
||||
"grpc_validator_client.go",
|
||||
@@ -25,6 +26,7 @@ go_library(
|
||||
"//proto/eth/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//validator/client/iface:go_default_library",
|
||||
"//validator/helpers:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_golang_protobuf//ptypes/empty",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
@@ -39,6 +41,8 @@ go_test(
|
||||
name = "go_default_test",
|
||||
size = "small",
|
||||
srcs = [
|
||||
"grpc_client_manager_test.go",
|
||||
"grpc_node_client_test.go",
|
||||
"grpc_prysm_beacon_chain_client_test.go",
|
||||
"grpc_validator_client_test.go",
|
||||
],
|
||||
@@ -56,7 +60,11 @@ go_test(
|
||||
"//testing/util:go_default_library",
|
||||
"//testing/validator-mock:go_default_library",
|
||||
"//validator/client/iface:go_default_library",
|
||||
"//validator/helpers:go_default_library",
|
||||
"//validator/testing:go_default_library",
|
||||
"@com_github_golang_protobuf//ptypes/empty",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
"@org_golang_google_grpc//:go_default_library",
|
||||
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
|
||||
"@org_uber_go_mock//gomock:go_default_library",
|
||||
],
|
||||
|
||||
@@ -5,38 +5,42 @@ import (
|
||||
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/validator/client/iface"
|
||||
validatorHelpers "github.com/OffchainLabs/prysm/v7/validator/helpers"
|
||||
"github.com/golang/protobuf/ptypes/empty"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
type grpcChainClient struct {
|
||||
beaconChainClient ethpb.BeaconChainClient
|
||||
*grpcClientManager[ethpb.BeaconChainClient]
|
||||
}
|
||||
|
||||
func (c *grpcChainClient) ChainHead(ctx context.Context, in *empty.Empty) (*ethpb.ChainHead, error) {
|
||||
return c.beaconChainClient.GetChainHead(ctx, in)
|
||||
return c.getClient().GetChainHead(ctx, in)
|
||||
}
|
||||
|
||||
func (c *grpcChainClient) ValidatorBalances(ctx context.Context, in *ethpb.ListValidatorBalancesRequest) (*ethpb.ValidatorBalances, error) {
|
||||
return c.beaconChainClient.ListValidatorBalances(ctx, in)
|
||||
return c.getClient().ListValidatorBalances(ctx, in)
|
||||
}
|
||||
|
||||
func (c *grpcChainClient) Validators(ctx context.Context, in *ethpb.ListValidatorsRequest) (*ethpb.Validators, error) {
|
||||
return c.beaconChainClient.ListValidators(ctx, in)
|
||||
return c.getClient().ListValidators(ctx, in)
|
||||
}
|
||||
|
||||
func (c *grpcChainClient) ValidatorQueue(ctx context.Context, in *empty.Empty) (*ethpb.ValidatorQueue, error) {
|
||||
return c.beaconChainClient.GetValidatorQueue(ctx, in)
|
||||
return c.getClient().GetValidatorQueue(ctx, in)
|
||||
}
|
||||
|
||||
func (c *grpcChainClient) ValidatorPerformance(ctx context.Context, in *ethpb.ValidatorPerformanceRequest) (*ethpb.ValidatorPerformanceResponse, error) {
|
||||
return c.beaconChainClient.GetValidatorPerformance(ctx, in)
|
||||
return c.getClient().GetValidatorPerformance(ctx, in)
|
||||
}
|
||||
|
||||
func (c *grpcChainClient) ValidatorParticipation(ctx context.Context, in *ethpb.GetValidatorParticipationRequest) (*ethpb.ValidatorParticipationResponse, error) {
|
||||
return c.beaconChainClient.GetValidatorParticipation(ctx, in)
|
||||
return c.getClient().GetValidatorParticipation(ctx, in)
|
||||
}
|
||||
|
||||
func NewGrpcChainClient(cc grpc.ClientConnInterface) iface.ChainClient {
|
||||
return &grpcChainClient{ethpb.NewBeaconChainClient(cc)}
|
||||
// NewGrpcChainClient creates a new gRPC chain client that supports
|
||||
// dynamic connection switching via the NodeConnection's GrpcConnectionProvider.
|
||||
func NewGrpcChainClient(conn validatorHelpers.NodeConnection) iface.ChainClient {
|
||||
return &grpcChainClient{
|
||||
grpcClientManager: newGrpcClientManager(conn, ethpb.NewBeaconChainClient),
|
||||
}
|
||||
}
|
||||
|
||||
44  validator/client/grpc-api/grpc_client_manager.go  Normal file
@@ -0,0 +1,44 @@
package grpc_api

import (
	"sync"

	validatorHelpers "github.com/OffchainLabs/prysm/v7/validator/helpers"
	"google.golang.org/grpc"
)

// grpcClientManager handles dynamic gRPC client recreation when the connection changes.
// It uses generics to work with any gRPC client type.
type grpcClientManager[T any] struct {
	mu        sync.Mutex
	conn      validatorHelpers.NodeConnection
	client    T
	lastHost  string
	newClient func(grpc.ClientConnInterface) T
}

// newGrpcClientManager creates a new client manager with the given connection and client constructor.
func newGrpcClientManager[T any](
	conn validatorHelpers.NodeConnection,
	newClient func(grpc.ClientConnInterface) T,
) *grpcClientManager[T] {
	return &grpcClientManager[T]{
		conn:      conn,
		newClient: newClient,
		client:    newClient(conn.GetGrpcClientConn()),
		lastHost:  conn.GetGrpcConnectionProvider().CurrentHost(),
	}
}

// getClient returns the current client, recreating it if the connection has changed.
func (m *grpcClientManager[T]) getClient() T {
	m.mu.Lock()
	defer m.mu.Unlock()

	currentHost := m.conn.GetGrpcConnectionProvider().CurrentHost()
	if m.lastHost != currentHost {
		m.client = m.newClient(m.conn.GetGrpcClientConn())
		m.lastHost = currentHost
	}
	return m.client
}
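To make the recreation behavior concrete, a hedged sketch of how the wrappers later in this diff consume the manager (conn is a NodeConnection; ethpb.NewNodeClient is one of the constructors actually passed in below):

	// Sketch only: getClient transparently follows host switches.
	mgr := newGrpcClientManager(conn, ethpb.NewNodeClient)
	a := mgr.getClient() // stub bound to the provider's current host
	// ... the GrpcConnectionProvider switches to another beacon node ...
	b := mgr.getClient() // rebuilt because CurrentHost() changed
	_, _ = a, b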
168  validator/client/grpc-api/grpc_client_manager_test.go  Normal file
@@ -0,0 +1,168 @@
|
||||
package grpc_api
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
validatorHelpers "github.com/OffchainLabs/prysm/v7/validator/helpers"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
// mockProvider implements grpcutil.GrpcConnectionProvider for testing.
|
||||
type mockProvider struct {
|
||||
hosts []string
|
||||
currentIndex int
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
func (m *mockProvider) CurrentConn() *grpc.ClientConn { return nil }
|
||||
func (m *mockProvider) Hosts() []string { return m.hosts }
|
||||
func (m *mockProvider) Close() {}
|
||||
|
||||
func (m *mockProvider) CurrentHost() string {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
return m.hosts[m.currentIndex]
|
||||
}
|
||||
|
||||
func (m *mockProvider) SwitchHost(index int) error {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
m.currentIndex = index
|
||||
return nil
|
||||
}
|
||||
|
||||
// nextHost is a test helper for round-robin simulation (not part of the interface).
|
||||
func (m *mockProvider) nextHost() {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
m.currentIndex = (m.currentIndex + 1) % len(m.hosts)
|
||||
}
|
||||
|
||||
// testClient is a simple type for testing the generic client manager.
|
||||
type testClient struct{ id int }
|
||||
|
||||
// testManager creates a manager with client creation counting.
|
||||
func testManager(t *testing.T, provider *mockProvider) (*grpcClientManager[*testClient], *int) {
|
||||
conn, err := validatorHelpers.NewNodeConnection(validatorHelpers.WithGRPCProvider(provider))
|
||||
require.NoError(t, err)
|
||||
|
||||
clientCount := new(int)
|
||||
newClient := func(grpc.ClientConnInterface) *testClient {
|
||||
*clientCount++
|
||||
return &testClient{id: *clientCount}
|
||||
}
|
||||
|
||||
manager := newGrpcClientManager(conn, newClient)
|
||||
require.NotNil(t, manager)
|
||||
return manager, clientCount
|
||||
}
|
||||
|
||||
func TestGrpcClientManager(t *testing.T) {
|
||||
t.Run("tracks host", func(t *testing.T) {
|
||||
provider := &mockProvider{hosts: []string{"host1:4000", "host2:4000"}}
|
||||
manager, count := testManager(t, provider)
|
||||
assert.Equal(t, 1, *count)
|
||||
assert.Equal(t, "host1:4000", manager.lastHost)
|
||||
})
|
||||
|
||||
t.Run("same host returns same client", func(t *testing.T) {
|
||||
provider := &mockProvider{hosts: []string{"host1:4000", "host2:4000"}}
|
||||
manager, count := testManager(t, provider)
|
||||
|
||||
c1, c2, c3 := manager.getClient(), manager.getClient(), manager.getClient()
|
||||
assert.Equal(t, 1, *count)
|
||||
assert.Equal(t, c1, c2)
|
||||
assert.Equal(t, c2, c3)
|
||||
})
|
||||
|
||||
t.Run("host change recreates client", func(t *testing.T) {
|
||||
provider := &mockProvider{hosts: []string{"host1:4000", "host2:4000"}}
|
||||
manager, count := testManager(t, provider)
|
||||
|
||||
c1 := manager.getClient()
|
||||
assert.Equal(t, 1, c1.id)
|
||||
|
||||
provider.nextHost()
|
||||
c2 := manager.getClient()
|
||||
assert.Equal(t, 2, *count)
|
||||
assert.Equal(t, 2, c2.id)
|
||||
|
||||
// Same host again - no recreation
|
||||
c3 := manager.getClient()
|
||||
assert.Equal(t, 2, *count)
|
||||
assert.Equal(t, c2, c3)
|
||||
})
|
||||
|
||||
t.Run("multiple host switches", func(t *testing.T) {
|
||||
provider := &mockProvider{hosts: []string{"host1:4000", "host2:4000", "host3:4000"}}
|
||||
manager, count := testManager(t, provider)
|
||||
assert.Equal(t, 1, *count)
|
||||
|
||||
for expected := 2; expected <= 4; expected++ {
|
||||
provider.nextHost()
|
||||
_ = manager.getClient()
|
||||
assert.Equal(t, expected, *count)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestGrpcClientManager_Concurrent(t *testing.T) {
|
||||
t.Run("concurrent access same host", func(t *testing.T) {
|
||||
provider := &mockProvider{hosts: []string{"host1:4000", "host2:4000"}}
|
||||
manager, _ := testManager(t, provider)
|
||||
|
||||
var clientCount int
|
||||
var countMu sync.Mutex
|
||||
// Override with thread-safe counter
|
||||
manager.newClient = func(grpc.ClientConnInterface) *testClient {
|
||||
countMu.Lock()
|
||||
clientCount++
|
||||
id := clientCount
|
||||
countMu.Unlock()
|
||||
return &testClient{id: id}
|
||||
}
|
||||
manager.client = manager.newClient(nil)
|
||||
clientCount = 1
|
||||
|
||||
var wg sync.WaitGroup
|
||||
for range 100 {
|
||||
wg.Go(func() { _ = manager.getClient() })
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
countMu.Lock()
|
||||
assert.Equal(t, 1, clientCount)
|
||||
countMu.Unlock()
|
||||
})
|
||||
|
||||
t.Run("concurrent with host changes", func(t *testing.T) {
|
||||
provider := &mockProvider{hosts: []string{"host1:4000", "host2:4000"}}
|
||||
manager, _ := testManager(t, provider)
|
||||
|
||||
var clientCount int
|
||||
var countMu sync.Mutex
|
||||
manager.newClient = func(grpc.ClientConnInterface) *testClient {
|
||||
countMu.Lock()
|
||||
clientCount++
|
||||
id := clientCount
|
||||
countMu.Unlock()
|
||||
return &testClient{id: id}
|
||||
}
|
||||
manager.client = manager.newClient(nil)
|
||||
clientCount = 1
|
||||
|
||||
var wg sync.WaitGroup
|
||||
for range 50 {
|
||||
wg.Go(func() { _ = manager.getClient() })
|
||||
wg.Go(func() { provider.nextHost() })
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
countMu.Lock()
|
||||
assert.NotEqual(t, 0, clientCount, "Should have created at least one client")
|
||||
countMu.Unlock()
|
||||
})
|
||||
}
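The mockProvider above covers the provider surface that real failover depends on. A hedged sketch of a round-robin switch driven through that interface (isHealthy is an assumed health predicate; CurrentHost, Hosts and SwitchHost(index) mirror the mock's methods):

	// Sketch only: hypothetical round-robin failover over a connection provider.
	hosts := provider.Hosts()
	for i := 0; i < len(hosts) && !isHealthy(ctx, provider.CurrentHost()); i++ {
		if err := provider.SwitchHost((i + 1) % len(hosts)); err != nil {
			log.WithError(err).Error("Could not switch gRPC host")
			break
		}
	}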
@@ -5,8 +5,8 @@ import (
|
||||
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/validator/client/iface"
|
||||
validatorHelpers "github.com/OffchainLabs/prysm/v7/validator/helpers"
|
||||
"github.com/golang/protobuf/ptypes/empty"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -14,35 +14,40 @@ var (
|
||||
)
|
||||
|
||||
type grpcNodeClient struct {
|
||||
nodeClient ethpb.NodeClient
|
||||
*grpcClientManager[ethpb.NodeClient]
|
||||
}
|
||||
|
||||
func (c *grpcNodeClient) SyncStatus(ctx context.Context, in *empty.Empty) (*ethpb.SyncStatus, error) {
|
||||
return c.nodeClient.GetSyncStatus(ctx, in)
|
||||
return c.getClient().GetSyncStatus(ctx, in)
|
||||
}
|
||||
|
||||
func (c *grpcNodeClient) Genesis(ctx context.Context, in *empty.Empty) (*ethpb.Genesis, error) {
|
||||
return c.nodeClient.GetGenesis(ctx, in)
|
||||
return c.getClient().GetGenesis(ctx, in)
|
||||
}
|
||||
|
||||
func (c *grpcNodeClient) Version(ctx context.Context, in *empty.Empty) (*ethpb.Version, error) {
|
||||
return c.nodeClient.GetVersion(ctx, in)
|
||||
return c.getClient().GetVersion(ctx, in)
|
||||
}
|
||||
|
||||
func (c *grpcNodeClient) Peers(ctx context.Context, in *empty.Empty) (*ethpb.Peers, error) {
|
||||
return c.nodeClient.ListPeers(ctx, in)
|
||||
return c.getClient().ListPeers(ctx, in)
|
||||
}
|
||||
|
||||
func (c *grpcNodeClient) IsReady(ctx context.Context) bool {
|
||||
_, err := c.nodeClient.GetHealth(ctx, ðpb.HealthRequest{})
|
||||
// GetHealth returns 200 OK only if node is synced and not optimistic.
|
||||
// otherwise it will throw an error
|
||||
_, err := c.getClient().GetHealth(ctx, ðpb.HealthRequest{})
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to get health of node")
|
||||
log.WithError(err).Debug("Node is not ready")
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func NewNodeClient(cc grpc.ClientConnInterface) iface.NodeClient {
|
||||
g := &grpcNodeClient{nodeClient: ethpb.NewNodeClient(cc)}
|
||||
return g
|
||||
// NewNodeClient creates a new gRPC node client that supports
|
||||
// dynamic connection switching via the NodeConnection's GrpcConnectionProvider.
|
||||
func NewNodeClient(conn validatorHelpers.NodeConnection) iface.NodeClient {
|
||||
return &grpcNodeClient{
|
||||
grpcClientManager: newGrpcClientManager(conn, ethpb.NewNodeClient),
|
||||
}
|
||||
}
|
||||
|
||||
72  validator/client/grpc-api/grpc_node_client_test.go  Normal file
@@ -0,0 +1,72 @@
|
||||
package grpc_api
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/mock"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
validatorHelpers "github.com/OffchainLabs/prysm/v7/validator/helpers"
|
||||
"github.com/golang/protobuf/ptypes/empty"
|
||||
"go.uber.org/mock/gomock"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
func TestGrpcNodeClient_IsReady(t *testing.T) {
|
||||
// The IsReady function now relies on GetHealth which returns:
|
||||
// - 200 OK (nil error) only if node is synced AND not optimistic
|
||||
// - 206 Partial Content (error) if syncing or optimistic
|
||||
// - 503 Unavailable (error) if unavailable
|
||||
testCases := []struct {
|
||||
name string
|
||||
healthErr error
|
||||
expectedResult bool
|
||||
}{
|
||||
{
|
||||
name: "returns true when health check succeeds (synced and not optimistic)",
|
||||
healthErr: nil,
|
||||
expectedResult: true,
|
||||
},
|
||||
{
|
||||
name: "returns false when health check fails (syncing, optimistic, or unavailable)",
|
||||
healthErr: errors.New("node not ready"),
|
||||
expectedResult: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
ctx := t.Context()
|
||||
|
||||
mockNodeClient := mock.NewMockNodeClient(ctrl)
|
||||
|
||||
// Set up health check expectation
|
||||
mockNodeClient.EXPECT().GetHealth(
|
||||
gomock.Any(),
|
||||
gomock.Any(),
|
||||
).Return(&empty.Empty{}, tc.healthErr)
|
||||
|
||||
// Create a mock provider
|
||||
provider := &mockProvider{hosts: []string{"host1:4000"}}
|
||||
conn, err := validatorHelpers.NewNodeConnection(validatorHelpers.WithGRPCProvider(provider))
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create client with injected mock
|
||||
client := &grpcNodeClient{
|
||||
grpcClientManager: &grpcClientManager[ethpb.NodeClient]{
|
||||
conn: conn,
|
||||
client: mockNodeClient,
|
||||
lastHost: "host1:4000",
|
||||
newClient: func(grpc.ClientConnInterface) ethpb.NodeClient { return mockNodeClient },
|
||||
},
|
||||
}
|
||||
|
||||
result := client.IsReady(ctx)
|
||||
assert.Equal(t, tc.expectedResult, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -12,9 +12,9 @@ import (
|
||||
eth "github.com/OffchainLabs/prysm/v7/proto/eth/v1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/validator/client/iface"
|
||||
validatorHelpers "github.com/OffchainLabs/prysm/v7/validator/helpers"
|
||||
"github.com/golang/protobuf/ptypes/empty"
|
||||
"github.com/pkg/errors"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
type grpcPrysmChainClient struct {
|
||||
@@ -95,6 +95,8 @@ func (c *grpcPrysmChainClient) ValidatorPerformance(ctx context.Context, in *eth
|
||||
return c.chainClient.ValidatorPerformance(ctx, in)
|
||||
}
|
||||
|
||||
func NewGrpcPrysmChainClient(cc grpc.ClientConnInterface) iface.PrysmChainClient {
|
||||
return &grpcPrysmChainClient{chainClient: &grpcChainClient{ethpb.NewBeaconChainClient(cc)}}
|
||||
// NewGrpcPrysmChainClient creates a new gRPC Prysm chain client that supports
|
||||
// dynamic connection switching via the NodeConnection's GrpcConnectionProvider.
|
||||
func NewGrpcPrysmChainClient(conn validatorHelpers.NodeConnection) iface.PrysmChainClient {
|
||||
return &grpcPrysmChainClient{chainClient: NewGrpcChainClient(conn)}
|
||||
}
|
||||
|
||||
@@ -14,24 +14,24 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/validator/client/iface"
|
||||
validatorHelpers "github.com/OffchainLabs/prysm/v7/validator/helpers"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/golang/protobuf/ptypes/empty"
|
||||
"github.com/pkg/errors"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
type grpcValidatorClient struct {
|
||||
beaconNodeValidatorClient ethpb.BeaconNodeValidatorClient
|
||||
isEventStreamRunning bool
|
||||
*grpcClientManager[ethpb.BeaconNodeValidatorClient]
|
||||
isEventStreamRunning bool
|
||||
}
|
||||
|
||||
func (c *grpcValidatorClient) Duties(ctx context.Context, in *ethpb.DutiesRequest) (*ethpb.ValidatorDutiesContainer, error) {
|
||||
if features.Get().DisableDutiesV2 {
|
||||
return c.getDuties(ctx, in)
|
||||
}
|
||||
dutiesResponse, err := c.beaconNodeValidatorClient.GetDutiesV2(ctx, in)
|
||||
dutiesResponse, err := c.getClient().GetDutiesV2(ctx, in)
|
||||
if err != nil {
|
||||
if status.Code(err) == codes.Unimplemented {
|
||||
log.Warn("GetDutiesV2 returned status code unavailable, falling back to GetDuties")
|
||||
@@ -47,7 +47,7 @@ func (c *grpcValidatorClient) Duties(ctx context.Context, in *ethpb.DutiesReques
|
||||
|
||||
// getDuties is calling the v1 of get duties
|
||||
func (c *grpcValidatorClient) getDuties(ctx context.Context, in *ethpb.DutiesRequest) (*ethpb.ValidatorDutiesContainer, error) {
|
||||
dutiesResponse, err := c.beaconNodeValidatorClient.GetDuties(ctx, in)
|
||||
dutiesResponse, err := c.getClient().GetDuties(ctx, in)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(
|
||||
client.ErrConnectionIssue,
|
||||
@@ -147,108 +147,108 @@ func toValidatorDutyV2(duty *ethpb.DutiesV2Response_Duty) (*ethpb.ValidatorDuty,
|
||||
}
|
||||
|
||||
func (c *grpcValidatorClient) CheckDoppelGanger(ctx context.Context, in *ethpb.DoppelGangerRequest) (*ethpb.DoppelGangerResponse, error) {
|
||||
return c.beaconNodeValidatorClient.CheckDoppelGanger(ctx, in)
|
||||
return c.getClient().CheckDoppelGanger(ctx, in)
|
||||
}
|
||||
|
||||
func (c *grpcValidatorClient) DomainData(ctx context.Context, in *ethpb.DomainRequest) (*ethpb.DomainResponse, error) {
|
||||
return c.beaconNodeValidatorClient.DomainData(ctx, in)
|
||||
return c.getClient().DomainData(ctx, in)
|
||||
}
|
||||
|
||||
func (c *grpcValidatorClient) AttestationData(ctx context.Context, in *ethpb.AttestationDataRequest) (*ethpb.AttestationData, error) {
|
||||
return c.beaconNodeValidatorClient.GetAttestationData(ctx, in)
|
||||
return c.getClient().GetAttestationData(ctx, in)
|
||||
}
|
||||
|
||||
func (c *grpcValidatorClient) BeaconBlock(ctx context.Context, in *ethpb.BlockRequest) (*ethpb.GenericBeaconBlock, error) {
|
||||
return c.beaconNodeValidatorClient.GetBeaconBlock(ctx, in)
|
||||
return c.getClient().GetBeaconBlock(ctx, in)
|
||||
}
|
||||
|
||||
func (c *grpcValidatorClient) FeeRecipientByPubKey(ctx context.Context, in *ethpb.FeeRecipientByPubKeyRequest) (*ethpb.FeeRecipientByPubKeyResponse, error) {
|
||||
return c.beaconNodeValidatorClient.GetFeeRecipientByPubKey(ctx, in)
|
||||
return c.getClient().GetFeeRecipientByPubKey(ctx, in)
|
||||
}
|
||||
|
||||
func (c *grpcValidatorClient) SyncCommitteeContribution(ctx context.Context, in *ethpb.SyncCommitteeContributionRequest) (*ethpb.SyncCommitteeContribution, error) {
	return c.beaconNodeValidatorClient.GetSyncCommitteeContribution(ctx, in)
	return c.getClient().GetSyncCommitteeContribution(ctx, in)
}

func (c *grpcValidatorClient) SyncMessageBlockRoot(ctx context.Context, in *empty.Empty) (*ethpb.SyncMessageBlockRootResponse, error) {
	return c.beaconNodeValidatorClient.GetSyncMessageBlockRoot(ctx, in)
	return c.getClient().GetSyncMessageBlockRoot(ctx, in)
}

func (c *grpcValidatorClient) SyncSubcommitteeIndex(ctx context.Context, in *ethpb.SyncSubcommitteeIndexRequest) (*ethpb.SyncSubcommitteeIndexResponse, error) {
	return c.beaconNodeValidatorClient.GetSyncSubcommitteeIndex(ctx, in)
	return c.getClient().GetSyncSubcommitteeIndex(ctx, in)
}

func (c *grpcValidatorClient) MultipleValidatorStatus(ctx context.Context, in *ethpb.MultipleValidatorStatusRequest) (*ethpb.MultipleValidatorStatusResponse, error) {
	return c.beaconNodeValidatorClient.MultipleValidatorStatus(ctx, in)
	return c.getClient().MultipleValidatorStatus(ctx, in)
}

func (c *grpcValidatorClient) PrepareBeaconProposer(ctx context.Context, in *ethpb.PrepareBeaconProposerRequest) (*empty.Empty, error) {
	return c.beaconNodeValidatorClient.PrepareBeaconProposer(ctx, in)
	return c.getClient().PrepareBeaconProposer(ctx, in)
}

func (c *grpcValidatorClient) ProposeAttestation(ctx context.Context, in *ethpb.Attestation) (*ethpb.AttestResponse, error) {
	return c.beaconNodeValidatorClient.ProposeAttestation(ctx, in)
	return c.getClient().ProposeAttestation(ctx, in)
}

func (c *grpcValidatorClient) ProposeAttestationElectra(ctx context.Context, in *ethpb.SingleAttestation) (*ethpb.AttestResponse, error) {
	return c.beaconNodeValidatorClient.ProposeAttestationElectra(ctx, in)
	return c.getClient().ProposeAttestationElectra(ctx, in)
}

func (c *grpcValidatorClient) ProposeBeaconBlock(ctx context.Context, in *ethpb.GenericSignedBeaconBlock) (*ethpb.ProposeResponse, error) {
	return c.beaconNodeValidatorClient.ProposeBeaconBlock(ctx, in)
	return c.getClient().ProposeBeaconBlock(ctx, in)
}

func (c *grpcValidatorClient) ProposeExit(ctx context.Context, in *ethpb.SignedVoluntaryExit) (*ethpb.ProposeExitResponse, error) {
	return c.beaconNodeValidatorClient.ProposeExit(ctx, in)
	return c.getClient().ProposeExit(ctx, in)
}

func (c *grpcValidatorClient) StreamBlocksAltair(ctx context.Context, in *ethpb.StreamBlocksRequest) (ethpb.BeaconNodeValidator_StreamBlocksAltairClient, error) {
	return c.beaconNodeValidatorClient.StreamBlocksAltair(ctx, in)
	return c.getClient().StreamBlocksAltair(ctx, in)
}

func (c *grpcValidatorClient) SubmitAggregateSelectionProof(ctx context.Context, in *ethpb.AggregateSelectionRequest, _ primitives.ValidatorIndex, _ uint64) (*ethpb.AggregateSelectionResponse, error) {
	return c.beaconNodeValidatorClient.SubmitAggregateSelectionProof(ctx, in)
	return c.getClient().SubmitAggregateSelectionProof(ctx, in)
}

func (c *grpcValidatorClient) SubmitAggregateSelectionProofElectra(ctx context.Context, in *ethpb.AggregateSelectionRequest, _ primitives.ValidatorIndex, _ uint64) (*ethpb.AggregateSelectionElectraResponse, error) {
	return c.beaconNodeValidatorClient.SubmitAggregateSelectionProofElectra(ctx, in)
	return c.getClient().SubmitAggregateSelectionProofElectra(ctx, in)
}

func (c *grpcValidatorClient) SubmitSignedAggregateSelectionProof(ctx context.Context, in *ethpb.SignedAggregateSubmitRequest) (*ethpb.SignedAggregateSubmitResponse, error) {
	return c.beaconNodeValidatorClient.SubmitSignedAggregateSelectionProof(ctx, in)
	return c.getClient().SubmitSignedAggregateSelectionProof(ctx, in)
}

func (c *grpcValidatorClient) SubmitSignedAggregateSelectionProofElectra(ctx context.Context, in *ethpb.SignedAggregateSubmitElectraRequest) (*ethpb.SignedAggregateSubmitResponse, error) {
	return c.beaconNodeValidatorClient.SubmitSignedAggregateSelectionProofElectra(ctx, in)
	return c.getClient().SubmitSignedAggregateSelectionProofElectra(ctx, in)
}

func (c *grpcValidatorClient) SubmitSignedContributionAndProof(ctx context.Context, in *ethpb.SignedContributionAndProof) (*empty.Empty, error) {
	return c.beaconNodeValidatorClient.SubmitSignedContributionAndProof(ctx, in)
	return c.getClient().SubmitSignedContributionAndProof(ctx, in)
}

func (c *grpcValidatorClient) SubmitSyncMessage(ctx context.Context, in *ethpb.SyncCommitteeMessage) (*empty.Empty, error) {
	return c.beaconNodeValidatorClient.SubmitSyncMessage(ctx, in)
	return c.getClient().SubmitSyncMessage(ctx, in)
}

func (c *grpcValidatorClient) SubmitValidatorRegistrations(ctx context.Context, in *ethpb.SignedValidatorRegistrationsV1) (*empty.Empty, error) {
	return c.beaconNodeValidatorClient.SubmitValidatorRegistrations(ctx, in)
	return c.getClient().SubmitValidatorRegistrations(ctx, in)
}

func (c *grpcValidatorClient) SubscribeCommitteeSubnets(ctx context.Context, in *ethpb.CommitteeSubnetsSubscribeRequest, _ []*ethpb.ValidatorDuty) (*empty.Empty, error) {
	return c.beaconNodeValidatorClient.SubscribeCommitteeSubnets(ctx, in)
	return c.getClient().SubscribeCommitteeSubnets(ctx, in)
}

func (c *grpcValidatorClient) ValidatorIndex(ctx context.Context, in *ethpb.ValidatorIndexRequest) (*ethpb.ValidatorIndexResponse, error) {
	return c.beaconNodeValidatorClient.ValidatorIndex(ctx, in)
	return c.getClient().ValidatorIndex(ctx, in)
}

func (c *grpcValidatorClient) ValidatorStatus(ctx context.Context, in *ethpb.ValidatorStatusRequest) (*ethpb.ValidatorStatusResponse, error) {
	return c.beaconNodeValidatorClient.ValidatorStatus(ctx, in)
	return c.getClient().ValidatorStatus(ctx, in)
}

// Deprecated: Do not use.
func (c *grpcValidatorClient) WaitForChainStart(ctx context.Context, in *empty.Empty) (*ethpb.ChainStartResponse, error) {
	stream, err := c.beaconNodeValidatorClient.WaitForChainStart(ctx, in)
	stream, err := c.getClient().WaitForChainStart(ctx, in)
	if err != nil {
		return nil, errors.Wrap(
			client.ErrConnectionIssue,
@@ -260,13 +260,13 @@ func (c *grpcValidatorClient) WaitForChainStart(ctx context.Context, in *empty.E
}

func (c *grpcValidatorClient) AssignValidatorToSubnet(ctx context.Context, in *ethpb.AssignValidatorToSubnetRequest) (*empty.Empty, error) {
	return c.beaconNodeValidatorClient.AssignValidatorToSubnet(ctx, in)
	return c.getClient().AssignValidatorToSubnet(ctx, in)
}
func (c *grpcValidatorClient) AggregatedSigAndAggregationBits(
	ctx context.Context,
	in *ethpb.AggregatedSigAndAggregationBitsRequest,
) (*ethpb.AggregatedSigAndAggregationBitsResponse, error) {
	return c.beaconNodeValidatorClient.AggregatedSigAndAggregationBits(ctx, in)
	return c.getClient().AggregatedSigAndAggregationBits(ctx, in)
}

func (*grpcValidatorClient) AggregatedSelections(context.Context, []iface.BeaconCommitteeSelection) ([]iface.BeaconCommitteeSelection, error) {
@@ -277,8 +277,12 @@ func (*grpcValidatorClient) AggregatedSyncSelections(context.Context, []iface.Sy
	return nil, iface.ErrNotSupported
}

func NewGrpcValidatorClient(cc grpc.ClientConnInterface) iface.ValidatorClient {
	return &grpcValidatorClient{ethpb.NewBeaconNodeValidatorClient(cc), false}
// NewGrpcValidatorClient creates a new gRPC validator client that supports
// dynamic connection switching via the NodeConnection's GrpcConnectionProvider.
func NewGrpcValidatorClient(conn validatorHelpers.NodeConnection) iface.ValidatorClient {
	return &grpcValidatorClient{
		grpcClientManager: newGrpcClientManager(conn, ethpb.NewBeaconNodeValidatorClient),
	}
}

func (c *grpcValidatorClient) StartEventStream(ctx context.Context, topics []string, eventsChannel chan<- *eventClient.Event) {
@@ -308,7 +312,7 @@ func (c *grpcValidatorClient) StartEventStream(ctx context.Context, topics []str
		log.Warn("gRPC only supports the head topic, other topics will be ignored")
	}

	stream, err := c.beaconNodeValidatorClient.StreamSlots(ctx, &ethpb.StreamSlotsRequest{VerifiedOnly: true})
	stream, err := c.getClient().StreamSlots(ctx, &ethpb.StreamSlotsRequest{VerifiedOnly: true})
	if err != nil {
		eventsChannel <- &eventClient.Event{
			EventType: eventClient.EventConnectionError,
@@ -374,11 +378,20 @@ func (c *grpcValidatorClient) EventStreamIsRunning() bool {
	return c.isEventStreamRunning
}

func (*grpcValidatorClient) Host() string {
	log.Warn(iface.ErrNotSupported)
	return ""
func (c *grpcValidatorClient) Host() string {
	return c.grpcClientManager.conn.GetGrpcConnectionProvider().CurrentHost()
}

func (*grpcValidatorClient) SetHost(_ string) {
	log.Warn(iface.ErrNotSupported)
func (c *grpcValidatorClient) SwitchHost(host string) {
	provider := c.grpcClientManager.conn.GetGrpcConnectionProvider()
	// Find the index of the requested host and switch to it
	for i, h := range provider.Hosts() {
		if h == host {
			if err := provider.SwitchHost(i); err != nil {
				log.WithError(err).WithField("host", host).Error("Failed to set gRPC host")
			}
			return
		}
	}
	log.WithField("host", host).Warn("Requested gRPC host not found in configured endpoints")
}

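newGrpcClientManager and the getClient() helper used throughout the methods above are defined elsewhere in this change set and are not visible in this excerpt. As a rough orientation only, a manager along the following lines would be enough to make the pattern work: it holds the NodeConnection plus a stub factory, and getClient() rebinds the protobuf stub to whatever *grpc.ClientConn the GrpcConnectionProvider currently exposes, so a host switch is picked up on the next RPC. The package name, field names and the non-generic signature here are assumptions; the real implementation (which may well be generic, since the same style is used for the node client) can differ.

// Hypothetical sketch of the client-manager pattern; not the PR's actual code.
package grpcapi // placeholder package name

import (
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	validatorHelpers "github.com/OffchainLabs/prysm/v7/validator/helpers"
	"google.golang.org/grpc"
)

// grpcClientManager keeps the NodeConnection and a factory that turns a raw
// gRPC connection into a typed BeaconNodeValidator stub.
type grpcClientManager struct {
	conn    validatorHelpers.NodeConnection
	factory func(grpc.ClientConnInterface) ethpb.BeaconNodeValidatorClient
}

func newGrpcClientManager(
	conn validatorHelpers.NodeConnection,
	factory func(grpc.ClientConnInterface) ethpb.BeaconNodeValidatorClient,
) grpcClientManager {
	return grpcClientManager{conn: conn, factory: factory}
}

// getClient binds the stub to the provider's current connection on every call,
// so RPCs issued after SwitchHost automatically target the new endpoint.
func (m *grpcClientManager) getClient() ethpb.BeaconNodeValidatorClient {
	return m.factory(m.conn.GetGrpcConnectionProvider().CurrentConn())
}
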
@@ -14,8 +14,10 @@ import (
	"github.com/OffchainLabs/prysm/v7/testing/assert"
	mock2 "github.com/OffchainLabs/prysm/v7/testing/mock"
	"github.com/OffchainLabs/prysm/v7/testing/require"
	validatorTesting "github.com/OffchainLabs/prysm/v7/validator/testing"
	logTest "github.com/sirupsen/logrus/hooks/test"
	"go.uber.org/mock/gomock"
	"google.golang.org/grpc"
	"google.golang.org/protobuf/types/known/emptypb"
)

@@ -133,7 +135,15 @@ func TestWaitForChainStart_StreamSetupFails(t *testing.T) {
		gomock.Any(),
	).Return(nil, errors.New("failed stream"))

	validatorClient := &grpcValidatorClient{beaconNodeValidatorClient, true}
	validatorClient := &grpcValidatorClient{
		grpcClientManager: newGrpcClientManager(
			validatorTesting.MockNodeConnection(),
			func(_ grpc.ClientConnInterface) eth.BeaconNodeValidatorClient {
				return beaconNodeValidatorClient
			},
		),
		isEventStreamRunning: true,
	}
	_, err := validatorClient.WaitForChainStart(t.Context(), &emptypb.Empty{})
	want := "could not setup beacon chain ChainStart streaming client"
	assert.ErrorContains(t, want, err)
@@ -146,7 +156,15 @@ func TestStartEventStream(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	beaconNodeValidatorClient := mock2.NewMockBeaconNodeValidatorClient(ctrl)
	grpcClient := &grpcValidatorClient{beaconNodeValidatorClient, true}
	grpcClient := &grpcValidatorClient{
		grpcClientManager: newGrpcClientManager(
			validatorTesting.MockNodeConnection(),
			func(_ grpc.ClientConnInterface) eth.BeaconNodeValidatorClient {
				return beaconNodeValidatorClient
			},
		),
		isEventStreamRunning: true,
	}
	tests := []struct {
		name   string
		topics []string

@@ -152,5 +152,5 @@ type ValidatorClient interface {
	AggregatedSelections(ctx context.Context, selections []BeaconCommitteeSelection) ([]BeaconCommitteeSelection, error)
	AggregatedSyncSelections(ctx context.Context, selections []SyncCommitteeSelection) ([]SyncCommitteeSelection, error)
	Host() string
	SetHost(host string)
	SwitchHost(host string)
}

@@ -1,53 +0,0 @@
package client

import (
	"strings"

	"google.golang.org/grpc/resolver"
)

// Modification of a default grpc passthrough resolver (google.golang.org/grpc/resolver/passthrough) allowing to use multiple addresses
// in grpc endpoint. Example:
// conn, err := grpc.DialContext(ctx, "127.0.0.1:4000,127.0.0.1:4001", grpc.WithInsecure(), grpc.WithResolvers(&multipleEndpointsGrpcResolverBuilder{}))
// It can be used with any grpc load balancer (pick_first, round_robin). Default is pick_first.
// Round robin can be used by adding the following option:
// grpc.WithDefaultServiceConfig("{\"loadBalancingConfig\":[{\"round_robin\":{}}]}")
type multipleEndpointsGrpcResolverBuilder struct{}

// Build creates and starts multiple endpoints resolver.
func (*multipleEndpointsGrpcResolverBuilder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) {
	r := &multipleEndpointsGrpcResolver{
		target: target,
		cc:     cc,
	}
	r.start()
	return r, nil
}

// Scheme returns default scheme.
func (*multipleEndpointsGrpcResolverBuilder) Scheme() string {
	return resolver.GetDefaultScheme()
}

type multipleEndpointsGrpcResolver struct {
	target resolver.Target
	cc     resolver.ClientConn
}

func (r *multipleEndpointsGrpcResolver) start() {
	ep := r.target.Endpoint()
	endpoints := strings.Split(ep, ",")
	var addrs []resolver.Address
	for _, endpoint := range endpoints {
		addrs = append(addrs, resolver.Address{Addr: endpoint, ServerName: endpoint})
	}
	if err := r.cc.UpdateState(resolver.State{Addresses: addrs}); err != nil {
		log.WithError(err).Error("Failed to update grpc connection state")
	}
}

// ResolveNow --
func (*multipleEndpointsGrpcResolver) ResolveNow(_ resolver.ResolveNowOptions) {}

// Close --
func (*multipleEndpointsGrpcResolver) Close() {}

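With this resolver deleted, splitting a comma-separated endpoint list presumably moves into the gRPC connection provider itself rather than gRPC name resolution. The snippet below only illustrates that parsing step, reusing the same strings.Split approach the removed resolver used; the provider's real parsing code is not part of this excerpt, so the helper name and behavior are assumptions.

// Illustration only: turning the legacy "host1,host2" endpoint string into a
// host list that a failover-aware connection provider could rotate through.
package main

import (
	"fmt"
	"strings"
)

func splitEndpoints(endpoint string) []string {
	var hosts []string
	for _, h := range strings.Split(endpoint, ",") {
		if trimmed := strings.TrimSpace(h); trimmed != "" {
			hosts = append(hosts, trimmed)
		}
	}
	return hosts
}

func main() {
	fmt.Println(splitEndpoints("127.0.0.1:4000,127.0.0.1:4001"))
	// Prints: [127.0.0.1:4000 127.0.0.1:4001]
}
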
@@ -8,11 +8,10 @@ import (
	validatorHelpers "github.com/OffchainLabs/prysm/v7/validator/helpers"
)

func NewNodeClient(validatorConn validatorHelpers.NodeConnection, jsonRestHandler beaconApi.RestHandler) iface.NodeClient {
	grpcClient := grpcApi.NewNodeClient(validatorConn.GetGrpcClientConn())
func NewNodeClient(validatorConn validatorHelpers.NodeConnection) iface.NodeClient {
	grpcClient := grpcApi.NewNodeClient(validatorConn)
	if features.Get().EnableBeaconRESTApi {
		return beaconApi.NewNodeClientWithFallback(jsonRestHandler, grpcClient)
	} else {
		return grpcClient
		return beaconApi.NewNodeClientWithFallback(validatorConn.GetRestHandler(), grpcClient)
	}
	return grpcClient
}

@@ -2,13 +2,11 @@ package client

import (
	"context"
	"net/http"
	"strings"
	"time"

	api "github.com/OffchainLabs/prysm/v7/api/client"
	eventClient "github.com/OffchainLabs/prysm/v7/api/client/event"
	grpcutil "github.com/OffchainLabs/prysm/v7/api/grpc"
	"github.com/OffchainLabs/prysm/v7/api/rest"
	"github.com/OffchainLabs/prysm/v7/async/event"
	lruwrpr "github.com/OffchainLabs/prysm/v7/cache/lru"
	fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
@@ -17,7 +15,6 @@ import (
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v7/validator/accounts/wallet"
	beaconApi "github.com/OffchainLabs/prysm/v7/validator/client/beacon-api"
	beaconChainClientFactory "github.com/OffchainLabs/prysm/v7/validator/client/beacon-chain-client-factory"
	"github.com/OffchainLabs/prysm/v7/validator/client/iface"
	nodeclientfactory "github.com/OffchainLabs/prysm/v7/validator/client/node-client-factory"
@@ -35,7 +32,6 @@ import (
	grpcprometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
	"github.com/pkg/errors"
	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
	"google.golang.org/protobuf/proto"
@@ -72,6 +68,7 @@ type Config struct {
	DB db.Database
	Wallet *wallet.Wallet
	WalletInitializedFeed *event.Feed
	Conn validatorHelpers.NodeConnection // Optional: pre-built connection (if nil, built from endpoint configs)
	MaxHealthChecks int
	GRPCMaxCallRecvMsgSize int
	GRPCRetries uint
@@ -122,6 +119,12 @@ func NewValidatorService(ctx context.Context, cfg *Config) (*ValidatorService, e
		maxHealthChecks: cfg.MaxHealthChecks,
	}

	// Use pre-built connection if provided
	if cfg.Conn != nil {
		s.conn = cfg.Conn
		return s, nil
	}

	dialOpts := ConstructDialOptions(
		cfg.GRPCMaxCallRecvMsgSize,
		cfg.BeaconNodeCert,
@@ -134,19 +137,21 @@ func NewValidatorService(ctx context.Context, cfg *Config) (*ValidatorService, e

	s.ctx = grpcutil.AppendHeaders(ctx, cfg.GRPCHeaders)

	grpcConn, err := grpc.DialContext(ctx, cfg.BeaconNodeGRPCEndpoint, dialOpts...)
	conn, err := validatorHelpers.NewNodeConnection(
		validatorHelpers.WithGRPC(s.ctx, cfg.BeaconNodeGRPCEndpoint, dialOpts),
		validatorHelpers.WithREST(cfg.BeaconApiEndpoint,
			rest.WithHttpHeaders(cfg.BeaconApiHeaders),
			rest.WithHttpTimeout(cfg.BeaconApiTimeout),
			rest.WithTracing(),
		),
	)
	if err != nil {
		return s, err
	}
	if cfg.BeaconNodeCert != "" {
	if cfg.BeaconNodeCert != "" && cfg.BeaconNodeGRPCEndpoint != "" {
		log.Info("Established secure gRPC connection")
	}
	s.conn = validatorHelpers.NewNodeConnection(
		grpcConn,
		cfg.BeaconApiEndpoint,
		validatorHelpers.WithBeaconApiHeaders(cfg.BeaconApiHeaders),
		validatorHelpers.WithBeaconApiTimeout(cfg.BeaconApiTimeout),
	)
	s.conn = conn

	return s, nil
}
@@ -181,20 +186,13 @@ func (v *ValidatorService) Start() {
		return
	}

	u := strings.ReplaceAll(v.conn.GetBeaconApiUrl(), " ", "")
	hosts := strings.Split(u, ",")
	if len(hosts) == 0 {
		log.WithError(err).Error("No API hosts provided")
	restProvider := v.conn.GetRestConnectionProvider()
	if restProvider == nil || len(restProvider.Hosts()) == 0 {
		log.Error("No REST API hosts provided")
		return
	}

	headersTransport := api.NewCustomHeadersTransport(http.DefaultTransport, v.conn.GetBeaconApiHeaders())
	restHandler := beaconApi.NewBeaconApiRestHandler(
		http.Client{Timeout: v.conn.GetBeaconApiTimeout(), Transport: otelhttp.NewTransport(headersTransport)},
		hosts[0],
	)

	validatorClient := validatorclientfactory.NewValidatorClient(v.conn, restHandler)
	validatorClient := validatorclientfactory.NewValidatorClient(v.conn)

	v.validator = &validator{
		slotFeed: new(event.Feed),
@@ -208,12 +206,12 @@ func (v *ValidatorService) Start() {
		graffiti: v.graffiti,
		graffitiStruct: v.graffitiStruct,
		graffitiOrderedIndex: graffitiOrderedIndex,
		beaconNodeHosts: hosts,
		conn: v.conn,
		currentHostIndex: 0,
		validatorClient: validatorClient,
		chainClient: beaconChainClientFactory.NewChainClient(v.conn, restHandler),
		nodeClient: nodeclientfactory.NewNodeClient(v.conn, restHandler),
		prysmChainClient: beaconChainClientFactory.NewPrysmChainClient(v.conn, restHandler),
		chainClient: beaconChainClientFactory.NewChainClient(v.conn),
		nodeClient: nodeclientfactory.NewNodeClient(v.conn),
		prysmChainClient: beaconChainClientFactory.NewPrysmChainClient(v.conn),
		db: v.db,
		km: nil,
		web3SignerConfig: v.web3SignerConfig,
@@ -369,7 +367,6 @@ func ConstructDialOptions(
			grpcprometheus.StreamClientInterceptor,
			grpcretry.StreamClientInterceptor(),
		),
		grpc.WithResolvers(&multipleEndpointsGrpcResolverBuilder{}),
	}

	dialOpts = append(dialOpts, extraOpts...)

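The new optional Config.Conn field means a caller can assemble the NodeConnection up front and skip the dialing block above entirely. A hedged usage sketch follows, using only the constructors that appear in this diff; the endpoint values, dial options, and the Config fields omitted here are illustrative, and a real deployment would populate the rest of Config as before.

// Illustrative only: inject a pre-built NodeConnection via Config.Conn.
package main

import (
	"context"
	"log"
	"time"

	"github.com/OffchainLabs/prysm/v7/api/rest"
	validatorclient "github.com/OffchainLabs/prysm/v7/validator/client"
	validatorHelpers "github.com/OffchainLabs/prysm/v7/validator/helpers"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	ctx := context.Background()
	conn, err := validatorHelpers.NewNodeConnection(
		// Placeholder endpoints; credentials here are insecure for brevity.
		validatorHelpers.WithGRPC(ctx, "localhost:4000", []grpc.DialOption{
			grpc.WithTransportCredentials(insecure.NewCredentials()),
		}),
		validatorHelpers.WithREST("http://localhost:3500",
			rest.WithHttpTimeout(10*time.Second),
		),
	)
	if err != nil {
		log.Fatal(err)
	}

	// When Conn is non-nil, NewValidatorService uses it and skips its own dialing.
	svc, err := validatorclient.NewValidatorService(ctx, &validatorclient.Config{Conn: conn})
	if err != nil {
		log.Fatal(err)
	}
	_ = svc
}
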
@@ -33,7 +33,10 @@ func TestStop_CancelsContext(t *testing.T) {

func TestNew_Insecure(t *testing.T) {
	hook := logTest.NewGlobal()
	_, err := NewValidatorService(t.Context(), &Config{})
	_, err := NewValidatorService(t.Context(), &Config{
		BeaconNodeGRPCEndpoint: "localhost:4000",
		BeaconApiEndpoint: "http://localhost:3500",
	})
	require.NoError(t, err)
	require.LogsContain(t, hook, "You are using an insecure gRPC connection")
}
@@ -58,7 +61,11 @@ func TestStart_GrpcHeaders(t *testing.T) {
		"Authorization", "this is a valid value",
		},
	} {
		cfg := &Config{GRPCHeaders: strings.Split(input, ",")}
		cfg := &Config{
			BeaconNodeGRPCEndpoint: "localhost:4000",
			BeaconApiEndpoint: "http://localhost:3500",
			GRPCHeaders: strings.Split(input, ","),
		}
		validatorService, err := NewValidatorService(ctx, cfg)
		require.NoError(t, err)
		md, _ := metadata.FromOutgoingContext(validatorService.ctx)

@@ -10,12 +10,10 @@ import (

func NewValidatorClient(
	validatorConn validatorHelpers.NodeConnection,
	jsonRestHandler beaconApi.RestHandler,
	opt ...beaconApi.ValidatorClientOpt,
) iface.ValidatorClient {
	if features.Get().EnableBeaconRESTApi {
		return beaconApi.NewBeaconApiValidatorClient(jsonRestHandler, opt...)
	} else {
		return grpcApi.NewGrpcValidatorClient(validatorConn.GetGrpcClientConn())
		return beaconApi.NewBeaconApiValidatorClient(validatorConn.GetRestHandler(), opt...)
	}
	return grpcApi.NewGrpcValidatorClient(validatorConn)
}

@@ -38,6 +38,7 @@ import (
	"github.com/OffchainLabs/prysm/v7/validator/db"
	dbCommon "github.com/OffchainLabs/prysm/v7/validator/db/common"
	"github.com/OffchainLabs/prysm/v7/validator/graffiti"
	validatorHelpers "github.com/OffchainLabs/prysm/v7/validator/helpers"
	"github.com/OffchainLabs/prysm/v7/validator/keymanager"
	"github.com/OffchainLabs/prysm/v7/validator/keymanager/local"
	remoteweb3signer "github.com/OffchainLabs/prysm/v7/validator/keymanager/remote-web3signer"
@@ -101,9 +102,9 @@ type validator struct {
	pubkeyToStatus map[[fieldparams.BLSPubkeyLength]byte]*validatorStatus
	wallet *wallet.Wallet
	walletInitializedChan chan *wallet.Wallet
	currentHostIndex uint64
	walletInitializedFeed *event.Feed
	graffitiOrderedIndex uint64
	conn validatorHelpers.NodeConnection
	submittedAtts map[submittedAttKey]*submittedAtt
	validatorsRegBatchSize int
	validatorClient iface.ValidatorClient
@@ -114,7 +115,7 @@ type validator struct {
	km keymanager.IKeymanager
	accountChangedSub event.Subscription
	ticker slots.Ticker
	beaconNodeHosts []string
	currentHostIndex uint64
	genesisTime time.Time
	graffiti []byte
	voteStats voteStats
@@ -1311,34 +1312,64 @@ func (v *validator) Host() string {
}

func (v *validator) changeHost() {
	next := (v.currentHostIndex + 1) % uint64(len(v.beaconNodeHosts))
	hosts := v.hosts()
	if len(hosts) <= 1 {
		return
	}
	next := (v.currentHostIndex + 1) % uint64(len(hosts))
	log.WithFields(logrus.Fields{
		"currentHost": v.beaconNodeHosts[v.currentHostIndex],
		"nextHost": v.beaconNodeHosts[next],
		"currentHost": hosts[v.currentHostIndex],
		"nextHost": hosts[next],
	}).Warn("Beacon node is not responding, switching host")
	v.validatorClient.SetHost(v.beaconNodeHosts[next])
	v.validatorClient.SwitchHost(hosts[next])
	v.currentHostIndex = next
}

// hosts returns the list of configured beacon node hosts.
func (v *validator) hosts() []string {
	if features.Get().EnableBeaconRESTApi {
		return v.conn.GetRestConnectionProvider().Hosts()
	}
	return v.conn.GetGrpcConnectionProvider().Hosts()
}

// numHosts returns the number of configured beacon node hosts.
func (v *validator) numHosts() int {
	return len(v.hosts())
}

func (v *validator) FindHealthyHost(ctx context.Context) bool {
	// Tail-recursive closure keeps retry count private.
	var check func(remaining int) bool
	check = func(remaining int) bool {
		if v.nodeClient.IsReady(ctx) { // ready → done
	numHosts := v.numHosts()
	startingHost := v.Host()
	attemptedHosts := []string{}

	// Check all hosts for a fully synced node
	for i := range numHosts {
		if v.nodeClient.IsReady(ctx) {
			if len(attemptedHosts) > 0 {
				log.WithFields(logrus.Fields{
					"previousHost": startingHost,
					"newHost": v.Host(),
					"failedAttempts": attemptedHosts,
				}).Info("Failover succeeded: connected to healthy beacon node")
			}
			return true
		}
		if len(v.beaconNodeHosts) == 1 && features.Get().EnableBeaconRESTApi {
			log.WithField("host", v.Host()).Warn("Beacon node is not responding, no backup node configured")
			return false
		log.WithField("host", v.Host()).Debug("Beacon node not fully synced")
		attemptedHosts = append(attemptedHosts, v.Host())

		// Try next host if not the last iteration
		if i < numHosts-1 {
			v.changeHost()
		}
		if remaining == 0 || !features.Get().EnableBeaconRESTApi {
			return false // exhausted or REST disabled
		}
		v.changeHost()
		return check(remaining - 1) // recurse
	}

	return check(len(v.beaconNodeHosts))
	if numHosts == 1 {
		log.WithField("host", v.Host()).Warn("Beacon node is not fully synced, no backup node configured")
	} else {
		log.Warn("No fully synced beacon node found")
	}
	return false
}

func (v *validator) filterAndCacheActiveKeys(ctx context.Context, pubkeys [][fieldparams.BLSPubkeyLength]byte, slot primitives.Slot) ([][fieldparams.BLSPubkeyLength]byte, error) {

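The reworked FindHealthyHost relies on changeHost's modulo rotation to visit every configured endpoint exactly once before giving up. The self-contained sketch below restates that rotation with plain values instead of the validator struct, purely to make the invariant explicit; the host names and the isReady stand-in are made up for illustration.

// Minimal illustration of the round-robin rotation behind changeHost /
// FindHealthyHost: starting from the current index, each remaining host is
// tried exactly once before the search reports failure.
package main

import "fmt"

func main() {
	hosts := []string{"node-a:4000", "node-b:4000", "node-c:4000"}
	current := 0 // stands in for currentHostIndex

	// Pretend only one node is fully synced.
	isReady := func(host string) bool { return host == "node-c:4000" }

	attempted := []string{}
	healthy := false
	for i := 0; i < len(hosts); i++ {
		if isReady(hosts[current]) {
			healthy = true
			break
		}
		attempted = append(attempted, hosts[current])
		if i < len(hosts)-1 {
			current = (current + 1) % len(hosts) // changeHost equivalent
		}
	}
	fmt.Println("healthy:", healthy, "attempted:", attempted, "current:", hosts[current])
	// healthy: true attempted: [node-a:4000 node-b:4000] current: node-c:4000
}
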
@@ -16,6 +16,8 @@ import (
	"testing"
	"time"

	grpcutil "github.com/OffchainLabs/prysm/v7/api/grpc"
	"github.com/OffchainLabs/prysm/v7/api/rest"
	"github.com/OffchainLabs/prysm/v7/api/server/structs"
	"github.com/OffchainLabs/prysm/v7/async/event"
	"github.com/OffchainLabs/prysm/v7/cmd/validator/flags"
@@ -37,6 +39,7 @@ import (
	"github.com/OffchainLabs/prysm/v7/validator/accounts/wallet"
	"github.com/OffchainLabs/prysm/v7/validator/client/iface"
	dbTest "github.com/OffchainLabs/prysm/v7/validator/db/testing"
	validatorHelpers "github.com/OffchainLabs/prysm/v7/validator/helpers"
	"github.com/OffchainLabs/prysm/v7/validator/keymanager"
	"github.com/OffchainLabs/prysm/v7/validator/keymanager/local"
	remoteweb3signer "github.com/OffchainLabs/prysm/v7/validator/keymanager/remote-web3signer"
@@ -2792,18 +2795,27 @@ func TestValidator_Host(t *testing.T) {
}

func TestValidator_ChangeHost(t *testing.T) {
	// Enable REST API mode for this test since changeHost only calls SwitchHost in REST API mode
	resetCfg := features.InitWithReset(&features.Flags{EnableBeaconRESTApi: true})
	defer resetCfg()

	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	hosts := []string{"http://localhost:8080", "http://localhost:8081"}
	restProvider := &rest.MockRestProvider{MockHosts: hosts}
	conn, err := validatorHelpers.NewNodeConnection(validatorHelpers.WithRestProvider(restProvider))
	require.NoError(t, err)

	client := validatormock.NewMockValidatorClient(ctrl)
	v := validator{
		validatorClient: client,
		beaconNodeHosts: []string{"http://localhost:8080", "http://localhost:8081"},
		conn: conn,
		currentHostIndex: 0,
	}

	client.EXPECT().SetHost(v.beaconNodeHosts[1])
	client.EXPECT().SetHost(v.beaconNodeHosts[0])
	client.EXPECT().SwitchHost(hosts[1])
	client.EXPECT().SwitchHost(hosts[0])
	v.changeHost()
	assert.Equal(t, uint64(1), v.currentHostIndex)
	v.changeHost()
@@ -2838,12 +2850,16 @@ func TestUpdateValidatorStatusCache(t *testing.T) {
		gomock.Any(),
		gomock.Any()).Return(mockResponse, nil)

	mockProvider := &grpcutil.MockGrpcProvider{MockHosts: []string{"localhost:4000", "localhost:4001"}}
	conn, err := validatorHelpers.NewNodeConnection(validatorHelpers.WithGRPCProvider(mockProvider))
	require.NoError(t, err)

	v := &validator{
		validatorClient: client,
		beaconNodeHosts: []string{"http://localhost:8080", "http://localhost:8081"},
		conn: conn,
		currentHostIndex: 0,
		pubkeyToStatus: map[[fieldparams.BLSPubkeyLength]byte]*validatorStatus{
			[fieldparams.BLSPubkeyLength]byte{0x03}: &validatorStatus{ // add non existent key and status to cache, should be fully removed on update
			[fieldparams.BLSPubkeyLength]byte{0x03}: { // add non existent key and status to cache, should be fully removed on update
				publicKey: []byte{0x03},
				status: &ethpb.ValidatorStatusResponse{
					Status: ethpb.ValidatorStatus_ACTIVE,
@@ -2853,7 +2869,7 @@ func TestUpdateValidatorStatusCache(t *testing.T) {
		},
	}

	err := v.updateValidatorStatusCache(ctx, pubkeys)
	err = v.updateValidatorStatusCache(ctx, pubkeys)
	assert.NoError(t, err)

	// make sure the nonexistent key is fully removed

@@ -10,6 +10,8 @@ go_library(
    importpath = "github.com/OffchainLabs/prysm/v7/validator/helpers",
    visibility = ["//visibility:public"],
    deps = [
        "//api/grpc:go_default_library",
        "//api/rest:go_default_library",
        "//config/fieldparams:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//validator/db/iface:go_default_library",
@@ -24,18 +26,23 @@ go_test(
    srcs = [
        "converts_test.go",
        "metadata_test.go",
        "node_connection_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        "//api/grpc:go_default_library",
        "//api/rest:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/proposer:go_default_library",
        "//consensus-types/interfaces:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//testing/assert:go_default_library",
        "//testing/require:go_default_library",
        "//validator/db/common:go_default_library",
        "//validator/db/iface:go_default_library",
        "//validator/slashing-protection-history/format:go_default_library",
        "@com_github_prometheus_client_golang//prometheus:go_default_library",
        "@org_golang_google_grpc//:go_default_library",
    ],
)

@@ -1,78 +1,120 @@
package helpers

import (
	"time"
	"context"

	grpcutil "github.com/OffchainLabs/prysm/v7/api/grpc"
	"github.com/OffchainLabs/prysm/v7/api/rest"
	"github.com/pkg/errors"
	"google.golang.org/grpc"
)

// Use an interface with a private dummy function to force all other packages to call NewNodeConnection
// NodeConnection provides access to both gRPC and REST API connections to a beacon node.
type NodeConnection interface {
	// GetGrpcClientConn returns the current gRPC client connection.
	// Returns nil if no gRPC provider is configured.
	GetGrpcClientConn() *grpc.ClientConn
	GetBeaconApiUrl() string
	GetBeaconApiHeaders() map[string][]string
	setBeaconApiHeaders(map[string][]string)
	GetBeaconApiTimeout() time.Duration
	setBeaconApiTimeout(time.Duration)
	dummy()
	// GetGrpcConnectionProvider returns the gRPC connection provider.
	GetGrpcConnectionProvider() grpcutil.GrpcConnectionProvider
	// GetRestConnectionProvider returns the REST connection provider.
	GetRestConnectionProvider() rest.RestConnectionProvider
	// GetRestHandler returns the REST handler for making API requests.
	// Returns nil if no REST provider is configured.
	GetRestHandler() rest.RestHandler
}

type nodeConnection struct {
	grpcClientConn *grpc.ClientConn
	beaconApiUrl string
	beaconApiHeaders map[string][]string
	beaconApiTimeout time.Duration
}

// NodeConnectionOption is a functional option for configuring the node connection.
type NodeConnectionOption func(nc NodeConnection)

// WithBeaconApiHeaders sets the HTTP headers that should be sent to the server along with each request.
func WithBeaconApiHeaders(headers map[string][]string) NodeConnectionOption {
	return func(nc NodeConnection) {
		nc.setBeaconApiHeaders(headers)
	}
}

// WithBeaconApiTimeout sets the HTTP request timeout.
func WithBeaconApiTimeout(timeout time.Duration) NodeConnectionOption {
	return func(nc NodeConnection) {
		nc.setBeaconApiTimeout(timeout)
	grpcConnectionProvider grpcutil.GrpcConnectionProvider
	restConnectionProvider rest.RestConnectionProvider
}

func (c *nodeConnection) GetGrpcClientConn() *grpc.ClientConn {
	return c.grpcClientConn
}

func (c *nodeConnection) GetBeaconApiUrl() string {
	return c.beaconApiUrl
}

func (c *nodeConnection) GetBeaconApiHeaders() map[string][]string {
	return c.beaconApiHeaders
}

func (c *nodeConnection) setBeaconApiHeaders(headers map[string][]string) {
	c.beaconApiHeaders = headers
}

func (c *nodeConnection) GetBeaconApiTimeout() time.Duration {
	return c.beaconApiTimeout
}

func (c *nodeConnection) setBeaconApiTimeout(timeout time.Duration) {
	c.beaconApiTimeout = timeout
}

func (*nodeConnection) dummy() {}

func NewNodeConnection(grpcConn *grpc.ClientConn, beaconApiUrl string, opts ...NodeConnectionOption) NodeConnection {
	conn := &nodeConnection{}
	conn.grpcClientConn = grpcConn
	conn.beaconApiUrl = beaconApiUrl
	for _, opt := range opts {
		opt(conn)
	if c.grpcConnectionProvider == nil {
		return nil
	}
	return conn
	return c.grpcConnectionProvider.CurrentConn()
}

func (c *nodeConnection) GetGrpcConnectionProvider() grpcutil.GrpcConnectionProvider {
	return c.grpcConnectionProvider
}

func (c *nodeConnection) GetRestConnectionProvider() rest.RestConnectionProvider {
	return c.restConnectionProvider
}

func (c *nodeConnection) GetRestHandler() rest.RestHandler {
	if c.restConnectionProvider == nil {
		return nil
	}
	return c.restConnectionProvider.RestHandler()
}

// NodeConnectionOption is a functional option for configuring a NodeConnection.
type NodeConnectionOption func(*nodeConnection) error

// WithGRPC configures a gRPC connection provider for the NodeConnection.
// If endpoint is empty, this option is a no-op.
func WithGRPC(ctx context.Context, endpoint string, dialOpts []grpc.DialOption) NodeConnectionOption {
	return func(c *nodeConnection) error {
		if endpoint == "" {
			return nil
		}
		provider, err := grpcutil.NewGrpcConnectionProvider(ctx, endpoint, dialOpts)
		if err != nil {
			return errors.Wrap(err, "failed to create gRPC connection provider")
		}
		c.grpcConnectionProvider = provider
		return nil
	}
}

// WithREST configures a REST connection provider for the NodeConnection.
// If endpoint is empty, this option is a no-op.
func WithREST(endpoint string, opts ...rest.RestConnectionProviderOption) NodeConnectionOption {
	return func(c *nodeConnection) error {
		if endpoint == "" {
			return nil
		}
		provider, err := rest.NewRestConnectionProvider(endpoint, opts...)
		if err != nil {
			return errors.Wrap(err, "failed to create REST connection provider")
		}
		c.restConnectionProvider = provider
		return nil
	}
}

// WithGRPCProvider sets a pre-built gRPC connection provider.
func WithGRPCProvider(provider grpcutil.GrpcConnectionProvider) NodeConnectionOption {
	return func(c *nodeConnection) error {
		c.grpcConnectionProvider = provider
		return nil
	}
}

// WithRestProvider sets a pre-built REST connection provider.
func WithRestProvider(provider rest.RestConnectionProvider) NodeConnectionOption {
	return func(c *nodeConnection) error {
		c.restConnectionProvider = provider
		return nil
	}
}

// NewNodeConnection creates a new NodeConnection with the given options.
// At least one provider (gRPC or REST) must be configured via options.
// Returns an error if no providers are configured.
func NewNodeConnection(opts ...NodeConnectionOption) (NodeConnection, error) {
	c := &nodeConnection{}
	for _, opt := range opts {
		if err := opt(c); err != nil {
			return nil, err
		}
	}

	if c.grpcConnectionProvider == nil && c.restConnectionProvider == nil {
		return nil, errors.New("at least one beacon node endpoint must be provided (--beacon-rpc-provider or --beacon-rest-api-provider)")
	}

	return c, nil
}

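A short usage sketch of the options API above, combining an endpoint-based gRPC provider with a REST provider and reading back the configured hosts. The endpoint values and dial options are placeholders, and error handling is reduced to a fatal log for brevity.

// Illustrative wiring of NewNodeConnection with both transports configured.
package main

import (
	"context"
	"fmt"
	"log"

	validatorHelpers "github.com/OffchainLabs/prysm/v7/validator/helpers"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := validatorHelpers.NewNodeConnection(
		validatorHelpers.WithGRPC(context.Background(), "localhost:4000", []grpc.DialOption{
			grpc.WithTransportCredentials(insecure.NewCredentials()),
		}),
		validatorHelpers.WithREST("http://localhost:3500"),
	)
	if err != nil {
		log.Fatal(err) // returned when neither endpoint is configured
	}

	// The providers expose the configured hosts and back the failover logic.
	fmt.Println("gRPC hosts:", conn.GetGrpcConnectionProvider().Hosts())
	fmt.Println("REST hosts:", conn.GetRestConnectionProvider().Hosts())
	_ = conn.GetRestHandler() // handler consumed by the REST API client
}
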
101
validator/helpers/node_connection_test.go
Normal file
@@ -0,0 +1,101 @@
package helpers

import (
	"context"
	"testing"

	grpcutil "github.com/OffchainLabs/prysm/v7/api/grpc"
	"github.com/OffchainLabs/prysm/v7/api/rest"
	"github.com/OffchainLabs/prysm/v7/testing/assert"
	"github.com/OffchainLabs/prysm/v7/testing/require"
	"google.golang.org/grpc"
)

func TestNewNodeConnection(t *testing.T) {
	t.Run("with both providers", func(t *testing.T) {
		grpcProvider := &grpcutil.MockGrpcProvider{MockHosts: []string{"localhost:4000"}}
		restProvider := &rest.MockRestProvider{MockHosts: []string{"http://localhost:3500"}}
		conn, err := NewNodeConnection(
			WithGRPCProvider(grpcProvider),
			WithRestProvider(restProvider),
		)
		require.NoError(t, err)

		assert.Equal(t, grpcProvider, conn.GetGrpcConnectionProvider())
		assert.Equal(t, restProvider, conn.GetRestConnectionProvider())
	})

	t.Run("with only rest provider", func(t *testing.T) {
		restProvider := &rest.MockRestProvider{MockHosts: []string{"http://localhost:3500"}}
		conn, err := NewNodeConnection(WithRestProvider(restProvider))
		require.NoError(t, err)

		assert.Equal(t, (grpcutil.GrpcConnectionProvider)(nil), conn.GetGrpcConnectionProvider())
		assert.Equal(t, (*grpc.ClientConn)(nil), conn.GetGrpcClientConn())
		assert.Equal(t, restProvider, conn.GetRestConnectionProvider())
	})

	t.Run("with only grpc provider", func(t *testing.T) {
		grpcProvider := &grpcutil.MockGrpcProvider{MockHosts: []string{"localhost:4000"}}
		conn, err := NewNodeConnection(WithGRPCProvider(grpcProvider))
		require.NoError(t, err)

		assert.Equal(t, grpcProvider, conn.GetGrpcConnectionProvider())
		assert.Equal(t, (rest.RestConnectionProvider)(nil), conn.GetRestConnectionProvider())
		assert.Equal(t, (rest.RestHandler)(nil), conn.GetRestHandler())
	})

	t.Run("with no providers returns error", func(t *testing.T) {
		conn, err := NewNodeConnection()
		require.ErrorContains(t, "at least one beacon node endpoint must be provided", err)
		assert.Equal(t, (NodeConnection)(nil), conn)
	})

	t.Run("with empty endpoints is no-op", func(t *testing.T) {
		// Empty endpoints should be skipped, resulting in no providers
		conn, err := NewNodeConnection(
			WithGRPC(context.Background(), "", nil),
			WithREST(""),
		)
		require.ErrorContains(t, "at least one beacon node endpoint must be provided", err)
		assert.Equal(t, (NodeConnection)(nil), conn)
	})
}

func TestNodeConnection_GetGrpcClientConn(t *testing.T) {
	t.Run("delegates to provider", func(t *testing.T) {
		// We can't easily create a real grpc.ClientConn in tests,
		// but we can verify the delegation works with nil
		grpcProvider := &grpcutil.MockGrpcProvider{MockConn: nil, MockHosts: []string{"localhost:4000"}}
		conn, err := NewNodeConnection(WithGRPCProvider(grpcProvider))
		require.NoError(t, err)

		// Should delegate to provider.CurrentConn()
		assert.Equal(t, grpcProvider.CurrentConn(), conn.GetGrpcClientConn())
	})

	t.Run("returns nil when provider is nil", func(t *testing.T) {
		restProvider := &rest.MockRestProvider{MockHosts: []string{"http://localhost:3500"}}
		conn, err := NewNodeConnection(WithRestProvider(restProvider))
		require.NoError(t, err)
		assert.Equal(t, (*grpc.ClientConn)(nil), conn.GetGrpcClientConn())
	})
}

func TestNodeConnection_GetRestHandler(t *testing.T) {
	t.Run("delegates to provider", func(t *testing.T) {
		mockHandler := &rest.MockRestHandler{}
		restProvider := &rest.MockRestProvider{MockHandler: mockHandler, MockHosts: []string{"http://localhost:3500"}}
		conn, err := NewNodeConnection(WithRestProvider(restProvider))
		require.NoError(t, err)

		assert.Equal(t, mockHandler, conn.GetRestHandler())
	})

	t.Run("returns nil when provider is nil", func(t *testing.T) {
		grpcProvider := &grpcutil.MockGrpcProvider{MockHosts: []string{"localhost:4000"}}
		conn, err := NewNodeConnection(WithGRPCProvider(grpcProvider))
		require.NoError(t, err)
		assert.Equal(t, (rest.RestHandler)(nil), conn.GetRestHandler())
	})
}

@@ -41,6 +41,8 @@ func TestNode_Builds(t *testing.T) {
	set.String("wallet-password-file", passwordFile, "path to wallet password")
	set.String("keymanager-kind", "imported", "keymanager kind")
	set.String("verbosity", "debug", "log verbosity")
	set.String("beacon-rpc-provider", "localhost:4000", "beacon node RPC endpoint")
	set.String("beacon-rest-api-provider", "http://localhost:3500", "beacon node REST API endpoint")
	require.NoError(t, set.Set(flags.WalletPasswordFileFlag.Name, passwordFile))
	ctx := cli.NewContext(&app, set, nil)
	opts := []accounts.Option{

Some files were not shown because too many files have changed in this diff.