Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-26 21:59:13 -05:00)

Compare commits: remove-pre... gRPC-fallb (49 commits)
| SHA1 |
|---|
| 922aaa4f0f |
| 63cd7ac88e |
| 42e5417a7b |
| c8012b41f9 |
| f35074a78f |
| 520733ba55 |
| bcf060619b |
| a030d88d42 |
| d30ef5a30f |
| 545f450f70 |
| 9a5f5ce733 |
| fe7b2bf20e |
| 7cc6ded31a |
| 5fd3300fdb |
| f74a9cb3ec |
| 3d903d5d75 |
| 5f335b1b58 |
| 1f7f7c6833 |
| 4e952354d1 |
| 5268da43f1 |
| a6fc327cfb |
| cf04b457a6 |
| 4586b0accf |
| 3a71ad2ec1 |
| 588766e520 |
| ecc19bc6ed |
| 164d2d50fd |
| 1355a9ff4d |
| 34478f30c8 |
| 214b4428e6 |
| db82f3cc9d |
| 9c874037d1 |
| 9bb231fb3b |
| 6432140603 |
| 5e0a9ff992 |
| 0bfd661baf |
| b21acc0bbb |
| f6f65987c6 |
| d26cdd74ee |
| d1905cb018 |
| 9f828bdd88 |
| 17413b52ed |
| a651e7f0ac |
| 3e1cb45e92 |
| fc2dcb0e88 |
| 888db581dd |
| f1d2ee72e2 |
| 31f18b9f60 |
| 6462c997e9 |
@@ -3,13 +3,16 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
    name = "go_default_library",
    srcs = [
        "grpc_connection_provider.go",
        "grpcutils.go",
        "log.go",
        "mock_grpc_provider.go",
        "parameters.go",
    ],
    importpath = "github.com/OffchainLabs/prysm/v7/api/grpc",
    visibility = ["//visibility:public"],
    deps = [
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@org_golang_google_grpc//:go_default_library",
        "@org_golang_google_grpc//metadata:go_default_library",
@@ -18,12 +21,17 @@ go_library(

go_test(
    name = "go_default_test",
    srcs = ["grpcutils_test.go"],
    srcs = [
        "grpc_connection_provider_test.go",
        "grpcutils_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        "//testing/assert:go_default_library",
        "//testing/require:go_default_library",
        "@com_github_sirupsen_logrus//hooks/test:go_default_library",
        "@org_golang_google_grpc//:go_default_library",
        "@org_golang_google_grpc//credentials/insecure:go_default_library",
        "@org_golang_google_grpc//metadata:go_default_library",
    ],
)
api/grpc/grpc_connection_provider.go (new file, 171 lines)
@@ -0,0 +1,171 @@
package grpc

import (
    "context"
    "strings"
    "sync"
    "sync/atomic"

    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
    "google.golang.org/grpc"
)

// GrpcConnectionProvider manages gRPC connections for failover support.
// It allows switching between different beacon node endpoints when the current one becomes unavailable.
// Only one connection is maintained at a time - when switching hosts, the old connection is closed.
type GrpcConnectionProvider interface {
    // CurrentConn returns the currently active gRPC connection.
    // The connection is created lazily on first call.
    // Returns nil if the provider has been closed.
    CurrentConn() *grpc.ClientConn
    // CurrentHost returns the address of the currently active endpoint.
    CurrentHost() string
    // Hosts returns all configured endpoint addresses.
    Hosts() []string
    // SwitchHost switches to the endpoint at the given index.
    // The new connection is created lazily on next CurrentConn() call.
    SwitchHost(index int) error
    // Close closes the current connection.
    Close()
}

type grpcConnectionProvider struct {
    // Immutable after construction - no lock needed for reads
    endpoints []string
    ctx       context.Context
    dialOpts  []grpc.DialOption

    // Current connection state (protected by mutex)
    currentIndex uint64
    conn         *grpc.ClientConn

    mu     sync.Mutex
    closed atomic.Bool
}

// NewGrpcConnectionProvider creates a new connection provider that manages gRPC connections.
// The endpoint parameter can be a comma-separated list of addresses (e.g., "host1:4000,host2:4000").
// Only one connection is maintained at a time, created lazily on first use.
func NewGrpcConnectionProvider(
    ctx context.Context,
    endpoint string,
    dialOpts []grpc.DialOption,
) (GrpcConnectionProvider, error) {
    endpoints := parseEndpoints(endpoint)
    if len(endpoints) == 0 {
        return nil, errors.New("no gRPC endpoints provided")
    }

    log.WithFields(logrus.Fields{
        "endpoints": endpoints,
        "count":     len(endpoints),
    }).Info("Initialized gRPC connection provider")

    return &grpcConnectionProvider{
        endpoints: endpoints,
        ctx:       ctx,
        dialOpts:  dialOpts,
    }, nil
}

// parseEndpoints splits a comma-separated endpoint string into individual endpoints.
func parseEndpoints(endpoint string) []string {
    if endpoint == "" {
        return nil
    }
    var endpoints []string
    for p := range strings.SplitSeq(endpoint, ",") {
        if p = strings.TrimSpace(p); p != "" {
            endpoints = append(endpoints, p)
        }
    }
    return endpoints
}

func (p *grpcConnectionProvider) CurrentConn() *grpc.ClientConn {
    if p.closed.Load() {
        return nil
    }

    p.mu.Lock()
    defer p.mu.Unlock()

    // Return existing connection if available
    if p.conn != nil {
        return p.conn
    }

    // Create connection lazily
    ep := p.endpoints[p.currentIndex]
    conn, err := grpc.DialContext(p.ctx, ep, p.dialOpts...)
    if err != nil {
        log.WithError(err).WithField("endpoint", ep).Error("Failed to create gRPC connection")
        return nil
    }

    p.conn = conn
    log.WithField("endpoint", ep).Debug("Created gRPC connection")
    return conn
}

func (p *grpcConnectionProvider) CurrentHost() string {
    p.mu.Lock()
    defer p.mu.Unlock()
    return p.endpoints[p.currentIndex]
}

func (p *grpcConnectionProvider) Hosts() []string {
    // Return a copy to maintain immutability
    hosts := make([]string, len(p.endpoints))
    copy(hosts, p.endpoints)
    return hosts
}

func (p *grpcConnectionProvider) SwitchHost(index int) error {
    if index < 0 || index >= len(p.endpoints) {
        return errors.Errorf("invalid host index %d, must be between 0 and %d", index, len(p.endpoints)-1)
    }

    p.mu.Lock()
    defer p.mu.Unlock()

    if uint64(index) == p.currentIndex {
        return nil // Already on this host
    }

    oldHost := p.endpoints[p.currentIndex]

    // Close existing connection if any
    if p.conn != nil {
        if err := p.conn.Close(); err != nil {
            log.WithError(err).WithField("endpoint", oldHost).Debug("Failed to close previous connection")
        }
        p.conn = nil
    }

    p.currentIndex = uint64(index)

    log.WithFields(logrus.Fields{
        "previousHost": oldHost,
        "newHost":      p.endpoints[index],
    }).Debug("Switched gRPC endpoint")
    return nil
}

func (p *grpcConnectionProvider) Close() {
    p.mu.Lock()
    defer p.mu.Unlock()

    if p.closed.Load() {
        return
    }
    p.closed.Store(true)

    if p.conn != nil {
        if err := p.conn.Close(); err != nil {
            log.WithError(err).WithField("endpoint", p.endpoints[p.currentIndex]).Debug("Failed to close gRPC connection")
        }
        p.conn = nil
    }
}
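A minimal caller-side sketch of the failover flow this provider enables (not part of the diff; the endpoint addresses and the error-handling policy are hypothetical): create the provider with a comma-separated endpoint list, use the lazily dialed connection, and advance to another host when the current one fails.

```go
package main

import (
    "context"
    "log"

    prysmgrpc "github.com/OffchainLabs/prysm/v7/api/grpc"
    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"
)

func main() {
    ctx := context.Background()
    dialOpts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
    // Two hypothetical beacon node endpoints; only the first is dialed until a switch happens.
    provider, err := prysmgrpc.NewGrpcConnectionProvider(ctx, "host1:4000,host2:4000", dialOpts)
    if err != nil {
        log.Fatal(err)
    }
    defer provider.Close()

    conn := provider.CurrentConn() // dialed lazily against host1:4000
    _ = conn

    // On an RPC failure, a caller could rotate to the next configured host.
    // The replacement connection is only dialed on the next CurrentConn() call.
    next := 1 % len(provider.Hosts())
    if err := provider.SwitchHost(next); err != nil {
        log.Fatal(err)
    }
    _ = provider.CurrentConn()
}
```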
api/grpc/grpc_connection_provider_test.go (new file, 203 lines)
@@ -0,0 +1,203 @@
package grpc

import (
    "context"
    "net"
    "strings"
    "testing"

    "github.com/OffchainLabs/prysm/v7/testing/assert"
    "github.com/OffchainLabs/prysm/v7/testing/require"
    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"
)

func TestParseEndpoints(t *testing.T) {
    tests := []struct {
        name     string
        input    string
        expected []string
    }{
        {"single endpoint", "localhost:4000", []string{"localhost:4000"}},
        {"multiple endpoints", "host1:4000,host2:4000,host3:4000", []string{"host1:4000", "host2:4000", "host3:4000"}},
        {"endpoints with spaces", "host1:4000, host2:4000 , host3:4000", []string{"host1:4000", "host2:4000", "host3:4000"}},
        {"empty string", "", nil},
        {"only commas", ",,,", nil},
        {"trailing comma", "host1:4000,host2:4000,", []string{"host1:4000", "host2:4000"}},
        {"leading comma", ",host1:4000,host2:4000", []string{"host1:4000", "host2:4000"}},
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            assert.DeepEqual(t, tt.expected, parseEndpoints(tt.input))
        })
    }
}

func TestNewGrpcConnectionProvider_Errors(t *testing.T) {
    t.Run("no endpoints", func(t *testing.T) {
        dialOpts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
        _, err := NewGrpcConnectionProvider(context.Background(), "", dialOpts)
        require.ErrorContains(t, "no gRPC endpoints provided", err)
    })
}

func TestGrpcConnectionProvider_LazyConnection(t *testing.T) {
    // Start only one server but configure provider with two endpoints
    lis, err := net.Listen("tcp", "127.0.0.1:0")
    require.NoError(t, err)
    server := grpc.NewServer()
    go func() { _ = server.Serve(lis) }()
    defer server.Stop()

    validAddr := lis.Addr().String()
    invalidAddr := "127.0.0.1:1" // Port 1 is unlikely to be listening

    // Provider should succeed even though second endpoint is invalid (lazy connections)
    endpoint := validAddr + "," + invalidAddr
    ctx := context.Background()
    dialOpts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
    provider, err := NewGrpcConnectionProvider(ctx, endpoint, dialOpts)
    require.NoError(t, err, "Provider creation should succeed with lazy connections")
    defer func() { provider.Close() }()

    // First endpoint should work
    conn := provider.CurrentConn()
    assert.NotNil(t, conn, "First connection should be created lazily")
}

func TestGrpcConnectionProvider_SingleConnectionModel(t *testing.T) {
    // Create provider with 3 endpoints
    var addrs []string
    var servers []*grpc.Server

    for range 3 {
        lis, err := net.Listen("tcp", "127.0.0.1:0")
        require.NoError(t, err)
        server := grpc.NewServer()
        go func() { _ = server.Serve(lis) }()
        addrs = append(addrs, lis.Addr().String())
        servers = append(servers, server)
    }
    defer func() {
        for _, s := range servers {
            s.Stop()
        }
    }()

    endpoint := strings.Join(addrs, ",")
    ctx := context.Background()
    dialOpts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
    provider, err := NewGrpcConnectionProvider(ctx, endpoint, dialOpts)
    require.NoError(t, err)
    defer func() { provider.Close() }()

    // Access the internal state to verify single connection behavior
    p := provider.(*grpcConnectionProvider)

    // Initially no connection
    p.mu.Lock()
    assert.Equal(t, (*grpc.ClientConn)(nil), p.conn, "Connection should be nil before access")
    p.mu.Unlock()

    // Access connection - should create one
    conn0 := provider.CurrentConn()
    assert.NotNil(t, conn0)

    p.mu.Lock()
    assert.NotNil(t, p.conn, "Connection should be created after CurrentConn()")
    firstConn := p.conn
    p.mu.Unlock()

    // Call CurrentConn again - should return same connection
    conn0Again := provider.CurrentConn()
    assert.Equal(t, conn0, conn0Again, "Should return same connection")

    // Switch to different host - old connection should be closed, new one created lazily
    require.NoError(t, provider.SwitchHost(1))

    p.mu.Lock()
    assert.Equal(t, (*grpc.ClientConn)(nil), p.conn, "Connection should be nil after SetHost (lazy)")
    p.mu.Unlock()

    // Get new connection
    conn1 := provider.CurrentConn()
    assert.NotNil(t, conn1)
    assert.NotEqual(t, firstConn, conn1, "Should be a different connection after switching hosts")
}

// testProvider creates a provider with n test servers and returns cleanup function.
func testProvider(t *testing.T, n int) (GrpcConnectionProvider, []string, func()) {
    var addrs []string
    var cleanups []func()

    for range n {
        lis, err := net.Listen("tcp", "127.0.0.1:0")
        require.NoError(t, err)
        server := grpc.NewServer()
        go func() { _ = server.Serve(lis) }()
        addrs = append(addrs, lis.Addr().String())
        cleanups = append(cleanups, server.Stop)
    }

    endpoint := strings.Join(addrs, ",")

    ctx := context.Background()
    dialOpts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
    provider, err := NewGrpcConnectionProvider(ctx, endpoint, dialOpts)
    require.NoError(t, err)

    cleanup := func() {
        provider.Close()
        for _, c := range cleanups {
            c()
        }
    }
    return provider, addrs, cleanup
}

func TestGrpcConnectionProvider(t *testing.T) {
    provider, addrs, cleanup := testProvider(t, 3)
    defer cleanup()

    t.Run("initial state", func(t *testing.T) {
        assert.Equal(t, 3, len(provider.Hosts()))
        assert.Equal(t, addrs[0], provider.CurrentHost())
        assert.NotNil(t, provider.CurrentConn())
    })

    t.Run("SwitchHost", func(t *testing.T) {
        require.NoError(t, provider.SwitchHost(1))
        assert.Equal(t, addrs[1], provider.CurrentHost())
        assert.NotNil(t, provider.CurrentConn()) // New connection created lazily
        require.NoError(t, provider.SwitchHost(0))
        assert.Equal(t, addrs[0], provider.CurrentHost())
        require.ErrorContains(t, "invalid host index", provider.SwitchHost(-1))
        require.ErrorContains(t, "invalid host index", provider.SwitchHost(3))
    })

    t.Run("SetHost circular", func(t *testing.T) {
        // Test round-robin style switching using SetHost with manual index
        indices := []int{1, 2, 0, 1} // Simulate circular switching
        for i, idx := range indices {
            require.NoError(t, provider.SwitchHost(idx))
            assert.Equal(t, addrs[idx], provider.CurrentHost(), "iteration %d", i)
        }
    })

    t.Run("Hosts returns copy", func(t *testing.T) {
        hosts := provider.Hosts()
        original := hosts[0]
        hosts[0] = "modified"
        assert.Equal(t, original, provider.Hosts()[0])
    })
}

func TestGrpcConnectionProvider_Close(t *testing.T) {
    provider, _, cleanup := testProvider(t, 1)
    defer cleanup()

    assert.NotNil(t, provider.CurrentConn())
    provider.Close()
    assert.Equal(t, (*grpc.ClientConn)(nil), provider.CurrentConn())
    provider.Close() // Double close is safe
}
api/grpc/mock_grpc_provider.go (new file, 20 lines)
@@ -0,0 +1,20 @@
package grpc

import "google.golang.org/grpc"

// MockGrpcProvider implements GrpcConnectionProvider for testing.
type MockGrpcProvider struct {
    MockConn  *grpc.ClientConn
    MockHosts []string
}

func (m *MockGrpcProvider) CurrentConn() *grpc.ClientConn { return m.MockConn }
func (m *MockGrpcProvider) CurrentHost() string {
    if len(m.MockHosts) > 0 {
        return m.MockHosts[0]
    }
    return ""
}
func (m *MockGrpcProvider) Hosts() []string      { return m.MockHosts }
func (m *MockGrpcProvider) SwitchHost(int) error { return nil }
func (m *MockGrpcProvider) Close()               {}
api/rest/BUILD.bazel (new file, 34 lines)
@@ -0,0 +1,34 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = [
        "log.go",
        "mock_rest_provider.go",
        "rest_connection_provider.go",
        "rest_handler.go",
    ],
    importpath = "github.com/OffchainLabs/prysm/v7/api/rest",
    visibility = ["//visibility:public"],
    deps = [
        "//api:go_default_library",
        "//api/apiutil:go_default_library",
        "//api/client:go_default_library",
        "//config/params:go_default_library",
        "//network/httputil:go_default_library",
        "//runtime/version:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@io_opentelemetry_go_contrib_instrumentation_net_http_otelhttp//:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["rest_connection_provider_test.go"],
    embed = [":go_default_library"],
    deps = [
        "//testing/assert:go_default_library",
        "//testing/require:go_default_library",
    ],
)
api/rest/log.go (new file, 9 lines)
@@ -0,0 +1,9 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package rest

import "github.com/sirupsen/logrus"

// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "api/rest")
api/rest/mock_rest_provider.go (new file, 22 lines)
@@ -0,0 +1,22 @@
package rest

import "net/http"

// MockRestProvider implements RestConnectionProvider for testing.
type MockRestProvider struct {
    MockClient  *http.Client
    MockHandler RestHandler
    MockHosts   []string
    HostIndex   int
}

func (m *MockRestProvider) HttpClient() *http.Client { return m.MockClient }
func (m *MockRestProvider) RestHandler() RestHandler { return m.MockHandler }
func (m *MockRestProvider) CurrentHost() string {
    if len(m.MockHosts) > 0 {
        return m.MockHosts[m.HostIndex%len(m.MockHosts)]
    }
    return ""
}
func (m *MockRestProvider) Hosts() []string            { return m.MockHosts }
func (m *MockRestProvider) SwitchHost(index int) error { m.HostIndex = index; return nil }
api/rest/rest_connection_provider.go (new file, 158 lines)
@@ -0,0 +1,158 @@
package rest

import (
    "net/http"
    "strings"
    "sync/atomic"
    "time"

    "github.com/OffchainLabs/prysm/v7/api/client"
    pkgErrors "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
    "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

// RestConnectionProvider manages HTTP client configuration for REST API with failover support.
// It allows switching between different beacon node REST endpoints when the current one becomes unavailable.
type RestConnectionProvider interface {
    // HttpClient returns the configured HTTP client with headers, timeout, and optional tracing.
    HttpClient() *http.Client
    // RestHandler returns the REST handler for making API requests.
    RestHandler() RestHandler
    // CurrentHost returns the current REST API endpoint URL.
    CurrentHost() string
    // Hosts returns all configured REST API endpoint URLs.
    Hosts() []string
    // SwitchHost switches to the endpoint at the given index.
    SwitchHost(index int) error
}

// RestConnectionProviderOption is a functional option for configuring the REST connection provider.
type RestConnectionProviderOption func(*restConnectionProvider)

// WithHttpTimeout sets the HTTP client timeout.
func WithHttpTimeout(timeout time.Duration) RestConnectionProviderOption {
    return func(p *restConnectionProvider) {
        p.timeout = timeout
    }
}

// WithHttpHeaders sets custom HTTP headers to include in all requests.
func WithHttpHeaders(headers map[string][]string) RestConnectionProviderOption {
    return func(p *restConnectionProvider) {
        p.headers = headers
    }
}

// WithTracing enables OpenTelemetry tracing for HTTP requests.
func WithTracing() RestConnectionProviderOption {
    return func(p *restConnectionProvider) {
        p.enableTracing = true
    }
}

type restConnectionProvider struct {
    endpoints     []string
    httpClient    *http.Client
    restHandler   RestHandler
    currentIndex  atomic.Uint64
    timeout       time.Duration
    headers       map[string][]string
    enableTracing bool
}

// NewRestConnectionProvider creates a new REST connection provider that manages HTTP client configuration.
// The endpoint parameter can be a comma-separated list of URLs (e.g., "http://host1:3500,http://host2:3500").
func NewRestConnectionProvider(endpoint string, opts ...RestConnectionProviderOption) (RestConnectionProvider, error) {
    endpoints := parseEndpoints(endpoint)
    if len(endpoints) == 0 {
        return nil, pkgErrors.New("no REST API endpoints provided")
    }

    p := &restConnectionProvider{
        endpoints: endpoints,
    }

    for _, opt := range opts {
        opt(p)
    }

    // Build the HTTP transport chain
    var transport http.RoundTripper = http.DefaultTransport

    // Add custom headers if configured
    if len(p.headers) > 0 {
        transport = client.NewCustomHeadersTransport(transport, p.headers)
    }

    // Add tracing if enabled
    if p.enableTracing {
        transport = otelhttp.NewTransport(transport)
    }

    p.httpClient = &http.Client{
        Timeout:   p.timeout,
        Transport: transport,
    }

    // Create the REST handler with the HTTP client and initial host
    p.restHandler = newRestHandler(*p.httpClient, endpoints[0])

    log.WithFields(logrus.Fields{
        "endpoints": endpoints,
        "count":     len(endpoints),
    }).Info("Initialized REST connection provider")

    return p, nil
}

// parseEndpoints splits a comma-separated endpoint string into individual endpoints.
func parseEndpoints(endpoint string) []string {
    if endpoint == "" {
        return nil
    }
    var endpoints []string
    for p := range strings.SplitSeq(endpoint, ",") {
        if p = strings.TrimSpace(p); p != "" {
            endpoints = append(endpoints, p)
        }
    }
    return endpoints
}

func (p *restConnectionProvider) HttpClient() *http.Client {
    return p.httpClient
}

func (p *restConnectionProvider) RestHandler() RestHandler {
    return p.restHandler
}

func (p *restConnectionProvider) CurrentHost() string {
    return p.endpoints[p.currentIndex.Load()]
}

func (p *restConnectionProvider) Hosts() []string {
    // Return a copy to maintain immutability
    hosts := make([]string, len(p.endpoints))
    copy(hosts, p.endpoints)
    return hosts
}

func (p *restConnectionProvider) SwitchHost(index int) error {
    if index < 0 || index >= len(p.endpoints) {
        return pkgErrors.Errorf("invalid host index %d, must be between 0 and %d", index, len(p.endpoints)-1)
    }

    oldIdx := p.currentIndex.Load()
    p.currentIndex.Store(uint64(index))

    // Update the rest handler's host
    p.restHandler.SetHost(p.endpoints[index])

    log.WithFields(logrus.Fields{
        "previousHost": p.endpoints[oldIdx],
        "newHost":      p.endpoints[index],
    }).Debug("Switched REST endpoint")
    return nil
}
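A brief usage sketch of the REST provider with its functional options (not part of the diff; the endpoint URLs and bearer token are placeholders): configure headers, timeout, and tracing, then switch the handler to a second endpoint on failure.

```go
provider, err := rest.NewRestConnectionProvider(
    "http://host1:3500,http://host2:3500",
    rest.WithHttpHeaders(map[string][]string{"Authorization": {"Bearer token"}}),
    rest.WithHttpTimeout(30*time.Second),
    rest.WithTracing(),
)
if err != nil {
    return err
}

handler := provider.RestHandler() // initially bound to http://host1:3500

// After a failed request, a caller could fail over; SwitchHost calls
// SetHost on the handler so subsequent requests hit the new endpoint.
if err := provider.SwitchHost(1); err != nil {
    return err
}
_ = provider.CurrentHost() // "http://host2:3500"
```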
api/rest/rest_connection_provider_test.go (new file, 76 lines)
@@ -0,0 +1,76 @@
package rest

import (
    "testing"

    "github.com/OffchainLabs/prysm/v7/testing/assert"
    "github.com/OffchainLabs/prysm/v7/testing/require"
)

func TestParseEndpoints(t *testing.T) {
    tests := []struct {
        name     string
        input    string
        expected []string
    }{
        {"single endpoint", "http://localhost:3500", []string{"http://localhost:3500"}},
        {"multiple endpoints", "http://host1:3500,http://host2:3500,http://host3:3500", []string{"http://host1:3500", "http://host2:3500", "http://host3:3500"}},
        {"endpoints with spaces", "http://host1:3500, http://host2:3500 , http://host3:3500", []string{"http://host1:3500", "http://host2:3500", "http://host3:3500"}},
        {"empty string", "", nil},
        {"only commas", ",,,", nil},
        {"trailing comma", "http://host1:3500,http://host2:3500,", []string{"http://host1:3500", "http://host2:3500"}},
        {"leading comma", ",http://host1:3500,http://host2:3500", []string{"http://host1:3500", "http://host2:3500"}},
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            assert.DeepEqual(t, tt.expected, parseEndpoints(tt.input))
        })
    }
}

func TestNewRestConnectionProvider_Errors(t *testing.T) {
    t.Run("no endpoints", func(t *testing.T) {
        _, err := NewRestConnectionProvider("")
        require.ErrorContains(t, "no REST API endpoints provided", err)
    })
}

func TestRestConnectionProvider(t *testing.T) {
    provider, err := NewRestConnectionProvider("http://host1:3500,http://host2:3500,http://host3:3500")
    require.NoError(t, err)

    t.Run("initial state", func(t *testing.T) {
        assert.Equal(t, 3, len(provider.Hosts()))
        assert.Equal(t, "http://host1:3500", provider.CurrentHost())
        assert.NotNil(t, provider.HttpClient())
    })

    t.Run("SwitchHost", func(t *testing.T) {
        require.NoError(t, provider.SwitchHost(1))
        assert.Equal(t, "http://host2:3500", provider.CurrentHost())
        require.NoError(t, provider.SwitchHost(0))
        assert.Equal(t, "http://host1:3500", provider.CurrentHost())
        require.ErrorContains(t, "invalid host index", provider.SwitchHost(-1))
        require.ErrorContains(t, "invalid host index", provider.SwitchHost(3))
    })

    t.Run("Hosts returns copy", func(t *testing.T) {
        hosts := provider.Hosts()
        original := hosts[0]
        hosts[0] = "modified"
        assert.Equal(t, original, provider.Hosts()[0])
    })
}

func TestRestConnectionProvider_WithOptions(t *testing.T) {
    headers := map[string][]string{"Authorization": {"Bearer token"}}
    provider, err := NewRestConnectionProvider(
        "http://localhost:3500",
        WithHttpHeaders(headers),
        WithHttpTimeout(30000000000), // 30 seconds in nanoseconds
        WithTracing(),
    )
    require.NoError(t, err)
    assert.NotNil(t, provider.HttpClient())
    assert.Equal(t, "http://localhost:3500", provider.CurrentHost())
}
@@ -1,4 +1,4 @@
package beacon_api
package rest

import (
    "bytes"
@@ -21,6 +21,7 @@ import (

type reqOption func(*http.Request)

// RestHandler defines the interface for making REST API requests.
type RestHandler interface {
    Get(ctx context.Context, endpoint string, resp any) error
    GetStatusCode(ctx context.Context, endpoint string) (int, error)
@@ -32,26 +33,31 @@ type RestHandler interface {
    SetHost(host string)
}

type BeaconApiRestHandler struct {
type restHandler struct {
    client       http.Client
    host         string
    reqOverrides []reqOption
}

// NewBeaconApiRestHandler returns a RestHandler
func NewBeaconApiRestHandler(client http.Client, host string) RestHandler {
    brh := &BeaconApiRestHandler{
// newRestHandler returns a RestHandler (internal use)
func newRestHandler(client http.Client, host string) RestHandler {
    return NewRestHandler(client, host)
}

// NewRestHandler returns a RestHandler
func NewRestHandler(client http.Client, host string) RestHandler {
    rh := &restHandler{
        client: client,
        host:   host,
    }
    brh.appendAcceptOverride()
    return brh
    rh.appendAcceptOverride()
    return rh
}

// appendAcceptOverride enables the Accept header to be customized at runtime via an environment variable.
// This is specified as an env var because it is a niche option that prysm may use for performance testing or debugging
// bug which users are unlikely to need. Using an env var keeps the set of user-facing flags cleaner.
func (c *BeaconApiRestHandler) appendAcceptOverride() {
func (c *restHandler) appendAcceptOverride() {
    if accept := os.Getenv(params.EnvNameOverrideAccept); accept != "" {
        c.reqOverrides = append(c.reqOverrides, func(req *http.Request) {
            req.Header.Set("Accept", accept)
@@ -60,18 +66,18 @@ func (c *BeaconApiRestHandler) appendAcceptOverride() {
}

// HttpClient returns the underlying HTTP client of the handler
func (c *BeaconApiRestHandler) HttpClient() *http.Client {
func (c *restHandler) HttpClient() *http.Client {
    return &c.client
}

// Host returns the underlying HTTP host
func (c *BeaconApiRestHandler) Host() string {
func (c *restHandler) Host() string {
    return c.host
}

// Get sends a GET request and decodes the response body as a JSON object into the passed in object.
// If an HTTP error is returned, the body is decoded as a DefaultJsonError JSON object and returned as the first return value.
func (c *BeaconApiRestHandler) Get(ctx context.Context, endpoint string, resp any) error {
func (c *restHandler) Get(ctx context.Context, endpoint string, resp any) error {
    url := c.host + endpoint
    req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
    if err != nil {
@@ -94,7 +100,7 @@ func (c *BeaconApiRestHandler) Get(ctx context.Context, endpoint string, resp an
// GetStatusCode sends a GET request and returns only the HTTP status code.
// This is useful for endpoints like /eth/v1/node/health that communicate status via HTTP codes
// (200 = ready, 206 = syncing, 503 = unavailable) rather than response bodies.
func (c *BeaconApiRestHandler) GetStatusCode(ctx context.Context, endpoint string) (int, error) {
func (c *restHandler) GetStatusCode(ctx context.Context, endpoint string) (int, error) {
    url := c.host + endpoint
    req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
    if err != nil {
@@ -113,7 +119,7 @@ func (c *BeaconApiRestHandler) GetStatusCode(ctx context.Context, endpoint strin
    return httpResp.StatusCode, nil
}

func (c *BeaconApiRestHandler) GetSSZ(ctx context.Context, endpoint string) ([]byte, http.Header, error) {
func (c *restHandler) GetSSZ(ctx context.Context, endpoint string) ([]byte, http.Header, error) {
    url := c.host + endpoint
    req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
    if err != nil {
@@ -168,7 +174,7 @@ func (c *BeaconApiRestHandler) GetSSZ(ctx context.Context, endpoint string) ([]b

// Post sends a POST request and decodes the response body as a JSON object into the passed in object.
// If an HTTP error is returned, the body is decoded as a DefaultJsonError JSON object and returned as the first return value.
func (c *BeaconApiRestHandler) Post(
func (c *restHandler) Post(
    ctx context.Context,
    apiEndpoint string,
    headers map[string]string,
@@ -204,7 +210,7 @@ func (c *BeaconApiRestHandler) Post(
}

// PostSSZ sends a POST request and prefers an SSZ (application/octet-stream) response body.
func (c *BeaconApiRestHandler) PostSSZ(
func (c *restHandler) PostSSZ(
    ctx context.Context,
    apiEndpoint string,
    headers map[string]string,
@@ -305,6 +311,6 @@ func decodeResp(httpResp *http.Response, resp any) error {
    return nil
}

func (c *BeaconApiRestHandler) SetHost(host string) {
func (c *restHandler) SetHost(host string) {
    c.host = host
}
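A short sketch of the health-check pattern the GetStatusCode comment describes (not part of the diff; the localhost URL and timeout are placeholders): build a handler directly with NewRestHandler and interpret the status code of /eth/v1/node/health.

```go
// Sketch only: 200 = ready, 206 = syncing, 503 = unavailable per the comment above.
h := rest.NewRestHandler(http.Client{Timeout: 5 * time.Second}, "http://localhost:3500")
code, err := h.GetStatusCode(context.Background(), "/eth/v1/node/health")
if err == nil && code == http.StatusOK {
    // beacon node is ready; otherwise a caller might SwitchHost on the provider
}
```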
@@ -14,7 +14,6 @@ go_library(
        "transition.go",
        "upgrade.go",
        "validator.go",
        "withdrawals.go",
    ],
    importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/core/electra",
    visibility = ["//visibility:public"],
@@ -42,8 +41,6 @@ go_library(
        "//proto/prysm/v1alpha1/attestation:go_default_library",
        "//runtime/version:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_ethereum_go_ethereum//common/math:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
    ],
@@ -63,13 +60,11 @@ go_test(
        "transition_test.go",
        "upgrade_test.go",
        "validator_test.go",
        "withdrawals_test.go",
    ],
    data = glob(["testdata/**"]),
    embed = [":go_default_library"],
    deps = [
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/signing:go_default_library",
        "//beacon-chain/core/time:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/state-native:go_default_library",
@@ -82,16 +77,12 @@ go_test(
        "//encoding/bytesutil:go_default_library",
        "//proto/engine/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//testing/assert:go_default_library",
        "//testing/fuzz:go_default_library",
        "//testing/require:go_default_library",
        "//testing/util:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_ethereum_go_ethereum//common:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_google_gofuzz//:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@com_github_sirupsen_logrus//hooks/test:go_default_library",
    ],
)
@@ -3,19 +3,14 @@ package electra
import (
    "bytes"
    "context"
    "fmt"

    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/state"
    state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
    "github.com/OffchainLabs/prysm/v7/config/params"
    "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
    "github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
    enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
    eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v7/time/slots"
    "github.com/ethereum/go-ethereum/common/math"
    "github.com/pkg/errors"
)

@@ -95,217 +90,6 @@ func ProcessPendingConsolidations(ctx context.Context, st state.BeaconState) err
    return nil
}

// ProcessConsolidationRequests implements the spec definition below. This method makes mutating
// calls to the beacon state.
//
// def process_consolidation_request(
//     state: BeaconState,
//     consolidation_request: ConsolidationRequest
// ) -> None:
//     if is_valid_switch_to_compounding_request(state, consolidation_request):
//         validator_pubkeys = [v.pubkey for v in state.validators]
//         request_source_pubkey = consolidation_request.source_pubkey
//         source_index = ValidatorIndex(validator_pubkeys.index(request_source_pubkey))
//         switch_to_compounding_validator(state, source_index)
//         return
//
//     # Verify that source != target, so a consolidation cannot be used as an exit.
//     if consolidation_request.source_pubkey == consolidation_request.target_pubkey:
//         return
//     # If the pending consolidations queue is full, consolidation requests are ignored
//     if len(state.pending_consolidations) == PENDING_CONSOLIDATIONS_LIMIT:
//         return
//     # If there is too little available consolidation churn limit, consolidation requests are ignored
//     if get_consolidation_churn_limit(state) <= MIN_ACTIVATION_BALANCE:
//         return
//
//     validator_pubkeys = [v.pubkey for v in state.validators]
//     # Verify pubkeys exists
//     request_source_pubkey = consolidation_request.source_pubkey
//     request_target_pubkey = consolidation_request.target_pubkey
//     if request_source_pubkey not in validator_pubkeys:
//         return
//     if request_target_pubkey not in validator_pubkeys:
//         return
//     source_index = ValidatorIndex(validator_pubkeys.index(request_source_pubkey))
//     target_index = ValidatorIndex(validator_pubkeys.index(request_target_pubkey))
//     source_validator = state.validators[source_index]
//     target_validator = state.validators[target_index]
//
//     # Verify source withdrawal credentials
//     has_correct_credential = has_execution_withdrawal_credential(source_validator)
//     is_correct_source_address = (
//         source_validator.withdrawal_credentials[12:] == consolidation_request.source_address
//     )
//     if not (has_correct_credential and is_correct_source_address):
//         return
//
//     # Verify that target has compounding withdrawal credentials
//     if not has_compounding_withdrawal_credential(target_validator):
//         return
//
//     # Verify the source and the target are active
//     current_epoch = get_current_epoch(state)
//     if not is_active_validator(source_validator, current_epoch):
//         return
//     if not is_active_validator(target_validator, current_epoch):
//         return
//     # Verify exits for source and target have not been initiated
//     if source_validator.exit_epoch != FAR_FUTURE_EPOCH:
//         return
//     if target_validator.exit_epoch != FAR_FUTURE_EPOCH:
//         return
//
//     # Verify the source has been active long enough
//     if current_epoch < source_validator.activation_epoch + SHARD_COMMITTEE_PERIOD:
//         return
//
//     # Verify the source has no pending withdrawals in the queue
//     if get_pending_balance_to_withdraw(state, source_index) > 0:
//         return
//     # Initiate source validator exit and append pending consolidation
//     source_validator.exit_epoch = compute_consolidation_epoch_and_update_churn(
//         state, source_validator.effective_balance
//     )
//     source_validator.withdrawable_epoch = Epoch(
//         source_validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY
//     )
//     state.pending_consolidations.append(PendingConsolidation(
//         source_index=source_index,
//         target_index=target_index
//     ))
func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, reqs []*enginev1.ConsolidationRequest) error {
    if len(reqs) == 0 || st == nil {
        return nil
    }
    curEpoch := slots.ToEpoch(st.Slot())
    ffe := params.BeaconConfig().FarFutureEpoch
    minValWithdrawDelay := params.BeaconConfig().MinValidatorWithdrawabilityDelay
    pcLimit := params.BeaconConfig().PendingConsolidationsLimit

    for _, cr := range reqs {
        if cr == nil {
            return errors.New("nil consolidation request")
        }
        if ctx.Err() != nil {
            return fmt.Errorf("cannot process consolidation requests: %w", ctx.Err())
        }
        if IsValidSwitchToCompoundingRequest(st, cr) {
            srcIdx, ok := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(cr.SourcePubkey))
            if !ok {
                log.Error("Failed to find source validator index")
                continue
            }
            if err := SwitchToCompoundingValidator(st, srcIdx); err != nil {
                log.WithError(err).Error("Failed to switch to compounding validator")
            }
            continue
        }
        sourcePubkey := bytesutil.ToBytes48(cr.SourcePubkey)
        targetPubkey := bytesutil.ToBytes48(cr.TargetPubkey)
        if sourcePubkey == targetPubkey {
            continue
        }

        if npc, err := st.NumPendingConsolidations(); err != nil {
            return fmt.Errorf("failed to fetch number of pending consolidations: %w", err) // This should never happen.
        } else if npc >= pcLimit {
            continue
        }

        activeBal, err := helpers.TotalActiveBalance(st)
        if err != nil {
            return err
        }
        churnLimit := helpers.ConsolidationChurnLimit(primitives.Gwei(activeBal))
        if churnLimit <= primitives.Gwei(params.BeaconConfig().MinActivationBalance) {
            continue
        }

        srcIdx, ok := st.ValidatorIndexByPubkey(sourcePubkey)
        if !ok {
            continue
        }
        tgtIdx, ok := st.ValidatorIndexByPubkey(targetPubkey)
        if !ok {
            continue
        }

        srcV, err := st.ValidatorAtIndex(srcIdx)
        if err != nil {
            return fmt.Errorf("failed to fetch source validator: %w", err) // This should never happen.
        }

        roSrcV, err := state_native.NewValidator(srcV)
        if err != nil {
            return err
        }

        tgtV, err := st.ValidatorAtIndexReadOnly(tgtIdx)
        if err != nil {
            return fmt.Errorf("failed to fetch target validator: %w", err) // This should never happen.
        }

        // Verify source withdrawal credentials
        if !roSrcV.HasExecutionWithdrawalCredentials() {
            continue
        }
        // Confirm source_validator.withdrawal_credentials[12:] == consolidation_request.source_address
        if len(srcV.WithdrawalCredentials) != 32 || len(cr.SourceAddress) != 20 || !bytes.HasSuffix(srcV.WithdrawalCredentials, cr.SourceAddress) {
            continue
        }

        // Target validator must have their withdrawal credentials set appropriately.
        if !tgtV.HasCompoundingWithdrawalCredentials() {
            continue
        }

        // Both validators must be active.
        if !helpers.IsActiveValidator(srcV, curEpoch) || !helpers.IsActiveValidatorUsingTrie(tgtV, curEpoch) {
            continue
        }
        // Neither validator is exiting.
        if srcV.ExitEpoch != ffe || tgtV.ExitEpoch() != ffe {
            continue
        }

        e, overflow := math.SafeAdd(uint64(srcV.ActivationEpoch), uint64(params.BeaconConfig().ShardCommitteePeriod))
        if overflow {
            log.Error("Overflow when adding activation epoch and shard committee period")
            continue
        }
        if uint64(curEpoch) < e {
            continue
        }
        hasBal, err := st.HasPendingBalanceToWithdraw(srcIdx)
        if err != nil {
            log.WithError(err).Error("Failed to fetch pending balance to withdraw")
            continue
        }
        if hasBal {
            continue
        }

        // Initiate the exit of the source validator.
        exitEpoch, err := ComputeConsolidationEpochAndUpdateChurn(ctx, st, primitives.Gwei(srcV.EffectiveBalance))
        if err != nil {
            log.WithError(err).Error("Failed to compute consolidation epoch")
            continue
        }
        srcV.ExitEpoch = exitEpoch
        srcV.WithdrawableEpoch = exitEpoch + minValWithdrawDelay
        if err := st.UpdateValidatorAtIndex(srcIdx, srcV); err != nil {
            return fmt.Errorf("failed to update validator: %w", err) // This should never happen.
        }

        if err := st.AppendPendingConsolidation(&eth.PendingConsolidation{SourceIndex: srcIdx, TargetIndex: tgtIdx}); err != nil {
            return fmt.Errorf("failed to append pending consolidation: %w", err) // This should never happen.
        }
    }

    return nil
}

// IsValidSwitchToCompoundingRequest returns true if the given consolidation request is valid for switching to compounding.
//
// Spec code:
@@ -8,8 +8,6 @@ import (
    "github.com/OffchainLabs/prysm/v7/beacon-chain/state"
    state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
    "github.com/OffchainLabs/prysm/v7/config/params"
    "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
    enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
    eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v7/testing/require"
@@ -203,275 +201,6 @@ func TestProcessPendingConsolidations(t *testing.T) {
    }
}

func TestProcessConsolidationRequests(t *testing.T) {
    tests := []struct {
        name     string
        state    state.BeaconState
        reqs     []*enginev1.ConsolidationRequest
        validate func(*testing.T, state.BeaconState)
        wantErr  bool
    }{
        {
            name: "nil request",
            state: func() state.BeaconState {
                st := &eth.BeaconStateElectra{}
                s, err := state_native.InitializeFromProtoElectra(st)
                require.NoError(t, err)
                return s
            }(),
            reqs: []*enginev1.ConsolidationRequest{nil},
            validate: func(t *testing.T, st state.BeaconState) {
                require.DeepEqual(t, st, st)
            },
            wantErr: true,
        },
        {
            name: "one valid request",
            state: func() state.BeaconState {
                st := &eth.BeaconStateElectra{
                    Slot:       params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().ShardCommitteePeriod)),
                    Validators: createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
                }
                // Validator scenario setup. See comments in reqs section.
                st.Validators[3].WithdrawalCredentials = bytesutil.Bytes32(0)
                st.Validators[8].WithdrawalCredentials = bytesutil.Bytes32(1)
                st.Validators[9].ActivationEpoch = params.BeaconConfig().FarFutureEpoch
                st.Validators[12].ActivationEpoch = params.BeaconConfig().FarFutureEpoch
                st.Validators[13].ExitEpoch = 10
                st.Validators[16].ExitEpoch = 10
                st.PendingPartialWithdrawals = []*eth.PendingPartialWithdrawal{
                    {
                        Index:  17,
                        Amount: 100,
                    },
                }
                s, err := state_native.InitializeFromProtoElectra(st)
                require.NoError(t, err)
                return s
            }(),
            reqs: []*enginev1.ConsolidationRequest{
                // Source doesn't have withdrawal credentials.
                {
                    SourceAddress: append(bytesutil.PadTo(nil, 19), byte(1)),
                    SourcePubkey:  []byte("val_3"),
                    TargetPubkey:  []byte("val_4"),
                },
                // Source withdrawal credentials don't match the consolidation address.
                {
                    SourceAddress: append(bytesutil.PadTo(nil, 19), byte(0)), // Should be 5
                    SourcePubkey:  []byte("val_5"),
                    TargetPubkey:  []byte("val_6"),
                },
                // Target does not have their withdrawal credentials set appropriately. (Using eth1 address prefix)
                {
                    SourceAddress: append(bytesutil.PadTo(nil, 19), byte(7)),
                    SourcePubkey:  []byte("val_7"),
                    TargetPubkey:  []byte("val_8"),
                },
                // Source is inactive.
                {
                    SourceAddress: append(bytesutil.PadTo(nil, 19), byte(9)),
                    SourcePubkey:  []byte("val_9"),
                    TargetPubkey:  []byte("val_10"),
                },
                // Target is inactive.
                {
                    SourceAddress: append(bytesutil.PadTo(nil, 19), byte(11)),
                    SourcePubkey:  []byte("val_11"),
                    TargetPubkey:  []byte("val_12"),
                },
                // Source is exiting.
                {
                    SourceAddress: append(bytesutil.PadTo(nil, 19), byte(13)),
                    SourcePubkey:  []byte("val_13"),
                    TargetPubkey:  []byte("val_14"),
                },
                // Target is exiting.
                {
                    SourceAddress: append(bytesutil.PadTo(nil, 19), byte(15)),
                    SourcePubkey:  []byte("val_15"),
                    TargetPubkey:  []byte("val_16"),
                },
                // Source doesn't exist
                {
                    SourceAddress: append(bytesutil.PadTo(nil, 19), byte(0)),
                    SourcePubkey:  []byte("INVALID"),
                    TargetPubkey:  []byte("val_0"),
                },
                // Target doesn't exist
                {
                    SourceAddress: append(bytesutil.PadTo(nil, 19), byte(0)),
                    SourcePubkey:  []byte("val_0"),
                    TargetPubkey:  []byte("INVALID"),
                },
                // Source == target
                {
                    SourceAddress: append(bytesutil.PadTo(nil, 19), byte(0)),
                    SourcePubkey:  []byte("val_0"),
                    TargetPubkey:  []byte("val_0"),
                },
                // Has pending partial withdrawal
                {
                    SourceAddress: append(bytesutil.PadTo(nil, 19), byte(0)),
                    SourcePubkey:  []byte("val_17"),
                    TargetPubkey:  []byte("val_1"),
                },
                // Valid consolidation request. This should be last to ensure invalid requests do
                // not end the processing early.
                {
                    SourceAddress: append(bytesutil.PadTo(nil, 19), byte(1)),
                    SourcePubkey:  []byte("val_1"),
                    TargetPubkey:  []byte("val_2"),
                },
            },
            validate: func(t *testing.T, st state.BeaconState) {
                // Verify a pending consolidation is created.
                numPC, err := st.NumPendingConsolidations()
                require.NoError(t, err)
                require.Equal(t, uint64(1), numPC)
                pcs, err := st.PendingConsolidations()
                require.NoError(t, err)
                require.Equal(t, primitives.ValidatorIndex(1), pcs[0].SourceIndex)
                require.Equal(t, primitives.ValidatorIndex(2), pcs[0].TargetIndex)

                // Verify the source validator is exiting.
                src, err := st.ValidatorAtIndex(1)
                require.NoError(t, err)
                require.NotEqual(t, params.BeaconConfig().FarFutureEpoch, src.ExitEpoch, "source validator exit epoch not updated")
                require.Equal(t, params.BeaconConfig().MinValidatorWithdrawabilityDelay, src.WithdrawableEpoch-src.ExitEpoch, "source validator withdrawable epoch not set correctly")
            },
        },
        {
            name: "pending consolidations limit reached",
            state: func() state.BeaconState {
                st := &eth.BeaconStateElectra{
                    Validators:            createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
                    PendingConsolidations: make([]*eth.PendingConsolidation, params.BeaconConfig().PendingConsolidationsLimit),
                }
                s, err := state_native.InitializeFromProtoElectra(st)
                require.NoError(t, err)
                return s
            }(),
            reqs: []*enginev1.ConsolidationRequest{
                {
                    SourceAddress: append(bytesutil.PadTo(nil, 19), byte(1)),
                    SourcePubkey:  []byte("val_1"),
                    TargetPubkey:  []byte("val_2"),
                },
            },
            validate: func(t *testing.T, st state.BeaconState) {
                // Verify no pending consolidation is created.
                numPC, err := st.NumPendingConsolidations()
                require.NoError(t, err)
                require.Equal(t, params.BeaconConfig().PendingConsolidationsLimit, numPC)

                // Verify the source validator is not exiting.
                src, err := st.ValidatorAtIndex(1)
                require.NoError(t, err)
                require.Equal(t, params.BeaconConfig().FarFutureEpoch, src.ExitEpoch, "source validator exit epoch should not be updated")
                require.Equal(t, params.BeaconConfig().FarFutureEpoch, src.WithdrawableEpoch, "source validator withdrawable epoch should not be updated")
            },
        },
        {
            name: "pending consolidations limit reached during processing",
            state: func() state.BeaconState {
                st := &eth.BeaconStateElectra{
                    Slot:                  params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().ShardCommitteePeriod)),
                    Validators:            createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
                    PendingConsolidations: make([]*eth.PendingConsolidation, params.BeaconConfig().PendingConsolidationsLimit-1),
                }
                s, err := state_native.InitializeFromProtoElectra(st)
                require.NoError(t, err)
                return s
            }(),
            reqs: []*enginev1.ConsolidationRequest{
                {
                    SourceAddress: append(bytesutil.PadTo(nil, 19), byte(1)),
                    SourcePubkey:  []byte("val_1"),
                    TargetPubkey:  []byte("val_2"),
                },
                {
                    SourceAddress: append(bytesutil.PadTo(nil, 19), byte(3)),
                    SourcePubkey:  []byte("val_3"),
                    TargetPubkey:  []byte("val_4"),
                },
            },
            validate: func(t *testing.T, st state.BeaconState) {
                // Verify a pending consolidation is created.
                numPC, err := st.NumPendingConsolidations()
                require.NoError(t, err)
                require.Equal(t, params.BeaconConfig().PendingConsolidationsLimit, numPC)

                // The first consolidation was appended.
                pcs, err := st.PendingConsolidations()
                require.NoError(t, err)
                require.Equal(t, primitives.ValidatorIndex(1), pcs[params.BeaconConfig().PendingConsolidationsLimit-1].SourceIndex)
                require.Equal(t, primitives.ValidatorIndex(2), pcs[params.BeaconConfig().PendingConsolidationsLimit-1].TargetIndex)

                // Verify the second source validator is not exiting.
                src, err := st.ValidatorAtIndex(3)
                require.NoError(t, err)
                require.Equal(t, params.BeaconConfig().FarFutureEpoch, src.ExitEpoch, "source validator exit epoch should not be updated")
                require.Equal(t, params.BeaconConfig().FarFutureEpoch, src.WithdrawableEpoch, "source validator withdrawable epoch should not be updated")
            },
        },
        {
            name: "pending consolidations limit reached and compounded consolidation after",
            state: func() state.BeaconState {
                st := &eth.BeaconStateElectra{
                    Slot:                  params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().ShardCommitteePeriod)),
                    Validators:            createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
                    PendingConsolidations: make([]*eth.PendingConsolidation, params.BeaconConfig().PendingConsolidationsLimit),
                }
                // To allow compounding consolidation requests.
                st.Validators[3].WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
                s, err := state_native.InitializeFromProtoElectra(st)
                require.NoError(t, err)
                return s
            }(),
            reqs: []*enginev1.ConsolidationRequest{
                {
                    SourceAddress: append(bytesutil.PadTo(nil, 19), byte(1)),
                    SourcePubkey:  []byte("val_1"),
                    TargetPubkey:  []byte("val_2"),
                },
                {
                    SourceAddress: append(bytesutil.PadTo(nil, 19), byte(3)),
                    SourcePubkey:  []byte("val_3"),
                    TargetPubkey:  []byte("val_3"),
                },
            },
            validate: func(t *testing.T, st state.BeaconState) {
                // Verify a pending consolidation is created.
                numPC, err := st.NumPendingConsolidations()
                require.NoError(t, err)
                require.Equal(t, params.BeaconConfig().PendingConsolidationsLimit, numPC)

                // Verify that the last consolidation was included
                src, err := st.ValidatorAtIndex(3)
                require.NoError(t, err)
                require.Equal(t, params.BeaconConfig().CompoundingWithdrawalPrefixByte, src.WithdrawalCredentials[0], "source validator was not compounded")
            },
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            err := electra.ProcessConsolidationRequests(context.TODO(), tt.state, tt.reqs)
            if (err != nil) != tt.wantErr {
                t.Errorf("ProcessWithdrawalRequests() error = %v, wantErr %v", err, tt.wantErr)
                return
            }
            if !tt.wantErr {
                require.NoError(t, err)
            }
            if tt.validate != nil {
                tt.validate(t, tt.state)
            }
        })
    }
}

func TestIsValidSwitchToCompoundingRequest(t *testing.T) {
    st, _ := util.DeterministicGenesisStateElectra(t, 1)
    t.Run("nil source pubkey", func(t *testing.T) {
@@ -12,7 +12,6 @@ import (
"github.com/OffchainLabs/prysm/v7/contracts/deposit"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/time/slots"
@@ -536,62 +535,3 @@ func GetValidatorFromDeposit(pubKey []byte, withdrawalCredentials []byte, amount
validator.EffectiveBalance = min(amount-(amount%params.BeaconConfig().EffectiveBalanceIncrement), maxEffectiveBalance)
return validator, nil
}

// ProcessDepositRequests is a function as part of electra to process execution layer deposits
func ProcessDepositRequests(ctx context.Context, beaconState state.BeaconState, requests []*enginev1.DepositRequest) (state.BeaconState, error) {
_, span := trace.StartSpan(ctx, "electra.ProcessDepositRequests")
defer span.End()

if len(requests) == 0 {
return beaconState, nil
}

var err error
for _, receipt := range requests {
beaconState, err = processDepositRequest(beaconState, receipt)
if err != nil {
return nil, errors.Wrap(err, "could not apply deposit request")
}
}
return beaconState, nil
}

// processDepositRequest processes the specific deposit request
// def process_deposit_request(state: BeaconState, deposit_request: DepositRequest) -> None:
//
// # Set deposit request start index
// if state.deposit_requests_start_index == UNSET_DEPOSIT_REQUESTS_START_INDEX:
// state.deposit_requests_start_index = deposit_request.index
//
// # Create pending deposit
// state.pending_deposits.append(PendingDeposit(
// pubkey=deposit_request.pubkey,
// withdrawal_credentials=deposit_request.withdrawal_credentials,
// amount=deposit_request.amount,
// signature=deposit_request.signature,
// slot=state.slot,
// ))
func processDepositRequest(beaconState state.BeaconState, request *enginev1.DepositRequest) (state.BeaconState, error) {
requestsStartIndex, err := beaconState.DepositRequestsStartIndex()
if err != nil {
return nil, errors.Wrap(err, "could not get deposit requests start index")
}
if request == nil {
return nil, errors.New("nil deposit request")
}
if requestsStartIndex == params.BeaconConfig().UnsetDepositRequestsStartIndex {
if err := beaconState.SetDepositRequestsStartIndex(request.Index); err != nil {
return nil, errors.Wrap(err, "could not set deposit requests start index")
}
}
if err := beaconState.AppendPendingDeposit(&ethpb.PendingDeposit{
PublicKey: bytesutil.SafeCopyBytes(request.Pubkey),
WithdrawalCredentials: bytesutil.SafeCopyBytes(request.WithdrawalCredentials),
Amount: request.Amount,
Signature: bytesutil.SafeCopyBytes(request.Signature),
Slot: beaconState.Slot(),
}); err != nil {
return nil, errors.Wrap(err, "could not append deposit request")
}
return beaconState, nil
}

@@ -6,7 +6,6 @@ import (

"github.com/OffchainLabs/prysm/v7/beacon-chain/core/electra"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
stateTesting "github.com/OffchainLabs/prysm/v7/beacon-chain/state/testing"
@@ -15,7 +14,6 @@ import (
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/crypto/bls"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/testing/util"
@@ -361,60 +359,6 @@ func TestBatchProcessNewPendingDeposits(t *testing.T) {
})
}

func TestProcessDepositRequests(t *testing.T) {
st, _ := util.DeterministicGenesisStateElectra(t, 1)
sk, err := bls.RandKey()
require.NoError(t, err)
require.NoError(t, st.SetDepositRequestsStartIndex(1))

t.Run("empty requests continues", func(t *testing.T) {
newSt, err := electra.ProcessDepositRequests(t.Context(), st, []*enginev1.DepositRequest{})
require.NoError(t, err)
require.DeepEqual(t, newSt, st)
})
t.Run("nil request errors", func(t *testing.T) {
_, err = electra.ProcessDepositRequests(t.Context(), st, []*enginev1.DepositRequest{nil})
require.ErrorContains(t, "nil deposit request", err)
})

vals := st.Validators()
vals[0].PublicKey = sk.PublicKey().Marshal()
vals[0].WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
require.NoError(t, st.SetValidators(vals))
bals := st.Balances()
bals[0] = params.BeaconConfig().MinActivationBalance + 2000
require.NoError(t, st.SetBalances(bals))
require.NoError(t, st.SetPendingDeposits(make([]*eth.PendingDeposit, 0))) // reset pbd as the determinitstic state populates this already
withdrawalCred := make([]byte, 32)
withdrawalCred[0] = params.BeaconConfig().CompoundingWithdrawalPrefixByte
depositMessage := &eth.DepositMessage{
PublicKey: sk.PublicKey().Marshal(),
Amount: 1000,
WithdrawalCredentials: withdrawalCred,
}
domain, err := signing.ComputeDomain(params.BeaconConfig().DomainDeposit, nil, nil)
require.NoError(t, err)
sr, err := signing.ComputeSigningRoot(depositMessage, domain)
require.NoError(t, err)
sig := sk.Sign(sr[:])
requests := []*enginev1.DepositRequest{
{
Pubkey: depositMessage.PublicKey,
Index: 0,
WithdrawalCredentials: depositMessage.WithdrawalCredentials,
Amount: depositMessage.Amount,
Signature: sig.Marshal(),
},
}
st, err = electra.ProcessDepositRequests(t.Context(), st, requests)
require.NoError(t, err)

pbd, err := st.PendingDeposits()
require.NoError(t, err)
require.Equal(t, 1, len(pbd))
require.Equal(t, uint64(1000), pbd[0].Amount)
}

func TestProcessDeposit_Electra_Simple(t *testing.T) {
deps, _, err := util.DeterministicDepositsAndKeysSameValidator(3)
require.NoError(t, err)

60
beacon-chain/core/requests/BUILD.bazel
Normal file
@@ -0,0 +1,60 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
name = "go_default_library",
srcs = [
"consolidations.go",
"deposits.go",
"log.go",
"withdrawals.go",
],
importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/core/requests",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/validators:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/bls/common:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_ethereum_go_ethereum//common/math:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)

go_test(
name = "go_default_test",
srcs = [
"consolidations_test.go",
"deposits_test.go",
"withdrawals_test.go",
],
deps = [
":go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
],
)
365
beacon-chain/core/requests/consolidations.go
Normal file
@@ -0,0 +1,365 @@
package requests

import (
"bytes"
"context"
"fmt"

"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/crypto/bls/common"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
prysmMath "github.com/OffchainLabs/prysm/v7/math"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/ethereum/go-ethereum/common/math"
"github.com/pkg/errors"
)

// ProcessConsolidationRequests implements the spec definition below. This method makes mutating
// calls to the beacon state.
//
// def process_consolidation_request(
// state: BeaconState,
// consolidation_request: ConsolidationRequest
// ) -> None:
// if is_valid_switch_to_compounding_request(state, consolidation_request):
// validator_pubkeys = [v.pubkey for v in state.validators]
// request_source_pubkey = consolidation_request.source_pubkey
// source_index = ValidatorIndex(validator_pubkeys.index(request_source_pubkey))
// switch_to_compounding_validator(state, source_index)
// return
//
// # Verify that source != target, so a consolidation cannot be used as an exit.
// if consolidation_request.source_pubkey == consolidation_request.target_pubkey:
// return
// # If the pending consolidations queue is full, consolidation requests are ignored
// if len(state.pending_consolidations) == PENDING_CONSOLIDATIONS_LIMIT:
// return
// # If there is too little available consolidation churn limit, consolidation requests are ignored
// if get_consolidation_churn_limit(state) <= MIN_ACTIVATION_BALANCE:
// return
//
// validator_pubkeys = [v.pubkey for v in state.validators]
// # Verify pubkeys exists
// request_source_pubkey = consolidation_request.source_pubkey
// request_target_pubkey = consolidation_request.target_pubkey
// if request_source_pubkey not in validator_pubkeys:
// return
// if request_target_pubkey not in validator_pubkeys:
// return
// source_index = ValidatorIndex(validator_pubkeys.index(request_source_pubkey))
// target_index = ValidatorIndex(validator_pubkeys.index(request_target_pubkey))
// source_validator = state.validators[source_index]
// target_validator = state.validators[target_index]
//
// # Verify source withdrawal credentials
// has_correct_credential = has_execution_withdrawal_credential(source_validator)
// is_correct_source_address = (
// source_validator.withdrawal_credentials[12:] == consolidation_request.source_address
// )
// if not (has_correct_credential and is_correct_source_address):
// return
//
// # Verify that target has compounding withdrawal credentials
// if not has_compounding_withdrawal_credential(target_validator):
// return
//
// # Verify the source and the target are active
// current_epoch = get_current_epoch(state)
// if not is_active_validator(source_validator, current_epoch):
// return
// if not is_active_validator(target_validator, current_epoch):
// return
// # Verify exits for source and target have not been initiated
// if source_validator.exit_epoch != FAR_FUTURE_EPOCH:
// return
// if target_validator.exit_epoch != FAR_FUTURE_EPOCH:
// return
//
// # Verify the source has been active long enough
// if current_epoch < source_validator.activation_epoch + SHARD_COMMITTEE_PERIOD:
// return
//
// # Verify the source has no pending withdrawals in the queue
// if get_pending_balance_to_withdraw(state, source_index) > 0:
// return
// # Initiate source validator exit and append pending consolidation
// source_validator.exit_epoch = compute_consolidation_epoch_and_update_churn(
// state, source_validator.effective_balance
// )
// source_validator.withdrawable_epoch = Epoch(
// source_validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY
// )
// state.pending_consolidations.append(PendingConsolidation(
// source_index=source_index,
// target_index=target_index
// ))
func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, reqs []*enginev1.ConsolidationRequest) error {
ctx, span := trace.StartSpan(ctx, "requests.ProcessConsolidationRequests")
defer span.End()

if len(reqs) == 0 || st == nil {
return nil
}
curEpoch := slots.ToEpoch(st.Slot())
ffe := params.BeaconConfig().FarFutureEpoch
minValWithdrawDelay := params.BeaconConfig().MinValidatorWithdrawabilityDelay
pcLimit := params.BeaconConfig().PendingConsolidationsLimit

for _, cr := range reqs {
if cr == nil {
return errors.New("nil consolidation request")
}
if ctx.Err() != nil {
return fmt.Errorf("cannot process consolidation requests: %w", ctx.Err())
}

if isValidSwitchToCompoundingRequest(st, cr) {
srcIdx, ok := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(cr.SourcePubkey))
if !ok {
log.Error("Failed to find source validator index")
continue
}
if err := switchToCompoundingValidator(st, srcIdx); err != nil {
log.WithError(err).Error("Failed to switch to compounding validator")
}
continue
}

sourcePubkey := bytesutil.ToBytes48(cr.SourcePubkey)
targetPubkey := bytesutil.ToBytes48(cr.TargetPubkey)
if sourcePubkey == targetPubkey {
continue
}

if npc, err := st.NumPendingConsolidations(); err != nil {
return fmt.Errorf("failed to fetch number of pending consolidations: %w", err) // This should never happen.
} else if npc >= pcLimit {
continue
}

activeBal, err := helpers.TotalActiveBalance(st)
if err != nil {
return err
}
churnLimit := helpers.ConsolidationChurnLimit(primitives.Gwei(activeBal))
if churnLimit <= primitives.Gwei(params.BeaconConfig().MinActivationBalance) {
continue
}

srcIdx, ok := st.ValidatorIndexByPubkey(sourcePubkey)
if !ok {
continue
}
tgtIdx, ok := st.ValidatorIndexByPubkey(targetPubkey)
if !ok {
continue
}

srcV, err := st.ValidatorAtIndex(srcIdx)
if err != nil {
return fmt.Errorf("failed to fetch source validator: %w", err) // This should never happen.
}

roSrcV, err := state_native.NewValidator(srcV)
if err != nil {
return err
}

tgtV, err := st.ValidatorAtIndexReadOnly(tgtIdx)
if err != nil {
return fmt.Errorf("failed to fetch target validator: %w", err) // This should never happen.
}

// Verify source withdrawal credentials.
if !roSrcV.HasExecutionWithdrawalCredentials() {
continue
}
// Confirm source_validator.withdrawal_credentials[12:] == consolidation_request.source_address.
if len(srcV.WithdrawalCredentials) != 32 || len(cr.SourceAddress) != 20 || !bytes.HasSuffix(srcV.WithdrawalCredentials, cr.SourceAddress) {
continue
}

// Target validator must have their withdrawal credentials set appropriately.
if !tgtV.HasCompoundingWithdrawalCredentials() {
continue
}

// Both validators must be active.
if !helpers.IsActiveValidator(srcV, curEpoch) || !helpers.IsActiveValidatorUsingTrie(tgtV, curEpoch) {
continue
}
// Neither validator is exiting.
if srcV.ExitEpoch != ffe || tgtV.ExitEpoch() != ffe {
continue
}

e, overflow := math.SafeAdd(uint64(srcV.ActivationEpoch), uint64(params.BeaconConfig().ShardCommitteePeriod))
if overflow {
log.Error("Overflow when adding activation epoch and shard committee period")
continue
}
if uint64(curEpoch) < e {
continue
}

hasBal, err := st.HasPendingBalanceToWithdraw(srcIdx)
if err != nil {
log.WithError(err).Error("Failed to fetch pending balance to withdraw")
continue
}
if hasBal {
continue
}

exitEpoch, err := computeConsolidationEpochAndUpdateChurn(st, primitives.Gwei(srcV.EffectiveBalance))
if err != nil {
log.WithError(err).Error("Failed to compute consolidation epoch")
continue
}
srcV.ExitEpoch = exitEpoch
srcV.WithdrawableEpoch = exitEpoch + minValWithdrawDelay
if err := st.UpdateValidatorAtIndex(srcIdx, srcV); err != nil {
return fmt.Errorf("failed to update validator: %w", err) // This should never happen.
}

if err := st.AppendPendingConsolidation(&eth.PendingConsolidation{SourceIndex: srcIdx, TargetIndex: tgtIdx}); err != nil {
return fmt.Errorf("failed to append pending consolidation: %w", err) // This should never happen.
}
}

return nil
}

func isValidSwitchToCompoundingRequest(st state.BeaconState, req *enginev1.ConsolidationRequest) bool {
if req.SourcePubkey == nil || req.TargetPubkey == nil {
return false
}

if !bytes.Equal(req.SourcePubkey, req.TargetPubkey) {
return false
}

srcIdx, ok := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(req.SourcePubkey))
if !ok {
return false
}
srcV, err := st.ValidatorAtIndexReadOnly(srcIdx)
if err != nil {
return false
}

sourceAddress := req.SourceAddress
withdrawalCreds := srcV.GetWithdrawalCredentials()
if len(withdrawalCreds) != 32 || len(sourceAddress) != 20 || !bytes.HasSuffix(withdrawalCreds, sourceAddress) {
return false
}

if !srcV.HasETH1WithdrawalCredentials() {
return false
}

curEpoch := slots.ToEpoch(st.Slot())
if !helpers.IsActiveValidatorUsingTrie(srcV, curEpoch) {
return false
}

if srcV.ExitEpoch() != params.BeaconConfig().FarFutureEpoch {
return false
}

return true
}

func switchToCompoundingValidator(st state.BeaconState, idx primitives.ValidatorIndex) error {
v, err := st.ValidatorAtIndex(idx)
if err != nil {
return err
}
if len(v.WithdrawalCredentials) == 0 {
return errors.New("validator has no withdrawal credentials")
}

v.WithdrawalCredentials[0] = params.BeaconConfig().CompoundingWithdrawalPrefixByte
if err := st.UpdateValidatorAtIndex(idx, v); err != nil {
return err
}
return queueExcessActiveBalance(st, idx)
}

func queueExcessActiveBalance(st state.BeaconState, idx primitives.ValidatorIndex) error {
bal, err := st.BalanceAtIndex(idx)
if err != nil {
return err
}

if bal > params.BeaconConfig().MinActivationBalance {
if err := st.UpdateBalancesAtIndex(idx, params.BeaconConfig().MinActivationBalance); err != nil {
return err
}
excessBalance := bal - params.BeaconConfig().MinActivationBalance
val, err := st.ValidatorAtIndexReadOnly(idx)
if err != nil {
return err
}
pk := val.PublicKey()
return st.AppendPendingDeposit(&eth.PendingDeposit{
PublicKey: pk[:],
WithdrawalCredentials: val.GetWithdrawalCredentials(),
Amount: excessBalance,
Signature: common.InfiniteSignature[:],
Slot: params.BeaconConfig().GenesisSlot,
})
}
return nil
}

func computeConsolidationEpochAndUpdateChurn(st state.BeaconState, consolidationBalance primitives.Gwei) (primitives.Epoch, error) {
earliestEpoch, err := st.EarliestConsolidationEpoch()
if err != nil {
return 0, err
}
earliestConsolidationEpoch := max(earliestEpoch, helpers.ActivationExitEpoch(slots.ToEpoch(st.Slot())))

activeBal, err := helpers.TotalActiveBalance(st)
if err != nil {
return 0, err
}
perEpochConsolidationChurn := helpers.ConsolidationChurnLimit(primitives.Gwei(activeBal))

var consolidationBalanceToConsume primitives.Gwei
if earliestEpoch < earliestConsolidationEpoch {
consolidationBalanceToConsume = perEpochConsolidationChurn
} else {
consolidationBalanceToConsume, err = st.ConsolidationBalanceToConsume()
if err != nil {
return 0, err
}
}

if consolidationBalance > consolidationBalanceToConsume {
balanceToProcess := consolidationBalance - consolidationBalanceToConsume
additionalEpochs, err := prysmMath.Div64(uint64(balanceToProcess-1), uint64(perEpochConsolidationChurn))
if err != nil {
return 0, err
}
additionalEpochs++
earliestConsolidationEpoch += primitives.Epoch(additionalEpochs)
consolidationBalanceToConsume += primitives.Gwei(additionalEpochs) * perEpochConsolidationChurn
}

if err := st.SetConsolidationBalanceToConsume(consolidationBalanceToConsume - consolidationBalance); err != nil {
return 0, err
}
if err := st.SetEarliestConsolidationEpoch(earliestConsolidationEpoch); err != nil {
return 0, err
}

return earliestConsolidationEpoch, nil
}
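
The churn arithmetic in computeConsolidationEpochAndUpdateChurn above is easier to follow with concrete numbers. The standalone sketch below is illustrative only and is not Prysm code: the function name and the figures are stand-ins, and it only models the accounting (consume the stored per-epoch churn, then push the earliest consolidation epoch out by the ceiling of whatever balance is left over).

package main

import "fmt"

// consolidationEpoch is a simplified stand-in for computeConsolidationEpochAndUpdateChurn.
// Balances are in gwei. It returns the epoch the consolidation lands in and the churn
// balance that remains to be consumed at that epoch.
func consolidationEpoch(storedEarliest, activationExit, balance, storedToConsume, perEpochChurn uint64) (uint64, uint64) {
	earliest := storedEarliest
	toConsume := storedToConsume
	if activationExit > earliest {
		// The stored epoch is stale, so a full epoch of churn is available again.
		earliest = activationExit
		toConsume = perEpochChurn
	}
	if balance > toConsume {
		// Ceiling division: extra epochs of churn needed for the remaining balance.
		additional := (balance-toConsume-1)/perEpochChurn + 1
		earliest += additional
		toConsume += additional * perEpochChurn
	}
	return earliest, toConsume - balance
}

func main() {
	// A 96 ETH effective balance against a 64 ETH per-epoch churn, starting fresh at epoch 12:
	// one extra epoch is needed, and 32 ETH of churn is left to consume at epoch 13.
	epoch, remaining := consolidationEpoch(10, 12, 96_000_000_000, 0, 64_000_000_000)
	fmt.Println(epoch, remaining) // 13 32000000000
}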
316
beacon-chain/core/requests/consolidations_test.go
Normal file
@@ -0,0 +1,316 @@
package requests_test

import (
"context"
"fmt"
"testing"

"github.com/OffchainLabs/prysm/v7/beacon-chain/core/requests"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/require"
)

func createValidatorsWithTotalActiveBalance(totalBal primitives.Gwei) []*eth.Validator {
num := totalBal / primitives.Gwei(params.BeaconConfig().MinActivationBalance)
vals := make([]*eth.Validator, num)
for i := range vals {
wd := make([]byte, 32)
wd[0] = params.BeaconConfig().CompoundingWithdrawalPrefixByte
wd[31] = byte(i)

vals[i] = &eth.Validator{
ActivationEpoch: primitives.Epoch(0),
EffectiveBalance: params.BeaconConfig().MinActivationBalance,
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
PublicKey: fmt.Appendf(nil, "val_%d", i),
WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
WithdrawalCredentials: wd,
}
}
if totalBal%primitives.Gwei(params.BeaconConfig().MinActivationBalance) != 0 {
vals = append(vals, &eth.Validator{
ActivationEpoch: primitives.Epoch(0),
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: uint64(totalBal) % params.BeaconConfig().MinActivationBalance,
})
}
return vals
}

func TestProcessConsolidationRequests(t *testing.T) {
tests := []struct {
name string
state state.BeaconState
reqs []*enginev1.ConsolidationRequest
validate func(*testing.T, state.BeaconState)
wantErr bool
}{
{
name: "nil request",
state: func() state.BeaconState {
st := &eth.BeaconStateElectra{}
s, err := state_native.InitializeFromProtoElectra(st)
require.NoError(t, err)
return s
}(),
reqs: []*enginev1.ConsolidationRequest{nil},
validate: func(t *testing.T, st state.BeaconState) {
require.DeepEqual(t, st, st)
},
wantErr: true,
},
{
name: "one valid request",
state: func() state.BeaconState {
st := &eth.BeaconStateElectra{
Slot: params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().ShardCommitteePeriod)),
Validators: createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
}
// Validator scenario setup. See comments in reqs section.
st.Validators[3].WithdrawalCredentials = bytesutil.Bytes32(0)
st.Validators[8].WithdrawalCredentials = bytesutil.Bytes32(1)
st.Validators[9].ActivationEpoch = params.BeaconConfig().FarFutureEpoch
st.Validators[12].ActivationEpoch = params.BeaconConfig().FarFutureEpoch
st.Validators[13].ExitEpoch = 10
st.Validators[16].ExitEpoch = 10
st.PendingPartialWithdrawals = []*eth.PendingPartialWithdrawal{
{
Index: 17,
Amount: 100,
},
}
s, err := state_native.InitializeFromProtoElectra(st)
require.NoError(t, err)
return s
}(),
reqs: []*enginev1.ConsolidationRequest{
// Source doesn't have withdrawal credentials.
{
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(1)),
SourcePubkey: []byte("val_3"),
TargetPubkey: []byte("val_4"),
},
// Source withdrawal credentials don't match the consolidation address.
{
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(0)), // Should be 5
SourcePubkey: []byte("val_5"),
TargetPubkey: []byte("val_6"),
},
// Target does not have their withdrawal credentials set appropriately. (Using eth1 address prefix)
{
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(7)),
SourcePubkey: []byte("val_7"),
TargetPubkey: []byte("val_8"),
},
// Source is inactive.
{
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(9)),
SourcePubkey: []byte("val_9"),
TargetPubkey: []byte("val_10"),
},
// Target is inactive.
{
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(11)),
SourcePubkey: []byte("val_11"),
TargetPubkey: []byte("val_12"),
},
// Source is exiting.
{
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(13)),
SourcePubkey: []byte("val_13"),
TargetPubkey: []byte("val_14"),
},
// Target is exiting.
{
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(15)),
SourcePubkey: []byte("val_15"),
TargetPubkey: []byte("val_16"),
},
// Source doesn't exist
{
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(0)),
SourcePubkey: []byte("INVALID"),
TargetPubkey: []byte("val_0"),
},
// Target doesn't exist
{
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(0)),
SourcePubkey: []byte("val_0"),
TargetPubkey: []byte("INVALID"),
},
// Source == target
{
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(0)),
SourcePubkey: []byte("val_0"),
TargetPubkey: []byte("val_0"),
},
// Has pending partial withdrawal
{
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(0)),
SourcePubkey: []byte("val_17"),
TargetPubkey: []byte("val_1"),
},
// Valid consolidation request. This should be last to ensure invalid requests do
// not end the processing early.
{
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(1)),
SourcePubkey: []byte("val_1"),
TargetPubkey: []byte("val_2"),
},
},
validate: func(t *testing.T, st state.BeaconState) {
// Verify a pending consolidation is created.
numPC, err := st.NumPendingConsolidations()
require.NoError(t, err)
require.Equal(t, uint64(1), numPC)
pcs, err := st.PendingConsolidations()
require.NoError(t, err)
require.Equal(t, primitives.ValidatorIndex(1), pcs[0].SourceIndex)
require.Equal(t, primitives.ValidatorIndex(2), pcs[0].TargetIndex)

// Verify the source validator is exiting.
src, err := st.ValidatorAtIndex(1)
require.NoError(t, err)
require.NotEqual(t, params.BeaconConfig().FarFutureEpoch, src.ExitEpoch, "source validator exit epoch not updated")
require.Equal(t, params.BeaconConfig().MinValidatorWithdrawabilityDelay, src.WithdrawableEpoch-src.ExitEpoch, "source validator withdrawable epoch not set correctly")
},
wantErr: false,
},
{
name: "pending consolidations limit reached",
state: func() state.BeaconState {
st := &eth.BeaconStateElectra{
Validators: createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
PendingConsolidations: make([]*eth.PendingConsolidation, params.BeaconConfig().PendingConsolidationsLimit),
}
s, err := state_native.InitializeFromProtoElectra(st)
require.NoError(t, err)
return s
}(),
reqs: []*enginev1.ConsolidationRequest{
{
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(1)),
SourcePubkey: []byte("val_1"),
TargetPubkey: []byte("val_2"),
},
},
validate: func(t *testing.T, st state.BeaconState) {
// Verify no pending consolidation is created.
numPC, err := st.NumPendingConsolidations()
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().PendingConsolidationsLimit, numPC)

// Verify the source validator is not exiting.
src, err := st.ValidatorAtIndex(1)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().FarFutureEpoch, src.ExitEpoch, "source validator exit epoch should not be updated")
require.Equal(t, params.BeaconConfig().FarFutureEpoch, src.WithdrawableEpoch, "source validator withdrawable epoch should not be updated")
},
wantErr: false,
},
{
name: "pending consolidations limit reached during processing",
state: func() state.BeaconState {
st := &eth.BeaconStateElectra{
Slot: params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().ShardCommitteePeriod)),
Validators: createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
PendingConsolidations: make([]*eth.PendingConsolidation, params.BeaconConfig().PendingConsolidationsLimit-1),
}
s, err := state_native.InitializeFromProtoElectra(st)
require.NoError(t, err)
return s
}(),
reqs: []*enginev1.ConsolidationRequest{
{
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(1)),
SourcePubkey: []byte("val_1"),
TargetPubkey: []byte("val_2"),
},
{
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(3)),
SourcePubkey: []byte("val_3"),
TargetPubkey: []byte("val_4"),
},
},
validate: func(t *testing.T, st state.BeaconState) {
// Verify a pending consolidation is created.
numPC, err := st.NumPendingConsolidations()
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().PendingConsolidationsLimit, numPC)

// The first consolidation was appended.
pcs, err := st.PendingConsolidations()
require.NoError(t, err)
require.Equal(t, primitives.ValidatorIndex(1), pcs[params.BeaconConfig().PendingConsolidationsLimit-1].SourceIndex)
require.Equal(t, primitives.ValidatorIndex(2), pcs[params.BeaconConfig().PendingConsolidationsLimit-1].TargetIndex)

// Verify the second source validator is not exiting.
src, err := st.ValidatorAtIndex(3)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().FarFutureEpoch, src.ExitEpoch, "source validator exit epoch should not be updated")
require.Equal(t, params.BeaconConfig().FarFutureEpoch, src.WithdrawableEpoch, "source validator withdrawable epoch should not be updated")
},
wantErr: false,
},
{
name: "pending consolidations limit reached and compounded consolidation after",
state: func() state.BeaconState {
st := &eth.BeaconStateElectra{
Slot: params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().ShardCommitteePeriod)),
Validators: createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
PendingConsolidations: make([]*eth.PendingConsolidation, params.BeaconConfig().PendingConsolidationsLimit),
}
// To allow compounding consolidation requests.
st.Validators[3].WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
s, err := state_native.InitializeFromProtoElectra(st)
require.NoError(t, err)
return s
}(),
reqs: []*enginev1.ConsolidationRequest{
{
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(1)),
SourcePubkey: []byte("val_1"),
TargetPubkey: []byte("val_2"),
},
{
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(3)),
SourcePubkey: []byte("val_3"),
TargetPubkey: []byte("val_3"),
},
},
validate: func(t *testing.T, st state.BeaconState) {
// Verify a pending consolidation is created.
numPC, err := st.NumPendingConsolidations()
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().PendingConsolidationsLimit, numPC)

// Verify that the last consolidation was included
src, err := st.ValidatorAtIndex(3)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().CompoundingWithdrawalPrefixByte, src.WithdrawalCredentials[0], "source validator was not compounded")
},
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := requests.ProcessConsolidationRequests(context.TODO(), tt.state, tt.reqs)
if (err != nil) != tt.wantErr {
t.Errorf("ProcessWithdrawalRequests() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !tt.wantErr {
require.NoError(t, err)
}
if tt.validate != nil {
tt.validate(t, tt.state)
}
})
}
}
73
beacon-chain/core/requests/deposits.go
Normal file
@@ -0,0 +1,73 @@
package requests

import (
"context"

"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/pkg/errors"
)

// ProcessDepositRequests processes execution layer deposits requests.
func ProcessDepositRequests(ctx context.Context, beaconState state.BeaconState, reqs []*enginev1.DepositRequest) (state.BeaconState, error) {
_, span := trace.StartSpan(ctx, "requests.ProcessDepositRequests")
defer span.End()

if len(reqs) == 0 {
return beaconState, nil
}

var err error
for _, req := range reqs {
beaconState, err = processDepositRequest(beaconState, req)
if err != nil {
return nil, errors.Wrap(err, "could not apply deposit request")
}
}
return beaconState, nil
}

// processDepositRequest processes the specific deposit request
//
// def process_deposit_request(state: BeaconState, deposit_request: DepositRequest) -> None:
//
// # Set deposit request start index
// if state.deposit_requests_start_index == UNSET_DEPOSIT_REQUESTS_START_INDEX:
// state.deposit_requests_start_index = deposit_request.index
//
// # Create pending deposit
// state.pending_deposits.append(PendingDeposit(
// pubkey=deposit_request.pubkey,
// withdrawal_credentials=deposit_request.withdrawal_credentials,
// amount=deposit_request.amount,
// signature=deposit_request.signature,
// slot=state.slot,
// ))
func processDepositRequest(beaconState state.BeaconState, req *enginev1.DepositRequest) (state.BeaconState, error) {
requestsStartIndex, err := beaconState.DepositRequestsStartIndex()
if err != nil {
return nil, errors.Wrap(err, "could not get deposit requests start index")
}
if req == nil {
return nil, errors.New("nil deposit request")
}
if requestsStartIndex == params.BeaconConfig().UnsetDepositRequestsStartIndex {
if err := beaconState.SetDepositRequestsStartIndex(req.Index); err != nil {
return nil, errors.Wrap(err, "could not set deposit requests start index")
}
}
if err := beaconState.AppendPendingDeposit(&ethpb.PendingDeposit{
PublicKey: bytesutil.SafeCopyBytes(req.Pubkey),
WithdrawalCredentials: bytesutil.SafeCopyBytes(req.WithdrawalCredentials),
Amount: req.Amount,
Signature: bytesutil.SafeCopyBytes(req.Signature),
Slot: beaconState.Slot(),
}); err != nil {
return nil, errors.Wrap(err, "could not append deposit request")
}
return beaconState, nil
}
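
A standalone model of the two effects of processDepositRequest above may help: the deposit-requests start index is pinned exactly once, by the first request processed while it is still unset, and every request is appended as a pending deposit stamped with the state's current slot. This sketch is illustrative only and is not Prysm code; the toy types and constants stand in for the real beacon state accessors.

package main

import "fmt"

// unsetStartIndex stands in for params.BeaconConfig().UnsetDepositRequestsStartIndex.
const unsetStartIndex = ^uint64(0)

type depositRequest struct{ index, amount uint64 }

type pendingDeposit struct{ amount, slot uint64 }

// miniState is a toy stand-in for the beacon state fields touched by processDepositRequest.
type miniState struct {
	slot            uint64
	startIndex      uint64
	pendingDeposits []pendingDeposit
}

func (s *miniState) processDepositRequest(r depositRequest) {
	if s.startIndex == unsetStartIndex {
		s.startIndex = r.index // pinned by the first request only
	}
	s.pendingDeposits = append(s.pendingDeposits, pendingDeposit{amount: r.amount, slot: s.slot})
}

func main() {
	st := &miniState{slot: 100, startIndex: unsetStartIndex}
	for _, r := range []depositRequest{{index: 7, amount: 32_000_000_000}, {index: 8, amount: 1_000_000_000}} {
		st.processDepositRequest(r)
	}
	fmt.Println(st.startIndex, len(st.pendingDeposits), st.pendingDeposits[0].slot) // 7 2 100
}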
70
beacon-chain/core/requests/deposits_test.go
Normal file
@@ -0,0 +1,70 @@
package requests_test

import (
"testing"

"github.com/OffchainLabs/prysm/v7/beacon-chain/core/requests"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/crypto/bls"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/testing/util"
)

func TestProcessDepositRequests(t *testing.T) {
st, _ := util.DeterministicGenesisStateElectra(t, 1)
sk, err := bls.RandKey()
require.NoError(t, err)
require.NoError(t, st.SetDepositRequestsStartIndex(1))

t.Run("empty requests continues", func(t *testing.T) {
newSt, err := requests.ProcessDepositRequests(t.Context(), st, []*enginev1.DepositRequest{})
require.NoError(t, err)
require.DeepEqual(t, newSt, st)
})
t.Run("nil request errors", func(t *testing.T) {
_, err = requests.ProcessDepositRequests(t.Context(), st, []*enginev1.DepositRequest{nil})
require.ErrorContains(t, "nil deposit request", err)
})

vals := st.Validators()
vals[0].PublicKey = sk.PublicKey().Marshal()
vals[0].WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
require.NoError(t, st.SetValidators(vals))
bals := st.Balances()
bals[0] = params.BeaconConfig().MinActivationBalance + 2000
require.NoError(t, st.SetBalances(bals))
require.NoError(t, st.SetPendingDeposits(make([]*eth.PendingDeposit, 0))) // reset pbd as the deterministic state populates this already
withdrawalCred := make([]byte, 32)
withdrawalCred[0] = params.BeaconConfig().CompoundingWithdrawalPrefixByte
depositMessage := &eth.DepositMessage{
PublicKey: sk.PublicKey().Marshal(),
Amount: 1000,
WithdrawalCredentials: withdrawalCred,
}
domain, err := signing.ComputeDomain(params.BeaconConfig().DomainDeposit, nil, nil)
require.NoError(t, err)
sr, err := signing.ComputeSigningRoot(depositMessage, domain)
require.NoError(t, err)
sig := sk.Sign(sr[:])
reqs := []*enginev1.DepositRequest{
{
Pubkey: depositMessage.PublicKey,
Index: 0,
WithdrawalCredentials: depositMessage.WithdrawalCredentials,
Amount: depositMessage.Amount,
Signature: sig.Marshal(),
},
}
st, err = requests.ProcessDepositRequests(t.Context(), st, reqs)
require.NoError(t, err)

pbd, err := st.PendingDeposits()
require.NoError(t, err)
require.Equal(t, 1, len(pbd))
require.Equal(t, uint64(1000), pbd[0].Amount)
require.DeepEqual(t, bytesutil.SafeCopyBytes(reqs[0].Pubkey), pbd[0].PublicKey)
}
9
beacon-chain/core/requests/log.go
Normal file
@@ -0,0 +1,9 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package requests

import "github.com/sirupsen/logrus"

// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "beacon-chain/core/requests")

@@ -1,4 +1,4 @@
package electra
package requests

import (
"bytes"
@@ -88,7 +88,7 @@ import (
// withdrawable_epoch=withdrawable_epoch,
// ))
func ProcessWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []*enginev1.WithdrawalRequest) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "electra.ProcessWithdrawalRequests")
ctx, span := trace.StartSpan(ctx, "requests.ProcessWithdrawalRequests")
defer span.End()
currentEpoch := slots.ToEpoch(st.Slot())
if len(wrs) == 0 {
@@ -1,9 +1,9 @@
package electra_test
package requests_test

import (
"testing"

"github.com/OffchainLabs/prysm/v7/beacon-chain/core/electra"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/requests"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
@@ -289,7 +289,7 @@ func TestProcessWithdrawRequests(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {

got, err := electra.ProcessWithdrawalRequests(t.Context(), tt.args.st, tt.args.wrs)
got, err := requests.ProcessWithdrawalRequests(t.Context(), tt.args.st, tt.args.wrs)
if (err != nil) != tt.wantErr {
t.Errorf("ProcessWithdrawalRequests() error = %v, wantErr %v", err, tt.wantErr)
return
@@ -27,6 +27,7 @@ go_library(
"//beacon-chain/core/execution:go_default_library",
"//beacon-chain/core/fulu:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/requests:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/core/transition/interop:go_default_library",
"//beacon-chain/core/validators:go_default_library",

@@ -6,6 +6,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/electra"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
coreRequests "github.com/OffchainLabs/prysm/v7/beacon-chain/core/requests"
v "github.com/OffchainLabs/prysm/v7/beacon-chain/core/validators"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
@@ -97,7 +98,7 @@ func electraOperations(ctx context.Context, st state.BeaconState, block interfac
return nil, electra.NewExecReqError("nil deposit request")
}
}
st, err = electra.ProcessDepositRequests(ctx, st, requests.Deposits)
st, err = coreRequests.ProcessDepositRequests(ctx, st, requests.Deposits)
if err != nil {
return nil, electra.NewExecReqError(errors.Wrap(err, "could not process deposit requests").Error())
}
@@ -107,7 +108,7 @@ func electraOperations(ctx context.Context, st state.BeaconState, block interfac
return nil, electra.NewExecReqError("nil withdrawal request")
}
}
st, err = electra.ProcessWithdrawalRequests(ctx, st, requests.Withdrawals)
st, err = coreRequests.ProcessWithdrawalRequests(ctx, st, requests.Withdrawals)
if err != nil {
return nil, electra.NewExecReqError(errors.Wrap(err, "could not process withdrawal requests").Error())
}
@@ -116,7 +117,7 @@ func electraOperations(ctx context.Context, st state.BeaconState, block interfac
return nil, electra.NewExecReqError("nil consolidation request")
}
}
if err := electra.ProcessConsolidationRequests(ctx, st, requests.Consolidations); err != nil {
if err := coreRequests.ProcessConsolidationRequests(ctx, st, requests.Consolidations); err != nil {
return nil, electra.NewExecReqError(errors.Wrap(err, "could not process consolidation requests").Error())
}
return st, nil

@@ -64,7 +64,6 @@ go_library(
|
||||
"//monitoring/tracing:go_default_library",
|
||||
"//runtime:go_default_library",
|
||||
"//runtime/prereqs:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
|
||||
@@ -66,7 +66,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/monitoring/prometheus"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/prereqs"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
@@ -469,10 +468,6 @@ func (b *BeaconNode) OperationFeed() event.SubscriberSender {
|
||||
func (b *BeaconNode) Start() {
|
||||
b.lock.Lock()
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"version": version.Version(),
|
||||
}).Info("Starting beacon node")
|
||||
|
||||
b.services.StartAll()
|
||||
|
||||
stop := b.stop
|
||||
|
||||
@@ -68,7 +68,6 @@ func TestNodeClose_OK(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestNodeStart_Ok(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
app := cli.App{}
|
||||
tmp := fmt.Sprintf("%s/datadirtest2", t.TempDir())
|
||||
set := flag.NewFlagSet("test", 0)
|
||||
@@ -97,11 +96,9 @@ func TestNodeStart_Ok(t *testing.T) {
|
||||
}()
|
||||
time.Sleep(3 * time.Second)
|
||||
node.Close()
|
||||
require.LogsContain(t, hook, "Starting beacon node")
|
||||
}
|
||||
|
||||
func TestNodeStart_SyncChecker(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
app := cli.App{}
|
||||
tmp := fmt.Sprintf("%s/datadirtest2", t.TempDir())
|
||||
set := flag.NewFlagSet("test", 0)
|
||||
@@ -127,7 +124,6 @@ func TestNodeStart_SyncChecker(t *testing.T) {
|
||||
time.Sleep(3 * time.Second)
|
||||
assert.NotNil(t, node.syncChecker.Svc)
|
||||
node.Close()
|
||||
require.LogsContain(t, hook, "Starting beacon node")
|
||||
}
|
||||
|
||||
// TestClearDB tests clearing the database
|
||||
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
"io"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/api"
|
||||
@@ -26,7 +25,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
|
||||
"github.com/OffchainLabs/prysm/v7/network/httputil"
|
||||
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
@@ -1044,112 +1042,27 @@ func (s *Server) GetBlockRoot(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, span := trace.StartSpan(r.Context(), "beacon.GetBlockRoot")
|
||||
defer span.End()
|
||||
|
||||
var err error
|
||||
var root []byte
|
||||
blockID := r.PathValue("block_id")
|
||||
if blockID == "" {
|
||||
httputil.HandleError(w, "block_id is required in URL params", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
switch blockID {
|
||||
case "head":
|
||||
root, err = s.ChainInfoFetcher.HeadRoot(ctx)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not retrieve head root: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
if root == nil {
|
||||
httputil.HandleError(w, "No head root was found", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
case "finalized":
|
||||
finalized := s.ChainInfoFetcher.FinalizedCheckpt()
|
||||
root = finalized.Root
|
||||
case "genesis":
|
||||
blk, err := s.BeaconDB.GenesisBlock(ctx)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not retrieve genesis block: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
if err := blocks.BeaconBlockIsNil(blk); err != nil {
|
||||
httputil.HandleError(w, "Could not find genesis block: "+err.Error(), http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
blkRoot, err := blk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not hash genesis block: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
root = blkRoot[:]
|
||||
default:
|
||||
isHex := strings.HasPrefix(blockID, "0x")
|
||||
if isHex {
|
||||
blockIDBytes, err := hexutil.Decode(blockID)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not decode block ID into bytes: "+err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
if len(blockIDBytes) != fieldparams.RootLength {
|
||||
httputil.HandleError(w, fmt.Sprintf("Block ID has length %d instead of %d", len(blockIDBytes), fieldparams.RootLength), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
blockID32 := bytesutil.ToBytes32(blockIDBytes)
|
||||
blk, err := s.BeaconDB.Block(ctx, blockID32)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, fmt.Sprintf("Could not retrieve block for block root %#x: %v", blockID, err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
if err := blocks.BeaconBlockIsNil(blk); err != nil {
|
||||
httputil.HandleError(w, "Could not find block: "+err.Error(), http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
root = blockIDBytes
|
||||
} else {
|
||||
slot, err := strconv.ParseUint(blockID, 10, 64)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not parse block ID: "+err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
hasRoots, roots, err := s.BeaconDB.BlockRootsBySlot(ctx, primitives.Slot(slot))
|
||||
if err != nil {
|
||||
httputil.HandleError(w, fmt.Sprintf("Could not retrieve blocks for slot %d: %v", slot, err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
if !hasRoots {
|
||||
httputil.HandleError(w, "Could not find any blocks with given slot", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
root = roots[0][:]
|
||||
if len(roots) == 1 {
|
||||
break
|
||||
}
|
||||
for _, blockRoot := range roots {
|
||||
canonical, err := s.ChainInfoFetcher.IsCanonical(ctx, blockRoot)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not determine if block root is canonical: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
if canonical {
|
||||
root = blockRoot[:]
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
root, err := s.Blocker.BlockRoot(ctx, []byte(blockID))
|
||||
if !shared.WriteBlockRootFetchError(w, err) {
|
||||
return
|
||||
}
|
||||
|
||||
b32Root := bytesutil.ToBytes32(root)
|
||||
isOptimistic, err := s.OptimisticModeFetcher.IsOptimisticForRoot(ctx, b32Root)
|
||||
isOptimistic, err := s.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not check if block is optimistic: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
response := &structs.BlockRootResponse{
|
||||
Data: &structs.BlockRoot{
|
||||
Root: hexutil.Encode(root),
|
||||
Root: hexutil.Encode(root[:]),
|
||||
},
|
||||
ExecutionOptimistic: isOptimistic,
|
||||
Finalized: s.FinalizationFetcher.IsFinalized(ctx, b32Root),
|
||||
Finalized: s.FinalizationFetcher.IsFinalized(ctx, root),
|
||||
}
|
||||
httputil.WriteJson(w, response)
|
||||
}
|
||||
|
||||
@@ -2509,6 +2509,10 @@ func TestServer_GetBlockRoot(t *testing.T) {
|
||||
HeadFetcher: mockChainFetcher,
|
||||
OptimisticModeFetcher: mockChainFetcher,
|
||||
FinalizationFetcher: mockChainFetcher,
|
||||
Blocker: &lookup.BeaconDbBlocker{
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainFetcher,
|
||||
},
|
||||
}
|
||||
|
||||
root, err := genBlk.Block.HashTreeRoot()
|
||||
@@ -2524,7 +2528,7 @@ func TestServer_GetBlockRoot(t *testing.T) {
|
||||
{
|
||||
name: "bad formatting",
|
||||
blockID: map[string]string{"block_id": "3bad0"},
|
||||
wantErr: "Could not parse block ID",
|
||||
wantErr: "Invalid block ID",
|
||||
wantCode: http.StatusBadRequest,
|
||||
},
|
||||
{
|
||||
@@ -2572,7 +2576,7 @@ func TestServer_GetBlockRoot(t *testing.T) {
|
||||
{
|
||||
name: "non-existent root",
|
||||
blockID: map[string]string{"block_id": hexutil.Encode(bytesutil.PadTo([]byte("hi there"), 32))},
|
||||
wantErr: "Could not find block",
|
||||
wantErr: "Block not found",
|
||||
wantCode: http.StatusNotFound,
|
||||
},
|
||||
{
|
||||
@@ -2585,7 +2589,7 @@ func TestServer_GetBlockRoot(t *testing.T) {
|
||||
{
|
||||
name: "no block",
|
||||
blockID: map[string]string{"block_id": "105"},
|
||||
wantErr: "Could not find any blocks with given slot",
|
||||
wantErr: "Block not found",
|
||||
wantCode: http.StatusNotFound,
|
||||
},
|
||||
}
|
||||
@@ -2633,6 +2637,10 @@ func TestServer_GetBlockRoot(t *testing.T) {
|
||||
HeadFetcher: mockChainFetcher,
|
||||
OptimisticModeFetcher: mockChainFetcher,
|
||||
FinalizationFetcher: mockChainFetcher,
|
||||
Blocker: &lookup.BeaconDbBlocker{
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainFetcher,
|
||||
},
|
||||
}
|
||||
|
||||
request := httptest.NewRequest(http.MethodGet, url, nil)
|
||||
@@ -2668,6 +2676,10 @@ func TestServer_GetBlockRoot(t *testing.T) {
|
||||
HeadFetcher: mockChainFetcher,
|
||||
OptimisticModeFetcher: mockChainFetcher,
|
||||
FinalizationFetcher: mockChainFetcher,
|
||||
Blocker: &lookup.BeaconDbBlocker{
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: mockChainFetcher,
|
||||
},
|
||||
}
|
||||
t.Run("true", func(t *testing.T) {
|
||||
request := httptest.NewRequest(http.MethodGet, url, nil)
|
||||
|
||||
@@ -26,21 +26,30 @@ func WriteStateFetchError(w http.ResponseWriter, err error) {
|
||||
httputil.HandleError(w, "Could not get state: "+err.Error(), http.StatusInternalServerError)
|
||||
}
|
||||
|
||||
// WriteBlockFetchError writes an appropriate error based on the supplied argument.
|
||||
// The argument error should be a result of fetching block.
|
||||
func WriteBlockFetchError(w http.ResponseWriter, blk interfaces.ReadOnlySignedBeaconBlock, err error) bool {
|
||||
// writeBlockIdError handles common block ID lookup errors.
|
||||
// It returns true if an error was handled and written to the response, and false if there was no error.
|
||||
func writeBlockIdError(w http.ResponseWriter, err error, fallbackMsg string) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
var blockNotFoundErr *lookup.BlockNotFoundError
|
||||
if errors.As(err, &blockNotFoundErr) {
|
||||
httputil.HandleError(w, "Block not found: "+blockNotFoundErr.Error(), http.StatusNotFound)
|
||||
return false
|
||||
return true
|
||||
}
|
||||
var invalidBlockIdErr *lookup.BlockIdParseError
|
||||
if errors.As(err, &invalidBlockIdErr) {
|
||||
httputil.HandleError(w, "Invalid block ID: "+invalidBlockIdErr.Error(), http.StatusBadRequest)
|
||||
return false
|
||||
return true
|
||||
}
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not get block from block ID: "+err.Error(), http.StatusInternalServerError)
|
||||
httputil.HandleError(w, fallbackMsg+": "+err.Error(), http.StatusInternalServerError)
|
||||
return true
|
||||
}
|
||||
|
||||
// WriteBlockFetchError writes an appropriate error based on the supplied argument.
|
||||
// The argument error should be a result of fetching block.
|
||||
func WriteBlockFetchError(w http.ResponseWriter, blk interfaces.ReadOnlySignedBeaconBlock, err error) bool {
|
||||
if writeBlockIdError(w, err, "Could not get block from block ID") {
|
||||
return false
|
||||
}
|
||||
if err = blocks.BeaconBlockIsNil(blk); err != nil {
|
||||
@@ -49,3 +58,10 @@ func WriteBlockFetchError(w http.ResponseWriter, blk interfaces.ReadOnlySignedBe
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// WriteBlockRootFetchError writes an appropriate error based on the supplied argument.
|
||||
// The argument error should be a result of fetching block root.
|
||||
// Returns true if no error occurred, false otherwise.
|
||||
func WriteBlockRootFetchError(w http.ResponseWriter, err error) bool {
|
||||
return !writeBlockIdError(w, err, "Could not get block root from block ID")
|
||||
}
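
Note the two return conventions: writeBlockIdError reports true when it has already written an error response, while the exported WriteBlockRootFetchError reports true when no error occurred, so callers bail out on false. A minimal sketch of that pattern follows; the import paths are assumptions that mirror the tests below, and the snippet is illustrative rather than part of this change.

// Illustrative only: exercising WriteBlockRootFetchError outside a real handler.
package main

import (
	"fmt"
	"net/http/httptest"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/eth/shared"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/lookup"
)

func main() {
	// Nil error: nothing is written and the helper returns true, so a handler keeps going.
	w := httptest.NewRecorder()
	fmt.Println(shared.WriteBlockRootFetchError(w, nil)) // true

	// Lookup error: the helper writes the response (404 here) and returns false,
	// so a handler returns immediately.
	w = httptest.NewRecorder()
	ok := shared.WriteBlockRootFetchError(w, lookup.NewBlockNotFoundError("no block at slot 123"))
	fmt.Println(ok, w.Code) // false 404
}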
|
||||
|
||||
@@ -105,3 +105,59 @@ func TestWriteBlockFetchError(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestWriteBlockRootFetchError tests the WriteBlockRootFetchError function
|
||||
// to ensure that the correct error message and code are written to the response
|
||||
// and that the function returns the correct boolean value.
|
||||
func TestWriteBlockRootFetchError(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
err error
|
||||
expectedMessage string
|
||||
expectedCode int
|
||||
expectedReturn bool
|
||||
}{
|
||||
{
|
||||
name: "Nil error should return true",
|
||||
err: nil,
|
||||
expectedReturn: true,
|
||||
},
|
||||
{
|
||||
name: "BlockNotFoundError should return 404",
|
||||
err: lookup.NewBlockNotFoundError("block not found at slot 123"),
|
||||
expectedMessage: "Block not found",
|
||||
expectedCode: http.StatusNotFound,
|
||||
expectedReturn: false,
|
||||
},
|
||||
{
|
||||
name: "BlockIdParseError should return 400",
|
||||
err: &lookup.BlockIdParseError{},
|
||||
expectedMessage: "Invalid block ID",
|
||||
expectedCode: http.StatusBadRequest,
|
||||
expectedReturn: false,
|
||||
},
|
||||
{
|
||||
name: "Generic error should return 500",
|
||||
err: errors.New("database connection failed"),
|
||||
expectedMessage: "Could not get block root from block ID",
|
||||
expectedCode: http.StatusInternalServerError,
|
||||
expectedReturn: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
writer := httptest.NewRecorder()
|
||||
result := WriteBlockRootFetchError(writer, c.err)
|
||||
|
||||
assert.Equal(t, c.expectedReturn, result, "incorrect return value")
|
||||
if !c.expectedReturn {
|
||||
assert.Equal(t, c.expectedCode, writer.Code, "incorrect status code")
|
||||
assert.StringContains(t, c.expectedMessage, writer.Body.String(), "incorrect error message")
|
||||
|
||||
e := &httputil.DefaultJsonError{}
|
||||
assert.NoError(t, json.Unmarshal(writer.Body.Bytes(), e), "failed to unmarshal response")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -60,6 +60,7 @@ func (e BlockIdParseError) Error() string {
|
||||
// Blocker is responsible for retrieving blocks.
|
||||
type Blocker interface {
|
||||
Block(ctx context.Context, id []byte) (interfaces.ReadOnlySignedBeaconBlock, error)
|
||||
BlockRoot(ctx context.Context, id []byte) ([fieldparams.RootLength]byte, error)
|
||||
BlobSidecars(ctx context.Context, id string, opts ...options.BlobsOption) ([]*blocks.VerifiedROBlob, *core.RpcError)
|
||||
Blobs(ctx context.Context, id string, opts ...options.BlobsOption) ([][]byte, *core.RpcError)
|
||||
DataColumns(ctx context.Context, id string, indices []int) ([]blocks.VerifiedRODataColumn, *core.RpcError)
|
||||
@@ -225,6 +226,18 @@ func (p *BeaconDbBlocker) Block(ctx context.Context, id []byte) (interfaces.Read
|
||||
return blk, nil
|
||||
}
|
||||
|
||||
// BlockRoot returns the block root for a given identifier. The identifier can be one of:
|
||||
// - "head" (canonical head in node's view)
|
||||
// - "genesis"
|
||||
// - "finalized"
|
||||
// - "justified"
|
||||
// - <slot>
|
||||
// - <hex encoded block root with '0x' prefix>
|
||||
func (p *BeaconDbBlocker) BlockRoot(ctx context.Context, id []byte) ([fieldparams.RootLength]byte, error) {
|
||||
root, _, err := p.resolveBlockID(ctx, string(id))
|
||||
return root, err
|
||||
}
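
A quick illustration of the identifiers listed above (TestBlockRoot below covers them exhaustively). The helper is a sketch, not part of this diff; it assumes the usual context and hexutil imports and a *BeaconDbBlocker wired up as in that test.

// Sketch only: resolving a named checkpoint and then the same block by its hex root.
func exampleBlockRoot(ctx context.Context, blocker *BeaconDbBlocker) error {
	finalized, err := blocker.BlockRoot(ctx, []byte("finalized"))
	if err != nil {
		return err
	}
	// A 0x-prefixed hex root resolves through the same path as "head", "genesis", or a slot.
	same, err := blocker.BlockRoot(ctx, []byte(hexutil.Encode(finalized[:])))
	if err != nil {
		return err
	}
	_ = same
	return nil
}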
|
||||
|
||||
// blobsContext holds common information needed for blob retrieval
|
||||
type blobsContext struct {
|
||||
root [fieldparams.RootLength]byte
|
||||
|
||||
@@ -168,6 +168,111 @@ func TestGetBlock(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlockRoot(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
ctx := t.Context()
|
||||
|
||||
genBlk, blkContainers := testutil.FillDBWithBlocks(ctx, t, beaconDB)
|
||||
canonicalRoots := make(map[[32]byte]bool)
|
||||
|
||||
for _, bContr := range blkContainers {
|
||||
canonicalRoots[bytesutil.ToBytes32(bContr.BlockRoot)] = true
|
||||
}
|
||||
headBlock := blkContainers[len(blkContainers)-1]
|
||||
|
||||
wsb, err := blocks.NewSignedBeaconBlock(headBlock.Block.(*ethpb.BeaconBlockContainer_Phase0Block).Phase0Block)
|
||||
require.NoError(t, err)
|
||||
|
||||
fetcher := &BeaconDbBlocker{
|
||||
BeaconDB: beaconDB,
|
||||
ChainInfoFetcher: &mockChain.ChainService{
|
||||
DB: beaconDB,
|
||||
Block: wsb,
|
||||
Root: headBlock.BlockRoot,
|
||||
FinalizedCheckPoint: ðpb.Checkpoint{Root: blkContainers[64].BlockRoot},
|
||||
CurrentJustifiedCheckPoint: ðpb.Checkpoint{Root: blkContainers[32].BlockRoot},
|
||||
CanonicalRoots: canonicalRoots,
|
||||
},
|
||||
}
|
||||
|
||||
genesisRoot, err := genBlk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
blockID []byte
|
||||
want [32]byte
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "slot",
|
||||
blockID: []byte("30"),
|
||||
want: bytesutil.ToBytes32(blkContainers[30].BlockRoot),
|
||||
},
|
||||
{
|
||||
name: "bad formatting",
|
||||
blockID: []byte("3bad0"),
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "head",
|
||||
blockID: []byte("head"),
|
||||
want: bytesutil.ToBytes32(headBlock.BlockRoot),
|
||||
},
|
||||
{
|
||||
name: "finalized",
|
||||
blockID: []byte("finalized"),
|
||||
want: bytesutil.ToBytes32(blkContainers[64].BlockRoot),
|
||||
},
|
||||
{
|
||||
name: "justified",
|
||||
blockID: []byte("justified"),
|
||||
want: bytesutil.ToBytes32(blkContainers[32].BlockRoot),
|
||||
},
|
||||
{
|
||||
name: "genesis",
|
||||
blockID: []byte("genesis"),
|
||||
want: genesisRoot,
|
||||
},
|
||||
{
|
||||
name: "genesis root",
|
||||
blockID: genesisRoot[:],
|
||||
want: genesisRoot,
|
||||
},
|
||||
{
|
||||
name: "root",
|
||||
blockID: blkContainers[20].BlockRoot,
|
||||
want: bytesutil.ToBytes32(blkContainers[20].BlockRoot),
|
||||
},
|
||||
{
|
||||
name: "hex root",
|
||||
blockID: []byte(hexutil.Encode(blkContainers[20].BlockRoot)),
|
||||
want: bytesutil.ToBytes32(blkContainers[20].BlockRoot),
|
||||
},
|
||||
{
|
||||
name: "non-existent root",
|
||||
blockID: bytesutil.PadTo([]byte("hi there"), 32),
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "no block at slot",
|
||||
blockID: []byte("105"),
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result, err := fetcher.BlockRoot(ctx, tt.blockID)
|
||||
if tt.wantErr {
|
||||
assert.NotEqual(t, err, nil, "no error has been returned")
|
||||
return
|
||||
}
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, tt.want, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlobsErrorHandling(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
|
||||
@@ -15,6 +15,7 @@ import (
|
||||
// MockBlocker is a fake implementation of lookup.Blocker.
|
||||
type MockBlocker struct {
|
||||
BlockToReturn interfaces.ReadOnlySignedBeaconBlock
|
||||
RootToReturn [32]byte
|
||||
ErrorToReturn error
|
||||
SlotBlockMap map[primitives.Slot]interfaces.ReadOnlySignedBeaconBlock
|
||||
RootBlockMap map[[32]byte]interfaces.ReadOnlySignedBeaconBlock
|
||||
@@ -39,6 +40,14 @@ func (m *MockBlocker) Block(_ context.Context, b []byte) (interfaces.ReadOnlySig
|
||||
return m.SlotBlockMap[primitives.Slot(slotNumber)], nil
|
||||
}
|
||||
|
||||
// BlockRoot --
|
||||
func (m *MockBlocker) BlockRoot(_ context.Context, _ []byte) ([32]byte, error) {
|
||||
if m.ErrorToReturn != nil {
|
||||
return [32]byte{}, m.ErrorToReturn
|
||||
}
|
||||
return m.RootToReturn, nil
|
||||
}
|
||||
|
||||
// BlobSidecars --
|
||||
func (*MockBlocker) BlobSidecars(_ context.Context, _ string, _ ...options.BlobsOption) ([]*blocks.VerifiedROBlob, *core.RpcError) {
|
||||
return nil, &core.RpcError{}
|
||||
|
||||
@@ -113,77 +113,100 @@ func (b *BeaconState) ExpectedWithdrawals() ([]*enginev1.Withdrawal, uint64, err
|
||||
defer b.lock.RUnlock()
|
||||
|
||||
withdrawals := make([]*enginev1.Withdrawal, 0, params.BeaconConfig().MaxWithdrawalsPerPayload)
|
||||
validatorIndex := b.nextWithdrawalValidatorIndex
|
||||
withdrawalIndex := b.nextWithdrawalIndex
|
||||
epoch := slots.ToEpoch(b.slot)
|
||||
|
||||
// Electra partial withdrawals functionality.
|
||||
var processedPartialWithdrawalsCount uint64
|
||||
if b.version >= version.Electra {
|
||||
for _, w := range b.pendingPartialWithdrawals {
|
||||
if w.WithdrawableEpoch > epoch || len(withdrawals) >= int(params.BeaconConfig().MaxPendingPartialsPerWithdrawalsSweep) {
|
||||
break
|
||||
}
|
||||
|
||||
v, err := b.validatorAtIndexReadOnly(w.Index)
|
||||
if err != nil {
|
||||
return nil, 0, fmt.Errorf("failed to determine withdrawals at index %d: %w", w.Index, err)
|
||||
}
|
||||
vBal, err := b.balanceAtIndex(w.Index)
|
||||
if err != nil {
|
||||
return nil, 0, fmt.Errorf("could not retrieve balance at index %d: %w", w.Index, err)
|
||||
}
|
||||
hasSufficientEffectiveBalance := v.EffectiveBalance() >= params.BeaconConfig().MinActivationBalance
|
||||
var totalWithdrawn uint64
|
||||
for _, wi := range withdrawals {
|
||||
if wi.ValidatorIndex == w.Index {
|
||||
totalWithdrawn += wi.Amount
|
||||
}
|
||||
}
|
||||
balance, err := mathutil.Sub64(vBal, totalWithdrawn)
|
||||
if err != nil {
|
||||
return nil, 0, errors.Wrapf(err, "failed to subtract balance %d with total withdrawn %d", vBal, totalWithdrawn)
|
||||
}
|
||||
hasExcessBalance := balance > params.BeaconConfig().MinActivationBalance
|
||||
if v.ExitEpoch() == params.BeaconConfig().FarFutureEpoch && hasSufficientEffectiveBalance && hasExcessBalance {
|
||||
amount := min(balance-params.BeaconConfig().MinActivationBalance, w.Amount)
|
||||
withdrawals = append(withdrawals, &enginev1.Withdrawal{
|
||||
Index: withdrawalIndex,
|
||||
ValidatorIndex: w.Index,
|
||||
Address: v.GetWithdrawalCredentials()[12:],
|
||||
Amount: amount,
|
||||
})
|
||||
withdrawalIndex++
|
||||
}
|
||||
processedPartialWithdrawalsCount++
|
||||
}
|
||||
withdrawalIndex, processedPartialWithdrawalsCount, err := b.appendPendingPartialWithdrawals(withdrawalIndex, &withdrawals)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
err = b.appendValidatorsSweepWithdrawals(withdrawalIndex, &withdrawals)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
return withdrawals, processedPartialWithdrawalsCount, nil
|
||||
}
|
||||
|
||||
func (b *BeaconState) appendPendingPartialWithdrawals(withdrawalIndex uint64, withdrawals *[]*enginev1.Withdrawal) (uint64, uint64, error) {
|
||||
if b.version < version.Electra {
|
||||
return withdrawalIndex, 0, nil
|
||||
}
|
||||
|
||||
ws := *withdrawals
|
||||
epoch := slots.ToEpoch(b.slot)
|
||||
var processedPartialWithdrawalsCount uint64
|
||||
for _, w := range b.pendingPartialWithdrawals {
|
||||
if w.WithdrawableEpoch > epoch || len(ws) >= int(params.BeaconConfig().MaxPendingPartialsPerWithdrawalsSweep) {
|
||||
break
|
||||
}
|
||||
|
||||
v, err := b.validatorAtIndexReadOnly(w.Index)
|
||||
if err != nil {
|
||||
return withdrawalIndex, 0, fmt.Errorf("failed to determine withdrawals at index %d: %w", w.Index, err)
|
||||
}
|
||||
vBal, err := b.balanceAtIndex(w.Index)
|
||||
if err != nil {
|
||||
return withdrawalIndex, 0, fmt.Errorf("could not retrieve balance at index %d: %w", w.Index, err)
|
||||
}
|
||||
hasSufficientEffectiveBalance := v.EffectiveBalance() >= params.BeaconConfig().MinActivationBalance
|
||||
var totalWithdrawn uint64
|
||||
for _, wi := range ws {
|
||||
if wi.ValidatorIndex == w.Index {
|
||||
totalWithdrawn += wi.Amount
|
||||
}
|
||||
}
|
||||
balance, err := mathutil.Sub64(vBal, totalWithdrawn)
|
||||
if err != nil {
|
||||
return withdrawalIndex, 0, errors.Wrapf(err, "failed to subtract balance %d with total withdrawn %d", vBal, totalWithdrawn)
|
||||
}
|
||||
hasExcessBalance := balance > params.BeaconConfig().MinActivationBalance
|
||||
if v.ExitEpoch() == params.BeaconConfig().FarFutureEpoch && hasSufficientEffectiveBalance && hasExcessBalance {
|
||||
amount := min(balance-params.BeaconConfig().MinActivationBalance, w.Amount)
|
||||
ws = append(ws, &enginev1.Withdrawal{
|
||||
Index: withdrawalIndex,
|
||||
ValidatorIndex: w.Index,
|
||||
Address: v.GetWithdrawalCredentials()[12:],
|
||||
Amount: amount,
|
||||
})
|
||||
withdrawalIndex++
|
||||
}
|
||||
processedPartialWithdrawalsCount++
|
||||
}
|
||||
|
||||
*withdrawals = ws
|
||||
return withdrawalIndex, processedPartialWithdrawalsCount, nil
|
||||
}
|
||||
|
||||
func (b *BeaconState) appendValidatorsSweepWithdrawals(withdrawalIndex uint64, withdrawals *[]*enginev1.Withdrawal) error {
|
||||
ws := *withdrawals
|
||||
validatorIndex := b.nextWithdrawalValidatorIndex
|
||||
validatorsLen := b.validatorsLen()
|
||||
epoch := slots.ToEpoch(b.slot)
|
||||
bound := min(uint64(validatorsLen), params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep)
|
||||
for range bound {
|
||||
val, err := b.validatorAtIndexReadOnly(validatorIndex)
|
||||
if err != nil {
|
||||
return nil, 0, errors.Wrapf(err, "could not retrieve validator at index %d", validatorIndex)
|
||||
return errors.Wrapf(err, "could not retrieve validator at index %d", validatorIndex)
|
||||
}
|
||||
balance, err := b.balanceAtIndex(validatorIndex)
|
||||
if err != nil {
|
||||
return nil, 0, errors.Wrapf(err, "could not retrieve balance at index %d", validatorIndex)
|
||||
return errors.Wrapf(err, "could not retrieve balance at index %d", validatorIndex)
|
||||
}
|
||||
if b.version >= version.Electra {
|
||||
var partiallyWithdrawnBalance uint64
|
||||
for _, w := range withdrawals {
|
||||
for _, w := range ws {
|
||||
if w.ValidatorIndex == validatorIndex {
|
||||
partiallyWithdrawnBalance += w.Amount
|
||||
}
|
||||
}
|
||||
balance, err = mathutil.Sub64(balance, partiallyWithdrawnBalance)
|
||||
if err != nil {
|
||||
return nil, 0, errors.Wrapf(err, "could not subtract balance %d with partial withdrawn balance %d", balance, partiallyWithdrawnBalance)
|
||||
return errors.Wrapf(err, "could not subtract balance %d with partial withdrawn balance %d", balance, partiallyWithdrawnBalance)
|
||||
}
|
||||
}
|
||||
if helpers.IsFullyWithdrawableValidator(val, balance, epoch, b.version) {
|
||||
withdrawals = append(withdrawals, &enginev1.Withdrawal{
|
||||
ws = append(ws, &enginev1.Withdrawal{
|
||||
Index: withdrawalIndex,
|
||||
ValidatorIndex: validatorIndex,
|
||||
Address: bytesutil.SafeCopyBytes(val.GetWithdrawalCredentials()[ETH1AddressOffset:]),
|
||||
@@ -191,7 +214,7 @@ func (b *BeaconState) ExpectedWithdrawals() ([]*enginev1.Withdrawal, uint64, err
|
||||
})
|
||||
withdrawalIndex++
|
||||
} else if helpers.IsPartiallyWithdrawableValidator(val, balance, epoch, b.version) {
|
||||
withdrawals = append(withdrawals, &enginev1.Withdrawal{
|
||||
ws = append(ws, &enginev1.Withdrawal{
|
||||
Index: withdrawalIndex,
|
||||
ValidatorIndex: validatorIndex,
|
||||
Address: bytesutil.SafeCopyBytes(val.GetWithdrawalCredentials()[ETH1AddressOffset:]),
|
||||
@@ -199,7 +222,7 @@ func (b *BeaconState) ExpectedWithdrawals() ([]*enginev1.Withdrawal, uint64, err
|
||||
})
|
||||
withdrawalIndex++
|
||||
}
|
||||
if uint64(len(withdrawals)) == params.BeaconConfig().MaxWithdrawalsPerPayload {
|
||||
if uint64(len(ws)) == params.BeaconConfig().MaxWithdrawalsPerPayload {
|
||||
break
|
||||
}
|
||||
validatorIndex += 1
|
||||
@@ -208,7 +231,8 @@ func (b *BeaconState) ExpectedWithdrawals() ([]*enginev1.Withdrawal, uint64, err
|
||||
}
|
||||
}
|
||||
|
||||
return withdrawals, processedPartialWithdrawalsCount, nil
|
||||
*withdrawals = ws
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *BeaconState) PendingPartialWithdrawals() ([]*ethpb.PendingPartialWithdrawal, error) {
|
||||
|
||||
3
changelog/bastin_add-version-log-at-startup.md
Normal file
@@ -0,0 +1,3 @@
### Added

- Added a log line at startup that reports the build version.
3
changelog/bastin_fix-check-logs-bug.md
Normal file
@@ -0,0 +1,3 @@
### Fixed

- Fixed a bug in `hack/check-logs.sh` where untracked files were ignored.
3
changelog/james-prysm_blocker-for-block-root.md
Normal file
@@ -0,0 +1,3 @@
### Ignored

- Optimized the /eth/v1/beacon/blocks/{block_id}/root endpoint by reusing the blocker lookup instead of duplicating the logic.
7
changelog/james-prysm_grpc-fallback.md
Normal file
@@ -0,0 +1,7 @@
### Changed

- gRPC fallback now matches the REST API implementation and only checks for and connects to synced nodes.

### Removed

- The gRPC resolver for load balancing. The new implementation matches the REST API's, so the resolver is removed and endpoint fallback is handled the same way for consistency.
2
changelog/terencechain_core-requests.md
Normal file
@@ -0,0 +1,2 @@
### Ignored
- Move withdrawal/consolidation request processing into `beacon-chain/core/requests` to avoid fork/package dependency cycles.
2
changelog/terencechain_refactor-expected-withdrawals.md
Normal file
@@ -0,0 +1,2 @@
### Ignored
- Refactor expected withdrawals into reusable helpers for future forks.
@@ -243,6 +243,11 @@ func before(ctx *cli.Context) error {
|
||||
}
|
||||
}
|
||||
|
||||
// Log the Prysm version at startup, after the log file and ephemeral log file have been initialized.
|
||||
log.WithFields(logrus.Fields{
|
||||
"version": version.Version(),
|
||||
}).Info("Prysm Beacon Chain started")
|
||||
|
||||
if err := cmd.ExpandSingleEndpointIfFile(ctx, flags.ExecutionEngineEndpoint); err != nil {
|
||||
return errors.Wrap(err, "failed to expand single endpoint")
|
||||
}
|
||||
|
||||
@@ -221,6 +221,11 @@ func main() {
|
||||
}
|
||||
}
|
||||
|
||||
// Log the Prysm version at startup, after the log file and ephemeral log file have been initialized.
|
||||
log.WithFields(logrus.Fields{
|
||||
"version": version.Version(),
|
||||
}).Info("Prysm Validator started")
|
||||
|
||||
// Fix data dir for Windows users.
|
||||
outdatedDataDir := filepath.Join(file.HomeDir(), "AppData", "Roaming", "Eth2Validators")
|
||||
currentDataDir := flags.DefaultValidatorDir()
|
||||
|
||||
@@ -2,7 +2,10 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["hashtree.go"],
|
||||
srcs = [
|
||||
"hashtree.go",
|
||||
"log.go",
|
||||
],
|
||||
importpath = "github.com/OffchainLabs/prysm/v7/crypto/hash/htr",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
|
||||
@@ -7,7 +7,6 @@ import (
|
||||
"github.com/OffchainLabs/hashtree"
|
||||
"github.com/OffchainLabs/prysm/v7/config/features"
|
||||
"github.com/prysmaticlabs/gohashtree"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const minSliceSizeToParallelize = 5000
|
||||
|
||||
9
crypto/hash/htr/log.go
Normal file
@@ -0,0 +1,9 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package htr

import "github.com/sirupsen/logrus"

// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "crypto/hash/htr")
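
With the generated logger in place, code in the package logs through the shared entry and picks up the package-path prefix described in the comment above. A tiny hypothetical example (the function, field name, and message are made up for illustration):

// Inside package htr; "chunks" and the message are illustrative only.
func logParallelHash(chunks int) {
	log.WithField("chunks", chunks).Debug("Hashing slice in parallel")
}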
@@ -8,12 +8,13 @@ cd "$ROOT_DIR"
|
||||
./hack/gen-logs.sh
|
||||
|
||||
# Fail if that changed anything
|
||||
if ! git diff --quiet -- ./; then
|
||||
if ! git diff --quiet -- ./ || [[ -n "$(git ls-files --others --exclude-standard -- ./)" ]]; then
|
||||
echo "ERROR: log.go files are out of date. Please run:"
|
||||
echo " ./hack/gen-logs.sh"
|
||||
echo "and commit the changes."
|
||||
echo
|
||||
git diff --stat -- ./ || true
|
||||
git status --porcelain -- ./ || true
|
||||
exit 1
|
||||
fi
|
||||
|
||||
|
||||
@@ -31,7 +31,7 @@ func addLogWriter(w io.Writer) {
|
||||
|
||||
// ConfigurePersistentLogging adds a log-to-file writer. File content is identical to stdout.
|
||||
func ConfigurePersistentLogging(logFileName string, format string, lvl logrus.Level, vmodule map[string]logrus.Level) error {
|
||||
logrus.WithField("logFileName", logFileName).Info("Logs will be made persistent")
|
||||
logrus.WithField("logFileName", logFileName).Debug("Logs will be made persistent")
|
||||
if err := file.MkdirAll(filepath.Dir(logFileName)); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -43,7 +43,7 @@ func ConfigurePersistentLogging(logFileName string, format string, lvl logrus.Le
|
||||
if format != "text" {
|
||||
addLogWriter(f)
|
||||
|
||||
logrus.Info("File logging initialized")
|
||||
logrus.Debug("File logging initialized")
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -70,7 +70,7 @@ func ConfigurePersistentLogging(logFileName string, format string, lvl logrus.Le
|
||||
AllowedLevels: logrus.AllLevels[:max(lvl, maxVmoduleLevel)+1],
|
||||
})
|
||||
|
||||
logrus.Info("File logging initialized")
|
||||
logrus.Debug("File logging initialized")
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -103,7 +103,7 @@ func ConfigureEphemeralLogFile(datadirPath string, app string) error {
|
||||
AllowedLevels: logrus.AllLevels[:ephemeralLogFileVerbosity+1],
|
||||
})
|
||||
|
||||
logrus.Info("Ephemeral log file initialized")
|
||||
logrus.Debug("Ephemeral log file initialized")
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -5388,7 +5388,7 @@
|
||||
|
||||
- name: process_consolidation_request
|
||||
sources:
|
||||
- file: beacon-chain/core/electra/consolidations.go
|
||||
- file: beacon-chain/core/requests/consolidations.go
|
||||
search: func ProcessConsolidationRequests(
|
||||
spec: |
|
||||
<spec fn="process_consolidation_request" fork="electra" hash="4da4b0fb">
|
||||
@@ -5528,7 +5528,7 @@
|
||||
|
||||
- name: process_deposit_request
|
||||
sources:
|
||||
- file: beacon-chain/core/electra/deposits.go
|
||||
- file: beacon-chain/core/requests/deposits.go
|
||||
search: func processDepositRequest(
|
||||
spec: |
|
||||
<spec fn="process_deposit_request" fork="electra" hash="547c4a35">
|
||||
@@ -6993,7 +6993,7 @@
|
||||
|
||||
- name: process_withdrawal_request
|
||||
sources:
|
||||
- file: beacon-chain/core/electra/withdrawals.go
|
||||
- file: beacon-chain/core/requests/withdrawals.go
|
||||
search: func ProcessWithdrawalRequests(
|
||||
spec: |
|
||||
<spec fn="process_withdrawal_request" fork="electra" hash="c21a0a53">
|
||||
|
||||
@@ -26,9 +26,9 @@ go_library(
|
||||
deps = [
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core/electra:go_default_library",
|
||||
"//beacon-chain/core/gloas:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/requests:go_default_library",
|
||||
"//beacon-chain/core/validators:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
"path"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/electra"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/requests"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
@@ -32,7 +32,7 @@ func RunConsolidationTest(t *testing.T, config string, fork string, block blockW
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return s, electra.ProcessConsolidationRequests(ctx, s, er.Consolidations)
|
||||
return s, requests.ProcessConsolidationRequests(ctx, s, er.Consolidations)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
"path"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/electra"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/requests"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
@@ -29,7 +29,7 @@ func RunDepositRequestsTest(t *testing.T, config string, fork string, block bloc
|
||||
RunBlockOperationTest(t, folderPath, blk, sszToState, func(ctx context.Context, s state.BeaconState, b interfaces.ReadOnlySignedBeaconBlock) (state.BeaconState, error) {
|
||||
e, err := b.Block().Body().ExecutionRequests()
|
||||
require.NoError(t, err, "Failed to get execution requests")
|
||||
return electra.ProcessDepositRequests(ctx, s, e.Deposits)
|
||||
return requests.ProcessDepositRequests(ctx, s, e.Deposits)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
"path"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/electra"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/requests"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
@@ -33,7 +33,7 @@ func RunWithdrawalRequestTest(t *testing.T, config string, fork string, block bl
|
||||
bod := b.Block().Body()
|
||||
e, err := bod.ExecutionRequests()
|
||||
require.NoError(t, err)
|
||||
return electra.ProcessWithdrawalRequests(ctx, s, e.Withdrawals)
|
||||
return requests.ProcessWithdrawalRequests(ctx, s, e.Withdrawals)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
@@ -25,6 +25,7 @@ go_library(
|
||||
],
|
||||
deps = [
|
||||
"//api/grpc:go_default_library",
|
||||
"//api/rest:go_default_library",
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//cmd/validator/flags:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
|
||||
@@ -3,14 +3,13 @@ package accounts
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
grpcutil "github.com/OffchainLabs/prysm/v7/api/grpc"
|
||||
"github.com/OffchainLabs/prysm/v7/api/rest"
|
||||
"github.com/OffchainLabs/prysm/v7/crypto/bls"
|
||||
"github.com/OffchainLabs/prysm/v7/validator/accounts/wallet"
|
||||
beaconApi "github.com/OffchainLabs/prysm/v7/validator/client/beacon-api"
|
||||
iface "github.com/OffchainLabs/prysm/v7/validator/client/iface"
|
||||
nodeClientFactory "github.com/OffchainLabs/prysm/v7/validator/client/node-client-factory"
|
||||
validatorClientFactory "github.com/OffchainLabs/prysm/v7/validator/client/validator-client-factory"
|
||||
@@ -77,22 +76,17 @@ func (acm *CLIManager) prepareBeaconClients(ctx context.Context) (*iface.Validat
|
||||
}
|
||||
|
||||
ctx = grpcutil.AppendHeaders(ctx, acm.grpcHeaders)
|
||||
grpcConn, err := grpc.DialContext(ctx, acm.beaconRPCProvider, acm.dialOpts...)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "could not dial endpoint %s", acm.beaconRPCProvider)
|
||||
}
|
||||
conn := validatorHelpers.NewNodeConnection(
|
||||
grpcConn,
|
||||
acm.beaconApiEndpoint,
|
||||
validatorHelpers.WithBeaconApiTimeout(acm.beaconApiTimeout),
|
||||
)
|
||||
|
||||
restHandler := beaconApi.NewBeaconApiRestHandler(
|
||||
http.Client{Timeout: acm.beaconApiTimeout},
|
||||
acm.beaconApiEndpoint,
|
||||
conn, err := validatorHelpers.NewNodeConnection(
|
||||
validatorHelpers.WithGrpc(ctx, acm.beaconRPCProvider, acm.dialOpts),
|
||||
validatorHelpers.WithREST(acm.beaconApiEndpoint, rest.WithHttpTimeout(acm.beaconApiTimeout)),
|
||||
)
|
||||
validatorClient := validatorClientFactory.NewValidatorClient(conn, restHandler)
|
||||
nodeClient := nodeClientFactory.NewNodeClient(conn, restHandler)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
validatorClient := validatorClientFactory.NewValidatorClient(conn)
|
||||
nodeClient := nodeClientFactory.NewNodeClient(conn)
|
||||
|
||||
return &validatorClient, &nodeClient, nil
|
||||
}
|
||||
|
||||
@@ -10,7 +10,6 @@ go_library(
|
||||
"log.go",
|
||||
"log_helpers.go",
|
||||
"metrics.go",
|
||||
"multiple_endpoints_grpc_resolver.go",
|
||||
"propose.go",
|
||||
"registration.go",
|
||||
"runner.go",
|
||||
@@ -29,6 +28,7 @@ go_library(
|
||||
"//api/client:go_default_library",
|
||||
"//api/client/event:go_default_library",
|
||||
"//api/grpc:go_default_library",
|
||||
"//api/rest:go_default_library",
|
||||
"//api/server/structs:go_default_library",
|
||||
"//async:go_default_library",
|
||||
"//async/event:go_default_library",
|
||||
@@ -58,7 +58,6 @@ go_library(
|
||||
"//time/slots:go_default_library",
|
||||
"//validator/accounts/iface:go_default_library",
|
||||
"//validator/accounts/wallet:go_default_library",
|
||||
"//validator/client/beacon-api:go_default_library",
|
||||
"//validator/client/beacon-chain-client-factory:go_default_library",
|
||||
"//validator/client/iface:go_default_library",
|
||||
"//validator/client/node-client-factory:go_default_library",
|
||||
@@ -86,13 +85,11 @@ go_library(
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@io_opentelemetry_go_contrib_instrumentation_google_golang_org_grpc_otelgrpc//:go_default_library",
|
||||
"@io_opentelemetry_go_contrib_instrumentation_net_http_otelhttp//:go_default_library",
|
||||
"@io_opentelemetry_go_otel_trace//:go_default_library",
|
||||
"@org_golang_google_grpc//:go_default_library",
|
||||
"@org_golang_google_grpc//codes:go_default_library",
|
||||
"@org_golang_google_grpc//credentials:go_default_library",
|
||||
"@org_golang_google_grpc//metadata:go_default_library",
|
||||
"@org_golang_google_grpc//resolver:go_default_library",
|
||||
"@org_golang_google_grpc//status:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
|
||||
@@ -124,6 +121,8 @@ go_test(
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//api/grpc:go_default_library",
|
||||
"//api/rest:go_default_library",
|
||||
"//api/server/structs:go_default_library",
|
||||
"//async/event:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
|
||||
@@ -26,7 +26,6 @@ go_library(
|
||||
"propose_exit.go",
|
||||
"prysm_beacon_chain_client.go",
|
||||
"registration.go",
|
||||
"rest_handler_client.go",
|
||||
"state_validators.go",
|
||||
"status.go",
|
||||
"stream_blocks.go",
|
||||
@@ -43,6 +42,7 @@ go_library(
|
||||
"//api:go_default_library",
|
||||
"//api/apiutil:go_default_library",
|
||||
"//api/client/event:go_default_library",
|
||||
"//api/rest:go_default_library",
|
||||
"//api/server/structs:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
@@ -111,6 +111,7 @@ go_test(
|
||||
deps = [
|
||||
"//api:go_default_library",
|
||||
"//api/apiutil:go_default_library",
|
||||
"//api/rest:go_default_library",
|
||||
"//api/server/structs:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/rpc/eth/shared/testing:go_default_library",
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"reflect"
|
||||
"strconv"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/api/rest"
|
||||
"github.com/OffchainLabs/prysm/v7/api/server/structs"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
@@ -17,7 +18,7 @@ import (
|
||||
|
||||
type beaconApiChainClient struct {
|
||||
fallbackClient iface.ChainClient
|
||||
jsonRestHandler RestHandler
|
||||
jsonRestHandler rest.RestHandler
|
||||
stateValidatorsProvider StateValidatorsProvider
|
||||
}
|
||||
|
||||
@@ -327,7 +328,7 @@ func (c beaconApiChainClient) ValidatorParticipation(ctx context.Context, in *et
|
||||
return nil, errors.New("beaconApiChainClient.ValidatorParticipation is not implemented. To use a fallback client, pass a fallback client as the last argument of NewBeaconApiChainClientWithFallback.")
|
||||
}
|
||||
|
||||
func NewBeaconApiChainClientWithFallback(jsonRestHandler RestHandler, fallbackClient iface.ChainClient) iface.ChainClient {
|
||||
func NewBeaconApiChainClientWithFallback(jsonRestHandler rest.RestHandler, fallbackClient iface.ChainClient) iface.ChainClient {
|
||||
return &beaconApiChainClient{
|
||||
jsonRestHandler: jsonRestHandler,
|
||||
fallbackClient: fallbackClient,
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"net/http"
|
||||
"strconv"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/api/rest"
|
||||
"github.com/OffchainLabs/prysm/v7/api/server/structs"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/validator/client/iface"
|
||||
@@ -20,7 +21,7 @@ var (
|
||||
|
||||
type beaconApiNodeClient struct {
|
||||
fallbackClient iface.NodeClient
|
||||
jsonRestHandler RestHandler
|
||||
jsonRestHandler rest.RestHandler
|
||||
genesisProvider GenesisProvider
|
||||
}
|
||||
|
||||
@@ -115,7 +116,7 @@ func (c *beaconApiNodeClient) IsReady(ctx context.Context) bool {
|
||||
return statusCode == http.StatusOK
|
||||
}
|
||||
|
||||
func NewNodeClientWithFallback(jsonRestHandler RestHandler, fallbackClient iface.NodeClient) iface.NodeClient {
|
||||
func NewNodeClientWithFallback(jsonRestHandler rest.RestHandler, fallbackClient iface.NodeClient) iface.NodeClient {
|
||||
b := &beaconApiNodeClient{
|
||||
jsonRestHandler: jsonRestHandler,
|
||||
fallbackClient: fallbackClient,
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/api/client/event"
|
||||
"github.com/OffchainLabs/prysm/v7/api/rest"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
|
||||
@@ -22,13 +23,13 @@ type beaconApiValidatorClient struct {
|
||||
genesisProvider GenesisProvider
|
||||
dutiesProvider dutiesProvider
|
||||
stateValidatorsProvider StateValidatorsProvider
|
||||
jsonRestHandler RestHandler
|
||||
jsonRestHandler rest.RestHandler
|
||||
beaconBlockConverter BeaconBlockConverter
|
||||
prysmChainClient iface.PrysmChainClient
|
||||
isEventStreamRunning bool
|
||||
}
|
||||
|
||||
func NewBeaconApiValidatorClient(jsonRestHandler RestHandler, opts ...ValidatorClientOpt) iface.ValidatorClient {
|
||||
func NewBeaconApiValidatorClient(jsonRestHandler rest.RestHandler, opts ...ValidatorClientOpt) iface.ValidatorClient {
|
||||
c := &beaconApiValidatorClient{
|
||||
genesisProvider: &beaconApiGenesisProvider{jsonRestHandler: jsonRestHandler},
|
||||
dutiesProvider: beaconApiDutiesProvider{jsonRestHandler: jsonRestHandler},
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"strconv"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/api/apiutil"
|
||||
"github.com/OffchainLabs/prysm/v7/api/rest"
|
||||
"github.com/OffchainLabs/prysm/v7/api/server/structs"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
@@ -27,7 +28,7 @@ type dutiesProvider interface {
|
||||
}
|
||||
|
||||
type beaconApiDutiesProvider struct {
|
||||
jsonRestHandler RestHandler
|
||||
jsonRestHandler rest.RestHandler
|
||||
}
|
||||
|
||||
type attesterDuty struct {
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/api/rest"
|
||||
"github.com/OffchainLabs/prysm/v7/api/server/structs"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
@@ -20,7 +21,7 @@ type GenesisProvider interface {
|
||||
}
|
||||
|
||||
type beaconApiGenesisProvider struct {
|
||||
jsonRestHandler RestHandler
|
||||
jsonRestHandler rest.RestHandler
|
||||
genesis *structs.Genesis
|
||||
once sync.Once
|
||||
}
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/api/apiutil"
|
||||
"github.com/OffchainLabs/prysm/v7/api/rest"
|
||||
"github.com/OffchainLabs/prysm/v7/api/server/structs"
|
||||
validator2 "github.com/OffchainLabs/prysm/v7/consensus-types/validator"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
@@ -18,7 +19,7 @@ import (
|
||||
)
|
||||
|
||||
// NewPrysmChainClient returns implementation of iface.PrysmChainClient.
|
||||
func NewPrysmChainClient(jsonRestHandler RestHandler, nodeClient iface.NodeClient) iface.PrysmChainClient {
|
||||
func NewPrysmChainClient(jsonRestHandler rest.RestHandler, nodeClient iface.NodeClient) iface.PrysmChainClient {
|
||||
return prysmChainClient{
|
||||
jsonRestHandler: jsonRestHandler,
|
||||
nodeClient: nodeClient,
|
||||
@@ -26,7 +27,7 @@ func NewPrysmChainClient(jsonRestHandler RestHandler, nodeClient iface.NodeClien
|
||||
}
|
||||
|
||||
type prysmChainClient struct {
|
||||
jsonRestHandler RestHandler
|
||||
jsonRestHandler rest.RestHandler
|
||||
nodeClient iface.NodeClient
|
||||
}
|
||||
|
||||
|
||||
@@ -12,13 +12,12 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/api"
|
||||
"github.com/OffchainLabs/prysm/v7/api/rest"
|
||||
"github.com/OffchainLabs/prysm/v7/api/server/structs"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/network/httputil"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
@@ -45,10 +44,7 @@ func TestGet(t *testing.T) {
|
||||
server := httptest.NewServer(mux)
|
||||
defer server.Close()
|
||||
|
||||
jsonRestHandler := BeaconApiRestHandler{
|
||||
client: http.Client{Timeout: time.Second * 5},
|
||||
host: server.URL,
|
||||
}
|
||||
jsonRestHandler := rest.NewRestHandler(http.Client{Timeout: time.Second * 5}, server.URL)
|
||||
resp := &structs.GetGenesisResponse{}
|
||||
require.NoError(t, jsonRestHandler.Get(ctx, endpoint+"?arg1=abc&arg2=def", resp))
|
||||
assert.DeepEqual(t, genesisJson, resp)
|
||||
@@ -79,10 +75,7 @@ func TestGetSSZ(t *testing.T) {
|
||||
server := httptest.NewServer(mux)
|
||||
defer server.Close()
|
||||
|
||||
jsonRestHandler := BeaconApiRestHandler{
|
||||
client: http.Client{Timeout: time.Second * 5},
|
||||
host: server.URL,
|
||||
}
|
||||
jsonRestHandler := rest.NewRestHandler(http.Client{Timeout: time.Second * 5}, server.URL)
|
||||
|
||||
body, header, err := jsonRestHandler.GetSSZ(ctx, endpoint)
|
||||
require.NoError(t, err)
|
||||
@@ -108,10 +101,7 @@ func TestGetSSZ(t *testing.T) {
|
||||
server := httptest.NewServer(mux)
|
||||
defer server.Close()
|
||||
|
||||
jsonRestHandler := BeaconApiRestHandler{
|
||||
client: http.Client{Timeout: time.Second * 5},
|
||||
host: server.URL,
|
||||
}
|
||||
jsonRestHandler := rest.NewRestHandler(http.Client{Timeout: time.Second * 5}, server.URL)
|
||||
|
||||
body, header, err := jsonRestHandler.GetSSZ(ctx, endpoint)
|
||||
require.NoError(t, err)
|
||||
@@ -136,10 +126,7 @@ func TestGetSSZ(t *testing.T) {
|
||||
server := httptest.NewServer(mux)
|
||||
defer server.Close()
|
||||
|
||||
jsonRestHandler := BeaconApiRestHandler{
|
||||
client: http.Client{Timeout: time.Second * 5},
|
||||
host: server.URL,
|
||||
}
|
||||
jsonRestHandler := rest.NewRestHandler(http.Client{Timeout: time.Second * 5}, server.URL)
|
||||
|
||||
_, _, err := jsonRestHandler.GetSSZ(ctx, endpoint)
|
||||
require.NoError(t, err)
|
||||
@@ -161,7 +148,7 @@ func TestAcceptOverrideSSZ(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
}))
|
||||
defer srv.Close()
|
||||
c := NewBeaconApiRestHandler(http.Client{Timeout: time.Second * 5}, srv.URL)
|
||||
c := rest.NewRestHandler(http.Client{Timeout: time.Second * 5}, srv.URL)
|
||||
_, _, err := c.GetSSZ(t.Context(), "/test")
|
||||
require.NoError(t, err)
|
||||
}
|
||||
@@ -204,162 +191,12 @@ func TestPost(t *testing.T) {
|
||||
server := httptest.NewServer(mux)
|
||||
defer server.Close()
|
||||
|
||||
jsonRestHandler := BeaconApiRestHandler{
|
||||
client: http.Client{Timeout: time.Second * 5},
|
||||
host: server.URL,
|
||||
}
|
||||
jsonRestHandler := rest.NewRestHandler(http.Client{Timeout: time.Second * 5}, server.URL)
|
||||
resp := &structs.GetGenesisResponse{}
|
||||
require.NoError(t, jsonRestHandler.Post(ctx, endpoint, headers, bytes.NewBuffer(dataBytes), resp))
|
||||
assert.DeepEqual(t, genesisJson, resp)
|
||||
}
|
||||
|
||||
func Test_decodeResp(t *testing.T) {
|
||||
type j struct {
|
||||
Foo string `json:"foo"`
|
||||
}
|
||||
t.Run("200 JSON with charset", func(t *testing.T) {
|
||||
body := bytes.Buffer{}
|
||||
r := &http.Response{
|
||||
Status: "200",
|
||||
StatusCode: http.StatusOK,
|
||||
Body: io.NopCloser(&body),
|
||||
Header: map[string][]string{"Content-Type": {"application/json; charset=utf-8"}},
|
||||
}
|
||||
require.NoError(t, decodeResp(r, nil))
|
||||
})
|
||||
t.Run("200 non-JSON", func(t *testing.T) {
|
||||
body := bytes.Buffer{}
|
||||
r := &http.Response{
|
||||
Status: "200",
|
||||
StatusCode: http.StatusOK,
|
||||
Body: io.NopCloser(&body),
|
||||
Header: map[string][]string{"Content-Type": {api.OctetStreamMediaType}},
|
||||
}
|
||||
require.NoError(t, decodeResp(r, nil))
|
||||
})
|
||||
t.Run("204 non-JSON", func(t *testing.T) {
|
||||
body := bytes.Buffer{}
|
||||
r := &http.Response{
|
||||
Status: "204",
|
||||
StatusCode: http.StatusNoContent,
|
||||
Body: io.NopCloser(&body),
|
||||
Header: map[string][]string{"Content-Type": {api.OctetStreamMediaType}},
|
||||
}
|
||||
require.NoError(t, decodeResp(r, nil))
|
||||
})
|
||||
t.Run("500 non-JSON", func(t *testing.T) {
|
||||
body := bytes.Buffer{}
|
||||
_, err := body.WriteString("foo")
|
||||
require.NoError(t, err)
|
||||
r := &http.Response{
|
||||
Status: "500",
|
||||
StatusCode: http.StatusInternalServerError,
|
||||
Body: io.NopCloser(&body),
|
||||
Header: map[string][]string{"Content-Type": {api.OctetStreamMediaType}},
|
||||
}
|
||||
err = decodeResp(r, nil)
|
||||
errJson := &httputil.DefaultJsonError{}
|
||||
require.Equal(t, true, errors.As(err, &errJson))
|
||||
assert.Equal(t, http.StatusInternalServerError, errJson.Code)
|
||||
assert.Equal(t, "foo", errJson.Message)
|
||||
})
|
||||
t.Run("200 JSON with resp", func(t *testing.T) {
|
||||
body := bytes.Buffer{}
|
||||
b, err := json.Marshal(&j{Foo: "foo"})
|
||||
require.NoError(t, err)
|
||||
body.Write(b)
|
||||
r := &http.Response{
|
||||
Status: "200",
|
||||
StatusCode: http.StatusOK,
|
||||
Body: io.NopCloser(&body),
|
||||
Header: map[string][]string{"Content-Type": {api.JsonMediaType}},
|
||||
}
|
||||
resp := &j{}
|
||||
require.NoError(t, decodeResp(r, resp))
|
||||
assert.Equal(t, "foo", resp.Foo)
|
||||
})
|
||||
t.Run("200 JSON without resp", func(t *testing.T) {
|
||||
body := bytes.Buffer{}
|
||||
r := &http.Response{
|
||||
Status: "200",
|
||||
StatusCode: http.StatusOK,
|
||||
Body: io.NopCloser(&body),
|
||||
Header: map[string][]string{"Content-Type": {api.JsonMediaType}},
|
||||
}
|
||||
require.NoError(t, decodeResp(r, nil))
|
||||
})
|
||||
t.Run("204 JSON", func(t *testing.T) {
|
||||
body := bytes.Buffer{}
|
||||
r := &http.Response{
|
||||
Status: "204",
|
||||
StatusCode: http.StatusNoContent,
|
||||
Body: io.NopCloser(&body),
|
||||
Header: map[string][]string{"Content-Type": {api.JsonMediaType}},
|
||||
}
|
||||
require.NoError(t, decodeResp(r, nil))
|
||||
})
|
||||
t.Run("500 JSON", func(t *testing.T) {
|
||||
body := bytes.Buffer{}
|
||||
b, err := json.Marshal(&httputil.DefaultJsonError{Code: http.StatusInternalServerError, Message: "error"})
|
||||
require.NoError(t, err)
|
||||
body.Write(b)
|
||||
r := &http.Response{
|
||||
Status: "500",
|
||||
StatusCode: http.StatusInternalServerError,
|
||||
Body: io.NopCloser(&body),
|
||||
Header: map[string][]string{"Content-Type": {api.JsonMediaType}},
|
||||
}
|
||||
err = decodeResp(r, nil)
|
||||
errJson := &httputil.DefaultJsonError{}
|
||||
require.Equal(t, true, errors.As(err, &errJson))
|
||||
assert.Equal(t, http.StatusInternalServerError, errJson.Code)
|
||||
assert.Equal(t, "error", errJson.Message)
|
||||
})
|
||||
t.Run("200 JSON cannot decode", func(t *testing.T) {
|
||||
body := bytes.Buffer{}
|
||||
_, err := body.WriteString("foo")
|
||||
require.NoError(t, err)
|
||||
r := &http.Response{
|
||||
Status: "200",
|
||||
StatusCode: http.StatusOK,
|
||||
Body: io.NopCloser(&body),
|
||||
Header: map[string][]string{"Content-Type": {api.JsonMediaType}},
|
||||
Request: &http.Request{},
|
||||
}
|
||||
resp := &j{}
|
||||
err = decodeResp(r, resp)
|
||||
assert.ErrorContains(t, "failed to decode response body into json", err)
|
||||
})
|
||||
t.Run("500 JSON cannot decode", func(t *testing.T) {
|
||||
body := bytes.Buffer{}
|
||||
_, err := body.WriteString("foo")
|
||||
require.NoError(t, err)
|
||||
r := &http.Response{
|
||||
Status: "500",
|
||||
StatusCode: http.StatusInternalServerError,
|
||||
Body: io.NopCloser(&body),
|
||||
Header: map[string][]string{"Content-Type": {api.JsonMediaType}},
|
||||
Request: &http.Request{},
|
||||
}
|
||||
err = decodeResp(r, nil)
|
||||
assert.ErrorContains(t, "failed to decode response body into error json", err)
|
||||
})
|
||||
t.Run("500 not JSON", func(t *testing.T) {
|
||||
body := bytes.Buffer{}
|
||||
_, err := body.WriteString("foo")
|
||||
require.NoError(t, err)
|
||||
r := &http.Response{
|
||||
Status: "500",
|
||||
StatusCode: http.StatusInternalServerError,
|
||||
Body: io.NopCloser(&body),
|
||||
Header: map[string][]string{"Content-Type": {"text/plain"}},
|
||||
Request: &http.Request{},
|
||||
}
|
||||
err = decodeResp(r, nil)
|
||||
assert.ErrorContains(t, "HTTP request unsuccessful (500: foo)", err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestGetStatusCode(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
const endpoint = "/eth/v1/node/health"
|
||||
@@ -401,10 +238,7 @@ func TestGetStatusCode(t *testing.T) {
|
||||
server := httptest.NewServer(mux)
|
||||
defer server.Close()
|
||||
|
||||
jsonRestHandler := BeaconApiRestHandler{
|
||||
client: http.Client{Timeout: time.Second * 5},
|
||||
host: server.URL,
|
||||
}
|
||||
jsonRestHandler := rest.NewRestHandler(http.Client{Timeout: time.Second * 5}, server.URL)
|
||||
|
||||
statusCode, err := jsonRestHandler.GetStatusCode(ctx, endpoint)
|
||||
require.NoError(t, err)
|
||||
@@ -413,10 +247,7 @@ func TestGetStatusCode(t *testing.T) {
|
||||
}
|
||||
|
||||
t.Run("returns error on connection failure", func(t *testing.T) {
|
||||
jsonRestHandler := BeaconApiRestHandler{
|
||||
client: http.Client{Timeout: time.Millisecond * 100},
|
||||
host: "http://localhost:99999", // Invalid port
|
||||
}
|
||||
jsonRestHandler := rest.NewRestHandler(http.Client{Timeout: time.Millisecond * 100}, "http://localhost:99999")
|
||||
|
||||
_, err := jsonRestHandler.GetStatusCode(ctx, endpoint)
|
||||
require.ErrorContains(t, "failed to perform request", err)
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"strconv"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/api/apiutil"
|
||||
"github.com/OffchainLabs/prysm/v7/api/rest"
|
||||
"github.com/OffchainLabs/prysm/v7/api/server/structs"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/pkg/errors"
|
||||
@@ -21,7 +22,7 @@ type StateValidatorsProvider interface {
|
||||
}
|
||||
|
||||
type beaconApiStateValidatorsProvider struct {
|
||||
jsonRestHandler RestHandler
|
||||
jsonRestHandler rest.RestHandler
|
||||
}
|
||||
|
||||
func (c beaconApiStateValidatorsProvider) StateValidators(
|
||||
|
||||
@@ -9,19 +9,17 @@ import (
|
||||
validatorHelpers "github.com/OffchainLabs/prysm/v7/validator/helpers"
|
||||
)
|
||||
|
||||
func NewChainClient(validatorConn validatorHelpers.NodeConnection, jsonRestHandler beaconApi.RestHandler) iface.ChainClient {
|
||||
grpcClient := grpcApi.NewGrpcChainClient(validatorConn.GetGrpcClientConn())
|
||||
func NewChainClient(validatorConn validatorHelpers.NodeConnection) iface.ChainClient {
|
||||
grpcClient := grpcApi.NewGrpcChainClient(validatorConn)
|
||||
if features.Get().EnableBeaconRESTApi {
|
||||
return beaconApi.NewBeaconApiChainClientWithFallback(jsonRestHandler, grpcClient)
|
||||
} else {
|
||||
return grpcClient
|
||||
return beaconApi.NewBeaconApiChainClientWithFallback(validatorConn.GetRestHandler(), grpcClient)
|
||||
}
|
||||
return grpcClient
|
||||
}
|
||||
|
||||
func NewPrysmChainClient(validatorConn validatorHelpers.NodeConnection, jsonRestHandler beaconApi.RestHandler) iface.PrysmChainClient {
|
||||
func NewPrysmChainClient(validatorConn validatorHelpers.NodeConnection) iface.PrysmChainClient {
|
||||
if features.Get().EnableBeaconRESTApi {
|
||||
return beaconApi.NewPrysmChainClient(jsonRestHandler, nodeClientFactory.NewNodeClient(validatorConn, jsonRestHandler))
|
||||
} else {
|
||||
return grpcApi.NewGrpcPrysmChainClient(validatorConn.GetGrpcClientConn())
|
||||
return beaconApi.NewPrysmChainClient(validatorConn.GetRestHandler(), nodeClientFactory.NewNodeClient(validatorConn))
|
||||
}
|
||||
return grpcApi.NewGrpcPrysmChainClient(validatorConn)
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@ go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"grpc_beacon_chain_client.go",
|
||||
"grpc_client_manager.go",
|
||||
"grpc_node_client.go",
|
||||
"grpc_prysm_beacon_chain_client.go",
|
||||
"grpc_validator_client.go",
|
||||
@@ -25,6 +26,7 @@ go_library(
|
||||
"//proto/eth/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//validator/client/iface:go_default_library",
|
||||
"//validator/helpers:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_golang_protobuf//ptypes/empty",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
@@ -39,6 +41,7 @@ go_test(
|
||||
name = "go_default_test",
|
||||
size = "small",
|
||||
srcs = [
|
||||
"grpc_client_manager_test.go",
|
||||
"grpc_prysm_beacon_chain_client_test.go",
|
||||
"grpc_validator_client_test.go",
|
||||
],
|
||||
@@ -56,7 +59,9 @@ go_test(
|
||||
"//testing/util:go_default_library",
|
||||
"//testing/validator-mock:go_default_library",
|
||||
"//validator/client/iface:go_default_library",
|
||||
"//validator/helpers:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
"@org_golang_google_grpc//:go_default_library",
|
||||
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
|
||||
"@org_uber_go_mock//gomock:go_default_library",
|
||||
],
|
||||
|
||||
@@ -5,38 +5,42 @@ import (

    ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v7/validator/client/iface"
    validatorHelpers "github.com/OffchainLabs/prysm/v7/validator/helpers"
    "github.com/golang/protobuf/ptypes/empty"
    "google.golang.org/grpc"
)

type grpcChainClient struct {
    beaconChainClient ethpb.BeaconChainClient
    *grpcClientManager[ethpb.BeaconChainClient]
}

func (c *grpcChainClient) ChainHead(ctx context.Context, in *empty.Empty) (*ethpb.ChainHead, error) {
    return c.beaconChainClient.GetChainHead(ctx, in)
    return c.getClient().GetChainHead(ctx, in)
}

func (c *grpcChainClient) ValidatorBalances(ctx context.Context, in *ethpb.ListValidatorBalancesRequest) (*ethpb.ValidatorBalances, error) {
    return c.beaconChainClient.ListValidatorBalances(ctx, in)
    return c.getClient().ListValidatorBalances(ctx, in)
}

func (c *grpcChainClient) Validators(ctx context.Context, in *ethpb.ListValidatorsRequest) (*ethpb.Validators, error) {
    return c.beaconChainClient.ListValidators(ctx, in)
    return c.getClient().ListValidators(ctx, in)
}

func (c *grpcChainClient) ValidatorQueue(ctx context.Context, in *empty.Empty) (*ethpb.ValidatorQueue, error) {
    return c.beaconChainClient.GetValidatorQueue(ctx, in)
    return c.getClient().GetValidatorQueue(ctx, in)
}

func (c *grpcChainClient) ValidatorPerformance(ctx context.Context, in *ethpb.ValidatorPerformanceRequest) (*ethpb.ValidatorPerformanceResponse, error) {
    return c.beaconChainClient.GetValidatorPerformance(ctx, in)
    return c.getClient().GetValidatorPerformance(ctx, in)
}

func (c *grpcChainClient) ValidatorParticipation(ctx context.Context, in *ethpb.GetValidatorParticipationRequest) (*ethpb.ValidatorParticipationResponse, error) {
    return c.beaconChainClient.GetValidatorParticipation(ctx, in)
    return c.getClient().GetValidatorParticipation(ctx, in)
}

func NewGrpcChainClient(cc grpc.ClientConnInterface) iface.ChainClient {
    return &grpcChainClient{ethpb.NewBeaconChainClient(cc)}
// NewGrpcChainClient creates a new gRPC chain client that supports
// dynamic connection switching via the NodeConnection's GrpcConnectionProvider.
func NewGrpcChainClient(conn validatorHelpers.NodeConnection) iface.ChainClient {
    return &grpcChainClient{
        grpcClientManager: newGrpcClientManager(conn, ethpb.NewBeaconChainClient),
    }
}

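Illustrative sketch (not part of this diff): with the change above, a caller hands the chain client the whole NodeConnection rather than a raw *grpc.ClientConn, so RPCs follow host switches made by the connection provider. The function name below is hypothetical, and it assumes iface.ChainClient exposes ChainHead the way the wrapper above does.

package grpc_api

import (
    "context"

    validatorHelpers "github.com/OffchainLabs/prysm/v7/validator/helpers"
    "github.com/golang/protobuf/ptypes/empty"
)

// exampleChainHead is a hypothetical caller: it builds the client once from the
// NodeConnection and issues an RPC that transparently uses the current host.
func exampleChainHead(ctx context.Context, conn validatorHelpers.NodeConnection) error {
    chainClient := NewGrpcChainClient(conn)
    _, err := chainClient.ChainHead(ctx, &empty.Empty{})
    return err
}
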
validator/client/grpc-api/grpc_client_manager.go (new file, 54 lines)
@@ -0,0 +1,54 @@
package grpc_api

import (
    "sync"

    validatorHelpers "github.com/OffchainLabs/prysm/v7/validator/helpers"
    "google.golang.org/grpc"
)

// grpcClientManager handles dynamic gRPC client recreation when the connection changes.
// It uses generics to work with any gRPC client type.
type grpcClientManager[T any] struct {
    sync.RWMutex
    conn validatorHelpers.NodeConnection
    client T
    lastHost string
    newClient func(grpc.ClientConnInterface) T
}

// newGrpcClientManager creates a new client manager with the given connection and client constructor.
func newGrpcClientManager[T any](
    conn validatorHelpers.NodeConnection,
    newClient func(grpc.ClientConnInterface) T,
) *grpcClientManager[T] {
    return &grpcClientManager[T]{
        conn: conn,
        newClient: newClient,
        client: newClient(conn.GetGrpcClientConn()),
        lastHost: conn.GetGrpcConnectionProvider().CurrentHost(),
    }
}

// getClient returns the current client, recreating it if the connection has changed.
func (m *grpcClientManager[T]) getClient() T {
    currentHost := m.conn.GetGrpcConnectionProvider().CurrentHost()
    m.RLock()
    if m.lastHost == currentHost {
        client := m.client
        m.RUnlock()
        return client
    }
    m.RUnlock()

    // Connection changed, need to recreate client
    m.Lock()
    defer m.Unlock()
    // Double-check after acquiring write lock
    if m.lastHost == currentHost {
        return m.client
    }
    m.client = m.newClient(m.conn.GetGrpcClientConn())
    m.lastHost = currentHost
    return m.client
}

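Illustrative sketch (not part of this diff) of the manager's contract, mirroring the pattern used by the real clients and the tests below: the constructor is captured so the client can be rebuilt against the new connection after a host switch. echoClient and exampleRebuild are hypothetical names used only here.

package grpc_api

import (
    validatorHelpers "github.com/OffchainLabs/prysm/v7/validator/helpers"
    "google.golang.org/grpc"
)

// echoClient is a hypothetical client type used only for this sketch.
type echoClient struct{ host string }

// exampleRebuild shows that getClient keeps returning the cached client while
// CurrentHost is unchanged; once the provider reports a different host, the
// captured constructor runs once more against the new connection.
func exampleRebuild(conn validatorHelpers.NodeConnection) (before, after *echoClient) {
    m := newGrpcClientManager(conn, func(_ grpc.ClientConnInterface) *echoClient {
        return &echoClient{host: conn.GetGrpcConnectionProvider().CurrentHost()}
    })
    before = m.getClient()
    // ... a failover elsewhere calls provider.SwitchHost(...), then:
    after = m.getClient()
    return before, after
}
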
validator/client/grpc-api/grpc_client_manager_test.go (new file, 168 lines)
@@ -0,0 +1,168 @@
|
||||
package grpc_api
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
validatorHelpers "github.com/OffchainLabs/prysm/v7/validator/helpers"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
// mockProvider implements grpcutil.GrpcConnectionProvider for testing.
|
||||
type mockProvider struct {
|
||||
hosts []string
|
||||
currentIndex int
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
func (m *mockProvider) CurrentConn() *grpc.ClientConn { return nil }
|
||||
func (m *mockProvider) Hosts() []string { return m.hosts }
|
||||
func (m *mockProvider) Close() {}
|
||||
|
||||
func (m *mockProvider) CurrentHost() string {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
return m.hosts[m.currentIndex]
|
||||
}
|
||||
|
||||
func (m *mockProvider) SwitchHost(index int) error {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
m.currentIndex = index
|
||||
return nil
|
||||
}
|
||||
|
||||
// nextHost is a test helper for round-robin simulation (not part of the interface).
|
||||
func (m *mockProvider) nextHost() {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
m.currentIndex = (m.currentIndex + 1) % len(m.hosts)
|
||||
}
|
||||
|
||||
// testClient is a simple type for testing the generic client manager.
|
||||
type testClient struct{ id int }
|
||||
|
||||
// testManager creates a manager with client creation counting.
|
||||
func testManager(t *testing.T, provider *mockProvider) (*grpcClientManager[*testClient], *int) {
|
||||
conn, err := validatorHelpers.NewNodeConnection(validatorHelpers.WithGrpcProvider(provider))
|
||||
require.NoError(t, err)
|
||||
|
||||
clientCount := new(int)
|
||||
newClient := func(grpc.ClientConnInterface) *testClient {
|
||||
*clientCount++
|
||||
return &testClient{id: *clientCount}
|
||||
}
|
||||
|
||||
manager := newGrpcClientManager(conn, newClient)
|
||||
require.NotNil(t, manager)
|
||||
return manager, clientCount
|
||||
}
|
||||
|
||||
func TestGrpcClientManager(t *testing.T) {
|
||||
t.Run("tracks host", func(t *testing.T) {
|
||||
provider := &mockProvider{hosts: []string{"host1:4000", "host2:4000"}}
|
||||
manager, count := testManager(t, provider)
|
||||
assert.Equal(t, 1, *count)
|
||||
assert.Equal(t, "host1:4000", manager.lastHost)
|
||||
})
|
||||
|
||||
t.Run("same host returns same client", func(t *testing.T) {
|
||||
provider := &mockProvider{hosts: []string{"host1:4000", "host2:4000"}}
|
||||
manager, count := testManager(t, provider)
|
||||
|
||||
c1, c2, c3 := manager.getClient(), manager.getClient(), manager.getClient()
|
||||
assert.Equal(t, 1, *count)
|
||||
assert.Equal(t, c1, c2)
|
||||
assert.Equal(t, c2, c3)
|
||||
})
|
||||
|
||||
t.Run("host change recreates client", func(t *testing.T) {
|
||||
provider := &mockProvider{hosts: []string{"host1:4000", "host2:4000"}}
|
||||
manager, count := testManager(t, provider)
|
||||
|
||||
c1 := manager.getClient()
|
||||
assert.Equal(t, 1, c1.id)
|
||||
|
||||
provider.nextHost()
|
||||
c2 := manager.getClient()
|
||||
assert.Equal(t, 2, *count)
|
||||
assert.Equal(t, 2, c2.id)
|
||||
|
||||
// Same host again - no recreation
|
||||
c3 := manager.getClient()
|
||||
assert.Equal(t, 2, *count)
|
||||
assert.Equal(t, c2, c3)
|
||||
})
|
||||
|
||||
t.Run("multiple host switches", func(t *testing.T) {
|
||||
provider := &mockProvider{hosts: []string{"host1:4000", "host2:4000", "host3:4000"}}
|
||||
manager, count := testManager(t, provider)
|
||||
assert.Equal(t, 1, *count)
|
||||
|
||||
for expected := 2; expected <= 4; expected++ {
|
||||
provider.nextHost()
|
||||
_ = manager.getClient()
|
||||
assert.Equal(t, expected, *count)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestGrpcClientManager_Concurrent(t *testing.T) {
|
||||
t.Run("concurrent access same host", func(t *testing.T) {
|
||||
provider := &mockProvider{hosts: []string{"host1:4000", "host2:4000"}}
|
||||
manager, _ := testManager(t, provider)
|
||||
|
||||
var clientCount int
|
||||
var countMu sync.Mutex
|
||||
// Override with thread-safe counter
|
||||
manager.newClient = func(grpc.ClientConnInterface) *testClient {
|
||||
countMu.Lock()
|
||||
clientCount++
|
||||
id := clientCount
|
||||
countMu.Unlock()
|
||||
return &testClient{id: id}
|
||||
}
|
||||
manager.client = manager.newClient(nil)
|
||||
clientCount = 1
|
||||
|
||||
var wg sync.WaitGroup
|
||||
for range 100 {
|
||||
wg.Go(func() { _ = manager.getClient() })
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
countMu.Lock()
|
||||
assert.Equal(t, 1, clientCount)
|
||||
countMu.Unlock()
|
||||
})
|
||||
|
||||
t.Run("concurrent with host changes", func(t *testing.T) {
|
||||
provider := &mockProvider{hosts: []string{"host1:4000", "host2:4000"}}
|
||||
manager, _ := testManager(t, provider)
|
||||
|
||||
var clientCount int
|
||||
var countMu sync.Mutex
|
||||
manager.newClient = func(grpc.ClientConnInterface) *testClient {
|
||||
countMu.Lock()
|
||||
clientCount++
|
||||
id := clientCount
|
||||
countMu.Unlock()
|
||||
return &testClient{id: id}
|
||||
}
|
||||
manager.client = manager.newClient(nil)
|
||||
clientCount = 1
|
||||
|
||||
var wg sync.WaitGroup
|
||||
for range 50 {
|
||||
wg.Go(func() { _ = manager.getClient() })
|
||||
wg.Go(func() { provider.nextHost() })
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
countMu.Lock()
|
||||
assert.NotEqual(t, 0, clientCount, "Should have created at least one client")
|
||||
countMu.Unlock()
|
||||
})
|
||||
}
|
||||
@@ -5,8 +5,8 @@ import (

    ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v7/validator/client/iface"
    validatorHelpers "github.com/OffchainLabs/prysm/v7/validator/helpers"
    "github.com/golang/protobuf/ptypes/empty"
    "google.golang.org/grpc"
)

var (
@@ -14,35 +14,48 @@ var (
)

type grpcNodeClient struct {
    nodeClient ethpb.NodeClient
    *grpcClientManager[ethpb.NodeClient]
}

func (c *grpcNodeClient) SyncStatus(ctx context.Context, in *empty.Empty) (*ethpb.SyncStatus, error) {
    return c.nodeClient.GetSyncStatus(ctx, in)
    return c.getClient().GetSyncStatus(ctx, in)
}

func (c *grpcNodeClient) Genesis(ctx context.Context, in *empty.Empty) (*ethpb.Genesis, error) {
    return c.nodeClient.GetGenesis(ctx, in)
    return c.getClient().GetGenesis(ctx, in)
}

func (c *grpcNodeClient) Version(ctx context.Context, in *empty.Empty) (*ethpb.Version, error) {
    return c.nodeClient.GetVersion(ctx, in)
    return c.getClient().GetVersion(ctx, in)
}

func (c *grpcNodeClient) Peers(ctx context.Context, in *empty.Empty) (*ethpb.Peers, error) {
    return c.nodeClient.ListPeers(ctx, in)
    return c.getClient().ListPeers(ctx, in)
}

func (c *grpcNodeClient) IsReady(ctx context.Context) bool {
    _, err := c.nodeClient.GetHealth(ctx, &ethpb.HealthRequest{})
    _, err := c.getClient().GetHealth(ctx, &ethpb.HealthRequest{})
    if err != nil {
        log.WithError(err).Error("Failed to get health of node")
        log.WithError(err).Debug("Failed to get health of node")
        return false
    }
    // Then check sync status - we only want fully synced nodes
    syncStatus, err := c.getClient().GetSyncStatus(ctx, &empty.Empty{})
    if err != nil {
        log.WithError(err).Debug("Failed to get sync status of node")
        return false
    }
    if syncStatus.Syncing {
        log.Debug("Node is syncing, not fully synced")
        return false
    }
    return true
}

func NewNodeClient(cc grpc.ClientConnInterface) iface.NodeClient {
    g := &grpcNodeClient{nodeClient: ethpb.NewNodeClient(cc)}
    return g
// NewNodeClient creates a new gRPC node client that supports
// dynamic connection switching via the NodeConnection's GrpcConnectionProvider.
func NewNodeClient(conn validatorHelpers.NodeConnection) iface.NodeClient {
    return &grpcNodeClient{
        grpcClientManager: newGrpcClientManager(conn, ethpb.NewNodeClient),
    }
}

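Illustrative helper (not part of this diff): since IsReady now requires both a passing health check and a non-syncing sync status, a caller could poll it before starting duties. waitUntilReady and the 5-second interval are assumptions made for this sketch.

package grpc_api

import (
    "context"
    "time"

    validatorHelpers "github.com/OffchainLabs/prysm/v7/validator/helpers"
)

// waitUntilReady polls the node client until the beacon node is healthy and
// fully synced, or until the context is cancelled.
func waitUntilReady(ctx context.Context, conn validatorHelpers.NodeConnection) bool {
    nodeClient := NewNodeClient(conn)
    ticker := time.NewTicker(5 * time.Second)
    defer ticker.Stop()
    for {
        if nodeClient.IsReady(ctx) {
            return true
        }
        select {
        case <-ctx.Done():
            return false
        case <-ticker.C:
        }
    }
}
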
@@ -12,9 +12,9 @@ import (
    eth "github.com/OffchainLabs/prysm/v7/proto/eth/v1"
    ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v7/validator/client/iface"
    validatorHelpers "github.com/OffchainLabs/prysm/v7/validator/helpers"
    "github.com/golang/protobuf/ptypes/empty"
    "github.com/pkg/errors"
    "google.golang.org/grpc"
)

type grpcPrysmChainClient struct {
@@ -95,6 +95,8 @@ func (c *grpcPrysmChainClient) ValidatorPerformance(ctx context.Context, in *eth
    return c.chainClient.ValidatorPerformance(ctx, in)
}

func NewGrpcPrysmChainClient(cc grpc.ClientConnInterface) iface.PrysmChainClient {
    return &grpcPrysmChainClient{chainClient: &grpcChainClient{ethpb.NewBeaconChainClient(cc)}}
// NewGrpcPrysmChainClient creates a new gRPC Prysm chain client that supports
// dynamic connection switching via the NodeConnection's GrpcConnectionProvider.
func NewGrpcPrysmChainClient(conn validatorHelpers.NodeConnection) iface.PrysmChainClient {
    return &grpcPrysmChainClient{chainClient: NewGrpcChainClient(conn)}
}

@@ -14,24 +14,24 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/validator/client/iface"
|
||||
validatorHelpers "github.com/OffchainLabs/prysm/v7/validator/helpers"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/golang/protobuf/ptypes/empty"
|
||||
"github.com/pkg/errors"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
type grpcValidatorClient struct {
|
||||
beaconNodeValidatorClient ethpb.BeaconNodeValidatorClient
|
||||
isEventStreamRunning bool
|
||||
*grpcClientManager[ethpb.BeaconNodeValidatorClient]
|
||||
isEventStreamRunning bool
|
||||
}
|
||||
|
||||
func (c *grpcValidatorClient) Duties(ctx context.Context, in *ethpb.DutiesRequest) (*ethpb.ValidatorDutiesContainer, error) {
|
||||
if features.Get().DisableDutiesV2 {
|
||||
return c.getDuties(ctx, in)
|
||||
}
|
||||
dutiesResponse, err := c.beaconNodeValidatorClient.GetDutiesV2(ctx, in)
|
||||
dutiesResponse, err := c.getClient().GetDutiesV2(ctx, in)
|
||||
if err != nil {
|
||||
if status.Code(err) == codes.Unimplemented {
|
||||
log.Warn("GetDutiesV2 returned status code unavailable, falling back to GetDuties")
|
||||
@@ -47,7 +47,7 @@ func (c *grpcValidatorClient) Duties(ctx context.Context, in *ethpb.DutiesReques
|
||||
|
||||
// getDuties is calling the v1 of get duties
|
||||
func (c *grpcValidatorClient) getDuties(ctx context.Context, in *ethpb.DutiesRequest) (*ethpb.ValidatorDutiesContainer, error) {
|
||||
dutiesResponse, err := c.beaconNodeValidatorClient.GetDuties(ctx, in)
|
||||
dutiesResponse, err := c.getClient().GetDuties(ctx, in)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(
|
||||
client.ErrConnectionIssue,
|
||||
@@ -147,108 +147,108 @@ func toValidatorDutyV2(duty *ethpb.DutiesV2Response_Duty) (*ethpb.ValidatorDuty,
|
||||
}
|
||||
|
||||
func (c *grpcValidatorClient) CheckDoppelGanger(ctx context.Context, in *ethpb.DoppelGangerRequest) (*ethpb.DoppelGangerResponse, error) {
|
||||
return c.beaconNodeValidatorClient.CheckDoppelGanger(ctx, in)
|
||||
return c.getClient().CheckDoppelGanger(ctx, in)
|
||||
}
|
||||
|
||||
func (c *grpcValidatorClient) DomainData(ctx context.Context, in *ethpb.DomainRequest) (*ethpb.DomainResponse, error) {
|
||||
return c.beaconNodeValidatorClient.DomainData(ctx, in)
|
||||
return c.getClient().DomainData(ctx, in)
|
||||
}
|
||||
|
||||
func (c *grpcValidatorClient) AttestationData(ctx context.Context, in *ethpb.AttestationDataRequest) (*ethpb.AttestationData, error) {
|
||||
return c.beaconNodeValidatorClient.GetAttestationData(ctx, in)
|
||||
return c.getClient().GetAttestationData(ctx, in)
|
||||
}
|
||||
|
||||
func (c *grpcValidatorClient) BeaconBlock(ctx context.Context, in *ethpb.BlockRequest) (*ethpb.GenericBeaconBlock, error) {
|
||||
return c.beaconNodeValidatorClient.GetBeaconBlock(ctx, in)
|
||||
return c.getClient().GetBeaconBlock(ctx, in)
|
||||
}
|
||||
|
||||
func (c *grpcValidatorClient) FeeRecipientByPubKey(ctx context.Context, in *ethpb.FeeRecipientByPubKeyRequest) (*ethpb.FeeRecipientByPubKeyResponse, error) {
|
||||
return c.beaconNodeValidatorClient.GetFeeRecipientByPubKey(ctx, in)
|
||||
return c.getClient().GetFeeRecipientByPubKey(ctx, in)
|
||||
}
|
||||
|
||||
func (c *grpcValidatorClient) SyncCommitteeContribution(ctx context.Context, in *ethpb.SyncCommitteeContributionRequest) (*ethpb.SyncCommitteeContribution, error) {
|
||||
return c.beaconNodeValidatorClient.GetSyncCommitteeContribution(ctx, in)
|
||||
return c.getClient().GetSyncCommitteeContribution(ctx, in)
|
||||
}
|
||||
|
||||
func (c *grpcValidatorClient) SyncMessageBlockRoot(ctx context.Context, in *empty.Empty) (*ethpb.SyncMessageBlockRootResponse, error) {
|
||||
return c.beaconNodeValidatorClient.GetSyncMessageBlockRoot(ctx, in)
|
||||
return c.getClient().GetSyncMessageBlockRoot(ctx, in)
|
||||
}
|
||||
|
||||
func (c *grpcValidatorClient) SyncSubcommitteeIndex(ctx context.Context, in *ethpb.SyncSubcommitteeIndexRequest) (*ethpb.SyncSubcommitteeIndexResponse, error) {
|
||||
return c.beaconNodeValidatorClient.GetSyncSubcommitteeIndex(ctx, in)
|
||||
return c.getClient().GetSyncSubcommitteeIndex(ctx, in)
|
||||
}
|
||||
|
||||
func (c *grpcValidatorClient) MultipleValidatorStatus(ctx context.Context, in *ethpb.MultipleValidatorStatusRequest) (*ethpb.MultipleValidatorStatusResponse, error) {
|
||||
return c.beaconNodeValidatorClient.MultipleValidatorStatus(ctx, in)
|
||||
return c.getClient().MultipleValidatorStatus(ctx, in)
|
||||
}
|
||||
|
||||
func (c *grpcValidatorClient) PrepareBeaconProposer(ctx context.Context, in *ethpb.PrepareBeaconProposerRequest) (*empty.Empty, error) {
|
||||
return c.beaconNodeValidatorClient.PrepareBeaconProposer(ctx, in)
|
||||
return c.getClient().PrepareBeaconProposer(ctx, in)
|
||||
}
|
||||
|
||||
func (c *grpcValidatorClient) ProposeAttestation(ctx context.Context, in *ethpb.Attestation) (*ethpb.AttestResponse, error) {
|
||||
return c.beaconNodeValidatorClient.ProposeAttestation(ctx, in)
|
||||
return c.getClient().ProposeAttestation(ctx, in)
|
||||
}
|
||||
|
||||
func (c *grpcValidatorClient) ProposeAttestationElectra(ctx context.Context, in *ethpb.SingleAttestation) (*ethpb.AttestResponse, error) {
|
||||
return c.beaconNodeValidatorClient.ProposeAttestationElectra(ctx, in)
|
||||
return c.getClient().ProposeAttestationElectra(ctx, in)
|
||||
}
|
||||
|
||||
func (c *grpcValidatorClient) ProposeBeaconBlock(ctx context.Context, in *ethpb.GenericSignedBeaconBlock) (*ethpb.ProposeResponse, error) {
|
||||
return c.beaconNodeValidatorClient.ProposeBeaconBlock(ctx, in)
|
||||
return c.getClient().ProposeBeaconBlock(ctx, in)
|
||||
}
|
||||
|
||||
func (c *grpcValidatorClient) ProposeExit(ctx context.Context, in *ethpb.SignedVoluntaryExit) (*ethpb.ProposeExitResponse, error) {
|
||||
return c.beaconNodeValidatorClient.ProposeExit(ctx, in)
|
||||
return c.getClient().ProposeExit(ctx, in)
|
||||
}
|
||||
|
||||
func (c *grpcValidatorClient) StreamBlocksAltair(ctx context.Context, in *ethpb.StreamBlocksRequest) (ethpb.BeaconNodeValidator_StreamBlocksAltairClient, error) {
|
||||
return c.beaconNodeValidatorClient.StreamBlocksAltair(ctx, in)
|
||||
return c.getClient().StreamBlocksAltair(ctx, in)
|
||||
}
|
||||
|
||||
func (c *grpcValidatorClient) SubmitAggregateSelectionProof(ctx context.Context, in *ethpb.AggregateSelectionRequest, _ primitives.ValidatorIndex, _ uint64) (*ethpb.AggregateSelectionResponse, error) {
|
||||
return c.beaconNodeValidatorClient.SubmitAggregateSelectionProof(ctx, in)
|
||||
return c.getClient().SubmitAggregateSelectionProof(ctx, in)
|
||||
}
|
||||
|
||||
func (c *grpcValidatorClient) SubmitAggregateSelectionProofElectra(ctx context.Context, in *ethpb.AggregateSelectionRequest, _ primitives.ValidatorIndex, _ uint64) (*ethpb.AggregateSelectionElectraResponse, error) {
|
||||
return c.beaconNodeValidatorClient.SubmitAggregateSelectionProofElectra(ctx, in)
|
||||
return c.getClient().SubmitAggregateSelectionProofElectra(ctx, in)
|
||||
}
|
||||
|
||||
func (c *grpcValidatorClient) SubmitSignedAggregateSelectionProof(ctx context.Context, in *ethpb.SignedAggregateSubmitRequest) (*ethpb.SignedAggregateSubmitResponse, error) {
|
||||
return c.beaconNodeValidatorClient.SubmitSignedAggregateSelectionProof(ctx, in)
|
||||
return c.getClient().SubmitSignedAggregateSelectionProof(ctx, in)
|
||||
}
|
||||
|
||||
func (c *grpcValidatorClient) SubmitSignedAggregateSelectionProofElectra(ctx context.Context, in *ethpb.SignedAggregateSubmitElectraRequest) (*ethpb.SignedAggregateSubmitResponse, error) {
|
||||
return c.beaconNodeValidatorClient.SubmitSignedAggregateSelectionProofElectra(ctx, in)
|
||||
return c.getClient().SubmitSignedAggregateSelectionProofElectra(ctx, in)
|
||||
}
|
||||
|
||||
func (c *grpcValidatorClient) SubmitSignedContributionAndProof(ctx context.Context, in *ethpb.SignedContributionAndProof) (*empty.Empty, error) {
|
||||
return c.beaconNodeValidatorClient.SubmitSignedContributionAndProof(ctx, in)
|
||||
return c.getClient().SubmitSignedContributionAndProof(ctx, in)
|
||||
}
|
||||
|
||||
func (c *grpcValidatorClient) SubmitSyncMessage(ctx context.Context, in *ethpb.SyncCommitteeMessage) (*empty.Empty, error) {
|
||||
return c.beaconNodeValidatorClient.SubmitSyncMessage(ctx, in)
|
||||
return c.getClient().SubmitSyncMessage(ctx, in)
|
||||
}
|
||||
|
||||
func (c *grpcValidatorClient) SubmitValidatorRegistrations(ctx context.Context, in *ethpb.SignedValidatorRegistrationsV1) (*empty.Empty, error) {
|
||||
return c.beaconNodeValidatorClient.SubmitValidatorRegistrations(ctx, in)
|
||||
return c.getClient().SubmitValidatorRegistrations(ctx, in)
|
||||
}
|
||||
|
||||
func (c *grpcValidatorClient) SubscribeCommitteeSubnets(ctx context.Context, in *ethpb.CommitteeSubnetsSubscribeRequest, _ []*ethpb.ValidatorDuty) (*empty.Empty, error) {
|
||||
return c.beaconNodeValidatorClient.SubscribeCommitteeSubnets(ctx, in)
|
||||
return c.getClient().SubscribeCommitteeSubnets(ctx, in)
|
||||
}
|
||||
|
||||
func (c *grpcValidatorClient) ValidatorIndex(ctx context.Context, in *ethpb.ValidatorIndexRequest) (*ethpb.ValidatorIndexResponse, error) {
|
||||
return c.beaconNodeValidatorClient.ValidatorIndex(ctx, in)
|
||||
return c.getClient().ValidatorIndex(ctx, in)
|
||||
}
|
||||
|
||||
func (c *grpcValidatorClient) ValidatorStatus(ctx context.Context, in *ethpb.ValidatorStatusRequest) (*ethpb.ValidatorStatusResponse, error) {
|
||||
return c.beaconNodeValidatorClient.ValidatorStatus(ctx, in)
|
||||
return c.getClient().ValidatorStatus(ctx, in)
|
||||
}
|
||||
|
||||
// Deprecated: Do not use.
|
||||
func (c *grpcValidatorClient) WaitForChainStart(ctx context.Context, in *empty.Empty) (*ethpb.ChainStartResponse, error) {
|
||||
stream, err := c.beaconNodeValidatorClient.WaitForChainStart(ctx, in)
|
||||
stream, err := c.getClient().WaitForChainStart(ctx, in)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(
|
||||
client.ErrConnectionIssue,
|
||||
@@ -260,13 +260,13 @@ func (c *grpcValidatorClient) WaitForChainStart(ctx context.Context, in *empty.E
|
||||
}
|
||||
|
||||
func (c *grpcValidatorClient) AssignValidatorToSubnet(ctx context.Context, in *ethpb.AssignValidatorToSubnetRequest) (*empty.Empty, error) {
|
||||
return c.beaconNodeValidatorClient.AssignValidatorToSubnet(ctx, in)
|
||||
return c.getClient().AssignValidatorToSubnet(ctx, in)
|
||||
}
|
||||
func (c *grpcValidatorClient) AggregatedSigAndAggregationBits(
|
||||
ctx context.Context,
|
||||
in *ethpb.AggregatedSigAndAggregationBitsRequest,
|
||||
) (*ethpb.AggregatedSigAndAggregationBitsResponse, error) {
|
||||
return c.beaconNodeValidatorClient.AggregatedSigAndAggregationBits(ctx, in)
|
||||
return c.getClient().AggregatedSigAndAggregationBits(ctx, in)
|
||||
}
|
||||
|
||||
func (*grpcValidatorClient) AggregatedSelections(context.Context, []iface.BeaconCommitteeSelection) ([]iface.BeaconCommitteeSelection, error) {
|
||||
@@ -277,8 +277,12 @@ func (*grpcValidatorClient) AggregatedSyncSelections(context.Context, []iface.Sy
|
||||
return nil, iface.ErrNotSupported
|
||||
}
|
||||
|
||||
func NewGrpcValidatorClient(cc grpc.ClientConnInterface) iface.ValidatorClient {
|
||||
return &grpcValidatorClient{ethpb.NewBeaconNodeValidatorClient(cc), false}
|
||||
// NewGrpcValidatorClient creates a new gRPC validator client that supports
|
||||
// dynamic connection switching via the NodeConnection's GrpcConnectionProvider.
|
||||
func NewGrpcValidatorClient(conn validatorHelpers.NodeConnection) iface.ValidatorClient {
|
||||
return &grpcValidatorClient{
|
||||
grpcClientManager: newGrpcClientManager(conn, ethpb.NewBeaconNodeValidatorClient),
|
||||
}
|
||||
}
|
||||
|
||||
func (c *grpcValidatorClient) StartEventStream(ctx context.Context, topics []string, eventsChannel chan<- *eventClient.Event) {
|
||||
@@ -308,7 +312,7 @@ func (c *grpcValidatorClient) StartEventStream(ctx context.Context, topics []str
|
||||
log.Warn("gRPC only supports the head topic, other topics will be ignored")
|
||||
}
|
||||
|
||||
stream, err := c.beaconNodeValidatorClient.StreamSlots(ctx, ðpb.StreamSlotsRequest{VerifiedOnly: true})
|
||||
stream, err := c.getClient().StreamSlots(ctx, ðpb.StreamSlotsRequest{VerifiedOnly: true})
|
||||
if err != nil {
|
||||
eventsChannel <- &eventClient.Event{
|
||||
EventType: eventClient.EventConnectionError,
|
||||
@@ -374,11 +378,20 @@ func (c *grpcValidatorClient) EventStreamIsRunning() bool {
|
||||
return c.isEventStreamRunning
|
||||
}
|
||||
|
||||
func (*grpcValidatorClient) Host() string {
|
||||
log.Warn(iface.ErrNotSupported)
|
||||
return ""
|
||||
func (c *grpcValidatorClient) Host() string {
|
||||
return c.grpcClientManager.conn.GetGrpcConnectionProvider().CurrentHost()
|
||||
}
|
||||
|
||||
func (*grpcValidatorClient) SetHost(_ string) {
|
||||
log.Warn(iface.ErrNotSupported)
|
||||
func (c *grpcValidatorClient) SetHost(host string) {
|
||||
provider := c.grpcClientManager.conn.GetGrpcConnectionProvider()
|
||||
// Find the index of the requested host and switch to it
|
||||
for i, h := range provider.Hosts() {
|
||||
if h == host {
|
||||
if err := provider.SwitchHost(i); err != nil {
|
||||
log.WithError(err).WithField("host", host).Error("Failed to set gRPC host")
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
log.WithField("host", host).Warn("Requested gRPC host not found in configured endpoints")
|
||||
}
|
||||
|
||||
@@ -14,8 +14,10 @@ import (
    "github.com/OffchainLabs/prysm/v7/testing/assert"
    mock2 "github.com/OffchainLabs/prysm/v7/testing/mock"
    "github.com/OffchainLabs/prysm/v7/testing/require"
    "github.com/OffchainLabs/prysm/v7/validator/helpers"
    logTest "github.com/sirupsen/logrus/hooks/test"
    "go.uber.org/mock/gomock"
    "google.golang.org/grpc"
    "google.golang.org/protobuf/types/known/emptypb"
)

@@ -133,7 +135,15 @@ func TestWaitForChainStart_StreamSetupFails(t *testing.T) {
        gomock.Any(),
    ).Return(nil, errors.New("failed stream"))

    validatorClient := &grpcValidatorClient{beaconNodeValidatorClient, true}
    validatorClient := &grpcValidatorClient{
        grpcClientManager: newGrpcClientManager(
            helpers.MockNodeConnection(),
            func(_ grpc.ClientConnInterface) eth.BeaconNodeValidatorClient {
                return beaconNodeValidatorClient
            },
        ),
        isEventStreamRunning: true,
    }
    _, err := validatorClient.WaitForChainStart(t.Context(), &emptypb.Empty{})
    want := "could not setup beacon chain ChainStart streaming client"
    assert.ErrorContains(t, want, err)
@@ -146,7 +156,15 @@ func TestStartEventStream(t *testing.T) {
    ctrl := gomock.NewController(t)
    defer ctrl.Finish()
    beaconNodeValidatorClient := mock2.NewMockBeaconNodeValidatorClient(ctrl)
    grpcClient := &grpcValidatorClient{beaconNodeValidatorClient, true}
    grpcClient := &grpcValidatorClient{
        grpcClientManager: newGrpcClientManager(
            helpers.MockNodeConnection(),
            func(_ grpc.ClientConnInterface) eth.BeaconNodeValidatorClient {
                return beaconNodeValidatorClient
            },
        ),
        isEventStreamRunning: true,
    }
    tests := []struct {
        name string
        topics []string

@@ -1,53 +0,0 @@
package client

import (
    "strings"

    "google.golang.org/grpc/resolver"
)

// Modification of a default grpc passthrough resolver (google.golang.org/grpc/resolver/passthrough) allowing to use multiple addresses
// in grpc endpoint. Example:
// conn, err := grpc.DialContext(ctx, "127.0.0.1:4000,127.0.0.1:4001", grpc.WithInsecure(), grpc.WithResolvers(&multipleEndpointsGrpcResolverBuilder{}))
// It can be used with any grpc load balancer (pick_first, round_robin). Default is pick_first.
// Round robin can be used by adding the following option:
// grpc.WithDefaultServiceConfig("{\"loadBalancingConfig\":[{\"round_robin\":{}}]}")
type multipleEndpointsGrpcResolverBuilder struct{}

// Build creates and starts multiple endpoints resolver.
func (*multipleEndpointsGrpcResolverBuilder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) {
    r := &multipleEndpointsGrpcResolver{
        target: target,
        cc: cc,
    }
    r.start()
    return r, nil
}

// Scheme returns default scheme.
func (*multipleEndpointsGrpcResolverBuilder) Scheme() string {
    return resolver.GetDefaultScheme()
}

type multipleEndpointsGrpcResolver struct {
    target resolver.Target
    cc resolver.ClientConn
}

func (r *multipleEndpointsGrpcResolver) start() {
    ep := r.target.Endpoint()
    endpoints := strings.Split(ep, ",")
    var addrs []resolver.Address
    for _, endpoint := range endpoints {
        addrs = append(addrs, resolver.Address{Addr: endpoint, ServerName: endpoint})
    }
    if err := r.cc.UpdateState(resolver.State{Addresses: addrs}); err != nil {
        log.WithError(err).Error("Failed to update grpc connection state")
    }
}

// ResolveNow --
func (*multipleEndpointsGrpcResolver) ResolveNow(_ resolver.ResolveNowOptions) {}

// Close --
func (*multipleEndpointsGrpcResolver) Close() {}

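Illustrative sketch (not part of this diff): with the multi-endpoint resolver deleted above, failover between configured beacon nodes becomes an explicit SwitchHost call on the GrpcConnectionProvider rather than resolver-level load balancing inside a single ClientConn. rotateHost is a hypothetical helper written against the provider methods exercised elsewhere in this diff.

package client

import (
    grpcutil "github.com/OffchainLabs/prysm/v7/api/grpc"
)

// rotateHost advances the provider to the next configured host, wrapping around
// at the end of the list; with a single host it is a no-op.
func rotateHost(provider grpcutil.GrpcConnectionProvider) error {
    hosts := provider.Hosts()
    if len(hosts) <= 1 {
        return nil
    }
    current := provider.CurrentHost()
    for i, h := range hosts {
        if h == current {
            return provider.SwitchHost((i + 1) % len(hosts))
        }
    }
    return provider.SwitchHost(0)
}
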
@@ -8,11 +8,10 @@ import (
    validatorHelpers "github.com/OffchainLabs/prysm/v7/validator/helpers"
)

func NewNodeClient(validatorConn validatorHelpers.NodeConnection, jsonRestHandler beaconApi.RestHandler) iface.NodeClient {
    grpcClient := grpcApi.NewNodeClient(validatorConn.GetGrpcClientConn())
func NewNodeClient(validatorConn validatorHelpers.NodeConnection) iface.NodeClient {
    grpcClient := grpcApi.NewNodeClient(validatorConn)
    if features.Get().EnableBeaconRESTApi {
        return beaconApi.NewNodeClientWithFallback(jsonRestHandler, grpcClient)
    } else {
        return grpcClient
        return beaconApi.NewNodeClientWithFallback(validatorConn.GetRestHandler(), grpcClient)
    }
    return grpcClient
}

@@ -2,13 +2,11 @@ package client
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
api "github.com/OffchainLabs/prysm/v7/api/client"
|
||||
eventClient "github.com/OffchainLabs/prysm/v7/api/client/event"
|
||||
grpcutil "github.com/OffchainLabs/prysm/v7/api/grpc"
|
||||
"github.com/OffchainLabs/prysm/v7/api/rest"
|
||||
"github.com/OffchainLabs/prysm/v7/async/event"
|
||||
lruwrpr "github.com/OffchainLabs/prysm/v7/cache/lru"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
@@ -17,7 +15,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/validator/accounts/wallet"
|
||||
beaconApi "github.com/OffchainLabs/prysm/v7/validator/client/beacon-api"
|
||||
beaconChainClientFactory "github.com/OffchainLabs/prysm/v7/validator/client/beacon-chain-client-factory"
|
||||
"github.com/OffchainLabs/prysm/v7/validator/client/iface"
|
||||
nodeclientfactory "github.com/OffchainLabs/prysm/v7/validator/client/node-client-factory"
|
||||
@@ -35,7 +32,6 @@ import (
|
||||
grpcprometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
|
||||
"github.com/pkg/errors"
|
||||
"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
|
||||
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/protobuf/proto"
|
||||
@@ -72,6 +68,7 @@ type Config struct {
|
||||
DB db.Database
|
||||
Wallet *wallet.Wallet
|
||||
WalletInitializedFeed *event.Feed
|
||||
Conn validatorHelpers.NodeConnection // Optional: inject connection for testing
|
||||
MaxHealthChecks int
|
||||
GRPCMaxCallRecvMsgSize int
|
||||
GRPCRetries uint
|
||||
@@ -122,6 +119,12 @@ func NewValidatorService(ctx context.Context, cfg *Config) (*ValidatorService, e
|
||||
maxHealthChecks: cfg.MaxHealthChecks,
|
||||
}
|
||||
|
||||
// Use injected connection if provided (for testing)
|
||||
if cfg.Conn != nil {
|
||||
s.conn = cfg.Conn
|
||||
return s, nil
|
||||
}
|
||||
|
||||
dialOpts := ConstructDialOptions(
|
||||
cfg.GRPCMaxCallRecvMsgSize,
|
||||
cfg.BeaconNodeCert,
|
||||
@@ -134,19 +137,21 @@ func NewValidatorService(ctx context.Context, cfg *Config) (*ValidatorService, e
|
||||
|
||||
s.ctx = grpcutil.AppendHeaders(ctx, cfg.GRPCHeaders)
|
||||
|
||||
grpcConn, err := grpc.DialContext(ctx, cfg.BeaconNodeGRPCEndpoint, dialOpts...)
|
||||
conn, err := validatorHelpers.NewNodeConnection(
|
||||
validatorHelpers.WithGrpc(s.ctx, cfg.BeaconNodeGRPCEndpoint, dialOpts),
|
||||
validatorHelpers.WithREST(cfg.BeaconApiEndpoint,
|
||||
rest.WithHttpHeaders(cfg.BeaconApiHeaders),
|
||||
rest.WithHttpTimeout(cfg.BeaconApiTimeout),
|
||||
rest.WithTracing(),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
return s, err
|
||||
}
|
||||
if cfg.BeaconNodeCert != "" {
|
||||
if cfg.BeaconNodeCert != "" && cfg.BeaconNodeGRPCEndpoint != "" {
|
||||
log.Info("Established secure gRPC connection")
|
||||
}
|
||||
s.conn = validatorHelpers.NewNodeConnection(
|
||||
grpcConn,
|
||||
cfg.BeaconApiEndpoint,
|
||||
validatorHelpers.WithBeaconApiHeaders(cfg.BeaconApiHeaders),
|
||||
validatorHelpers.WithBeaconApiTimeout(cfg.BeaconApiTimeout),
|
||||
)
|
||||
s.conn = conn
|
||||
|
||||
return s, nil
|
||||
}
|
||||
@@ -181,20 +186,13 @@ func (v *ValidatorService) Start() {
|
||||
return
|
||||
}
|
||||
|
||||
u := strings.ReplaceAll(v.conn.GetBeaconApiUrl(), " ", "")
|
||||
hosts := strings.Split(u, ",")
|
||||
if len(hosts) == 0 {
|
||||
log.WithError(err).Error("No API hosts provided")
|
||||
restProvider := v.conn.GetRestConnectionProvider()
|
||||
if restProvider == nil || len(restProvider.Hosts()) == 0 {
|
||||
log.Error("No REST API hosts provided")
|
||||
return
|
||||
}
|
||||
|
||||
headersTransport := api.NewCustomHeadersTransport(http.DefaultTransport, v.conn.GetBeaconApiHeaders())
|
||||
restHandler := beaconApi.NewBeaconApiRestHandler(
|
||||
http.Client{Timeout: v.conn.GetBeaconApiTimeout(), Transport: otelhttp.NewTransport(headersTransport)},
|
||||
hosts[0],
|
||||
)
|
||||
|
||||
validatorClient := validatorclientfactory.NewValidatorClient(v.conn, restHandler)
|
||||
validatorClient := validatorclientfactory.NewValidatorClient(v.conn)
|
||||
|
||||
v.validator = &validator{
|
||||
slotFeed: new(event.Feed),
|
||||
@@ -208,12 +206,12 @@ func (v *ValidatorService) Start() {
|
||||
graffiti: v.graffiti,
|
||||
graffitiStruct: v.graffitiStruct,
|
||||
graffitiOrderedIndex: graffitiOrderedIndex,
|
||||
beaconNodeHosts: hosts,
|
||||
conn: v.conn,
|
||||
currentHostIndex: 0,
|
||||
validatorClient: validatorClient,
|
||||
chainClient: beaconChainClientFactory.NewChainClient(v.conn, restHandler),
|
||||
nodeClient: nodeclientfactory.NewNodeClient(v.conn, restHandler),
|
||||
prysmChainClient: beaconChainClientFactory.NewPrysmChainClient(v.conn, restHandler),
|
||||
chainClient: beaconChainClientFactory.NewChainClient(v.conn),
|
||||
nodeClient: nodeclientfactory.NewNodeClient(v.conn),
|
||||
prysmChainClient: beaconChainClientFactory.NewPrysmChainClient(v.conn),
|
||||
db: v.db,
|
||||
km: nil,
|
||||
web3SignerConfig: v.web3SignerConfig,
|
||||
@@ -369,7 +367,6 @@ func ConstructDialOptions(
|
||||
grpcprometheus.StreamClientInterceptor,
|
||||
grpcretry.StreamClientInterceptor(),
|
||||
),
|
||||
grpc.WithResolvers(&multipleEndpointsGrpcResolverBuilder{}),
|
||||
}
|
||||
|
||||
dialOpts = append(dialOpts, extraOpts...)
|
||||
|
||||
@@ -33,7 +33,10 @@ func TestStop_CancelsContext(t *testing.T) {

func TestNew_Insecure(t *testing.T) {
    hook := logTest.NewGlobal()
    _, err := NewValidatorService(t.Context(), &Config{})
    _, err := NewValidatorService(t.Context(), &Config{
        BeaconNodeGRPCEndpoint: "localhost:4000",
        BeaconApiEndpoint: "http://localhost:3500",
    })
    require.NoError(t, err)
    require.LogsContain(t, hook, "You are using an insecure gRPC connection")
}
@@ -58,7 +61,11 @@ func TestStart_GrpcHeaders(t *testing.T) {
    "Authorization", "this is a valid value",
    },
} {
    cfg := &Config{GRPCHeaders: strings.Split(input, ",")}
    cfg := &Config{
        BeaconNodeGRPCEndpoint: "localhost:4000",
        BeaconApiEndpoint: "http://localhost:3500",
        GRPCHeaders: strings.Split(input, ","),
    }
    validatorService, err := NewValidatorService(ctx, cfg)
    require.NoError(t, err)
    md, _ := metadata.FromOutgoingContext(validatorService.ctx)

@@ -10,12 +10,10 @@ import (

func NewValidatorClient(
    validatorConn validatorHelpers.NodeConnection,
    jsonRestHandler beaconApi.RestHandler,
    opt ...beaconApi.ValidatorClientOpt,
) iface.ValidatorClient {
    if features.Get().EnableBeaconRESTApi {
        return beaconApi.NewBeaconApiValidatorClient(jsonRestHandler, opt...)
    } else {
        return grpcApi.NewGrpcValidatorClient(validatorConn.GetGrpcClientConn())
        return beaconApi.NewBeaconApiValidatorClient(validatorConn.GetRestHandler(), opt...)
    }
    return grpcApi.NewGrpcValidatorClient(validatorConn)
}

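Illustrative end-to-end wiring (not part of this diff), following the NewValidatorService pattern shown later in this compare: build a NodeConnection with both providers and let the factory pick the implementation based on the EnableBeaconRESTApi flag. The package clause, endpoints, timeout, and exampleNewValidatorClient name are placeholders.

package validator_client_factory // hypothetical package clause for this sketch

import (
    "context"
    "time"

    "github.com/OffchainLabs/prysm/v7/api/rest"
    "github.com/OffchainLabs/prysm/v7/validator/client/iface"
    validatorHelpers "github.com/OffchainLabs/prysm/v7/validator/helpers"
    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"
)

func exampleNewValidatorClient(ctx context.Context) (iface.ValidatorClient, error) {
    // Placeholder endpoints and dial options; real values come from the validator Config.
    conn, err := validatorHelpers.NewNodeConnection(
        validatorHelpers.WithGrpc(ctx, "localhost:4000", []grpc.DialOption{
            grpc.WithTransportCredentials(insecure.NewCredentials()),
        }),
        validatorHelpers.WithREST("http://localhost:3500", rest.WithHttpTimeout(30*time.Second)),
    )
    if err != nil {
        return nil, err
    }
    // Returns the REST client when EnableBeaconRESTApi is set, otherwise the gRPC
    // client backed by the connection provider.
    return NewValidatorClient(conn), nil
}
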
@@ -38,6 +38,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/validator/db"
|
||||
dbCommon "github.com/OffchainLabs/prysm/v7/validator/db/common"
|
||||
"github.com/OffchainLabs/prysm/v7/validator/graffiti"
|
||||
validatorHelpers "github.com/OffchainLabs/prysm/v7/validator/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/validator/keymanager"
|
||||
"github.com/OffchainLabs/prysm/v7/validator/keymanager/local"
|
||||
remoteweb3signer "github.com/OffchainLabs/prysm/v7/validator/keymanager/remote-web3signer"
|
||||
@@ -101,9 +102,9 @@ type validator struct {
|
||||
pubkeyToStatus map[[fieldparams.BLSPubkeyLength]byte]*validatorStatus
|
||||
wallet *wallet.Wallet
|
||||
walletInitializedChan chan *wallet.Wallet
|
||||
currentHostIndex uint64
|
||||
walletInitializedFeed *event.Feed
|
||||
graffitiOrderedIndex uint64
|
||||
conn validatorHelpers.NodeConnection
|
||||
submittedAtts map[submittedAttKey]*submittedAtt
|
||||
validatorsRegBatchSize int
|
||||
validatorClient iface.ValidatorClient
|
||||
@@ -114,7 +115,7 @@ type validator struct {
|
||||
km keymanager.IKeymanager
|
||||
accountChangedSub event.Subscription
|
||||
ticker slots.Ticker
|
||||
beaconNodeHosts []string
|
||||
currentHostIndex uint64
|
||||
genesisTime time.Time
|
||||
graffiti []byte
|
||||
voteStats voteStats
|
||||
@@ -1311,34 +1312,64 @@ func (v *validator) Host() string {
|
||||
}
|
||||
|
||||
func (v *validator) changeHost() {
|
||||
next := (v.currentHostIndex + 1) % uint64(len(v.beaconNodeHosts))
|
||||
hosts := v.hosts()
|
||||
if len(hosts) <= 1 {
|
||||
return
|
||||
}
|
||||
next := (v.currentHostIndex + 1) % uint64(len(hosts))
|
||||
log.WithFields(logrus.Fields{
|
||||
"currentHost": v.beaconNodeHosts[v.currentHostIndex],
|
||||
"nextHost": v.beaconNodeHosts[next],
|
||||
"currentHost": hosts[v.currentHostIndex],
|
||||
"nextHost": hosts[next],
|
||||
}).Warn("Beacon node is not responding, switching host")
|
||||
v.validatorClient.SetHost(v.beaconNodeHosts[next])
|
||||
v.validatorClient.SetHost(hosts[next])
|
||||
v.currentHostIndex = next
|
||||
}
|
||||
|
||||
// hosts returns the list of configured beacon node hosts.
|
||||
func (v *validator) hosts() []string {
|
||||
if features.Get().EnableBeaconRESTApi {
|
||||
return v.conn.GetRestConnectionProvider().Hosts()
|
||||
}
|
||||
return v.conn.GetGrpcConnectionProvider().Hosts()
|
||||
}
|
||||
|
||||
// numHosts returns the number of configured beacon node hosts.
|
||||
func (v *validator) numHosts() int {
|
||||
return len(v.hosts())
|
||||
}
|
||||
|
||||
func (v *validator) FindHealthyHost(ctx context.Context) bool {
|
||||
// Tail-recursive closure keeps retry count private.
|
||||
var check func(remaining int) bool
|
||||
check = func(remaining int) bool {
|
||||
if v.nodeClient.IsReady(ctx) { // ready → done
|
||||
numHosts := v.numHosts()
|
||||
startingHost := v.Host()
|
||||
attemptedHosts := []string{}
|
||||
|
||||
// Check all hosts for a fully synced node
|
||||
for i := range numHosts {
|
||||
if v.nodeClient.IsReady(ctx) {
|
||||
if len(attemptedHosts) > 0 {
|
||||
log.WithFields(logrus.Fields{
|
||||
"previousHost": startingHost,
|
||||
"newHost": v.Host(),
|
||||
"failedAttempts": attemptedHosts,
|
||||
}).Info("Failover succeeded: connected to healthy beacon node")
|
||||
}
|
||||
return true
|
||||
}
|
||||
if len(v.beaconNodeHosts) == 1 && features.Get().EnableBeaconRESTApi {
|
||||
log.WithField("host", v.Host()).Warn("Beacon node is not responding, no backup node configured")
|
||||
return false
|
||||
log.WithField("host", v.Host()).Debug("Beacon node not fully synced")
|
||||
attemptedHosts = append(attemptedHosts, v.Host())
|
||||
|
||||
// Try next host if not the last iteration
|
||||
if i < numHosts-1 {
|
||||
v.changeHost()
|
||||
}
|
||||
if remaining == 0 || !features.Get().EnableBeaconRESTApi {
|
||||
return false // exhausted or REST disabled
|
||||
}
|
||||
v.changeHost()
|
||||
return check(remaining - 1) // recurse
|
||||
}
|
||||
|
||||
return check(len(v.beaconNodeHosts))
|
||||
if numHosts == 1 {
|
||||
log.WithField("host", v.Host()).Warn("Beacon node is not fully synced, no backup node configured")
|
||||
} else {
|
||||
log.Warn("No fully synced beacon node found")
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (v *validator) filterAndCacheActiveKeys(ctx context.Context, pubkeys [][fieldparams.BLSPubkeyLength]byte, slot primitives.Slot) ([][fieldparams.BLSPubkeyLength]byte, error) {
|
||||
|
||||
@@ -16,6 +16,8 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
grpcutil "github.com/OffchainLabs/prysm/v7/api/grpc"
|
||||
"github.com/OffchainLabs/prysm/v7/api/rest"
|
||||
"github.com/OffchainLabs/prysm/v7/api/server/structs"
|
||||
"github.com/OffchainLabs/prysm/v7/async/event"
|
||||
"github.com/OffchainLabs/prysm/v7/cmd/validator/flags"
|
||||
@@ -37,6 +39,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/validator/accounts/wallet"
|
||||
"github.com/OffchainLabs/prysm/v7/validator/client/iface"
|
||||
dbTest "github.com/OffchainLabs/prysm/v7/validator/db/testing"
|
||||
validatorHelpers "github.com/OffchainLabs/prysm/v7/validator/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/validator/keymanager"
|
||||
"github.com/OffchainLabs/prysm/v7/validator/keymanager/local"
|
||||
remoteweb3signer "github.com/OffchainLabs/prysm/v7/validator/keymanager/remote-web3signer"
|
||||
@@ -2792,18 +2795,27 @@ func TestValidator_Host(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestValidator_ChangeHost(t *testing.T) {
|
||||
// Enable REST API mode for this test since changeHost only calls SetHost in REST API mode
|
||||
resetCfg := features.InitWithReset(&features.Flags{EnableBeaconRESTApi: true})
|
||||
defer resetCfg()
|
||||
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
hosts := []string{"http://localhost:8080", "http://localhost:8081"}
|
||||
restProvider := &rest.MockRestProvider{MockHosts: hosts}
|
||||
conn, err := validatorHelpers.NewNodeConnection(validatorHelpers.WithRestProvider(restProvider))
|
||||
require.NoError(t, err)
|
||||
|
||||
client := validatormock.NewMockValidatorClient(ctrl)
|
||||
v := validator{
|
||||
validatorClient: client,
|
||||
beaconNodeHosts: []string{"http://localhost:8080", "http://localhost:8081"},
|
||||
conn: conn,
|
||||
currentHostIndex: 0,
|
||||
}
|
||||
|
||||
client.EXPECT().SetHost(v.beaconNodeHosts[1])
|
||||
client.EXPECT().SetHost(v.beaconNodeHosts[0])
|
||||
client.EXPECT().SetHost(hosts[1])
|
||||
client.EXPECT().SetHost(hosts[0])
|
||||
v.changeHost()
|
||||
assert.Equal(t, uint64(1), v.currentHostIndex)
|
||||
v.changeHost()
|
||||
@@ -2838,12 +2850,16 @@ func TestUpdateValidatorStatusCache(t *testing.T) {
|
||||
gomock.Any(),
|
||||
gomock.Any()).Return(mockResponse, nil)
|
||||
|
||||
mockProvider := &grpcutil.MockGrpcProvider{MockHosts: []string{"localhost:4000", "localhost:4001"}}
|
||||
conn, err := validatorHelpers.NewNodeConnection(validatorHelpers.WithGrpcProvider(mockProvider))
|
||||
require.NoError(t, err)
|
||||
|
||||
v := &validator{
|
||||
validatorClient: client,
|
||||
beaconNodeHosts: []string{"http://localhost:8080", "http://localhost:8081"},
|
||||
conn: conn,
|
||||
currentHostIndex: 0,
|
||||
pubkeyToStatus: map[[fieldparams.BLSPubkeyLength]byte]*validatorStatus{
|
||||
[fieldparams.BLSPubkeyLength]byte{0x03}: &validatorStatus{ // add non existent key and status to cache, should be fully removed on update
|
||||
[fieldparams.BLSPubkeyLength]byte{0x03}: { // add non existent key and status to cache, should be fully removed on update
|
||||
publicKey: []byte{0x03},
|
||||
status: ðpb.ValidatorStatusResponse{
|
||||
Status: ethpb.ValidatorStatus_ACTIVE,
|
||||
@@ -2853,7 +2869,7 @@ func TestUpdateValidatorStatusCache(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
err := v.updateValidatorStatusCache(ctx, pubkeys)
|
||||
err = v.updateValidatorStatusCache(ctx, pubkeys)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// make sure the nonexistent key is fully removed
|
||||
|
||||
@@ -10,6 +10,8 @@ go_library(
    importpath = "github.com/OffchainLabs/prysm/v7/validator/helpers",
    visibility = ["//visibility:public"],
    deps = [
        "//api/grpc:go_default_library",
        "//api/rest:go_default_library",
        "//config/fieldparams:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//validator/db/iface:go_default_library",
@@ -24,18 +26,23 @@ go_test(
    srcs = [
        "converts_test.go",
        "metadata_test.go",
        "node_connection_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        "//api/grpc:go_default_library",
        "//api/rest:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/proposer:go_default_library",
        "//consensus-types/interfaces:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//testing/assert:go_default_library",
        "//testing/require:go_default_library",
        "//validator/db/common:go_default_library",
        "//validator/db/iface:go_default_library",
        "//validator/slashing-protection-history/format:go_default_library",
        "@com_github_prometheus_client_golang//prometheus:go_default_library",
        "@org_golang_google_grpc//:go_default_library",
    ],
)

@@ -1,78 +1,152 @@
|
||||
package helpers
|
||||
|
||||
import (
|
||||
"time"
|
||||
"context"
|
||||
"net/http"
|
||||
|
||||
grpcutil "github.com/OffchainLabs/prysm/v7/api/grpc"
|
||||
"github.com/OffchainLabs/prysm/v7/api/rest"
|
||||
"github.com/pkg/errors"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
// Use an interface with a private dummy function to force all other packages to call NewNodeConnection
|
||||
// NodeConnection provides access to both gRPC and REST API connections to a beacon node.
|
||||
type NodeConnection interface {
|
||||
// GetGrpcClientConn returns the current gRPC client connection.
|
||||
// Returns nil if no gRPC provider is configured.
|
||||
GetGrpcClientConn() *grpc.ClientConn
|
||||
GetBeaconApiUrl() string
|
||||
GetBeaconApiHeaders() map[string][]string
|
||||
setBeaconApiHeaders(map[string][]string)
|
||||
GetBeaconApiTimeout() time.Duration
|
||||
setBeaconApiTimeout(time.Duration)
|
||||
dummy()
|
||||
// GetGrpcConnectionProvider returns the gRPC connection provider.
|
||||
GetGrpcConnectionProvider() grpcutil.GrpcConnectionProvider
|
||||
// GetRestConnectionProvider returns the REST connection provider.
|
||||
GetRestConnectionProvider() rest.RestConnectionProvider
|
||||
// GetRestHandler returns the REST handler for making API requests.
|
||||
// Returns nil if no REST provider is configured.
|
||||
GetRestHandler() rest.RestHandler
|
||||
// GetHttpClient returns the configured HTTP client for REST API requests.
|
||||
// Returns nil if no REST provider is configured.
|
||||
GetHttpClient() *http.Client
|
||||
}
|
||||
|
||||
type nodeConnection struct {
|
||||
grpcClientConn *grpc.ClientConn
|
||||
beaconApiUrl string
|
||||
beaconApiHeaders map[string][]string
|
||||
beaconApiTimeout time.Duration
|
||||
}
|
||||
|
||||
// NodeConnectionOption is a functional option for configuring the node connection.
|
||||
type NodeConnectionOption func(nc NodeConnection)
|
||||
|
||||
// WithBeaconApiHeaders sets the HTTP headers that should be sent to the server along with each request.
|
||||
func WithBeaconApiHeaders(headers map[string][]string) NodeConnectionOption {
|
||||
return func(nc NodeConnection) {
|
||||
nc.setBeaconApiHeaders(headers)
|
||||
}
|
||||
}
|
||||
|
||||
// WithBeaconApiTimeout sets the HTTP request timeout.
|
||||
func WithBeaconApiTimeout(timeout time.Duration) NodeConnectionOption {
|
||||
return func(nc NodeConnection) {
|
||||
nc.setBeaconApiTimeout(timeout)
|
||||
}
|
||||
grpcConnectionProvider grpcutil.GrpcConnectionProvider
|
||||
restConnectionProvider rest.RestConnectionProvider
|
||||
}
|
||||
|
||||
func (c *nodeConnection) GetGrpcClientConn() *grpc.ClientConn {
|
||||
return c.grpcClientConn
|
||||
}
|
||||
|
||||
func (c *nodeConnection) GetBeaconApiUrl() string {
|
||||
return c.beaconApiUrl
|
||||
}
|
||||
|
||||
func (c *nodeConnection) GetBeaconApiHeaders() map[string][]string {
|
||||
return c.beaconApiHeaders
|
||||
}
|
||||
|
||||
func (c *nodeConnection) setBeaconApiHeaders(headers map[string][]string) {
|
||||
c.beaconApiHeaders = headers
|
||||
}
|
||||
|
||||
func (c *nodeConnection) GetBeaconApiTimeout() time.Duration {
|
||||
return c.beaconApiTimeout
|
||||
}
|
||||
|
||||
func (c *nodeConnection) setBeaconApiTimeout(timeout time.Duration) {
|
||||
c.beaconApiTimeout = timeout
|
||||
}
|
||||
|
||||
func (*nodeConnection) dummy() {}
|
||||
|
||||
func NewNodeConnection(grpcConn *grpc.ClientConn, beaconApiUrl string, opts ...NodeConnectionOption) NodeConnection {
|
||||
conn := &nodeConnection{}
|
||||
conn.grpcClientConn = grpcConn
|
||||
conn.beaconApiUrl = beaconApiUrl
|
||||
for _, opt := range opts {
|
||||
opt(conn)
|
||||
if c.grpcConnectionProvider == nil {
|
||||
return nil
|
||||
}
|
||||
return c.grpcConnectionProvider.CurrentConn()
|
||||
}
|
||||
|
||||
func (c *nodeConnection) GetGrpcConnectionProvider() grpcutil.GrpcConnectionProvider {
|
||||
return c.grpcConnectionProvider
|
||||
}
|
||||
|
||||
func (c *nodeConnection) GetRestConnectionProvider() rest.RestConnectionProvider {
|
||||
return c.restConnectionProvider
|
||||
}
|
||||
|
||||
func (c *nodeConnection) GetRestHandler() rest.RestHandler {
|
||||
if c.restConnectionProvider == nil {
|
||||
return nil
|
||||
}
|
||||
return c.restConnectionProvider.RestHandler()
|
||||
}
|
||||
|
||||
func (c *nodeConnection) GetHttpClient() *http.Client {
|
||||
if c.restConnectionProvider == nil {
|
||||
return nil
|
||||
}
|
||||
return c.restConnectionProvider.HttpClient()
|
||||
}
|
||||
|
||||
// nodeConnectionBuilder is used internally to build a NodeConnection.
|
||||
type nodeConnectionBuilder struct {
|
||||
grpcProvider grpcutil.GrpcConnectionProvider
|
||||
restProvider rest.RestConnectionProvider
|
||||
}
|
||||
|
||||
// NodeConnectionOption is a functional option for configuring a NodeConnection.
|
||||
type NodeConnectionOption func(*nodeConnectionBuilder) error
|
||||
|
||||
// WithGrpc configures a gRPC connection provider for the NodeConnection.
|
||||
// If endpoint is empty, this option is a no-op.
|
||||
func WithGrpc(ctx context.Context, endpoint string, dialOpts []grpc.DialOption) NodeConnectionOption {
|
||||
return func(b *nodeConnectionBuilder) error {
|
||||
if endpoint == "" {
|
||||
return nil
|
||||
}
|
||||
provider, err := grpcutil.NewGrpcConnectionProvider(ctx, endpoint, dialOpts)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to create gRPC connection provider")
|
||||
}
|
||||
b.grpcProvider = provider
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithREST configures a REST connection provider for the NodeConnection.
|
||||
// If endpoint is empty, this option is a no-op.
|
||||
func WithREST(endpoint string, opts ...rest.RestConnectionProviderOption) NodeConnectionOption {
|
||||
return func(b *nodeConnectionBuilder) error {
|
||||
if endpoint == "" {
|
||||
return nil
|
||||
}
|
||||
provider, err := rest.NewRestConnectionProvider(endpoint, opts...)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to create REST connection provider")
|
||||
}
|
||||
b.restProvider = provider
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithGrpcProvider sets a pre-built gRPC connection provider.
|
||||
// This is primarily useful for testing with mock providers.
|
||||
func WithGrpcProvider(provider grpcutil.GrpcConnectionProvider) NodeConnectionOption {
|
||||
return func(b *nodeConnectionBuilder) error {
|
||||
b.grpcProvider = provider
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithRestProvider sets a pre-built REST connection provider.
|
||||
// This is primarily useful for testing with mock providers.
|
||||
func WithRestProvider(provider rest.RestConnectionProvider) NodeConnectionOption {
|
||||
return func(b *nodeConnectionBuilder) error {
|
||||
b.restProvider = provider
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// NewNodeConnection creates a new NodeConnection with the given options.
|
||||
// At least one provider (gRPC or REST) must be configured via options.
|
||||
// Returns an error if no providers are configured.
|
||||
func NewNodeConnection(opts ...NodeConnectionOption) (NodeConnection, error) {
|
||||
b := &nodeConnectionBuilder{}
|
||||
for _, opt := range opts {
|
||||
if err := opt(b); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if b.grpcProvider == nil && b.restProvider == nil {
|
||||
return nil, errors.New("at least one beacon node endpoint must be provided (--beacon-rpc-provider or --beacon-rest-api-provider)")
|
||||
}
|
||||
|
||||
return &nodeConnection{
|
||||
grpcConnectionProvider: b.grpcProvider,
|
||||
restConnectionProvider: b.restProvider,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// MockNodeConnection creates a minimal NodeConnection for testing.
|
||||
// It uses a mock gRPC provider with no actual connection.
|
||||
func MockNodeConnection() NodeConnection {
|
||||
return &nodeConnection{
|
||||
grpcConnectionProvider: &grpcutil.MockGrpcProvider{
|
||||
MockHosts: []string{"mock:4000"},
|
||||
},
|
||||
}
|
||||
return conn
|
||||
}
|
||||
|
||||
validator/helpers/node_connection_test.go (new file, 102 lines)
@@ -0,0 +1,102 @@
|
||||
package helpers

import (
	"context"
	"net/http"
	"testing"

	grpcutil "github.com/OffchainLabs/prysm/v7/api/grpc"
	"github.com/OffchainLabs/prysm/v7/api/rest"
	"github.com/OffchainLabs/prysm/v7/testing/assert"
	"github.com/OffchainLabs/prysm/v7/testing/require"
	"google.golang.org/grpc"
)

func TestNewNodeConnection(t *testing.T) {
	t.Run("with both providers", func(t *testing.T) {
		grpcProvider := &grpcutil.MockGrpcProvider{MockHosts: []string{"localhost:4000"}}
		restProvider := &rest.MockRestProvider{MockHosts: []string{"http://localhost:3500"}}
		conn, err := NewNodeConnection(
			WithGrpcProvider(grpcProvider),
			WithRestProvider(restProvider),
		)
		require.NoError(t, err)

		assert.Equal(t, grpcProvider, conn.GetGrpcConnectionProvider())
		assert.Equal(t, restProvider, conn.GetRestConnectionProvider())
	})

	t.Run("with only rest provider", func(t *testing.T) {
		restProvider := &rest.MockRestProvider{MockHosts: []string{"http://localhost:3500"}}
		conn, err := NewNodeConnection(WithRestProvider(restProvider))
		require.NoError(t, err)

		assert.Equal(t, (grpcutil.GrpcConnectionProvider)(nil), conn.GetGrpcConnectionProvider())
		assert.Equal(t, (*grpc.ClientConn)(nil), conn.GetGrpcClientConn())
		assert.Equal(t, restProvider, conn.GetRestConnectionProvider())
	})

	t.Run("with only grpc provider", func(t *testing.T) {
		grpcProvider := &grpcutil.MockGrpcProvider{MockHosts: []string{"localhost:4000"}}
		conn, err := NewNodeConnection(WithGrpcProvider(grpcProvider))
		require.NoError(t, err)

		assert.Equal(t, grpcProvider, conn.GetGrpcConnectionProvider())
		assert.Equal(t, (rest.RestConnectionProvider)(nil), conn.GetRestConnectionProvider())
		assert.Equal(t, (*http.Client)(nil), conn.GetHttpClient())
	})

	t.Run("with no providers returns error", func(t *testing.T) {
		conn, err := NewNodeConnection()
		require.ErrorContains(t, "at least one beacon node endpoint must be provided", err)
		assert.Equal(t, (NodeConnection)(nil), conn)
	})

	t.Run("with empty endpoints is no-op", func(t *testing.T) {
		// Empty endpoints should be skipped, resulting in no providers
		conn, err := NewNodeConnection(
			WithGrpc(context.Background(), "", nil),
			WithREST(""),
		)
		require.ErrorContains(t, "at least one beacon node endpoint must be provided", err)
		assert.Equal(t, (NodeConnection)(nil), conn)
	})
}

func TestNodeConnection_GetGrpcClientConn(t *testing.T) {
	t.Run("delegates to provider", func(t *testing.T) {
		// We can't easily create a real grpc.ClientConn in tests,
		// but we can verify the delegation works with nil
		grpcProvider := &grpcutil.MockGrpcProvider{MockConn: nil, MockHosts: []string{"localhost:4000"}}
		conn, err := NewNodeConnection(WithGrpcProvider(grpcProvider))
		require.NoError(t, err)

		// Should delegate to provider.CurrentConn()
		assert.Equal(t, grpcProvider.CurrentConn(), conn.GetGrpcClientConn())
	})

	t.Run("returns nil when provider is nil", func(t *testing.T) {
		restProvider := &rest.MockRestProvider{MockHosts: []string{"http://localhost:3500"}}
		conn, err := NewNodeConnection(WithRestProvider(restProvider))
		require.NoError(t, err)
		assert.Equal(t, (*grpc.ClientConn)(nil), conn.GetGrpcClientConn())
	})
}

func TestNodeConnection_GetHttpClient(t *testing.T) {
	t.Run("delegates to provider", func(t *testing.T) {
		mockClient := &http.Client{}
		restProvider := &rest.MockRestProvider{MockClient: mockClient, MockHosts: []string{"http://localhost:3500"}}
		conn, err := NewNodeConnection(WithRestProvider(restProvider))
		require.NoError(t, err)

		assert.Equal(t, mockClient, conn.GetHttpClient())
	})

	t.Run("returns nil when provider is nil", func(t *testing.T) {
		grpcProvider := &grpcutil.MockGrpcProvider{MockHosts: []string{"localhost:4000"}}
		conn, err := NewNodeConnection(WithGrpcProvider(grpcProvider))
		require.NoError(t, err)
		assert.Equal(t, (*http.Client)(nil), conn.GetHttpClient())
	})
}
@@ -46,7 +46,6 @@ go_library(
        "//monitoring/tracing:go_default_library",
        "//runtime:go_default_library",
        "//runtime/prereqs:go_default_library",
        "//runtime/version:go_default_library",
        "//validator/accounts/wallet:go_default_library",
        "//validator/client:go_default_library",
        "//validator/db:go_default_library",

@@ -30,7 +30,6 @@ import (
	"github.com/OffchainLabs/prysm/v7/monitoring/tracing"
	"github.com/OffchainLabs/prysm/v7/runtime"
	"github.com/OffchainLabs/prysm/v7/runtime/prereqs"
	"github.com/OffchainLabs/prysm/v7/runtime/version"
	"github.com/OffchainLabs/prysm/v7/validator/accounts/wallet"
	"github.com/OffchainLabs/prysm/v7/validator/client"
	"github.com/OffchainLabs/prysm/v7/validator/db"
@@ -124,10 +123,6 @@ func NewValidatorClient(cliCtx *cli.Context) (*ValidatorClient, error) {
func (c *ValidatorClient) Start() {
	c.lock.Lock()

	log.WithFields(logrus.Fields{
		"version": version.Version(),
	}).Info("Starting validator node")

	c.services.StartAll()

	stop := c.stop

@@ -41,6 +41,8 @@ func TestNode_Builds(t *testing.T) {
	set.String("wallet-password-file", passwordFile, "path to wallet password")
	set.String("keymanager-kind", "imported", "keymanager kind")
	set.String("verbosity", "debug", "log verbosity")
	set.String("beacon-rpc-provider", "localhost:4000", "beacon node RPC endpoint")
	set.String("beacon-rest-api-provider", "http://localhost:3500", "beacon node REST API endpoint")
	require.NoError(t, set.Set(flags.WalletPasswordFileFlag.Name, passwordFile))
	ctx := cli.NewContext(&app, set, nil)
	opts := []accounts.Option{

@@ -23,9 +23,9 @@ go_library(
    ],
    deps = [
        "//api:go_default_library",
        "//api/client:go_default_library",
        "//api/grpc:go_default_library",
        "//api/pagination:go_default_library",
        "//api/rest:go_default_library",
        "//api/server:go_default_library",
        "//api/server/httprest:go_default_library",
        "//api/server/middleware:go_default_library",
@@ -55,7 +55,6 @@ go_library(
        "//validator/accounts/petnames:go_default_library",
        "//validator/accounts/wallet:go_default_library",
        "//validator/client:go_default_library",
        "//validator/client/beacon-api:go_default_library",
        "//validator/client/beacon-chain-client-factory:go_default_library",
        "//validator/client/iface:go_default_library",
        "//validator/client/node-client-factory:go_default_library",
@@ -79,7 +78,6 @@ go_library(
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@com_github_wealdtech_go_eth2_wallet_encryptor_keystorev4//:go_default_library",
        "@io_opentelemetry_go_contrib_instrumentation_net_http_otelhttp//:go_default_library",
        "@org_golang_google_grpc//:go_default_library",
        "@org_golang_google_grpc//codes:go_default_library",
        "@org_golang_google_grpc//metadata:go_default_library",
@@ -106,6 +104,7 @@ go_test(
    embed = [":go_default_library"],
    deps = [
        "//api:go_default_library",
        "//api/grpc:go_default_library",
        "//async/event:go_default_library",
        "//cmd/validator/flags:go_default_library",
        "//config/features:go_default_library",
@@ -135,6 +134,7 @@ go_test(
        "//validator/db/iface:go_default_library",
        "//validator/db/kv:go_default_library",
        "//validator/db/testing:go_default_library",
        "//validator/helpers:go_default_library",
        "//validator/keymanager:go_default_library",
        "//validator/keymanager/derived:go_default_library",
        "//validator/keymanager/remote-web3signer:go_default_library",

@@ -1,13 +1,10 @@
package rpc

import (
	"net/http"

	api "github.com/OffchainLabs/prysm/v7/api/client"
	grpcutil "github.com/OffchainLabs/prysm/v7/api/grpc"
	"github.com/OffchainLabs/prysm/v7/api/rest"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v7/validator/client"
	beaconApi "github.com/OffchainLabs/prysm/v7/validator/client/beacon-api"
	beaconChainClientFactory "github.com/OffchainLabs/prysm/v7/validator/client/beacon-chain-client-factory"
	nodeClientFactory "github.com/OffchainLabs/prysm/v7/validator/client/node-client-factory"
	validatorClientFactory "github.com/OffchainLabs/prysm/v7/validator/client/validator-client-factory"
@@ -17,7 +14,6 @@ import (
	grpcopentracing "github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing"
	grpcprometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
	"github.com/pkg/errors"
	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
	"google.golang.org/grpc"
)

@@ -41,30 +37,26 @@ func (s *Server) registerBeaconClient() error {

	s.ctx = grpcutil.AppendHeaders(s.ctx, s.grpcHeaders)

	grpcConn, err := grpc.DialContext(s.ctx, s.beaconNodeEndpoint, dialOpts...)
	conn, err := validatorHelpers.NewNodeConnection(
		validatorHelpers.WithGrpc(s.ctx, s.beaconNodeEndpoint, dialOpts),
		validatorHelpers.WithREST(s.beaconApiEndpoint,
			rest.WithHttpHeaders(s.beaconApiHeaders),
			rest.WithHttpTimeout(s.beaconApiTimeout),
			rest.WithTracing(),
		),
	)
	if err != nil {
		return errors.Wrapf(err, "could not dial endpoint: %s", s.beaconNodeEndpoint)
		return err
	}
	if s.beaconNodeCert != "" {
	if s.beaconNodeCert != "" && s.beaconNodeEndpoint != "" {
		log.Info("Established secure gRPC connection")
	}
	s.healthClient = ethpb.NewHealthClient(grpcConn)
	if grpcConn := conn.GetGrpcClientConn(); grpcConn != nil {
		s.healthClient = ethpb.NewHealthClient(grpcConn)
	}

	conn := validatorHelpers.NewNodeConnection(
		grpcConn,
		s.beaconApiEndpoint,
		validatorHelpers.WithBeaconApiHeaders(s.beaconApiHeaders),
		validatorHelpers.WithBeaconApiTimeout(s.beaconApiTimeout),
	)

	headersTransport := api.NewCustomHeadersTransport(http.DefaultTransport, conn.GetBeaconApiHeaders())
	restHandler := beaconApi.NewBeaconApiRestHandler(
		http.Client{Timeout: s.beaconApiTimeout, Transport: otelhttp.NewTransport(headersTransport)},
		s.beaconApiEndpoint,
	)

	s.chainClient = beaconChainClientFactory.NewChainClient(conn, restHandler)
	s.nodeClient = nodeClientFactory.NewNodeClient(conn, restHandler)
	s.beaconNodeValidatorClient = validatorClientFactory.NewValidatorClient(conn, restHandler)
	s.chainClient = beaconChainClientFactory.NewChainClient(conn)
	s.nodeClient = nodeClientFactory.NewNodeClient(conn)
	s.beaconNodeValidatorClient = validatorClientFactory.NewValidatorClient(conn)
	return nil
}

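The hunk above makes a REST-only configuration workable: when no gRPC endpoint is set, GetGrpcClientConn returns nil, the health client is simply skipped, and the factory-built clients still operate over REST. A sketch of that path under the same assumptions; the registerRestOnly name is illustrative, while the helper and factory calls are the ones used in the hunk above.

// registerRestOnly sketches the fallback wiring when only --beacon-rest-api-provider is set.
func (s *Server) registerRestOnly() error {
	conn, err := validatorHelpers.NewNodeConnection(
		validatorHelpers.WithREST(s.beaconApiEndpoint,
			rest.WithHttpHeaders(s.beaconApiHeaders),
			rest.WithHttpTimeout(s.beaconApiTimeout),
		),
	)
	if err != nil {
		return err
	}
	// No gRPC provider was configured, so this guard leaves healthClient unset
	// instead of wrapping a nil *grpc.ClientConn.
	if grpcConn := conn.GetGrpcClientConn(); grpcConn != nil {
		s.healthClient = ethpb.NewHealthClient(grpcConn)
	}
	s.chainClient = beaconChainClientFactory.NewChainClient(conn)
	s.nodeClient = nodeClientFactory.NewNodeClient(conn)
	s.beaconNodeValidatorClient = validatorClientFactory.NewValidatorClient(conn)
	return nil
}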
@@ -3,19 +3,17 @@ package rpc
import (
	"testing"

	grpcutil "github.com/OffchainLabs/prysm/v7/api/grpc"
	"github.com/OffchainLabs/prysm/v7/testing/assert"
	"github.com/OffchainLabs/prysm/v7/testing/require"
	"google.golang.org/grpc/metadata"
)

func TestGrpcHeaders(t *testing.T) {
	s := &Server{
		ctx: t.Context(),
		grpcHeaders: []string{"first=value1", "second=value2"},
	}
	err := s.registerBeaconClient()
	require.NoError(t, err)
	md, _ := metadata.FromOutgoingContext(s.ctx)
	ctx := t.Context()
	grpcHeaders := []string{"first=value1", "second=value2"}
	ctx = grpcutil.AppendHeaders(ctx, grpcHeaders)
	md, _ := metadata.FromOutgoingContext(ctx)
	require.Equal(t, 2, md.Len(), "MetadataV0 contains wrong number of values")
	assert.Equal(t, "value1", md.Get("first")[0])
	assert.Equal(t, "value2", md.Get("second")[0])

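The rewritten test calls grpcutil.AppendHeaders directly instead of spinning up a full Server. For reference, the same helper is what attaches "key=value" pairs as outgoing gRPC metadata before dialing; a small sketch, where the dialWithHeaders name, endpoint, and header value are placeholders:

// dialWithHeaders is a hypothetical helper. AppendHeaders parses "key=value"
// strings into outgoing metadata on the context; RPCs issued with that context
// carry those headers.
func dialWithHeaders(ctx context.Context, endpoint string, dialOpts []grpc.DialOption) (*grpc.ClientConn, error) {
	ctx = grpcutil.AppendHeaders(ctx, []string{"authorization=Bearer example-token"})
	return grpc.DialContext(ctx, endpoint, dialOpts...)
}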
@@ -21,6 +21,7 @@ import (
	"github.com/OffchainLabs/prysm/v7/validator/accounts/wallet"
	"github.com/OffchainLabs/prysm/v7/validator/client"
	"github.com/OffchainLabs/prysm/v7/validator/client/testutil"
	validatorHelpers "github.com/OffchainLabs/prysm/v7/validator/helpers"
	"github.com/OffchainLabs/prysm/v7/validator/keymanager"
	"github.com/google/uuid"
	"github.com/tyler-smith/go-bip39"
@@ -46,6 +47,7 @@ func TestServer_CreateWallet_Local(t *testing.T) {
	km, err := w.InitializeKeymanager(ctx, iface.InitKeymanagerConfig{ListenForChanges: false})
	require.NoError(t, err)
	vs, err := client.NewValidatorService(ctx, &client.Config{
		Conn: validatorHelpers.MockNodeConnection(),
		Wallet: w,
		Validator: &testutil.FakeValidator{
			Km: km,
@@ -443,6 +445,7 @@ func TestServer_WalletConfig(t *testing.T) {
	require.NoError(t, err)
	s.wallet = w
	vs, err := client.NewValidatorService(ctx, &client.Config{
		Conn: validatorHelpers.MockNodeConnection(),
		Wallet: w,
		Validator: &testutil.FakeValidator{
			Km: km,

@@ -23,6 +23,7 @@ import (
	"github.com/OffchainLabs/prysm/v7/validator/accounts/iface"
	"github.com/OffchainLabs/prysm/v7/validator/client"
	"github.com/OffchainLabs/prysm/v7/validator/client/testutil"
	validatorHelpers "github.com/OffchainLabs/prysm/v7/validator/helpers"
	"github.com/OffchainLabs/prysm/v7/validator/keymanager"
	"github.com/OffchainLabs/prysm/v7/validator/keymanager/derived"
	constant "github.com/OffchainLabs/prysm/v7/validator/testing"
@@ -53,6 +54,7 @@ func TestServer_ListAccounts(t *testing.T) {
	km, err := w.InitializeKeymanager(ctx, iface.InitKeymanagerConfig{ListenForChanges: false})
	require.NoError(t, err)
	vs, err := client.NewValidatorService(ctx, &client.Config{
		Conn: validatorHelpers.MockNodeConnection(),
		Wallet: w,
		Validator: &testutil.FakeValidator{
			Km: km,
@@ -158,6 +160,7 @@ func TestServer_BackupAccounts(t *testing.T) {
	km, err := w.InitializeKeymanager(ctx, iface.InitKeymanagerConfig{ListenForChanges: false})
	require.NoError(t, err)
	vs, err := client.NewValidatorService(ctx, &client.Config{
		Conn: validatorHelpers.MockNodeConnection(),
		Wallet: w,
		Validator: &testutil.FakeValidator{
			Km: km,
@@ -282,6 +285,7 @@ func TestServer_VoluntaryExit(t *testing.T) {
	require.NoError(t, err)
	require.NoError(t, err)
	vs, err := client.NewValidatorService(ctx, &client.Config{
		Conn: validatorHelpers.MockNodeConnection(),
		Wallet: w,
		Validator: &testutil.FakeValidator{
			Km: km,

@@ -34,6 +34,7 @@ import (
	DBIface "github.com/OffchainLabs/prysm/v7/validator/db/iface"
	"github.com/OffchainLabs/prysm/v7/validator/db/kv"
	dbtest "github.com/OffchainLabs/prysm/v7/validator/db/testing"
	validatorHelpers "github.com/OffchainLabs/prysm/v7/validator/helpers"
	"github.com/OffchainLabs/prysm/v7/validator/keymanager"
	"github.com/OffchainLabs/prysm/v7/validator/keymanager/derived"
	remoteweb3signer "github.com/OffchainLabs/prysm/v7/validator/keymanager/remote-web3signer"
@@ -52,6 +53,7 @@ func TestServer_ListKeystores(t *testing.T) {
	t.Run("wallet not ready", func(t *testing.T) {
		m := &testutil.FakeValidator{}
		vs, err := client.NewValidatorService(ctx, &client.Config{
			Conn: validatorHelpers.MockNodeConnection(),
			Validator: m,
		})
		require.NoError(t, err)
@@ -81,6 +83,7 @@ func TestServer_ListKeystores(t *testing.T) {
	km, err := w.InitializeKeymanager(ctx, iface.InitKeymanagerConfig{ListenForChanges: false})
	require.NoError(t, err)
	vs, err := client.NewValidatorService(ctx, &client.Config{
		Conn: validatorHelpers.MockNodeConnection(),
		Wallet: w,
		Validator: &testutil.FakeValidator{
			Km: km,
@@ -147,6 +150,7 @@ func TestServer_ImportKeystores(t *testing.T) {
	km, err := w.InitializeKeymanager(ctx, iface.InitKeymanagerConfig{ListenForChanges: false})
	require.NoError(t, err)
	vs, err := client.NewValidatorService(ctx, &client.Config{
		Conn: validatorHelpers.MockNodeConnection(),
		Wallet: w,
		Validator: &testutil.FakeValidator{
			Km: km,
@@ -368,6 +372,7 @@ func TestServer_ImportKeystores_WrongKeymanagerKind(t *testing.T) {
	}})
	require.NoError(t, err)
	vs, err := client.NewValidatorService(ctx, &client.Config{
		Conn: validatorHelpers.MockNodeConnection(),
		Wallet: w,
		Validator: &testutil.FakeValidator{
			Km: km,
@@ -652,6 +657,7 @@ func TestServer_DeleteKeystores_WrongKeymanagerKind(t *testing.T) {
	}})
	require.NoError(t, err)
	vs, err := client.NewValidatorService(ctx, &client.Config{
		Conn: validatorHelpers.MockNodeConnection(),
		Wallet: w,
		Validator: &testutil.FakeValidator{
			Km: km,
@@ -695,6 +701,7 @@ func setupServerWithWallet(t testing.TB) *Server {
	km, err := w.InitializeKeymanager(ctx, iface.InitKeymanagerConfig{ListenForChanges: false})
	require.NoError(t, err)
	vs, err := client.NewValidatorService(ctx, &client.Config{
		Conn: validatorHelpers.MockNodeConnection(),
		Wallet: w,
		Validator: &testutil.FakeValidator{
			Km: km,
@@ -730,6 +737,7 @@ func TestServer_SetVoluntaryExit(t *testing.T) {

	m := &testutil.FakeValidator{Km: km}
	vs, err := client.NewValidatorService(ctx, &client.Config{
		Conn: validatorHelpers.MockNodeConnection(),
		Validator: m,
	})
	require.NoError(t, err)
@@ -953,6 +961,7 @@ func TestServer_GetGasLimit(t *testing.T) {
	err := m.SetProposerSettings(ctx, tt.args)
	require.NoError(t, err)
	vs, err := client.NewValidatorService(ctx, &client.Config{
		Conn: validatorHelpers.MockNodeConnection(),
		Validator: m,
	})
	require.NoError(t, err)
@@ -1111,6 +1120,7 @@ func TestServer_SetGasLimit(t *testing.T) {
	require.NoError(t, err)
	validatorDB := dbtest.SetupDB(t, t.TempDir(), [][fieldparams.BLSPubkeyLength]byte{}, isSlashingProtectionMinimal)
	vs, err := client.NewValidatorService(ctx, &client.Config{
		Conn: validatorHelpers.MockNodeConnection(),
		Validator: m,
		DB: validatorDB,
	})
@@ -1300,6 +1310,7 @@ func TestServer_DeleteGasLimit(t *testing.T) {
	require.NoError(t, err)
	validatorDB := dbtest.SetupDB(t, t.TempDir(), [][fieldparams.BLSPubkeyLength]byte{}, isSlashingProtectionMinimal)
	vs, err := client.NewValidatorService(ctx, &client.Config{
		Conn: validatorHelpers.MockNodeConnection(),
		Validator: m,
		DB: validatorDB,
	})
@@ -1348,6 +1359,7 @@ func TestServer_ListRemoteKeys(t *testing.T) {
	km, err := w.InitializeKeymanager(ctx, iface.InitKeymanagerConfig{ListenForChanges: false, Web3SignerConfig: config})
	require.NoError(t, err)
	vs, err := client.NewValidatorService(ctx, &client.Config{
		Conn: validatorHelpers.MockNodeConnection(),
		Wallet: w,
		Validator: &testutil.FakeValidator{
			Km: km,
@@ -1404,6 +1416,7 @@ func TestServer_ImportRemoteKeys(t *testing.T) {
	km, err := w.InitializeKeymanager(ctx, iface.InitKeymanagerConfig{ListenForChanges: false, Web3SignerConfig: config})
	require.NoError(t, err)
	vs, err := client.NewValidatorService(ctx, &client.Config{
		Conn: validatorHelpers.MockNodeConnection(),
		Wallet: w,
		Validator: &testutil.FakeValidator{
			Km: km,
@@ -1466,6 +1479,7 @@ func TestServer_DeleteRemoteKeys(t *testing.T) {
	km, err := w.InitializeKeymanager(ctx, iface.InitKeymanagerConfig{ListenForChanges: false, Web3SignerConfig: config})
	require.NoError(t, err)
	vs, err := client.NewValidatorService(ctx, &client.Config{
		Conn: validatorHelpers.MockNodeConnection(),
		Wallet: w,
		Validator: &testutil.FakeValidator{
			Km: km,
@@ -1567,6 +1581,7 @@ func TestServer_ListFeeRecipientByPubkey(t *testing.T) {
	require.NoError(t, err)

	vs, err := client.NewValidatorService(ctx, &client.Config{
		Conn: validatorHelpers.MockNodeConnection(),
		Validator: m,
	})
	require.NoError(t, err)
@@ -1591,6 +1606,7 @@ func TestServer_ListFeeRecipientByPubKey_NoFeeRecipientSet(t *testing.T) {
	ctx := t.Context()

	vs, err := client.NewValidatorService(ctx, &client.Config{
		Conn: validatorHelpers.MockNodeConnection(),
		Validator: &testutil.FakeValidator{},
	})
	require.NoError(t, err)
@@ -1780,6 +1796,7 @@ func TestServer_FeeRecipientByPubkey(t *testing.T) {

	// save a default here
	vs, err := client.NewValidatorService(ctx, &client.Config{
		Conn: validatorHelpers.MockNodeConnection(),
		Validator: m,
		DB: validatorDB,
	})
@@ -1890,6 +1907,7 @@ func TestServer_DeleteFeeRecipientByPubkey(t *testing.T) {
	require.NoError(t, err)
	validatorDB := dbtest.SetupDB(t, t.TempDir(), [][fieldparams.BLSPubkeyLength]byte{}, isSlashingProtectionMinimal)
	vs, err := client.NewValidatorService(ctx, &client.Config{
		Conn: validatorHelpers.MockNodeConnection(),
		Validator: m,
		DB: validatorDB,
	})
@@ -1940,6 +1958,7 @@ func TestServer_Graffiti(t *testing.T) {
	graffiti := "graffiti"
	m := &testutil.FakeValidator{}
	vs, err := client.NewValidatorService(t.Context(), &client.Config{
		Conn: validatorHelpers.MockNodeConnection(),
		Validator: m,
	})
	require.NoError(t, err)