Mirror of https://github.com/OffchainLabs/prysm.git
Synced 2026-02-05 10:35:08 -05:00

Compare commits: feat/proce...process-ex
2 commits
| Author | SHA1 | Date |
|---|---|---|
|  | 69f6d32c01 |  |
|  | fb4847deaa |  |
.github/workflows/go.yml (vendored, 2 changes)
@@ -2,7 +2,7 @@ name: Go
 on:
   push:
-    branches: [ master, develop ]
+    branches: [ master ]
   pull_request:
     branches: [ '*' ]
   merge_group:
 
@@ -33,6 +33,7 @@ formatters:
     generated: lax
     paths:
       - validator/web/site_data.go
+      - .*_test.go
       - proto
       - tools/analyzers
       - third_party$
api/grpc/BUILD.bazel
@@ -3,16 +3,13 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
 go_library(
     name = "go_default_library",
     srcs = [
-        "grpc_connection_provider.go",
         "grpcutils.go",
         "log.go",
-        "mock_grpc_provider.go",
         "parameters.go",
     ],
     importpath = "github.com/OffchainLabs/prysm/v7/api/grpc",
     visibility = ["//visibility:public"],
     deps = [
-        "@com_github_pkg_errors//:go_default_library",
         "@com_github_sirupsen_logrus//:go_default_library",
         "@org_golang_google_grpc//:go_default_library",
         "@org_golang_google_grpc//metadata:go_default_library",
@@ -21,17 +18,12 @@ go_library(
 
 go_test(
     name = "go_default_test",
-    srcs = [
-        "grpc_connection_provider_test.go",
-        "grpcutils_test.go",
-    ],
+    srcs = ["grpcutils_test.go"],
     embed = [":go_default_library"],
     deps = [
         "//testing/assert:go_default_library",
         "//testing/require:go_default_library",
         "@com_github_sirupsen_logrus//hooks/test:go_default_library",
-        "@org_golang_google_grpc//:go_default_library",
-        "@org_golang_google_grpc//credentials/insecure:go_default_library",
         "@org_golang_google_grpc//metadata:go_default_library",
     ],
 )
api/grpc/grpc_connection_provider.go (deleted)
@@ -1,173 +0,0 @@
package grpc

import (
	"context"
	"strings"
	"sync"

	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"google.golang.org/grpc"
)

// GrpcConnectionProvider manages gRPC connections for failover support.
// It allows switching between different beacon node endpoints when the current one becomes unavailable.
// Only one connection is maintained at a time - when switching hosts, the old connection is closed.
type GrpcConnectionProvider interface {
	// CurrentConn returns the currently active gRPC connection.
	// The connection is created lazily on first call.
	// Returns nil if the provider has been closed.
	CurrentConn() *grpc.ClientConn
	// CurrentHost returns the address of the currently active endpoint.
	CurrentHost() string
	// Hosts returns all configured endpoint addresses.
	Hosts() []string
	// SwitchHost switches to the endpoint at the given index.
	// The new connection is created lazily on next CurrentConn() call.
	SwitchHost(index int) error
	// Close closes the current connection.
	Close()
}

type grpcConnectionProvider struct {
	// Immutable after construction - no lock needed for reads
	endpoints []string
	ctx       context.Context
	dialOpts  []grpc.DialOption

	// Current connection state (protected by mutex)
	currentIndex uint64
	conn         *grpc.ClientConn

	mu     sync.Mutex
	closed bool
}

// NewGrpcConnectionProvider creates a new connection provider that manages gRPC connections.
// The endpoint parameter can be a comma-separated list of addresses (e.g., "host1:4000,host2:4000").
// Only one connection is maintained at a time, created lazily on first use.
func NewGrpcConnectionProvider(
	ctx context.Context,
	endpoint string,
	dialOpts []grpc.DialOption,
) (GrpcConnectionProvider, error) {
	endpoints := parseEndpoints(endpoint)
	if len(endpoints) == 0 {
		return nil, errors.New("no gRPC endpoints provided")
	}

	log.WithFields(logrus.Fields{
		"endpoints": endpoints,
		"count":     len(endpoints),
	}).Info("Initialized gRPC connection provider")

	return &grpcConnectionProvider{
		endpoints: endpoints,
		ctx:       ctx,
		dialOpts:  dialOpts,
	}, nil
}

// parseEndpoints splits a comma-separated endpoint string into individual endpoints.
func parseEndpoints(endpoint string) []string {
	if endpoint == "" {
		return nil
	}
	endpoints := make([]string, 0, 1)
	for p := range strings.SplitSeq(endpoint, ",") {
		if p = strings.TrimSpace(p); p != "" {
			endpoints = append(endpoints, p)
		}
	}
	return endpoints
}

func (p *grpcConnectionProvider) CurrentConn() *grpc.ClientConn {
	p.mu.Lock()
	defer p.mu.Unlock()

	if p.closed {
		return nil
	}

	// Return existing connection if available
	if p.conn != nil {
		return p.conn
	}

	// Create connection lazily
	ep := p.endpoints[p.currentIndex]
	conn, err := grpc.DialContext(p.ctx, ep, p.dialOpts...)
	if err != nil {
		log.WithError(err).WithField("endpoint", ep).Error("Failed to create gRPC connection")
		return nil
	}

	p.conn = conn
	log.WithField("endpoint", ep).Debug("Created gRPC connection")
	return conn
}

func (p *grpcConnectionProvider) CurrentHost() string {
	p.mu.Lock()
	defer p.mu.Unlock()
	return p.endpoints[p.currentIndex]
}

func (p *grpcConnectionProvider) Hosts() []string {
	// Return a copy to maintain immutability
	hosts := make([]string, len(p.endpoints))
	copy(hosts, p.endpoints)
	return hosts
}

func (p *grpcConnectionProvider) SwitchHost(index int) error {
	if index < 0 || index >= len(p.endpoints) {
		return errors.Errorf("invalid host index %d, must be between 0 and %d", index, len(p.endpoints)-1)
	}

	p.mu.Lock()
	defer p.mu.Unlock()

	if uint64(index) == p.currentIndex {
		return nil // Already on this host
	}

	oldHost := p.endpoints[p.currentIndex]
	oldConn := p.conn

	p.conn = nil // Clear immediately - new connection created lazily
	p.currentIndex = uint64(index)

	// Close old connection asynchronously to avoid blocking the caller
	if oldConn != nil {
		go func() {
			if err := oldConn.Close(); err != nil {
				log.WithError(err).WithField("endpoint", oldHost).Debug("Failed to close previous connection")
			}
		}()
	}

	log.WithFields(logrus.Fields{
		"previousHost": oldHost,
		"newHost":      p.endpoints[index],
	}).Debug("Switched gRPC endpoint")
	return nil
}

func (p *grpcConnectionProvider) Close() {
	p.mu.Lock()
	defer p.mu.Unlock()

	if p.closed {
		return
	}
	p.closed = true

	if p.conn != nil {
		if err := p.conn.Close(); err != nil {
			log.WithError(err).WithField("endpoint", p.endpoints[p.currentIndex]).Debug("Failed to close gRPC connection")
		}
		p.conn = nil
	}
}
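For orientation, a minimal sketch of how a caller could drive the failover interface deleted above. `callWithFailover` and `doRequest` are hypothetical names; only the `GrpcConnectionProvider` methods come from the deleted file, and the sketch assumes placement in the same `grpc` package so the interface is in scope:

```go
package grpc

import (
	"context"
	"fmt"

	"google.golang.org/grpc"
)

// callWithFailover is a hypothetical helper, not part of the deleted file:
// it tries the current endpoint, then rotates through the remaining hosts.
func callWithFailover(ctx context.Context, p GrpcConnectionProvider, doRequest func(context.Context, *grpc.ClientConn) error) error {
	hosts := p.Hosts()
	// Find where the provider currently points so rotation starts there.
	cur := 0
	for i, h := range hosts {
		if h == p.CurrentHost() {
			cur = i
			break
		}
	}
	for attempt := 0; attempt < len(hosts); attempt++ {
		if conn := p.CurrentConn(); conn != nil { // dialed lazily on first use
			if err := doRequest(ctx, conn); err == nil {
				return nil
			}
		}
		// Advance to the next endpoint; the provider closes the previous
		// connection asynchronously and dials the new one on demand.
		cur = (cur + 1) % len(hosts)
		if err := p.SwitchHost(cur); err != nil {
			return err
		}
	}
	return fmt.Errorf("all %d endpoints failed", len(hosts))
}
```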
api/grpc/grpc_connection_provider_test.go (deleted)
@@ -1,207 +0,0 @@
package grpc

import (
	"context"
	"net"
	"reflect"
	"strings"
	"testing"

	"github.com/OffchainLabs/prysm/v7/testing/assert"
	"github.com/OffchainLabs/prysm/v7/testing/require"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func TestParseEndpoints(t *testing.T) {
	tests := []struct {
		name     string
		input    string
		expected []string
	}{
		{"single endpoint", "localhost:4000", []string{"localhost:4000"}},
		{"multiple endpoints", "host1:4000,host2:4000,host3:4000", []string{"host1:4000", "host2:4000", "host3:4000"}},
		{"endpoints with spaces", "host1:4000, host2:4000 , host3:4000", []string{"host1:4000", "host2:4000", "host3:4000"}},
		{"empty string", "", nil},
		{"only commas", ",,,", []string{}},
		{"trailing comma", "host1:4000,host2:4000,", []string{"host1:4000", "host2:4000"}},
		{"leading comma", ",host1:4000,host2:4000", []string{"host1:4000", "host2:4000"}},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := parseEndpoints(tt.input)
			if !reflect.DeepEqual(tt.expected, got) {
				t.Errorf("parseEndpoints(%q) = %v, want %v", tt.input, got, tt.expected)
			}
		})
	}
}

func TestNewGrpcConnectionProvider_Errors(t *testing.T) {
	t.Run("no endpoints", func(t *testing.T) {
		dialOpts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
		_, err := NewGrpcConnectionProvider(context.Background(), "", dialOpts)
		require.ErrorContains(t, "no gRPC endpoints provided", err)
	})
}

func TestGrpcConnectionProvider_LazyConnection(t *testing.T) {
	// Start only one server but configure provider with two endpoints
	lis, err := net.Listen("tcp", "127.0.0.1:0")
	require.NoError(t, err)
	server := grpc.NewServer()
	go func() { _ = server.Serve(lis) }()
	defer server.Stop()

	validAddr := lis.Addr().String()
	invalidAddr := "127.0.0.1:1" // Port 1 is unlikely to be listening

	// Provider should succeed even though second endpoint is invalid (lazy connections)
	endpoint := validAddr + "," + invalidAddr
	ctx := context.Background()
	dialOpts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
	provider, err := NewGrpcConnectionProvider(ctx, endpoint, dialOpts)
	require.NoError(t, err, "Provider creation should succeed with lazy connections")
	defer func() { provider.Close() }()

	// First endpoint should work
	conn := provider.CurrentConn()
	assert.NotNil(t, conn, "First connection should be created lazily")
}

func TestGrpcConnectionProvider_SingleConnectionModel(t *testing.T) {
	// Create provider with 3 endpoints
	var addrs []string
	var servers []*grpc.Server

	for range 3 {
		lis, err := net.Listen("tcp", "127.0.0.1:0")
		require.NoError(t, err)
		server := grpc.NewServer()
		go func() { _ = server.Serve(lis) }()
		addrs = append(addrs, lis.Addr().String())
		servers = append(servers, server)
	}
	defer func() {
		for _, s := range servers {
			s.Stop()
		}
	}()

	endpoint := strings.Join(addrs, ",")
	ctx := context.Background()
	dialOpts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
	provider, err := NewGrpcConnectionProvider(ctx, endpoint, dialOpts)
	require.NoError(t, err)
	defer func() { provider.Close() }()

	// Access the internal state to verify single connection behavior
	p := provider.(*grpcConnectionProvider)

	// Initially no connection
	p.mu.Lock()
	assert.Equal(t, (*grpc.ClientConn)(nil), p.conn, "Connection should be nil before access")
	p.mu.Unlock()

	// Access connection - should create one
	conn0 := provider.CurrentConn()
	assert.NotNil(t, conn0)

	p.mu.Lock()
	assert.NotNil(t, p.conn, "Connection should be created after CurrentConn()")
	firstConn := p.conn
	p.mu.Unlock()

	// Call CurrentConn again - should return same connection
	conn0Again := provider.CurrentConn()
	assert.Equal(t, conn0, conn0Again, "Should return same connection")

	// Switch to different host - old connection should be closed, new one created lazily
	require.NoError(t, provider.SwitchHost(1))

	p.mu.Lock()
	assert.Equal(t, (*grpc.ClientConn)(nil), p.conn, "Connection should be nil after SwitchHost (lazy)")
	p.mu.Unlock()

	// Get new connection
	conn1 := provider.CurrentConn()
	assert.NotNil(t, conn1)
	assert.NotEqual(t, firstConn, conn1, "Should be a different connection after switching hosts")
}

// testProvider creates a provider with n test servers and returns cleanup function.
func testProvider(t *testing.T, n int) (GrpcConnectionProvider, []string, func()) {
	var addrs []string
	var cleanups []func()

	for range n {
		lis, err := net.Listen("tcp", "127.0.0.1:0")
		require.NoError(t, err)
		server := grpc.NewServer()
		go func() { _ = server.Serve(lis) }()
		addrs = append(addrs, lis.Addr().String())
		cleanups = append(cleanups, server.Stop)
	}

	endpoint := strings.Join(addrs, ",")

	ctx := context.Background()
	dialOpts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
	provider, err := NewGrpcConnectionProvider(ctx, endpoint, dialOpts)
	require.NoError(t, err)

	cleanup := func() {
		provider.Close()
		for _, c := range cleanups {
			c()
		}
	}
	return provider, addrs, cleanup
}

func TestGrpcConnectionProvider(t *testing.T) {
	provider, addrs, cleanup := testProvider(t, 3)
	defer cleanup()

	t.Run("initial state", func(t *testing.T) {
		assert.Equal(t, 3, len(provider.Hosts()))
		assert.Equal(t, addrs[0], provider.CurrentHost())
		assert.NotNil(t, provider.CurrentConn())
	})

	t.Run("SwitchHost", func(t *testing.T) {
		require.NoError(t, provider.SwitchHost(1))
		assert.Equal(t, addrs[1], provider.CurrentHost())
		assert.NotNil(t, provider.CurrentConn()) // New connection created lazily
		require.NoError(t, provider.SwitchHost(0))
		assert.Equal(t, addrs[0], provider.CurrentHost())
		require.ErrorContains(t, "invalid host index", provider.SwitchHost(-1))
		require.ErrorContains(t, "invalid host index", provider.SwitchHost(3))
	})

	t.Run("SwitchHost circular", func(t *testing.T) {
		// Test round-robin style switching using SwitchHost with manual index
		indices := []int{1, 2, 0, 1} // Simulate circular switching
		for i, idx := range indices {
			require.NoError(t, provider.SwitchHost(idx))
			assert.Equal(t, addrs[idx], provider.CurrentHost(), "iteration %d", i)
		}
	})

	t.Run("Hosts returns copy", func(t *testing.T) {
		hosts := provider.Hosts()
		original := hosts[0]
		hosts[0] = "modified"
		assert.Equal(t, original, provider.Hosts()[0])
	})
}

func TestGrpcConnectionProvider_Close(t *testing.T) {
	provider, _, cleanup := testProvider(t, 1)
	defer cleanup()

	assert.NotNil(t, provider.CurrentConn())
	provider.Close()
	assert.Equal(t, (*grpc.ClientConn)(nil), provider.CurrentConn())
	provider.Close() // Double close is safe
}
api/grpc/mock_grpc_provider.go (deleted)
@@ -1,20 +0,0 @@
package grpc

import "google.golang.org/grpc"

// MockGrpcProvider implements GrpcConnectionProvider for testing.
type MockGrpcProvider struct {
	MockConn  *grpc.ClientConn
	MockHosts []string
}

func (m *MockGrpcProvider) CurrentConn() *grpc.ClientConn { return m.MockConn }
func (m *MockGrpcProvider) CurrentHost() string {
	if len(m.MockHosts) > 0 {
		return m.MockHosts[0]
	}
	return ""
}
func (m *MockGrpcProvider) Hosts() []string      { return m.MockHosts }
func (m *MockGrpcProvider) SwitchHost(int) error { return nil }
func (m *MockGrpcProvider) Close()               {}
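A sketch of the intended use of this mock, assuming a test in the same package; the specific assertions are illustrative, not taken from the repository:

```go
package grpc

import "testing"

// Hypothetical test: a component that depends on GrpcConnectionProvider can
// be exercised without a live beacon node by injecting the mock above.
// MockConn may stay nil when only host metadata is inspected.
func TestMockProviderHostMetadata(t *testing.T) {
	p := &MockGrpcProvider{MockHosts: []string{"host1:4000", "host2:4000"}}

	if got := p.CurrentHost(); got != "host1:4000" {
		t.Fatalf("CurrentHost() = %q, want %q", got, "host1:4000")
	}
	if n := len(p.Hosts()); n != 2 {
		t.Fatalf("len(Hosts()) = %d, want 2", n)
	}
	if err := p.SwitchHost(1); err != nil { // a no-op by design in the mock
		t.Fatalf("SwitchHost: %v", err)
	}
}
```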
api/rest/BUILD.bazel (deleted)
@@ -1,34 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = [
        "log.go",
        "mock_rest_provider.go",
        "rest_connection_provider.go",
        "rest_handler.go",
    ],
    importpath = "github.com/OffchainLabs/prysm/v7/api/rest",
    visibility = ["//visibility:public"],
    deps = [
        "//api:go_default_library",
        "//api/apiutil:go_default_library",
        "//api/client:go_default_library",
        "//config/params:go_default_library",
        "//network/httputil:go_default_library",
        "//runtime/version:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@io_opentelemetry_go_contrib_instrumentation_net_http_otelhttp//:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["rest_connection_provider_test.go"],
    embed = [":go_default_library"],
    deps = [
        "//testing/assert:go_default_library",
        "//testing/require:go_default_library",
    ],
)
api/rest/mock_rest_provider.go (deleted)
@@ -1,49 +0,0 @@
package rest

import (
	"bytes"
	"context"
	"net/http"
)

// MockRestProvider implements RestConnectionProvider for testing.
type MockRestProvider struct {
	MockClient  *http.Client
	MockHandler RestHandler
	MockHosts   []string
	HostIndex   int
}

func (m *MockRestProvider) HttpClient() *http.Client { return m.MockClient }
func (m *MockRestProvider) RestHandler() RestHandler { return m.MockHandler }
func (m *MockRestProvider) CurrentHost() string {
	if len(m.MockHosts) > 0 {
		return m.MockHosts[m.HostIndex%len(m.MockHosts)]
	}
	return ""
}
func (m *MockRestProvider) Hosts() []string            { return m.MockHosts }
func (m *MockRestProvider) SwitchHost(index int) error { m.HostIndex = index; return nil }

// MockRestHandler implements RestHandler for testing.
type MockRestHandler struct {
	MockHost   string
	MockClient *http.Client
}

func (m *MockRestHandler) Get(_ context.Context, _ string, _ any) error { return nil }
func (m *MockRestHandler) GetStatusCode(_ context.Context, _ string) (int, error) {
	return http.StatusOK, nil
}
func (m *MockRestHandler) GetSSZ(_ context.Context, _ string) ([]byte, http.Header, error) {
	return nil, nil, nil
}
func (m *MockRestHandler) Post(_ context.Context, _ string, _ map[string]string, _ *bytes.Buffer, _ any) error {
	return nil
}
func (m *MockRestHandler) PostSSZ(_ context.Context, _ string, _ map[string]string, _ *bytes.Buffer) ([]byte, http.Header, error) {
	return nil, nil, nil
}
func (m *MockRestHandler) HttpClient() *http.Client { return m.MockClient }
func (m *MockRestHandler) Host() string             { return m.MockHost }
func (m *MockRestHandler) SwitchHost(host string)   { m.MockHost = host }
api/rest/rest_connection_provider.go (deleted)
@@ -1,158 +0,0 @@
package rest

import (
	"net/http"
	"strings"
	"sync/atomic"
	"time"

	"github.com/OffchainLabs/prysm/v7/api/client"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

// RestConnectionProvider manages HTTP client configuration for REST API with failover support.
// It allows switching between different beacon node REST endpoints when the current one becomes unavailable.
type RestConnectionProvider interface {
	// HttpClient returns the configured HTTP client with headers, timeout, and optional tracing.
	HttpClient() *http.Client
	// RestHandler returns the REST handler for making API requests.
	RestHandler() RestHandler
	// CurrentHost returns the current REST API endpoint URL.
	CurrentHost() string
	// Hosts returns all configured REST API endpoint URLs.
	Hosts() []string
	// SwitchHost switches to the endpoint at the given index.
	SwitchHost(index int) error
}

// RestConnectionProviderOption is a functional option for configuring the REST connection provider.
type RestConnectionProviderOption func(*restConnectionProvider)

// WithHttpTimeout sets the HTTP client timeout.
func WithHttpTimeout(timeout time.Duration) RestConnectionProviderOption {
	return func(p *restConnectionProvider) {
		p.timeout = timeout
	}
}

// WithHttpHeaders sets custom HTTP headers to include in all requests.
func WithHttpHeaders(headers map[string][]string) RestConnectionProviderOption {
	return func(p *restConnectionProvider) {
		p.headers = headers
	}
}

// WithTracing enables OpenTelemetry tracing for HTTP requests.
func WithTracing() RestConnectionProviderOption {
	return func(p *restConnectionProvider) {
		p.enableTracing = true
	}
}

type restConnectionProvider struct {
	endpoints     []string
	httpClient    *http.Client
	restHandler   RestHandler
	currentIndex  atomic.Uint64
	timeout       time.Duration
	headers       map[string][]string
	enableTracing bool
}

// NewRestConnectionProvider creates a new REST connection provider that manages HTTP client configuration.
// The endpoint parameter can be a comma-separated list of URLs (e.g., "http://host1:3500,http://host2:3500").
func NewRestConnectionProvider(endpoint string, opts ...RestConnectionProviderOption) (RestConnectionProvider, error) {
	endpoints := parseEndpoints(endpoint)
	if len(endpoints) == 0 {
		return nil, errors.New("no REST API endpoints provided")
	}

	p := &restConnectionProvider{
		endpoints: endpoints,
	}

	for _, opt := range opts {
		opt(p)
	}

	// Build the HTTP transport chain
	var transport http.RoundTripper = http.DefaultTransport

	// Add custom headers if configured
	if len(p.headers) > 0 {
		transport = client.NewCustomHeadersTransport(transport, p.headers)
	}

	// Add tracing if enabled
	if p.enableTracing {
		transport = otelhttp.NewTransport(transport)
	}

	p.httpClient = &http.Client{
		Timeout:   p.timeout,
		Transport: transport,
	}

	// Create the REST handler with the HTTP client and initial host
	p.restHandler = newRestHandler(*p.httpClient, endpoints[0])

	log.WithFields(logrus.Fields{
		"endpoints": endpoints,
		"count":     len(endpoints),
	}).Info("Initialized REST connection provider")

	return p, nil
}

// parseEndpoints splits a comma-separated endpoint string into individual endpoints.
func parseEndpoints(endpoint string) []string {
	if endpoint == "" {
		return nil
	}
	endpoints := make([]string, 0, 1)
	for p := range strings.SplitSeq(endpoint, ",") {
		if p = strings.TrimSpace(p); p != "" {
			endpoints = append(endpoints, p)
		}
	}
	return endpoints
}

func (p *restConnectionProvider) HttpClient() *http.Client {
	return p.httpClient
}

func (p *restConnectionProvider) RestHandler() RestHandler {
	return p.restHandler
}

func (p *restConnectionProvider) CurrentHost() string {
	return p.endpoints[p.currentIndex.Load()]
}

func (p *restConnectionProvider) Hosts() []string {
	// Return a copy to maintain immutability
	hosts := make([]string, len(p.endpoints))
	copy(hosts, p.endpoints)
	return hosts
}

func (p *restConnectionProvider) SwitchHost(index int) error {
	if index < 0 || index >= len(p.endpoints) {
		return errors.Errorf("invalid host index %d, must be between 0 and %d", index, len(p.endpoints)-1)
	}

	oldIdx := p.currentIndex.Load()
	p.currentIndex.Store(uint64(index))

	// Update the rest handler's host
	p.restHandler.SwitchHost(p.endpoints[index])

	log.WithFields(logrus.Fields{
		"previousHost": p.endpoints[oldIdx],
		"newHost":      p.endpoints[index],
	}).Debug("Switched REST endpoint")
	return nil
}
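A minimal construction sketch for the provider above; the beacon URLs and bearer token are placeholder values, while the constructor and the three functional options come from the deleted file:

```go
package rest

import "time"

// newExampleProvider shows the functional-option wiring. The endpoints and
// Authorization header are illustrative values only.
func newExampleProvider() (RestConnectionProvider, error) {
	return NewRestConnectionProvider(
		"http://beacon1:3500,http://beacon2:3500",
		WithHttpTimeout(30*time.Second),
		WithHttpHeaders(map[string][]string{"Authorization": {"Bearer <token>"}}),
		WithTracing(),
	)
}
```

Since time.Duration counts nanoseconds, 30*time.Second equals the raw literal 30000000000 that the deleted test below passes to WithHttpTimeout.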
api/rest/rest_connection_provider_test.go (deleted)
@@ -1,80 +0,0 @@
package rest

import (
	"reflect"
	"testing"

	"github.com/OffchainLabs/prysm/v7/testing/assert"
	"github.com/OffchainLabs/prysm/v7/testing/require"
)

func TestParseEndpoints(t *testing.T) {
	tests := []struct {
		name     string
		input    string
		expected []string
	}{
		{"single endpoint", "http://localhost:3500", []string{"http://localhost:3500"}},
		{"multiple endpoints", "http://host1:3500,http://host2:3500,http://host3:3500", []string{"http://host1:3500", "http://host2:3500", "http://host3:3500"}},
		{"endpoints with spaces", "http://host1:3500, http://host2:3500 , http://host3:3500", []string{"http://host1:3500", "http://host2:3500", "http://host3:3500"}},
		{"empty string", "", nil},
		{"only commas", ",,,", []string{}},
		{"trailing comma", "http://host1:3500,http://host2:3500,", []string{"http://host1:3500", "http://host2:3500"}},
		{"leading comma", ",http://host1:3500,http://host2:3500", []string{"http://host1:3500", "http://host2:3500"}},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := parseEndpoints(tt.input)
			if !reflect.DeepEqual(tt.expected, got) {
				t.Errorf("parseEndpoints(%q) = %v, want %v", tt.input, got, tt.expected)
			}
		})
	}
}

func TestNewRestConnectionProvider_Errors(t *testing.T) {
	t.Run("no endpoints", func(t *testing.T) {
		_, err := NewRestConnectionProvider("")
		require.ErrorContains(t, "no REST API endpoints provided", err)
	})
}

func TestRestConnectionProvider(t *testing.T) {
	provider, err := NewRestConnectionProvider("http://host1:3500,http://host2:3500,http://host3:3500")
	require.NoError(t, err)

	t.Run("initial state", func(t *testing.T) {
		assert.Equal(t, 3, len(provider.Hosts()))
		assert.Equal(t, "http://host1:3500", provider.CurrentHost())
		assert.NotNil(t, provider.HttpClient())
	})

	t.Run("SwitchHost", func(t *testing.T) {
		require.NoError(t, provider.SwitchHost(1))
		assert.Equal(t, "http://host2:3500", provider.CurrentHost())
		require.NoError(t, provider.SwitchHost(0))
		assert.Equal(t, "http://host1:3500", provider.CurrentHost())
		require.ErrorContains(t, "invalid host index", provider.SwitchHost(-1))
		require.ErrorContains(t, "invalid host index", provider.SwitchHost(3))
	})

	t.Run("Hosts returns copy", func(t *testing.T) {
		hosts := provider.Hosts()
		original := hosts[0]
		hosts[0] = "modified"
		assert.Equal(t, original, provider.Hosts()[0])
	})
}

func TestRestConnectionProvider_WithOptions(t *testing.T) {
	headers := map[string][]string{"Authorization": {"Bearer token"}}
	provider, err := NewRestConnectionProvider(
		"http://localhost:3500",
		WithHttpHeaders(headers),
		WithHttpTimeout(30000000000), // 30 seconds in nanoseconds
		WithTracing(),
	)
	require.NoError(t, err)
	assert.NotNil(t, provider.HttpClient())
	assert.Equal(t, "http://localhost:3500", provider.CurrentHost())
}
@@ -110,7 +110,7 @@ func VerifyCellKZGProofBatch(commitmentsBytes []Bytes48, cellIndices []uint64, c
 	ckzgCells := make([]ckzg4844.Cell, len(cells))
 
 	for i := range cells {
-		ckzgCells[i] = ckzg4844.Cell(cells[i])
+		copy(ckzgCells[i][:], cells[i][:])
 	}
 	return ckzg4844.VerifyCellKZGProofBatch(commitmentsBytes, cellIndices, ckzgCells, proofsBytes)
 }
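The compare view does not show the motivation for this one-line change, but the language-level difference it exploits is worth noting: a conversion like ckzg4844.Cell(cells[i]) compiles only while the two named array types keep identical underlying types, whereas copy over byte slices needs only matching element types. A standalone sketch with simplified stand-in types:

```go
package main

import "fmt"

type cellA [4]byte // stand-in for the local cell type
type cellB [4]byte // stand-in for ckzg4844.Cell

func main() {
	a := cellA{1, 2, 3, 4}

	b1 := cellB(a) // legal only because both types are [4]byte underneath

	var b2 cellB
	copy(b2[:], a[:]) // element-wise copy; no type identity required

	fmt.Println(b1 == b2) // true
}
```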
@@ -89,7 +89,7 @@ func (mb *mockBroadcaster) BroadcastLightClientFinalityUpdate(_ context.Context,
 	return nil
 }
 
-func (mb *mockBroadcaster) BroadcastDataColumnSidecars(_ context.Context, _ []blocks.VerifiedRODataColumn, _ []blocks.PartialDataColumn) error {
+func (mb *mockBroadcaster) BroadcastDataColumnSidecars(_ context.Context, _ []blocks.VerifiedRODataColumn) error {
 	mb.broadcastCalled = true
 	return nil
 }
beacon-chain/core/gloas/BUILD.bazel
@@ -4,6 +4,9 @@ go_library(
     name = "go_default_library",
     srcs = [
         "bid.go",
+        "deposit_request.go",
+        "log.go",
+        "payload.go",
         "payload_attestation.go",
         "pending_payment.go",
         "proposer_slashing.go",
@@ -12,6 +15,7 @@ go_library(
     visibility = ["//visibility:public"],
     deps = [
         "//beacon-chain/core/helpers:go_default_library",
+        "//beacon-chain/core/requests:go_default_library",
         "//beacon-chain/core/signing:go_default_library",
         "//beacon-chain/core/time:go_default_library",
         "//beacon-chain/state:go_default_library",
@@ -25,9 +29,13 @@ go_library(
         "//crypto/bls/common:go_default_library",
         "//crypto/hash:go_default_library",
         "//encoding/bytesutil:go_default_library",
+        "//encoding/ssz:go_default_library",
+        "//proto/engine/v1:go_default_library",
         "//proto/prysm/v1alpha1:go_default_library",
+        "//runtime/version:go_default_library",
         "//time/slots:go_default_library",
         "@com_github_pkg_errors//:go_default_library",
+        "@com_github_sirupsen_logrus//:go_default_library",
     ],
 )
 
@@ -35,7 +43,9 @@ go_test(
     name = "go_default_test",
     srcs = [
         "bid_test.go",
+        "deposit_request_test.go",
         "payload_attestation_test.go",
+        "payload_test.go",
         "pending_payment_test.go",
         "proposer_slashing_test.go",
     ],
@@ -45,6 +55,7 @@ go_test(
         "//beacon-chain/core/signing:go_default_library",
         "//beacon-chain/state:go_default_library",
         "//beacon-chain/state/state-native:go_default_library",
+        "//beacon-chain/state/testing:go_default_library",
         "//config/params:go_default_library",
         "//consensus-types/blocks:go_default_library",
         "//consensus-types/interfaces:go_default_library",
@@ -52,6 +63,7 @@ go_test(
         "//crypto/bls:go_default_library",
         "//crypto/bls/common:go_default_library",
         "//encoding/bytesutil:go_default_library",
+        "//encoding/ssz:go_default_library",
         "//proto/engine/v1:go_default_library",
         "//proto/prysm/v1alpha1:go_default_library",
         "//proto/prysm/v1alpha1/validator-client:go_default_library",
beacon-chain/core/gloas/deposit_request.go (new file, 174 lines)
@@ -0,0 +1,174 @@
package gloas

import (
	"context"
	"fmt"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
	fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
	enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v7/runtime/version"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

func processDepositRequests(ctx context.Context, beaconState state.BeaconState, requests []*enginev1.DepositRequest) error {
	if len(requests) == 0 {
		return nil
	}

	for _, receipt := range requests {
		if err := processDepositRequest(beaconState, receipt); err != nil {
			return errors.Wrap(err, "could not apply deposit request")
		}
	}
	return nil
}

// processDepositRequest processes a single deposit request.
// Spec v1.7.0-alpha.0 (pseudocode):
// def process_deposit_request(state: BeaconState, deposit_request: DepositRequest) -> None:
//
//	# [New in Gloas:EIP7732]
//	builder_pubkeys = [b.pubkey for b in state.builders]
//	validator_pubkeys = [v.pubkey for v in state.validators]
//
//	# [New in Gloas:EIP7732]
//	# Regardless of the withdrawal credentials prefix, if a builder/validator
//	# already exists with this pubkey, apply the deposit to their balance
//	is_builder = deposit_request.pubkey in builder_pubkeys
//	is_validator = deposit_request.pubkey in validator_pubkeys
//	is_builder_prefix = is_builder_withdrawal_credential(deposit_request.withdrawal_credentials)
//	if is_builder or (is_builder_prefix and not is_validator):
//		# Apply builder deposits immediately
//		apply_deposit_for_builder(
//			state,
//			deposit_request.pubkey,
//			deposit_request.withdrawal_credentials,
//			deposit_request.amount,
//			deposit_request.signature,
//		)
//		return
//
//	# Add validator deposits to the queue
//	state.pending_deposits.append(
//		PendingDeposit(
//			pubkey=deposit_request.pubkey,
//			withdrawal_credentials=deposit_request.withdrawal_credentials,
//			amount=deposit_request.amount,
//			signature=deposit_request.signature,
//			slot=state.slot,
//		)
//	)
func processDepositRequest(beaconState state.BeaconState, request *enginev1.DepositRequest) error {
	if request == nil {
		return errors.New("nil deposit request")
	}

	applied, err := applyBuilderDepositRequest(beaconState, request)
	if err != nil {
		return errors.Wrap(err, "could not apply builder deposit")
	}
	if applied {
		return nil
	}

	if err := beaconState.AppendPendingDeposit(&ethpb.PendingDeposit{
		PublicKey:             request.Pubkey,
		WithdrawalCredentials: request.WithdrawalCredentials,
		Amount:                request.Amount,
		Signature:             request.Signature,
		Slot:                  beaconState.Slot(),
	}); err != nil {
		return errors.Wrap(err, "could not append deposit request")
	}
	return nil
}

func applyBuilderDepositRequest(beaconState state.BeaconState, request *enginev1.DepositRequest) (bool, error) {
	if beaconState.Version() < version.Gloas {
		return false, nil
	}

	pubkey := bytesutil.ToBytes48(request.Pubkey)
	_, isValidator := beaconState.ValidatorIndexByPubkey(pubkey)
	_, isBuilder := beaconState.BuilderIndexByPubkey(pubkey)
	isBuilderPrefix := IsBuilderWithdrawalCredential(request.WithdrawalCredentials)
	if !isBuilder && (!isBuilderPrefix || isValidator) {
		return false, nil
	}

	if err := applyDepositForBuilder(
		beaconState,
		request.Pubkey,
		request.WithdrawalCredentials,
		request.Amount,
		request.Signature,
	); err != nil {
		return false, err
	}
	return true, nil
}

// applyDepositForBuilder processes an execution-layer deposit for a builder.
// Spec v1.7.0-alpha.0 (pseudocode):
// def apply_deposit_for_builder(
//
//	state: BeaconState,
//	pubkey: BLSPubkey,
//	withdrawal_credentials: Bytes32,
//	amount: uint64,
//	signature: BLSSignature,
//
// ) -> None:
//
//	builder_pubkeys = [b.pubkey for b in state.builders]
//	if pubkey not in builder_pubkeys:
//		# Verify the deposit signature (proof of possession) which is not checked by the deposit contract
//		if is_valid_deposit_signature(pubkey, withdrawal_credentials, amount, signature):
//			add_builder_to_registry(state, pubkey, withdrawal_credentials, amount)
//	else:
//		# Increase balance by deposit amount
//		builder_index = builder_pubkeys.index(pubkey)
//		state.builders[builder_index].balance += amount
func applyDepositForBuilder(
	beaconState state.BeaconState,
	pubkey []byte,
	withdrawalCredentials []byte,
	amount uint64,
	signature []byte,
) error {
	pubkeyBytes := bytesutil.ToBytes48(pubkey)
	if idx, exists := beaconState.BuilderIndexByPubkey(pubkeyBytes); exists {
		return beaconState.IncreaseBuilderBalance(idx, amount)
	}

	valid, err := helpers.IsValidDepositSignature(&ethpb.Deposit_Data{
		PublicKey:             pubkey,
		WithdrawalCredentials: withdrawalCredentials,
		Amount:                amount,
		Signature:             signature,
	})
	if err != nil {
		return errors.Wrap(err, "could not verify deposit signature")
	}
	if !valid {
		log.WithFields(logrus.Fields{
			"pubkey": fmt.Sprintf("%x", pubkey),
		}).Warn("ignoring builder deposit: invalid signature")
		return nil
	}

	withdrawalCredBytes := bytesutil.ToBytes32(withdrawalCredentials)
	return beaconState.AddBuilderFromDeposit(pubkeyBytes, withdrawalCredBytes, amount)
}

// IsBuilderWithdrawalCredential reports whether the credentials carry the builder withdrawal prefix.
func IsBuilderWithdrawalCredential(withdrawalCredentials []byte) bool {
	return len(withdrawalCredentials) == fieldparams.RootLength &&
		withdrawalCredentials[0] == params.BeaconConfig().BuilderWithdrawalPrefixByte
}
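The routing rule in the spec comment above, is_builder or (is_builder_prefix and not is_validator), is easy to misread once negated inside applyBuilderDepositRequest; a small table-driven sketch (illustrative, not part of the commit) enumerates the cases:

```go
package gloas

import "testing"

// TestBuilderRoutingPredicate mirrors the spec predicate that decides whether
// a deposit takes the immediate builder path or joins the pending queue.
func TestBuilderRoutingPredicate(t *testing.T) {
	route := func(isBuilder, isValidator, isBuilderPrefix bool) bool {
		return isBuilder || (isBuilderPrefix && !isValidator)
	}
	cases := []struct {
		isBuilder, isValidator, isBuilderPrefix bool
		wantBuilderPath                         bool
	}{
		{true, false, false, true},   // known builder: always the builder path
		{true, false, true, true},    // known builder, builder prefix: builder path
		{false, true, true, false},   // existing validator wins over a builder prefix
		{false, false, true, true},   // fresh key with builder prefix: builder path
		{false, false, false, false}, // fresh key, validator prefix: pending queue
		{false, true, false, false},  // existing validator, validator prefix: pending queue
	}
	for _, c := range cases {
		if got := route(c.isBuilder, c.isValidator, c.isBuilderPrefix); got != c.wantBuilderPath {
			t.Errorf("route(%v,%v,%v) = %v, want %v",
				c.isBuilder, c.isValidator, c.isBuilderPrefix, got, c.wantBuilderPath)
		}
	}
}
```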
beacon-chain/core/gloas/deposit_request_test.go (new file, 150 lines)
@@ -0,0 +1,150 @@
package gloas

import (
	"bytes"
	"testing"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
	state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
	stateTesting "github.com/OffchainLabs/prysm/v7/beacon-chain/state/testing"
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/crypto/bls"
	enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v7/testing/require"
)

func TestProcessDepositRequests_EmptyAndNil(t *testing.T) {
	st := newGloasState(t, nil, nil)

	t.Run("empty requests continues", func(t *testing.T) {
		err := processDepositRequests(t.Context(), st, []*enginev1.DepositRequest{})
		require.NoError(t, err)
	})

	t.Run("nil request errors", func(t *testing.T) {
		err := processDepositRequests(t.Context(), st, []*enginev1.DepositRequest{nil})
		require.ErrorContains(t, "nil deposit request", err)
	})
}

func TestProcessDepositRequest_BuilderDepositAddsBuilder(t *testing.T) {
	sk, err := bls.RandKey()
	require.NoError(t, err)

	cred := builderWithdrawalCredentials()
	pd := stateTesting.GeneratePendingDeposit(t, sk, 1234, cred, 0)
	req := depositRequestFromPending(pd, 1)

	st := newGloasState(t, nil, nil)
	err = processDepositRequest(st, req)
	require.NoError(t, err)

	idx, ok := st.BuilderIndexByPubkey(toBytes48(req.Pubkey))
	require.Equal(t, true, ok)

	builder, err := st.Builder(idx)
	require.NoError(t, err)
	require.NotNil(t, builder)
	require.DeepEqual(t, req.Pubkey, builder.Pubkey)
	require.DeepEqual(t, []byte{cred[0]}, builder.Version)
	require.DeepEqual(t, cred[12:], builder.ExecutionAddress)
	require.Equal(t, uint64(1234), uint64(builder.Balance))
	require.Equal(t, params.BeaconConfig().FarFutureEpoch, builder.WithdrawableEpoch)

	pending, err := st.PendingDeposits()
	require.NoError(t, err)
	require.Equal(t, 0, len(pending))
}

func TestProcessDepositRequest_ExistingBuilderIncreasesBalance(t *testing.T) {
	sk, err := bls.RandKey()
	require.NoError(t, err)

	pubkey := sk.PublicKey().Marshal()
	builders := []*ethpb.Builder{
		{
			Pubkey:            pubkey,
			Version:           []byte{0},
			ExecutionAddress:  bytes.Repeat([]byte{0x11}, 20),
			Balance:           5,
			WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
		},
	}
	st := newGloasState(t, nil, builders)

	cred := validatorWithdrawalCredentials()
	pd := stateTesting.GeneratePendingDeposit(t, sk, 200, cred, 0)
	req := depositRequestFromPending(pd, 9)

	err = processDepositRequest(st, req)
	require.NoError(t, err)

	idx, ok := st.BuilderIndexByPubkey(toBytes48(pubkey))
	require.Equal(t, true, ok)
	builder, err := st.Builder(idx)
	require.NoError(t, err)
	require.Equal(t, uint64(205), uint64(builder.Balance))

	pending, err := st.PendingDeposits()
	require.NoError(t, err)
	require.Equal(t, 0, len(pending))
}

func TestApplyDepositForBuilder_InvalidSignatureIgnoresDeposit(t *testing.T) {
	sk, err := bls.RandKey()
	require.NoError(t, err)

	cred := builderWithdrawalCredentials()
	st := newGloasState(t, nil, nil)
	err = applyDepositForBuilder(st, sk.PublicKey().Marshal(), cred[:], 100, make([]byte, 96))
	require.NoError(t, err)

	_, ok := st.BuilderIndexByPubkey(toBytes48(sk.PublicKey().Marshal()))
	require.Equal(t, false, ok)
}

func newGloasState(t *testing.T, validators []*ethpb.Validator, builders []*ethpb.Builder) state.BeaconState {
	t.Helper()

	st, err := state_native.InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
		DepositRequestsStartIndex: params.BeaconConfig().UnsetDepositRequestsStartIndex,
		Validators:                validators,
		Balances:                  make([]uint64, len(validators)),
		PendingDeposits:           []*ethpb.PendingDeposit{},
		Builders:                  builders,
	})
	require.NoError(t, err)

	return st
}

func depositRequestFromPending(pd *ethpb.PendingDeposit, index uint64) *enginev1.DepositRequest {
	return &enginev1.DepositRequest{
		Pubkey:                pd.PublicKey,
		WithdrawalCredentials: pd.WithdrawalCredentials,
		Amount:                pd.Amount,
		Signature:             pd.Signature,
		Index:                 index,
	}
}

func builderWithdrawalCredentials() [32]byte {
	var cred [32]byte
	cred[0] = params.BeaconConfig().BuilderWithdrawalPrefixByte
	copy(cred[12:], bytes.Repeat([]byte{0x22}, 20))
	return cred
}

func validatorWithdrawalCredentials() [32]byte {
	var cred [32]byte
	cred[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
	copy(cred[12:], bytes.Repeat([]byte{0x33}, 20))
	return cred
}

func toBytes48(b []byte) [48]byte {
	var out [48]byte
	copy(out[:], b)
	return out
}
log.go (the generated logging helper moves from package rest to package gloas)
@@ -1,9 +1,9 @@
 // Code generated by hack/gen-logs.sh; DO NOT EDIT.
 // This file is created and regenerated automatically. Anything added here might get removed.
-package rest
+package gloas
 
 import "github.com/sirupsen/logrus"
 
 // The prefix for logs from this package will be the text after the last slash in the package path.
 // If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
-var log = logrus.WithField("package", "api/rest")
+var log = logrus.WithField("package", "beacon-chain/core/gloas")
344
beacon-chain/core/gloas/payload.go
Normal file
344
beacon-chain/core/gloas/payload.go
Normal file
@@ -0,0 +1,344 @@
|
|||||||
|
package gloas
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
requests "github.com/OffchainLabs/prysm/v7/beacon-chain/core/requests"
|
||||||
|
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
|
||||||
|
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||||
|
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||||
|
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||||
|
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||||
|
"github.com/OffchainLabs/prysm/v7/crypto/bls"
|
||||||
|
"github.com/OffchainLabs/prysm/v7/encoding/ssz"
|
||||||
|
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
|
||||||
|
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ProcessExecutionPayload processes the signed execution payload envelope for the Gloas fork.
|
||||||
|
// Spec v1.7.0-alpha.0 (pseudocode):
|
||||||
|
// def process_execution_payload(
|
||||||
|
//
|
||||||
|
// state: BeaconState,
|
||||||
|
// signed_envelope: SignedExecutionPayloadEnvelope,
|
||||||
|
// execution_engine: ExecutionEngine,
|
||||||
|
// verify: bool = True,
|
||||||
|
//
|
||||||
|
// ) -> None:
|
||||||
|
//
|
||||||
|
// envelope = signed_envelope.message
|
||||||
|
// payload = envelope.payload
|
||||||
|
//
|
||||||
|
// if verify:
|
||||||
|
// assert verify_execution_payload_envelope_signature(state, signed_envelope)
|
||||||
|
//
|
||||||
|
// previous_state_root = hash_tree_root(state)
|
||||||
|
// if state.latest_block_header.state_root == Root():
|
||||||
|
// state.latest_block_header.state_root = previous_state_root
|
||||||
|
//
|
||||||
|
// assert envelope.beacon_block_root == hash_tree_root(state.latest_block_header)
|
||||||
|
// assert envelope.slot == state.slot
|
||||||
|
//
|
||||||
|
// committed_bid = state.latest_execution_payload_bid
|
||||||
|
// assert envelope.builder_index == committed_bid.builder_index
|
||||||
|
// assert committed_bid.blob_kzg_commitments_root == hash_tree_root(envelope.blob_kzg_commitments)
|
||||||
|
// assert committed_bid.prev_randao == payload.prev_randao
|
||||||
|
//
|
||||||
|
// assert hash_tree_root(payload.withdrawals) == hash_tree_root(state.payload_expected_withdrawals)
|
||||||
|
//
|
||||||
|
// assert committed_bid.gas_limit == payload.gas_limit
|
||||||
|
// assert committed_bid.block_hash == payload.block_hash
|
||||||
|
// assert payload.parent_hash == state.latest_block_hash
|
||||||
|
// assert payload.timestamp == compute_time_at_slot(state, state.slot)
|
||||||
|
// assert (
|
||||||
|
// len(envelope.blob_kzg_commitments)
|
||||||
|
// <= get_blob_parameters(get_current_epoch(state)).max_blobs_per_block
|
||||||
|
// )
|
||||||
|
// versioned_hashes = [
|
||||||
|
// kzg_commitment_to_versioned_hash(commitment) for commitment in envelope.blob_kzg_commitments
|
||||||
|
// ]
|
||||||
|
// requests = envelope.execution_requests
|
||||||
|
// assert execution_engine.verify_and_notify_new_payload(
|
||||||
|
// NewPayloadRequest(
|
||||||
|
// execution_payload=payload,
|
||||||
|
// versioned_hashes=versioned_hashes,
|
||||||
|
// parent_beacon_block_root=state.latest_block_header.parent_root,
|
||||||
|
// execution_requests=requests,
|
||||||
|
// )
|
||||||
|
// )
|
||||||
|
//
|
||||||
|
// for op in requests.deposits: process_deposit_request(state, op)
|
||||||
|
// for op in requests.withdrawals: process_withdrawal_request(state, op)
|
||||||
|
// for op in requests.consolidations: process_consolidation_request(state, op)
|
||||||
|
//
|
||||||
|
// payment = state.builder_pending_payments[SLOTS_PER_EPOCH + state.slot % SLOTS_PER_EPOCH]
|
||||||
|
// amount = payment.withdrawal.amount
|
||||||
|
// if amount > 0:
|
||||||
|
// state.builder_pending_withdrawals.append(payment.withdrawal)
|
||||||
|
// state.builder_pending_payments[SLOTS_PER_EPOCH + state.slot % SLOTS_PER_EPOCH] = (
|
||||||
|
// BuilderPendingPayment()
|
||||||
|
// )
|
||||||
|
//
|
||||||
|
// state.execution_payload_availability[state.slot % SLOTS_PER_HISTORICAL_ROOT] = 0b1
|
||||||
|
// state.latest_block_hash = payload.block_hash
|
||||||
|
//
|
||||||
|
// if verify:
|
||||||
|
// assert envelope.state_root == hash_tree_root(state)
|
||||||
|
func ProcessExecutionPayload(
	ctx context.Context,
	st state.BeaconState,
	signedEnvelope interfaces.ROSignedExecutionPayloadEnvelope,
) error {
	if err := verifyExecutionPayloadEnvelopeSignature(st, signedEnvelope); err != nil {
		return errors.Wrap(err, "signature verification failed")
	}

	latestHeader := st.LatestBlockHeader()
	if len(latestHeader.StateRoot) == 0 || bytes.Equal(latestHeader.StateRoot, make([]byte, 32)) {
		previousStateRoot, err := st.HashTreeRoot(ctx)
		if err != nil {
			return errors.Wrap(err, "could not compute state root")
		}
		latestHeader.StateRoot = previousStateRoot[:]
		if err := st.SetLatestBlockHeader(latestHeader); err != nil {
			return errors.Wrap(err, "could not set latest block header")
		}
	}

	blockHeaderRoot, err := latestHeader.HashTreeRoot()
	if err != nil {
		return errors.Wrap(err, "could not compute block header root")
	}
	envelope, err := signedEnvelope.Envelope()
	if err != nil {
		return errors.Wrap(err, "could not get envelope from signed envelope")
	}
	beaconBlockRoot := envelope.BeaconBlockRoot()
	if !bytes.Equal(beaconBlockRoot[:], blockHeaderRoot[:]) {
		return errors.Errorf("envelope beacon block root does not match state latest block header root: envelope=%#x, header=%#x", beaconBlockRoot, blockHeaderRoot)
	}

	if envelope.Slot() != st.Slot() {
		return errors.Errorf("envelope slot does not match state slot: envelope=%d, state=%d", envelope.Slot(), st.Slot())
	}

	latestBid, err := st.LatestExecutionPayloadBid()
	if err != nil {
		return errors.Wrap(err, "could not get latest execution payload bid")
	}
	if latestBid == nil {
		return errors.New("latest execution payload bid is nil")
	}
	if envelope.BuilderIndex() != latestBid.BuilderIndex() {
		return errors.Errorf("envelope builder index does not match committed bid builder index: envelope=%d, bid=%d", envelope.BuilderIndex(), latestBid.BuilderIndex())
	}

	envelopeBlobCommitments := envelope.BlobKzgCommitments()
	envelopeBlobRoot, err := ssz.KzgCommitmentsRoot(envelopeBlobCommitments)
	if err != nil {
		return errors.Wrap(err, "could not compute envelope blob KZG commitments root")
	}
	committedBlobRoot := latestBid.BlobKzgCommitmentsRoot()
	if !bytes.Equal(committedBlobRoot[:], envelopeBlobRoot[:]) {
		return errors.Errorf("committed bid blob KZG commitments root does not match envelope: bid=%#x, envelope=%#x", committedBlobRoot, envelopeBlobRoot)
	}

	payload, err := envelope.Execution()
	if err != nil {
		return errors.Wrap(err, "could not get execution payload from envelope")
	}
	withdrawals, err := payload.Withdrawals()
	if err != nil {
		return errors.Wrap(err, "could not get withdrawals from payload")
	}

	ok, err := st.WithdrawalsMatchPayloadExpected(withdrawals)
	if err != nil {
		return errors.Wrap(err, "could not validate payload withdrawals")
	}
	if !ok {
		return errors.New("payload withdrawals do not match expected withdrawals")
	}

	if latestBid.GasLimit() != payload.GasLimit() {
		return errors.Errorf("committed bid gas limit does not match payload gas limit: bid=%d, payload=%d", latestBid.GasLimit(), payload.GasLimit())
	}

	latestBidPrevRandao := latestBid.PrevRandao()
	if !bytes.Equal(payload.PrevRandao(), latestBidPrevRandao[:]) {
		return errors.Errorf("payload prev randao does not match committed bid prev randao: payload=%#x, bid=%#x", payload.PrevRandao(), latestBidPrevRandao)
	}

	bidBlockHash := latestBid.BlockHash()
	payloadBlockHash := payload.BlockHash()
	if !bytes.Equal(bidBlockHash[:], payloadBlockHash) {
		return errors.Errorf("committed bid block hash does not match payload block hash: bid=%#x, payload=%#x", bidBlockHash, payloadBlockHash)
	}

	latestBlockHash, err := st.LatestBlockHash()
	if err != nil {
		return errors.Wrap(err, "could not get latest block hash")
	}
	if !bytes.Equal(payload.ParentHash(), latestBlockHash[:]) {
		return errors.Errorf("payload parent hash does not match state latest block hash: payload=%#x, state=%#x", payload.ParentHash(), latestBlockHash)
	}

	t, err := slots.StartTime(st.GenesisTime(), st.Slot())
	if err != nil {
		return errors.Wrap(err, "could not compute timestamp")
	}
	if payload.Timestamp() != uint64(t.Unix()) {
		return errors.Errorf("payload timestamp does not match expected timestamp: payload=%d, expected=%d", payload.Timestamp(), uint64(t.Unix()))
	}

	cfg := params.BeaconConfig()
	maxBlobsPerBlock := cfg.MaxBlobsPerBlock(envelope.Slot())
	if len(envelopeBlobCommitments) > maxBlobsPerBlock {
		return errors.Errorf("too many blob KZG commitments: got=%d, max=%d", len(envelopeBlobCommitments), maxBlobsPerBlock)
	}

	if err := processExecutionRequests(ctx, st, envelope.ExecutionRequests()); err != nil {
		return errors.Wrap(err, "could not process execution requests")
	}

	if err := st.QueueBuilderPayment(); err != nil {
		return errors.Wrap(err, "could not queue builder payment")
	}

	if err := st.SetExecutionPayloadAvailability(st.Slot(), true); err != nil {
		return errors.Wrap(err, "could not set execution payload availability")
	}

	if err := st.SetLatestBlockHash([32]byte(payload.BlockHash())); err != nil {
		return errors.Wrap(err, "could not set latest block hash")
	}

	r, err := st.HashTreeRoot(ctx)
	if err != nil {
		return errors.Wrap(err, "could not get hash tree root")
	}
	if r != envelope.StateRoot() {
		return fmt.Errorf("state root mismatch: expected %#x, got %#x", envelope.StateRoot(), r)
	}

	return nil
}

func envelopePublicKey(st state.BeaconState, builderIdx primitives.BuilderIndex) (bls.PublicKey, error) {
	if builderIdx == params.BeaconConfig().BuilderIndexSelfBuild {
		return proposerPublicKey(st)
	}
	return builderPublicKey(st, builderIdx)
}

func proposerPublicKey(st state.BeaconState) (bls.PublicKey, error) {
	header := st.LatestBlockHeader()
	if header == nil {
		return nil, fmt.Errorf("latest block header is nil")
	}
	proposerPubkey := st.PubkeyAtIndex(header.ProposerIndex)
	publicKey, err := bls.PublicKeyFromBytes(proposerPubkey[:])
	if err != nil {
		return nil, fmt.Errorf("invalid proposer public key: %w", err)
	}
	return publicKey, nil
}

func builderPublicKey(st state.BeaconState, builderIdx primitives.BuilderIndex) (bls.PublicKey, error) {
	builder, err := st.Builder(builderIdx)
	if err != nil {
		return nil, fmt.Errorf("failed to get builder: %w", err)
	}
	if builder == nil {
		return nil, fmt.Errorf("builder at index %d not found", builderIdx)
	}
	publicKey, err := bls.PublicKeyFromBytes(builder.Pubkey)
	if err != nil {
		return nil, fmt.Errorf("invalid builder public key: %w", err)
	}
	return publicKey, nil
}

// processExecutionRequests processes deposits, withdrawals, and consolidations from execution requests.
// Spec v1.7.0-alpha.0 (pseudocode):
// for op in requests.deposits: process_deposit_request(state, op)
// for op in requests.withdrawals: process_withdrawal_request(state, op)
// for op in requests.consolidations: process_consolidation_request(state, op)
func processExecutionRequests(ctx context.Context, st state.BeaconState, rqs *enginev1.ExecutionRequests) error {
	if err := processDepositRequests(ctx, st, rqs.Deposits); err != nil {
		return errors.Wrap(err, "could not process deposit requests")
	}

	var err error
	st, err = requests.ProcessWithdrawalRequests(ctx, st, rqs.Withdrawals)
	if err != nil {
		return errors.Wrap(err, "could not process withdrawal requests")
	}
	err = requests.ProcessConsolidationRequests(ctx, st, rqs.Consolidations)
	if err != nil {
		return errors.Wrap(err, "could not process consolidation requests")
	}
	return nil
}

// verifyExecutionPayloadEnvelopeSignature verifies the BLS signature on a signed execution payload envelope.
// Spec v1.7.0-alpha.0 (pseudocode):
// builder_index = signed_envelope.message.builder_index
// if builder_index == BUILDER_INDEX_SELF_BUILD:
//     validator_index = state.latest_block_header.proposer_index
//     pubkey = state.validators[validator_index].pubkey
// else:
//     pubkey = state.builders[builder_index].pubkey
//
// signing_root = compute_signing_root(
//     signed_envelope.message, get_domain(state, DOMAIN_BEACON_BUILDER)
// )
// return bls.Verify(pubkey, signing_root, signed_envelope.signature)
func verifyExecutionPayloadEnvelopeSignature(st state.BeaconState, signedEnvelope interfaces.ROSignedExecutionPayloadEnvelope) error {
	envelope, err := signedEnvelope.Envelope()
	if err != nil {
		return fmt.Errorf("failed to get envelope: %w", err)
	}

	builderIdx := envelope.BuilderIndex()
	publicKey, err := envelopePublicKey(st, builderIdx)
	if err != nil {
		return err
	}

	signatureBytes := signedEnvelope.Signature()
	signature, err := bls.SignatureFromBytes(signatureBytes[:])
	if err != nil {
		return fmt.Errorf("invalid signature format: %w", err)
	}

	currentEpoch := slots.ToEpoch(envelope.Slot())
	domain, err := signing.Domain(
		st.Fork(),
		currentEpoch,
		params.BeaconConfig().DomainBeaconBuilder,
		st.GenesisValidatorsRoot(),
	)
	if err != nil {
		return fmt.Errorf("failed to compute signing domain: %w", err)
	}

	signingRoot, err := signedEnvelope.SigningRoot(domain)
	if err != nil {
		return fmt.Errorf("failed to compute signing root: %w", err)
	}

	if !signature.Verify(publicKey, signingRoot[:]) {
		return fmt.Errorf("signature verification failed: %w", signing.ErrSigFailedToVerify)
	}

	return nil
}
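The builder-payment step in ProcessExecutionPayload above leans on an indexing rule that is easy to misread in the spec pseudocode: builder_pending_payments is a two-epoch vector, the first SLOTS_PER_EPOCH entries hold payments queued during the previous epoch, and the slot being processed always maps into the second half at SLOTS_PER_EPOCH + slot % SLOTS_PER_EPOCH. A standalone Go sketch of just that rule, with toy types standing in for the state package (illustrative only, not part of this change):

package main

import "fmt"

const slotsPerEpoch = 32

type pendingPayment struct{ amount uint64 }

// queuePayment mirrors the spec rule: read the current slot's entry from the
// second half of the two-epoch buffer, enqueue it when non-zero, then reset it.
func queuePayment(payments, queued []pendingPayment, slot uint64) ([]pendingPayment, []pendingPayment) {
	idx := slotsPerEpoch + slot%slotsPerEpoch
	if payments[idx].amount > 0 {
		queued = append(queued, payments[idx])
	}
	payments[idx] = pendingPayment{}
	return payments, queued
}

func main() {
	payments := make([]pendingPayment, 2*slotsPerEpoch)
	payments[slotsPerEpoch+5] = pendingPayment{amount: 100}
	var queued []pendingPayment
	payments, queued = queuePayment(payments, queued, 5) // processing slot 5
	fmt.Println(len(queued), payments[slotsPerEpoch+5].amount) // 1 0
}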
@@ -114,32 +114,17 @@ func payloadCommittee(ctx context.Context, st state.ReadOnlyBeaconState, slot pr
 	}
 
 	committeesPerSlot := helpers.SlotCommitteeCount(activeCount)
-	selected := make([]primitives.ValidatorIndex, 0, fieldparams.PTCSize)
-	var i uint64
-	for uint64(len(selected)) < fieldparams.PTCSize {
-		if ctx.Err() != nil {
-			return nil, ctx.Err()
-		}
-
-		for committeeIndex := primitives.CommitteeIndex(0); committeeIndex < primitives.CommitteeIndex(committeesPerSlot); committeeIndex++ {
-			if uint64(len(selected)) >= fieldparams.PTCSize {
-				break
-			}
-
-			committee, err := helpers.BeaconCommitteeFromState(ctx, st, slot, committeeIndex)
-			if err != nil {
-				return nil, errors.Wrapf(err, "failed to get beacon committee %d", committeeIndex)
-			}
-
-			selected, i, err = selectByBalanceFill(ctx, st, committee, seed, selected, i)
-			if err != nil {
-				return nil, errors.Wrapf(err, "failed to sample beacon committee %d", committeeIndex)
-			}
-		}
-	}
-
-	return selected, nil
+	out := make([]primitives.ValidatorIndex, 0, activeCount/uint64(params.BeaconConfig().SlotsPerEpoch))
+	for i := primitives.CommitteeIndex(0); i < primitives.CommitteeIndex(committeesPerSlot); i++ {
+		committee, err := helpers.BeaconCommitteeFromState(ctx, st, slot, i)
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to get beacon committee %d", i)
+		}
+		out = append(out, committee...)
+	}
+	return selectByBalance(ctx, st, out, seed, fieldparams.PTCSize)
 }
 
 // ptcSeed computes the seed for the payload timeliness committee.
@@ -163,39 +148,33 @@ func ptcSeed(st state.ReadOnlyBeaconState, epoch primitives.Epoch, slot primitiv
 // if compute_balance_weighted_acceptance(state, indices[next], seed, i):
 //     selected.append(indices[next])
 //     i += 1
-func selectByBalanceFill(
-	ctx context.Context,
-	st state.ReadOnlyBeaconState,
-	candidates []primitives.ValidatorIndex,
-	seed [32]byte,
-	selected []primitives.ValidatorIndex,
-	i uint64,
-) ([]primitives.ValidatorIndex, uint64, error) {
+func selectByBalance(ctx context.Context, st state.ReadOnlyBeaconState, candidates []primitives.ValidatorIndex, seed [32]byte, count uint64) ([]primitives.ValidatorIndex, error) {
+	if len(candidates) == 0 {
+		return nil, errors.New("no candidates for balance weighted selection")
+	}
 	hashFunc := hash.CustomSHA256Hasher()
 	// Pre-allocate buffer for hash input: seed (32 bytes) + round counter (8 bytes).
 	var buf [40]byte
 	copy(buf[:], seed[:])
 	maxBalance := params.BeaconConfig().MaxEffectiveBalanceElectra
-	for _, idx := range candidates {
+	selected := make([]primitives.ValidatorIndex, 0, count)
+	total := uint64(len(candidates))
+	for i := uint64(0); uint64(len(selected)) < count; i++ {
 		if ctx.Err() != nil {
-			return nil, i, ctx.Err()
+			return nil, ctx.Err()
 		}
+		idx := candidates[i%total]
 		ok, err := acceptByBalance(st, idx, buf[:], hashFunc, maxBalance, i)
 		if err != nil {
-			return nil, i, err
+			return nil, err
 		}
 		if ok {
 			selected = append(selected, idx)
 		}
-		if uint64(len(selected)) == fieldparams.PTCSize {
-			break
-		}
-		i++
 	}
-	return selected, i, nil
+	return selected, nil
 }
 
 // acceptByBalance determines if a validator is accepted based on its effective balance.
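The rewritten selectByBalance above follows the spec pseudocode quoted in its comment: walk the candidate list cyclically and accept each validator with probability proportional to its effective balance, drawing randomness from hash(seed ++ round). A self-contained Go toy of that acceptance loop; the 16-bit sample in accept is an assumption for illustration, and the repo's acceptByBalance (not shown in this hunk) remains authoritative:

package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

const maxBalance = uint64(2048_000_000_000) // toy stand-in for MaxEffectiveBalanceElectra (Gwei)

// accept flips a balance-weighted coin: a 16-bit sample from hash(seed ++ round)
// is compared against balance/maxBalance, so higher-balance validators are
// accepted proportionally more often.
func accept(seed [32]byte, round, balance uint64) bool {
	var buf [40]byte
	copy(buf[:], seed[:])
	binary.LittleEndian.PutUint64(buf[32:], round)
	h := sha256.Sum256(buf[:])
	sample := uint64(binary.LittleEndian.Uint16(h[:2]))
	return balance*65536 >= maxBalance*sample
}

// selectByBalance cycles through candidates until count acceptances occur.
// Note: a candidate can appear more than once in this toy; dedup is omitted.
func selectByBalance(candidates []int, balances []uint64, seed [32]byte, count int) []int {
	selected := make([]int, 0, count)
	total := uint64(len(candidates))
	for i := uint64(0); len(selected) < count; i++ {
		idx := candidates[i%total]
		if accept(seed, i, balances[idx]) {
			selected = append(selected, idx)
		}
	}
	return selected
}

func main() {
	seed := sha256.Sum256([]byte("ptc"))
	fmt.Println(selectByBalance([]int{0, 1, 2, 3}, []uint64{32_000_000_000, 64_000_000_000, 2048_000_000_000, 1_000_000_000}, seed, 2))
}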
360
beacon-chain/core/gloas/payload_test.go
Normal file
@@ -0,0 +1,360 @@
package gloas

import (
	"bytes"
	"context"
	"testing"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
	state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v7/crypto/bls"
	"github.com/OffchainLabs/prysm/v7/encoding/ssz"
	enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v7/testing/require"
	"github.com/OffchainLabs/prysm/v7/time/slots"
	"google.golang.org/protobuf/proto"
)

type payloadFixture struct {
	state       state.BeaconState
	signed      interfaces.ROSignedExecutionPayloadEnvelope
	signedProto *ethpb.SignedExecutionPayloadEnvelope
	envelope    *ethpb.ExecutionPayloadEnvelope
	payload     *enginev1.ExecutionPayloadDeneb
	slot        primitives.Slot
}

func buildPayloadFixture(t *testing.T, mutate func(payload *enginev1.ExecutionPayloadDeneb, bid *ethpb.ExecutionPayloadBid, envelope *ethpb.ExecutionPayloadEnvelope)) payloadFixture {
	t.Helper()

	cfg := params.BeaconConfig()
	slot := primitives.Slot(5)
	builderIdx := primitives.BuilderIndex(0)

	sk, err := bls.RandKey()
	require.NoError(t, err)
	pk := sk.PublicKey().Marshal()

	randao := bytes.Repeat([]byte{0xAA}, 32)
	parentHash := bytes.Repeat([]byte{0xBB}, 32)
	blockHash := bytes.Repeat([]byte{0xCC}, 32)

	withdrawals := []*enginev1.Withdrawal{
		{Index: 0, ValidatorIndex: 1, Address: bytes.Repeat([]byte{0x01}, 20), Amount: 0},
	}

	blobCommitments := [][]byte{}
	blobRoot, err := ssz.KzgCommitmentsRoot(blobCommitments)
	require.NoError(t, err)

	payload := &enginev1.ExecutionPayloadDeneb{
		ParentHash:    parentHash,
		FeeRecipient:  bytes.Repeat([]byte{0x01}, 20),
		StateRoot:     bytes.Repeat([]byte{0x02}, 32),
		ReceiptsRoot:  bytes.Repeat([]byte{0x03}, 32),
		LogsBloom:     bytes.Repeat([]byte{0x04}, 256),
		PrevRandao:    randao,
		BlockNumber:   1,
		GasLimit:      1,
		GasUsed:       0,
		Timestamp:     100,
		ExtraData:     []byte{},
		BaseFeePerGas: bytes.Repeat([]byte{0x05}, 32),
		BlockHash:     blockHash,
		Transactions:  [][]byte{},
		Withdrawals:   withdrawals,
		BlobGasUsed:   0,
		ExcessBlobGas: 0,
	}

	bid := &ethpb.ExecutionPayloadBid{
		ParentBlockHash:        parentHash,
		ParentBlockRoot:        bytes.Repeat([]byte{0xDD}, 32),
		BlockHash:              blockHash,
		PrevRandao:             randao,
		GasLimit:               1,
		BuilderIndex:           builderIdx,
		Slot:                   slot,
		Value:                  0,
		ExecutionPayment:       0,
		BlobKzgCommitmentsRoot: blobRoot[:],
		FeeRecipient:           bytes.Repeat([]byte{0xEE}, 20),
	}

	header := &ethpb.BeaconBlockHeader{
		Slot:       slot,
		ParentRoot: bytes.Repeat([]byte{0x11}, 32),
		StateRoot:  bytes.Repeat([]byte{0x22}, 32),
		BodyRoot:   bytes.Repeat([]byte{0x33}, 32),
	}
	headerRoot, err := header.HashTreeRoot()
	require.NoError(t, err)

	envelope := &ethpb.ExecutionPayloadEnvelope{
		Slot:               slot,
		BuilderIndex:       builderIdx,
		BeaconBlockRoot:    headerRoot[:],
		Payload:            payload,
		BlobKzgCommitments: blobCommitments,
		ExecutionRequests:  &enginev1.ExecutionRequests{},
	}

	if mutate != nil {
		mutate(payload, bid, envelope)
	}

	genesisRoot := bytes.Repeat([]byte{0xAB}, 32)
	blockRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
	stateRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
	for i := range blockRoots {
		blockRoots[i] = bytes.Repeat([]byte{0x44}, 32)
		stateRoots[i] = bytes.Repeat([]byte{0x55}, 32)
	}
	randaoMixes := make([][]byte, cfg.EpochsPerHistoricalVector)
	for i := range randaoMixes {
		randaoMixes[i] = randao
	}

	withdrawalCreds := make([]byte, 32)
	withdrawalCreds[0] = cfg.ETH1AddressWithdrawalPrefixByte

	eth1Data := &ethpb.Eth1Data{
		DepositRoot:  bytes.Repeat([]byte{0x66}, 32),
		DepositCount: 0,
		BlockHash:    bytes.Repeat([]byte{0x77}, 32),
	}

	vals := []*ethpb.Validator{
		{
			PublicKey:             pk,
			WithdrawalCredentials: withdrawalCreds,
			EffectiveBalance:      cfg.MinActivationBalance + 1_000,
		},
	}
	balances := []uint64{cfg.MinActivationBalance + 1_000}

	payments := make([]*ethpb.BuilderPendingPayment, cfg.SlotsPerEpoch*2)
	for i := range payments {
		payments[i] = &ethpb.BuilderPendingPayment{
			Withdrawal: &ethpb.BuilderPendingWithdrawal{
				FeeRecipient: make([]byte, 20),
			},
		}
	}

	executionPayloadAvailability := make([]byte, cfg.SlotsPerHistoricalRoot/8)

	builders := make([]*ethpb.Builder, builderIdx+1)
	builders[builderIdx] = &ethpb.Builder{
		Pubkey:            pk,
		Version:           []byte{0},
		ExecutionAddress:  bytes.Repeat([]byte{0x09}, 20),
		Balance:           0,
		DepositEpoch:      0,
		WithdrawableEpoch: 0,
	}

	genesisTime := uint64(0)
	slotSeconds := cfg.SecondsPerSlot * uint64(slot)
	if payload.Timestamp > slotSeconds {
		genesisTime = payload.Timestamp - slotSeconds
	}

	stProto := &ethpb.BeaconStateGloas{
		Slot:                  slot,
		GenesisTime:           genesisTime,
		GenesisValidatorsRoot: genesisRoot,
		Fork: &ethpb.Fork{
			CurrentVersion:  bytes.Repeat([]byte{0x01}, 4),
			PreviousVersion: bytes.Repeat([]byte{0x01}, 4),
			Epoch:           0,
		},
		LatestBlockHeader:            header,
		BlockRoots:                   blockRoots,
		StateRoots:                   stateRoots,
		RandaoMixes:                  randaoMixes,
		Eth1Data:                     eth1Data,
		Validators:                   vals,
		Balances:                     balances,
		LatestBlockHash:              payload.ParentHash,
		LatestExecutionPayloadBid:    bid,
		BuilderPendingPayments:       payments,
		ExecutionPayloadAvailability: executionPayloadAvailability,
		BuilderPendingWithdrawals:    []*ethpb.BuilderPendingWithdrawal{},
		PayloadExpectedWithdrawals:   payload.Withdrawals,
		Builders:                     builders,
	}

	st, err := state_native.InitializeFromProtoGloas(stProto)
	require.NoError(t, err)

	expected := st.Copy()
	ctx := context.Background()
	require.NoError(t, processExecutionRequests(ctx, expected, envelope.ExecutionRequests))
	require.NoError(t, expected.QueueBuilderPayment())
	require.NoError(t, expected.SetExecutionPayloadAvailability(slot, true))
	var blockHashArr [32]byte
	copy(blockHashArr[:], payload.BlockHash)
	require.NoError(t, expected.SetLatestBlockHash(blockHashArr))
	expectedRoot, err := expected.HashTreeRoot(ctx)
	require.NoError(t, err)
	envelope.StateRoot = expectedRoot[:]

	epoch := slots.ToEpoch(slot)
	domain, err := signing.Domain(st.Fork(), epoch, cfg.DomainBeaconBuilder, st.GenesisValidatorsRoot())
	require.NoError(t, err)
	signingRoot, err := signing.ComputeSigningRoot(envelope, domain)
	require.NoError(t, err)
	signature := sk.Sign(signingRoot[:]).Marshal()

	signedProto := &ethpb.SignedExecutionPayloadEnvelope{
		Message:   envelope,
		Signature: signature,
	}
	signed, err := blocks.WrappedROSignedExecutionPayloadEnvelope(signedProto)
	require.NoError(t, err)

	return payloadFixture{
		state:       st,
		signed:      signed,
		signedProto: signedProto,
		envelope:    envelope,
		payload:     payload,
		slot:        slot,
	}
}

func TestProcessExecutionPayload_Success(t *testing.T) {
	fixture := buildPayloadFixture(t, nil)
	require.NoError(t, ProcessExecutionPayload(t.Context(), fixture.state, fixture.signed))

	latestHash, err := fixture.state.LatestBlockHash()
	require.NoError(t, err)
	var expectedHash [32]byte
	copy(expectedHash[:], fixture.payload.BlockHash)
	require.Equal(t, expectedHash, latestHash)

	slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
	paymentIndex := slotsPerEpoch + (fixture.slot % slotsPerEpoch)
	payments, err := fixture.state.BuilderPendingPayments()
	require.NoError(t, err)
	payment := payments[paymentIndex]
	require.NotNil(t, payment)
	require.Equal(t, primitives.Gwei(0), payment.Withdrawal.Amount)
}

func TestProcessExecutionPayload_PrevRandaoMismatch(t *testing.T) {
	fixture := buildPayloadFixture(t, func(_ *enginev1.ExecutionPayloadDeneb, bid *ethpb.ExecutionPayloadBid, _ *ethpb.ExecutionPayloadEnvelope) {
		bid.PrevRandao = bytes.Repeat([]byte{0xFF}, 32)
	})

	err := ProcessExecutionPayload(t.Context(), fixture.state, fixture.signed)
	require.ErrorContains(t, "prev randao", err)
}

func TestQueueBuilderPayment_ZeroAmountClearsSlot(t *testing.T) {
	fixture := buildPayloadFixture(t, nil)

	require.NoError(t, fixture.state.QueueBuilderPayment())

	slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
	paymentIndex := slotsPerEpoch + (fixture.slot % slotsPerEpoch)
	payments, err := fixture.state.BuilderPendingPayments()
	require.NoError(t, err)
	payment := payments[paymentIndex]
	require.NotNil(t, payment)
	require.Equal(t, primitives.Gwei(0), payment.Withdrawal.Amount)
}

func TestVerifyExecutionPayloadEnvelopeSignature(t *testing.T) {
	fixture := buildPayloadFixture(t, nil)

	t.Run("self build", func(t *testing.T) {
		proposerSk, err := bls.RandKey()
		require.NoError(t, err)
		proposerPk := proposerSk.PublicKey().Marshal()

		stPb, ok := fixture.state.ToProtoUnsafe().(*ethpb.BeaconStateGloas)
		require.Equal(t, true, ok)
		stPb = proto.Clone(stPb).(*ethpb.BeaconStateGloas)
		stPb.Validators[0].PublicKey = proposerPk
		st, err := state_native.InitializeFromProtoUnsafeGloas(stPb)
		require.NoError(t, err)

		msg := proto.Clone(fixture.signedProto.Message).(*ethpb.ExecutionPayloadEnvelope)
		msg.BuilderIndex = params.BeaconConfig().BuilderIndexSelfBuild
		msg.BlobKzgCommitments = [][]byte{}

		epoch := slots.ToEpoch(msg.Slot)
		domain, err := signing.Domain(st.Fork(), epoch, params.BeaconConfig().DomainBeaconBuilder, st.GenesisValidatorsRoot())
		require.NoError(t, err)
		signingRoot, err := signing.ComputeSigningRoot(msg, domain)
		require.NoError(t, err)
		signature := proposerSk.Sign(signingRoot[:]).Marshal()

		signedProto := &ethpb.SignedExecutionPayloadEnvelope{
			Message:   msg,
			Signature: signature,
		}
		signed, err := blocks.WrappedROSignedExecutionPayloadEnvelope(signedProto)
		require.NoError(t, err)

		require.NoError(t, verifyExecutionPayloadEnvelopeSignature(st, signed))
	})

	t.Run("builder", func(t *testing.T) {
		signed, err := blocks.WrappedROSignedExecutionPayloadEnvelope(fixture.signedProto)
		require.NoError(t, err)

		require.NoError(t, verifyExecutionPayloadEnvelopeSignature(fixture.state, signed))
	})

	t.Run("invalid signature", func(t *testing.T) {
		t.Run("self build", func(t *testing.T) {
			proposerSk, err := bls.RandKey()
			require.NoError(t, err)
			proposerPk := proposerSk.PublicKey().Marshal()

			stPb, ok := fixture.state.ToProtoUnsafe().(*ethpb.BeaconStateGloas)
			require.Equal(t, true, ok)
			stPb = proto.Clone(stPb).(*ethpb.BeaconStateGloas)
			stPb.Validators[0].PublicKey = proposerPk
			st, err := state_native.InitializeFromProtoUnsafeGloas(stPb)
			require.NoError(t, err)

			msg := proto.Clone(fixture.signedProto.Message).(*ethpb.ExecutionPayloadEnvelope)
			msg.BuilderIndex = params.BeaconConfig().BuilderIndexSelfBuild
			if msg.BlobKzgCommitments == nil {
				msg.BlobKzgCommitments = [][]byte{}
			}

			signedProto := &ethpb.SignedExecutionPayloadEnvelope{
				Message:   msg,
				Signature: bytes.Repeat([]byte{0xFF}, 96),
			}
			badSigned, err := blocks.WrappedROSignedExecutionPayloadEnvelope(signedProto)
			require.NoError(t, err)

			err = verifyExecutionPayloadEnvelopeSignature(st, badSigned)
			require.ErrorContains(t, "invalid signature format", err)
		})

		t.Run("builder", func(t *testing.T) {
			signedProto := &ethpb.SignedExecutionPayloadEnvelope{
				Message:   fixture.signedProto.Message,
				Signature: bytes.Repeat([]byte{0xFF}, 96),
			}
			badSigned, err := blocks.WrappedROSignedExecutionPayloadEnvelope(signedProto)
			require.NoError(t, err)

			err = verifyExecutionPayloadEnvelopeSignature(fixture.state, badSigned)
			require.ErrorContains(t, "invalid signature format", err)
		})
	})
}
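The fixture's mutate hook makes new failure cases cheap to add: adjust one field before the state is built, then assert on the error substring. A hypothetical extra case in the same shape as TestProcessExecutionPayload_PrevRandaoMismatch above (not part of this diff; the asserted substring comes from the gas-limit check in ProcessExecutionPayload):

func TestProcessExecutionPayload_GasLimitMismatch(t *testing.T) {
	fixture := buildPayloadFixture(t, func(_ *enginev1.ExecutionPayloadDeneb, bid *ethpb.ExecutionPayloadBid, _ *ethpb.ExecutionPayloadEnvelope) {
		bid.GasLimit = 999 // diverge from the payload's GasLimit of 1
	})

	err := ProcessExecutionPayload(t.Context(), fixture.state, fixture.signed)
	require.ErrorContains(t, "gas limit", err)
}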
@@ -33,7 +33,6 @@ go_library(
         "@com_github_pkg_errors//:go_default_library",
         "@com_github_prometheus_client_golang//prometheus:go_default_library",
         "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
-        "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
         "@org_golang_x_sync//errgroup:go_default_library",
     ],
 )
@@ -1,15 +1,11 @@
 package peerdas
 
 import (
-	stderrors "errors"
-	"iter"
-
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
 	fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
 	"github.com/OffchainLabs/prysm/v7/config/params"
 	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
 	"github.com/OffchainLabs/prysm/v7/container/trie"
-	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
 	"github.com/ethereum/go-ethereum/p2p/enr"
 	"github.com/pkg/errors"
 )
@@ -20,7 +16,6 @@ var (
 	ErrIndexTooLarge         = errors.New("column index is larger than the specified columns count")
 	ErrNoKzgCommitments      = errors.New("no KZG commitments found")
 	ErrMismatchLength        = errors.New("mismatch in the length of the column, commitments or proofs")
-	ErrEmptySegment          = errors.New("empty segment in batch")
 	ErrInvalidKZGProof       = errors.New("invalid KZG proof")
 	ErrBadRootLength         = errors.New("bad root length")
 	ErrInvalidInclusionProof = errors.New("invalid inclusion proof")
@@ -62,127 +57,62 @@ func VerifyDataColumnSidecar(sidecar blocks.RODataColumn) error {
 	return nil
 }
 
-// CellProofBundleSegment is returned when a batch fails. The caller can call
-// the `.Verify` method to verify just this segment.
-type CellProofBundleSegment struct {
-	indices     []uint64
-	commitments []kzg.Bytes48
-	cells       []kzg.Cell
-	proofs      []kzg.Bytes48
-}
-
-// Verify verifies this segment without batching.
-func (s CellProofBundleSegment) Verify() error {
-	if len(s.cells) == 0 {
-		return ErrEmptySegment
-	}
-	verified, err := kzg.VerifyCellKZGProofBatch(s.commitments, s.indices, s.cells, s.proofs)
-	if err != nil {
-		return stderrors.Join(err, ErrInvalidKZGProof)
-	}
-	if !verified {
-		return ErrInvalidKZGProof
-	}
-	return nil
-}
-
-func VerifyDataColumnsCellsKZGProofs(sizeHint int, cellProofsIter iter.Seq[blocks.CellProofBundle]) error {
-	// ignore the failed segment list since we are just passing in one segment.
-	_, err := BatchVerifyDataColumnsCellsKZGProofs(sizeHint, []iter.Seq[blocks.CellProofBundle]{cellProofsIter})
-	return err
-}
-
-// BatchVerifyDataColumnsCellsKZGProofs verifies if the KZG proofs are correct.
+// VerifyDataColumnsSidecarKZGProofs verifies if the KZG proofs are correct.
 // Note: We are slightly deviating from the specification here:
 // The specification verifies the KZG proofs for each sidecar separately,
 // while we are verifying all the KZG proofs from multiple sidecars in a batch.
 // This is done to improve performance since the internal KZG library is way more
-// efficient when verifying in batch. If the batch fails, the failed segments
-// are returned to the caller so that they may try segment by segment without
-// batching. On success the failed segment list is empty.
-//
+// efficient when verifying in batch.
 // https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#verify_data_column_sidecar_kzg_proofs
-func BatchVerifyDataColumnsCellsKZGProofs(sizeHint int, cellProofsIters []iter.Seq[blocks.CellProofBundle]) ( /* failed segment list */ []CellProofBundleSegment, error) {
-	commitments := make([]kzg.Bytes48, 0, sizeHint)
-	indices := make([]uint64, 0, sizeHint)
-	cells := make([]kzg.Cell, 0, sizeHint)
-	proofs := make([]kzg.Bytes48, 0, sizeHint)
-
-	var anySegmentEmpty bool
-	var segments []CellProofBundleSegment
-	for _, cellProofsIter := range cellProofsIters {
-		startIdx := len(cells)
-		for bundle := range cellProofsIter {
+func VerifyDataColumnsSidecarKZGProofs(sidecars []blocks.RODataColumn) error {
+	// Compute the total count.
+	count := 0
+	for _, sidecar := range sidecars {
+		count += len(sidecar.Column)
+	}
+
+	commitments := make([]kzg.Bytes48, 0, count)
+	indices := make([]uint64, 0, count)
+	cells := make([]kzg.Cell, 0, count)
+	proofs := make([]kzg.Bytes48, 0, count)
+
+	for _, sidecar := range sidecars {
+		for i := range sidecar.Column {
 			var (
 				commitment kzg.Bytes48
 				cell       kzg.Cell
 				proof      kzg.Bytes48
 			)
 
-			if len(bundle.Commitment) != len(commitment) ||
-				len(bundle.Cell) != len(cell) ||
-				len(bundle.Proof) != len(proof) {
-				return nil, ErrMismatchLength
+			commitmentBytes := sidecar.KzgCommitments[i]
+			cellBytes := sidecar.Column[i]
+			proofBytes := sidecar.KzgProofs[i]
+
+			if len(commitmentBytes) != len(commitment) ||
+				len(cellBytes) != len(cell) ||
+				len(proofBytes) != len(proof) {
+				return ErrMismatchLength
 			}
 
-			copy(commitment[:], bundle.Commitment)
-			copy(cell[:], bundle.Cell)
-			copy(proof[:], bundle.Proof)
+			copy(commitment[:], commitmentBytes)
+			copy(cell[:], cellBytes)
+			copy(proof[:], proofBytes)
 
 			commitments = append(commitments, commitment)
-			indices = append(indices, bundle.ColumnIndex)
+			indices = append(indices, sidecar.Index)
 			cells = append(cells, cell)
 			proofs = append(proofs, proof)
 		}
-		if len(cells[startIdx:]) == 0 {
-			anySegmentEmpty = true
-		}
-		segments = append(segments, CellProofBundleSegment{
-			indices:     indices[startIdx:],
-			commitments: commitments[startIdx:],
-			cells:       cells[startIdx:],
-			proofs:      proofs[startIdx:],
-		})
-	}
-
-	if anySegmentEmpty {
-		return segments, ErrEmptySegment
 	}
 
 	// Batch verify that the cells match the corresponding commitments and proofs.
 	verified, err := kzg.VerifyCellKZGProofBatch(commitments, indices, cells, proofs)
 	if err != nil {
-		return segments, stderrors.Join(err, ErrInvalidKZGProof)
+		return errors.Wrap(err, "verify cell KZG proof batch")
 	}
 
 	if !verified {
-		return segments, ErrInvalidKZGProof
+		return ErrInvalidKZGProof
 	}
 
-	return nil, nil
-}
-
-// verifyKzgCommitmentsInclusionProof is the shared implementation for inclusion proof verification.
-func verifyKzgCommitmentsInclusionProof(bodyRoot []byte, kzgCommitments [][]byte, inclusionProof [][]byte) error {
-	if len(bodyRoot) != fieldparams.RootLength {
-		return ErrBadRootLength
-	}
-
-	leaves := blocks.LeavesFromCommitments(kzgCommitments)
-
-	sparse, err := trie.GenerateTrieFromItems(leaves, fieldparams.LogMaxBlobCommitments)
-	if err != nil {
-		return errors.Wrap(err, "generate trie from items")
-	}
-
-	hashTreeRoot, err := sparse.HashTreeRoot()
-	if err != nil {
-		return errors.Wrap(err, "hash tree root")
-	}
-
-	verified := trie.VerifyMerkleProof(bodyRoot, hashTreeRoot[:], kzgPosition, inclusionProof)
-	if !verified {
-		return ErrInvalidInclusionProof
-	}
-
 	return nil
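The deviation note above is the heart of this rewrite: instead of one pairing check per sidecar, every (commitment, column index, cell, proof) tuple is flattened into parallel slices and verified in a single batched call. Reduced to a standalone Go toy with a stubbed verifier in place of kzg.VerifyCellKZGProofBatch (illustrative shapes only):

package main

import (
	"errors"
	"fmt"
)

type (
	bytes48 [48]byte
	cellT   [32]byte // toy size; real cells are much larger
)

// batchVerify stands in for kzg.VerifyCellKZGProofBatch, which amortizes the
// expensive pairing work across all inputs in one call.
func batchVerify(commitments []bytes48, indices []uint64, cells []cellT, proofs []bytes48) (bool, error) {
	if len(commitments) != len(indices) || len(cells) != len(proofs) {
		return false, errors.New("length mismatch")
	}
	return true, nil
}

type sidecar struct {
	index       uint64
	cells       []cellT
	commitments []bytes48
	proofs      []bytes48
}

// verifyAll flattens every sidecar into four parallel slices and verifies once,
// mirroring the shape of VerifyDataColumnsSidecarKZGProofs above.
func verifyAll(sidecars []sidecar) error {
	var (
		commitments []bytes48
		indices     []uint64
		cells       []cellT
		proofs      []bytes48
	)
	for _, sc := range sidecars {
		for i := range sc.cells {
			commitments = append(commitments, sc.commitments[i])
			indices = append(indices, sc.index)
			cells = append(cells, sc.cells[i])
			proofs = append(proofs, sc.proofs[i])
		}
	}
	ok, err := batchVerify(commitments, indices, cells, proofs)
	if err != nil {
		return err
	}
	if !ok {
		return errors.New("invalid KZG proof")
	}
	return nil
}

func main() {
	sc := sidecar{index: 3, cells: make([]cellT, 2), commitments: make([]bytes48, 2), proofs: make([]bytes48, 2)}
	fmt.Println(verifyAll([]sidecar{sc}))
}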
@@ -194,23 +124,30 @@ func VerifyDataColumnSidecarInclusionProof(sidecar blocks.RODataColumn) error {
 	if sidecar.SignedBlockHeader == nil || sidecar.SignedBlockHeader.Header == nil {
 		return ErrNilBlockHeader
 	}
-	return verifyKzgCommitmentsInclusionProof(
-		sidecar.SignedBlockHeader.Header.BodyRoot,
-		sidecar.KzgCommitments,
-		sidecar.KzgCommitmentsInclusionProof,
-	)
-}
 
-// VerifyPartialDataColumnHeaderInclusionProof verifies if the KZG commitments are included in the beacon block.
-func VerifyPartialDataColumnHeaderInclusionProof(header *ethpb.PartialDataColumnHeader) error {
-	if header.SignedBlockHeader == nil || header.SignedBlockHeader.Header == nil {
-		return ErrNilBlockHeader
-	}
-	return verifyKzgCommitmentsInclusionProof(
-		header.SignedBlockHeader.Header.BodyRoot,
-		header.KzgCommitments,
-		header.KzgCommitmentsInclusionProof,
-	)
+	root := sidecar.SignedBlockHeader.Header.BodyRoot
+	if len(root) != fieldparams.RootLength {
+		return ErrBadRootLength
+	}
+
+	leaves := blocks.LeavesFromCommitments(sidecar.KzgCommitments)
+
+	sparse, err := trie.GenerateTrieFromItems(leaves, fieldparams.LogMaxBlobCommitments)
+	if err != nil {
+		return errors.Wrap(err, "generate trie from items")
+	}
+
+	hashTreeRoot, err := sparse.HashTreeRoot()
+	if err != nil {
+		return errors.Wrap(err, "hash tree root")
+	}
+
+	verified := trie.VerifyMerkleProof(root, hashTreeRoot[:], kzgPosition, sidecar.KzgCommitmentsInclusionProof)
+	if !verified {
+		return ErrInvalidInclusionProof
+	}
+
+	return nil
 }
 
 // ComputeSubnetForDataColumnSidecar computes the subnet for a data column sidecar.
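The inlined inclusion-proof check above bottoms out in trie.VerifyMerkleProof, which is the SSZ is_valid_merkle_branch walk. A standalone Go sketch of that primitive (sha256 plus an index-bit walk; names are illustrative, not the trie package's API):

package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
)

// isValidMerkleBranch climbs from a leaf toward the root, hashing with each
// sibling in branch; at depth d, bit d of index says whether the current node
// is a right child (sibling on the left) or a left child (sibling on the right).
func isValidMerkleBranch(leaf [32]byte, branch [][32]byte, index uint64, root [32]byte) bool {
	node := leaf
	for depth, sibling := range branch {
		if (index>>uint(depth))&1 == 1 {
			node = sha256.Sum256(append(sibling[:], node[:]...))
		} else {
			node = sha256.Sum256(append(node[:], sibling[:]...))
		}
	}
	return bytes.Equal(node[:], root[:])
}

func main() {
	leaf := sha256.Sum256([]byte("commitments root"))
	sibling := sha256.Sum256([]byte("sibling"))
	root := sha256.Sum256(append(leaf[:], sibling[:]...)) // leaf is the left child
	fmt.Println(isValidMerkleBranch(leaf, [][32]byte{sibling}, 0, root)) // true
}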
@@ -3,7 +3,6 @@ package peerdas_test
 import (
 	"crypto/rand"
 	"fmt"
-	"iter"
 	"testing"
 
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
@@ -73,7 +72,7 @@ func TestVerifyDataColumnSidecarKZGProofs(t *testing.T) {
 		sidecars := generateRandomSidecars(t, seed, blobCount)
 		sidecars[0].Column[0] = sidecars[0].Column[0][:len(sidecars[0].Column[0])-1] // Remove one byte to create size mismatch
 
-		err := peerdas.VerifyDataColumnsCellsKZGProofs(0, blocks.RODataColumnsToCellProofBundles(sidecars))
+		err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
 		require.ErrorIs(t, err, peerdas.ErrMismatchLength)
 	})
 
@@ -81,15 +80,14 @@
 		sidecars := generateRandomSidecars(t, seed, blobCount)
 		sidecars[0].Column[0][0]++ // It is OK to overflow
 
-		err := peerdas.VerifyDataColumnsCellsKZGProofs(0, blocks.RODataColumnsToCellProofBundles(sidecars))
+		err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
 		require.ErrorIs(t, err, peerdas.ErrInvalidKZGProof)
 	})
 
 	t.Run("nominal", func(t *testing.T) {
 		sidecars := generateRandomSidecars(t, seed, blobCount)
-		failedSegments, err := peerdas.BatchVerifyDataColumnsCellsKZGProofs(blobCount, []iter.Seq[blocks.CellProofBundle]{blocks.RODataColumnsToCellProofBundles(sidecars)})
+		err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
 		require.NoError(t, err)
-		require.Equal(t, 0, len(failedSegments))
 	})
 }
 
@@ -275,7 +273,7 @@ func BenchmarkVerifyDataColumnSidecarKZGProofs_SameCommitments_NoBatch(b *testin
 	for _, sidecar := range sidecars {
 		sidecars := []blocks.RODataColumn{sidecar}
 		b.StartTimer()
-		err := peerdas.VerifyDataColumnsCellsKZGProofs(0, blocks.RODataColumnsToCellProofBundles(sidecars))
+		err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
 		b.StopTimer()
 		require.NoError(b, err)
 	}
@@ -310,7 +308,7 @@ func BenchmarkVerifyDataColumnSidecarKZGProofs_DiffCommitments_Batch(b *testing.
 	}
 
 	b.StartTimer()
-	err := peerdas.VerifyDataColumnsCellsKZGProofs(0, blocks.RODataColumnsToCellProofBundles(allSidecars))
+	err := peerdas.VerifyDataColumnsSidecarKZGProofs(allSidecars)
 	b.StopTimer()
 	require.NoError(b, err)
 }
@@ -343,7 +341,7 @@ func BenchmarkVerifyDataColumnSidecarKZGProofs_DiffCommitments_Batch4(b *testing
 
 	for _, sidecars := range allSidecars {
 		b.StartTimer()
-		err := peerdas.VerifyDataColumnsCellsKZGProofs(len(allSidecars), blocks.RODataColumnsToCellProofBundles(sidecars))
+		err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
 		b.StopTimer()
 		require.NoError(b, err)
 	}
@@ -5,7 +5,6 @@ import (
 	"sync"
 	"time"
 
-	"github.com/OffchainLabs/go-bitfield"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
 	fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
 	"github.com/OffchainLabs/prysm/v7/config/params"
@@ -340,8 +339,7 @@ func ComputeCellsAndProofsFromFlat(blobs [][]byte, cellProofs [][]byte) ([][]kzg
 }
 
 // ComputeCellsAndProofsFromStructured computes the cells and proofs from blobs and cell proofs.
-// commitmentCount is required to return the correct sized bitlist even if we see a nil slice of blobsAndProofs.
-func ComputeCellsAndProofsFromStructured(commitmentCount uint64, blobsAndProofs []*pb.BlobAndProofV2) (bitfield.Bitlist /* parts included */, [][]kzg.Cell, [][]kzg.Proof, error) {
+func ComputeCellsAndProofsFromStructured(blobsAndProofs []*pb.BlobAndProofV2) ([][]kzg.Cell, [][]kzg.Proof, error) {
 	start := time.Now()
 	defer func() {
 		cellsAndProofsFromStructuredComputationTime.Observe(float64(time.Since(start).Milliseconds()))
@@ -349,24 +347,14 @@ func ComputeCellsAndProofsFromStructured(commitmentCount uint64, blobsAndProofs
 
 	var wg errgroup.Group
 
-	var blobsPresent int
-	for _, blobAndProof := range blobsAndProofs {
-		if blobAndProof != nil {
-			blobsPresent++
-		}
-	}
-	cellsPerBlob := make([][]kzg.Cell, blobsPresent)
-	proofsPerBlob := make([][]kzg.Proof, blobsPresent)
-	included := bitfield.NewBitlist(commitmentCount)
-
-	var j int
+	cellsPerBlob := make([][]kzg.Cell, len(blobsAndProofs))
+	proofsPerBlob := make([][]kzg.Proof, len(blobsAndProofs))
 	for i, blobAndProof := range blobsAndProofs {
 		if blobAndProof == nil {
-			continue
+			return nil, nil, ErrNilBlobAndProof
 		}
-		included.SetBitAt(uint64(i), true)
-
-		compactIndex := j
 		wg.Go(func() error {
 			var kzgBlob kzg.Blob
 			if copy(kzgBlob[:], blobAndProof.Blob) != len(kzgBlob) {
@@ -393,18 +381,17 @@ func ComputeCellsAndProofsFromStructured(commitmentCount uint64, blobsAndProofs
 				kzgProofs = append(kzgProofs, kzgProof)
 			}
 
-			cellsPerBlob[compactIndex] = cells
-			proofsPerBlob[compactIndex] = kzgProofs
+			cellsPerBlob[i] = cells
+			proofsPerBlob[i] = kzgProofs
 			return nil
 		})
-		j++
 	}
 
 	if err := wg.Wait(); err != nil {
-		return nil, nil, nil, err
+		return nil, nil, err
 	}
 
-	return included, cellsPerBlob, proofsPerBlob, nil
+	return cellsPerBlob, proofsPerBlob, nil
 }
 
 // ReconstructBlobs reconstructs blobs from data column sidecars without computing KZG proofs or creating sidecars.
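Dropping the bitlist also changes the contract: the old variant compacted around nil entries and told the caller what it kept, while the new one fails fast on the first nil. A hedged caller-side sketch of what that implies (ErrNilBlobAndProof is assumed to be a sentinel declared elsewhere in this package; errors and fmt are the standard library and would need to be imported):

// handleEngineBlobs is illustrative only: with the fail-fast contract, a
// partial engine_getBlobsV2-style response (nil entries) can no longer be
// compacted away; the caller must re-request missing blobs and retry.
func handleEngineBlobs(blobsAndProofs []*pb.BlobAndProofV2) ([][]kzg.Cell, [][]kzg.Proof, error) {
	cellsPerBlob, proofsPerBlob, err := ComputeCellsAndProofsFromStructured(blobsAndProofs)
	if errors.Is(err, ErrNilBlobAndProof) {
		return nil, nil, fmt.Errorf("partial blob response, retry required: %w", err)
	}
	return cellsPerBlob, proofsPerBlob, err
}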
@@ -479,9 +479,8 @@ func TestComputeCellsAndProofsFromFlat(t *testing.T) {
 
 func TestComputeCellsAndProofsFromStructured(t *testing.T) {
 	t.Run("nil blob and proof", func(t *testing.T) {
-		included, _, _, err := peerdas.ComputeCellsAndProofsFromStructured(0, []*pb.BlobAndProofV2{nil})
-		require.NoError(t, err)
-		require.Equal(t, uint64(0), included.Count())
+		_, _, err := peerdas.ComputeCellsAndProofsFromStructured([]*pb.BlobAndProofV2{nil})
+		require.ErrorIs(t, err, peerdas.ErrNilBlobAndProof)
 	})
 
 	t.Run("nominal", func(t *testing.T) {
@@ -534,8 +533,7 @@ func TestComputeCellsAndProofsFromStructured(t *testing.T) {
 		require.NoError(t, err)
 
 		// Test ComputeCellsAndProofs
-		included, actualCellsPerBlob, actualProofsPerBlob, err := peerdas.ComputeCellsAndProofsFromStructured(uint64(len(blobsAndProofs)), blobsAndProofs)
-		require.Equal(t, included.Count(), uint64(len(actualCellsPerBlob)))
+		actualCellsPerBlob, actualProofsPerBlob, err := peerdas.ComputeCellsAndProofsFromStructured(blobsAndProofs)
 		require.NoError(t, err)
 		require.Equal(t, blobCount, len(actualCellsPerBlob))
 
@@ -3,7 +3,6 @@ package peerdas
 import (
 	"time"
 
-	"github.com/OffchainLabs/go-bitfield"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
 	beaconState "github.com/OffchainLabs/prysm/v7/beacon-chain/state"
 	fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
@@ -24,13 +23,11 @@ var (
 var (
 	_ ConstructionPopulator = (*BlockReconstructionSource)(nil)
 	_ ConstructionPopulator = (*SidecarReconstructionSource)(nil)
-	_ ConstructionPopulator = (*PartialDataColumnHeaderReconstructionSource)(nil)
 )
 
 const (
 	BlockType   = "BeaconBlock"
 	SidecarType = "DataColumnSidecar"
-	PartialDataColumnHeaderType = "PartialDataColumnHeader"
 )
 
 type (
@@ -57,10 +54,6 @@ type (
 		blocks.VerifiedRODataColumn
 	}
 
-	PartialDataColumnHeaderReconstructionSource struct {
-		*ethpb.PartialDataColumnHeader
-	}
-
 	blockInfo struct {
 		signedBlockHeader *ethpb.SignedBeaconBlockHeader
 		kzgCommitments    [][]byte
@@ -78,11 +71,6 @@ func PopulateFromSidecar(sidecar blocks.VerifiedRODataColumn) *SidecarReconstruc
 	return &SidecarReconstructionSource{VerifiedRODataColumn: sidecar}
 }
 
-// PopulateFromPartialHeader creates a PartialDataColumnHeaderReconstructionSource from a partial header
-func PopulateFromPartialHeader(header *ethpb.PartialDataColumnHeader) *PartialDataColumnHeaderReconstructionSource {
-	return &PartialDataColumnHeaderReconstructionSource{PartialDataColumnHeader: header}
-}
-
 // ValidatorsCustodyRequirement returns the number of custody groups regarding the validator indices attached to the beacon node.
 // https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#validator-custody
 func ValidatorsCustodyRequirement(state beaconState.ReadOnlyBeaconState, validatorsIndex map[primitives.ValidatorIndex]bool) (uint64, error) {
@@ -155,40 +143,6 @@ func DataColumnSidecars(cellsPerBlob [][]kzg.Cell, proofsPerBlob [][]kzg.Proof,
 	return roSidecars, nil
 }
 
-func PartialColumns(included bitfield.Bitlist, cellsPerBlob [][]kzg.Cell, proofsPerBlob [][]kzg.Proof, src ConstructionPopulator) ([]blocks.PartialDataColumn, error) {
-	start := time.Now()
-	const numberOfColumns = uint64(fieldparams.NumberOfColumns)
-	cells, proofs, err := rotateRowsToCols(cellsPerBlob, proofsPerBlob, numberOfColumns)
-	if err != nil {
-		return nil, errors.Wrap(err, "rotate cells and proofs")
-	}
-	info, err := src.extract()
-	if err != nil {
-		return nil, errors.Wrap(err, "extract block info")
-	}
-
-	dataColumns := make([]blocks.PartialDataColumn, 0, numberOfColumns)
-	for idx := range numberOfColumns {
-		dc, err := blocks.NewPartialDataColumn(info.signedBlockHeader, idx, info.kzgCommitments, info.kzgInclusionProof)
-		if err != nil {
-			return nil, errors.Wrap(err, "new ro data column")
-		}
-
-		for i := range len(info.kzgCommitments) {
-			if !included.BitAt(uint64(i)) {
-				continue
-			}
-			dc.ExtendFromVerfifiedCell(uint64(i), cells[idx][0], proofs[idx][0])
-			cells[idx] = cells[idx][1:]
-			proofs[idx] = proofs[idx][1:]
-		}
-		dataColumns = append(dataColumns, dc)
-	}
-
-	dataColumnComputationTime.Observe(float64(time.Since(start).Milliseconds()))
-	return dataColumns, nil
-}
-
 // Slot returns the slot of the source
 func (s *BlockReconstructionSource) Slot() primitives.Slot {
 	return s.Block().Slot()
@@ -300,43 +254,3 @@ func (s *SidecarReconstructionSource) extract() (*blockInfo, error) {
|
|||||||
|
|
||||||
return info, nil
|
return info, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Slot returns the slot from the partial data column header
|
|
||||||
func (p *PartialDataColumnHeaderReconstructionSource) Slot() primitives.Slot {
|
|
||||||
return p.SignedBlockHeader.Header.Slot
|
|
||||||
}
|
|
||||||
|
|
||||||
// Root returns the block root computed from the header
|
|
||||||
func (p *PartialDataColumnHeaderReconstructionSource) Root() [fieldparams.RootLength]byte {
|
|
||||||
root, err := p.SignedBlockHeader.Header.HashTreeRoot()
|
|
||||||
if err != nil {
|
|
||||||
return [fieldparams.RootLength]byte{}
|
|
||||||
}
|
|
||||||
return root
|
|
||||||
}
|
|
||||||
|
|
||||||
// ProposerIndex returns the proposer index from the header
|
|
||||||
func (p *PartialDataColumnHeaderReconstructionSource) ProposerIndex() primitives.ValidatorIndex {
|
|
||||||
return p.SignedBlockHeader.Header.ProposerIndex
|
|
||||||
}
|
|
||||||
|
|
||||||
// Commitments returns the KZG commitments from the header
|
|
||||||
func (p *PartialDataColumnHeaderReconstructionSource) Commitments() ([][]byte, error) {
|
|
||||||
return p.KzgCommitments, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Type returns the type of the source
|
|
||||||
func (p *PartialDataColumnHeaderReconstructionSource) Type() string {
|
|
||||||
return PartialDataColumnHeaderType
|
|
||||||
}
|
|
||||||
|
|
||||||
// extract extracts the block information from the partial header
|
|
||||||
func (p *PartialDataColumnHeaderReconstructionSource) extract() (*blockInfo, error) {
|
|
||||||
info := &blockInfo{
|
|
||||||
signedBlockHeader: p.SignedBlockHeader,
|
|
||||||
kzgCommitments: p.KzgCommitments,
|
|
||||||
kzgInclusionProof: p.KzgCommitmentsInclusionProof,
|
|
||||||
}
|
|
||||||
|
|
||||||
return info, nil
|
|
||||||
}
|
|
||||||
|
|||||||
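The PartialColumns helper removed above leans on rotateRowsToCols to turn cells indexed [blob][column] into cells indexed [column][blob] before filling each PartialDataColumn. A minimal, self-contained sketch of that rotation, with a generic element type standing in for kzg.Cell (rotateRowsToCols itself is package-internal, so its exact signature here is an assumption):

    package main

    import "fmt"

    // rotate turns rows indexed [blob][column] into columns indexed [column][blob].
    // Illustrative stand-in for the package-internal rotateRowsToCols helper.
    func rotate[T any](rows [][]T, numberOfColumns uint64) ([][]T, error) {
        cols := make([][]T, numberOfColumns)
        for i, row := range rows {
            if uint64(len(row)) != numberOfColumns {
                return nil, fmt.Errorf("row %d has %d entries, want %d", i, len(row), numberOfColumns)
            }
            for j, cell := range row {
                cols[j] = append(cols[j], cell)
            }
        }
        return cols, nil
    }

    func main() {
        rows := [][]string{{"a0", "a1"}, {"b0", "b1"}} // two blobs, two columns
        cols, err := rotate(rows, 2)
        if err != nil {
            panic(err)
        }
        fmt.Println(cols) // [[a0 b0] [a1 b1]]
    }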
@@ -267,31 +267,4 @@ func TestReconstructionSource(t *testing.T) {
 
         require.Equal(t, peerdas.SidecarType, src.Type())
     })
-
-    t.Run("from partial header", func(t *testing.T) {
-        referenceSidecar := sidecars[0]
-        partialHeader := &ethpb.PartialDataColumnHeader{
-            SignedBlockHeader:            referenceSidecar.SignedBlockHeader,
-            KzgCommitments:               referenceSidecar.KzgCommitments,
-            KzgCommitmentsInclusionProof: referenceSidecar.KzgCommitmentsInclusionProof,
-        }
-
-        src := peerdas.PopulateFromPartialHeader(partialHeader)
-        require.Equal(t, referenceSidecar.SignedBlockHeader.Header.Slot, src.Slot())
-
-        // Compute expected root
-        expectedRoot, err := referenceSidecar.SignedBlockHeader.Header.HashTreeRoot()
-        require.NoError(t, err)
-        require.Equal(t, expectedRoot, src.Root())
-
-        require.Equal(t, referenceSidecar.SignedBlockHeader.Header.ProposerIndex, src.ProposerIndex())
-
-        commitments, err := src.Commitments()
-        require.NoError(t, err)
-        require.Equal(t, 2, len(commitments))
-        require.DeepEqual(t, commitment1, commitments[0])
-        require.DeepEqual(t, commitment2, commitments[1])
-
-        require.Equal(t, peerdas.PartialDataColumnHeaderType, src.Type())
-    })
 }
@@ -67,6 +67,7 @@ func getSubscriptionStatusFromDB(t *testing.T, db *Store) bool {
     return subscribed
 }
 
+
 func TestUpdateCustodyInfo(t *testing.T) {
     ctx := t.Context()
 
@@ -73,7 +73,6 @@ go_library(
         "@com_github_pkg_errors//:go_default_library",
         "@com_github_prometheus_client_golang//prometheus:go_default_library",
         "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
-        "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
         "@com_github_sirupsen_logrus//:go_default_library",
         "@io_k8s_client_go//tools/cache:go_default_library",
         "@org_golang_google_protobuf//proto:go_default_library",
@@ -7,7 +7,6 @@ import (
     "strings"
     "time"
 
-    "github.com/OffchainLabs/go-bitfield"
     "github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
     "github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
     "github.com/OffchainLabs/prysm/v7/beacon-chain/execution/types"
@@ -59,7 +58,6 @@ var (
     fuluEngineEndpoints = []string{
         GetPayloadMethodV5,
         GetBlobsV2,
-        GetBlobsV3,
     }
 )
 
@@ -101,8 +99,6 @@ const (
     GetBlobsV1 = "engine_getBlobsV1"
     // GetBlobsV2 request string for JSON-RPC.
     GetBlobsV2 = "engine_getBlobsV2"
-    // GetBlobsV3 request string for JSON-RPC.
-    GetBlobsV3 = "engine_getBlobsV3"
     // Defines the seconds before timing out engine endpoints with non-block execution semantics.
     defaultEngineTimeout = time.Second
 )
@@ -126,7 +122,7 @@ type Reconstructor interface {
         ctx context.Context, blindedBlocks []interfaces.ReadOnlySignedBeaconBlock,
     ) ([]interfaces.SignedBeaconBlock, error)
     ReconstructBlobSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [fieldparams.RootLength]byte, hi func(uint64) bool) ([]blocks.VerifiedROBlob, error)
-    ConstructDataColumnSidecars(ctx context.Context, populator peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, []blocks.PartialDataColumn, error)
+    ConstructDataColumnSidecars(ctx context.Context, populator peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, error)
 }
 
 // EngineCaller defines a client that can interact with an Ethereum
@@ -557,22 +553,6 @@ func (s *Service) GetBlobsV2(ctx context.Context, versionedHashes []common.Hash)
     return result, handleRPCError(err)
 }
 
-func (s *Service) GetBlobsV3(ctx context.Context, versionedHashes []common.Hash) ([]*pb.BlobAndProofV2, error) {
-    ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetBlobsV3")
-    defer span.End()
-    start := time.Now()
-
-    if !s.capabilityCache.has(GetBlobsV3) {
-        return nil, errors.New(fmt.Sprintf("%s is not supported", GetBlobsV3))
-    }
-
-    getBlobsV3RequestsTotal.Inc()
-    result := make([]*pb.BlobAndProofV2, len(versionedHashes))
-    err := s.rpcClient.CallContext(ctx, &result, GetBlobsV3, versionedHashes)
-    getBlobsV3Latency.Observe(time.Since(start).Seconds())
-    return result, handleRPCError(err)
-}
-
 // ReconstructFullBlock takes in a blinded beacon block and reconstructs
 // a beacon block with a full execution payload via the engine API.
 func (s *Service) ReconstructFullBlock(
@@ -683,47 +663,40 @@ func (s *Service) ReconstructBlobSidecars(ctx context.Context, block interfaces.
     return verifiedBlobs, nil
 }
 
-func (s *Service) ConstructDataColumnSidecars(ctx context.Context, populator peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, []blocks.PartialDataColumn, error) {
+func (s *Service) ConstructDataColumnSidecars(ctx context.Context, populator peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, error) {
     root := populator.Root()
 
     // Fetch cells and proofs from the execution client using the KZG commitments from the sidecar.
     commitments, err := populator.Commitments()
     if err != nil {
-        return nil, nil, wrapWithBlockRoot(err, root, "commitments")
+        return nil, wrapWithBlockRoot(err, root, "commitments")
     }
 
-    included, cellsPerBlob, proofsPerBlob, err := s.fetchCellsAndProofsFromExecution(ctx, commitments)
-    log.Info("Received cells and proofs from execution client", "included", included, "cells count", len(cellsPerBlob), "err", err)
+    cellsPerBlob, proofsPerBlob, err := s.fetchCellsAndProofsFromExecution(ctx, commitments)
     if err != nil {
-        return nil, nil, wrapWithBlockRoot(err, root, "fetch cells and proofs from execution client")
+        return nil, wrapWithBlockRoot(err, root, "fetch cells and proofs from execution client")
     }
 
-    partialColumns, err := peerdas.PartialColumns(included, cellsPerBlob, proofsPerBlob, populator)
-    haveAllBlobs := included.Count() == uint64(len(commitments))
-    log.Info("Constructed partial columns", "haveAllBlobs", haveAllBlobs)
-
-    if haveAllBlobs {
-        // Construct data column sidears from the signed block and cells and proofs.
-        roSidecars, err := peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, populator)
-        if err != nil {
-            return nil, nil, wrapWithBlockRoot(err, populator.Root(), "data column sidcars from column sidecar")
-        }
-
-        // Upgrade the sidecars to verified sidecars.
-        // We trust the execution layer we are connected to, so we can upgrade the sidecar into a verified one.
-        verifiedROSidecars := upgradeSidecarsToVerifiedSidecars(roSidecars)
-
-        return verifiedROSidecars, partialColumns, nil
+    // Return early if nothing is returned from the EL.
+    if len(cellsPerBlob) == 0 {
+        return nil, nil
     }
 
+    // Construct data column sidears from the signed block and cells and proofs.
+    roSidecars, err := peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, populator)
     if err != nil {
-        return nil, nil, wrapWithBlockRoot(err, populator.Root(), "partial columns from column sidecar")
+        return nil, wrapWithBlockRoot(err, populator.Root(), "data column sidcars from column sidecar")
     }
-    return nil, partialColumns, nil
+
+    // Upgrade the sidecars to verified sidecars.
+    // We trust the execution layer we are connected to, so we can upgrade the sidecar into a verified one.
+    verifiedROSidecars := upgradeSidecarsToVerifiedSidecars(roSidecars)
+
+    return verifiedROSidecars, nil
 }
 
 // fetchCellsAndProofsFromExecution fetches cells and proofs from the execution client (using engine_getBlobsV2 execution API method)
-func (s *Service) fetchCellsAndProofsFromExecution(ctx context.Context, kzgCommitments [][]byte) (bitfield.Bitlist /* included parts */, [][]kzg.Cell, [][]kzg.Proof, error) {
+func (s *Service) fetchCellsAndProofsFromExecution(ctx context.Context, kzgCommitments [][]byte) ([][]kzg.Cell, [][]kzg.Proof, error) {
     // Collect KZG hashes for all blobs.
     versionedHashes := make([]common.Hash, 0, len(kzgCommitments))
     for _, commitment := range kzgCommitments {
@@ -731,34 +704,24 @@ func (s *Service) fetchCellsAndProofsFromExecution(ctx context.Context, kzgCommi
         versionedHashes = append(versionedHashes, versionedHash)
     }
 
-    var blobAndProofs []*pb.BlobAndProofV2
-
     // Fetch all blobsAndCellsProofs from the execution client.
-    var err error
-    useV3 := s.capabilityCache.has(GetBlobsV3)
-    if useV3 {
-        // v3 can return a partial response. V2 is all or nothing
-        blobAndProofs, err = s.GetBlobsV3(ctx, versionedHashes)
-    } else {
-        blobAndProofs, err = s.GetBlobsV2(ctx, versionedHashes)
-    }
-
+    blobAndProofV2s, err := s.GetBlobsV2(ctx, versionedHashes)
     if err != nil {
-        return nil, nil, nil, errors.Wrapf(err, "get blobs V2/3")
+        return nil, nil, errors.Wrapf(err, "get blobs V2")
     }
 
+    // Return early if nothing is returned from the EL.
+    if len(blobAndProofV2s) == 0 {
+        return nil, nil, nil
+    }
+
     // Compute cells and proofs from the blobs and cell proofs.
-    included, cellsPerBlob, proofsPerBlob, err := peerdas.ComputeCellsAndProofsFromStructured(uint64(len(kzgCommitments)), blobAndProofs)
+    cellsPerBlob, proofsPerBlob, err := peerdas.ComputeCellsAndProofsFromStructured(blobAndProofV2s)
     if err != nil {
-        return nil, nil, nil, errors.Wrap(err, "compute cells and proofs")
+        return nil, nil, errors.Wrap(err, "compute cells and proofs")
     }
-    if included.Count() == uint64(len(kzgCommitments)) {
-        getBlobsV3CompleteResponsesTotal.Inc()
-    } else if included.Count() > 0 {
-        getBlobsV3PartialResponsesTotal.Inc()
-    }
 
-    return included, cellsPerBlob, proofsPerBlob, nil
+    return cellsPerBlob, proofsPerBlob, nil
 }
 
 // upgradeSidecarsToVerifiedSidecars upgrades a list of data column sidecars into verified data column sidecars.
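For context on the selection logic deleted above: the old fetchCellsAndProofsFromExecution preferred engine_getBlobsV3 whenever the connected execution client advertised it, because V3 may return a partial response while V2 is all or nothing. A self-contained sketch of that capability-gated fallback, using illustrative stand-in types rather than Prysm's Service and capabilityCache:

    package main

    import "fmt"

    // fetcher is an illustrative stand-in for the engine client; capabilities
    // mimics the capability cache populated from engine_exchangeCapabilities.
    type fetcher struct {
        capabilities map[string]bool
    }

    func (f *fetcher) has(method string) bool { return f.capabilities[method] }

    // getBlobs picks the best supported getBlobs variant, as the removed code did.
    func (f *fetcher) getBlobs(hashes []string) (string, error) {
        const v2, v3 = "engine_getBlobsV2", "engine_getBlobsV3"
        if f.has(v3) {
            // V3 may return a partial response; V2 is all or nothing.
            return v3, nil
        }
        if f.has(v2) {
            return v2, nil
        }
        return "", fmt.Errorf("no getBlobs method supported")
    }

    func main() {
        f := &fetcher{capabilities: map[string]bool{"engine_getBlobsV2": true}}
        method, err := f.getBlobs([]string{"0x01"})
        fmt.Println(method, err) // engine_getBlobsV2 <nil>
    }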
@@ -2587,7 +2587,7 @@ func TestConstructDataColumnSidecars(t *testing.T) {
     ctx := context.Background()
 
     t.Run("GetBlobsV2 is not supported", func(t *testing.T) {
-        _, _, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
+        _, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
         require.ErrorContains(t, "engine_getBlobsV2 is not supported", err)
     })
 
@@ -2598,7 +2598,7 @@ func TestConstructDataColumnSidecars(t *testing.T) {
         rpcClient, client := setupRpcClientV2(t, srv.URL, client)
         defer rpcClient.Close()
 
-        dataColumns, _, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
+        dataColumns, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
         require.NoError(t, err)
         require.Equal(t, 0, len(dataColumns))
     })
@@ -2611,7 +2611,7 @@ func TestConstructDataColumnSidecars(t *testing.T) {
         rpcClient, client := setupRpcClientV2(t, srv.URL, client)
         defer rpcClient.Close()
 
-        dataColumns, _, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
+        dataColumns, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
         require.NoError(t, err)
         require.Equal(t, 128, len(dataColumns))
     })
@@ -34,25 +34,6 @@ var (
             Buckets: []float64{25, 50, 100, 200, 500, 1000, 2000, 4000},
         },
     )
-    getBlobsV3RequestsTotal = promauto.NewCounter(prometheus.CounterOpts{
-        Name: "beacon_engine_getBlobsV3_requests_total",
-        Help: "Total number of engine_getBlobsV3 requests sent",
-    })
-    getBlobsV3CompleteResponsesTotal = promauto.NewCounter(prometheus.CounterOpts{
-        Name: "beacon_engine_getBlobsV3_complete_responses_total",
-        Help: "Total number of complete engine_getBlobsV3 successful responses received",
-    })
-    getBlobsV3PartialResponsesTotal = promauto.NewCounter(prometheus.CounterOpts{
-        Name: "beacon_engine_getBlobsV3_partial_responses_total",
-        Help: "Total number of engine_getBlobsV3 partial responses received",
-    })
-    getBlobsV3Latency = promauto.NewHistogram(
-        prometheus.HistogramOpts{
-            Name:    "beacon_engine_getBlobsV3_request_duration_seconds",
-            Help:    "Duration of engine_getBlobsV3 requests in seconds",
-            Buckets: []float64{0.025, 0.05, 0.1, 0.2, 0.5, 1, 2, 4},
-        },
-    )
     errParseCount = promauto.NewCounter(prometheus.CounterOpts{
         Name: "execution_parse_error_count",
         Help: "The number of errors that occurred while parsing execution payload",
@@ -118,8 +118,8 @@ func (e *EngineClient) ReconstructBlobSidecars(context.Context, interfaces.ReadO
 }
 
 // ConstructDataColumnSidecars is a mock implementation of the ConstructDataColumnSidecars method.
-func (e *EngineClient) ConstructDataColumnSidecars(context.Context, peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, []blocks.PartialDataColumn, error) {
-    return e.DataColumnSidecars, nil, e.ErrorDataColumnSidecars
+func (e *EngineClient) ConstructDataColumnSidecars(context.Context, peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, error) {
+    return e.DataColumnSidecars, e.ErrorDataColumnSidecars
 }
 
 // GetTerminalBlockHash --
@@ -134,20 +134,10 @@ type BeaconNode struct {
 
 // New creates a new node instance, sets up configuration options, and registers
 // every required service to the node.
-func New(cliCtx *cli.Context, cancel context.CancelFunc, optFuncs []func(*cli.Context) ([]Option, error), opts ...Option) (*BeaconNode, error) {
+func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*BeaconNode, error) {
     if err := configureBeacon(cliCtx); err != nil {
         return nil, errors.Wrap(err, "could not set beacon configuration options")
     }
-
-    for _, of := range optFuncs {
-        ofo, err := of(cliCtx)
-        if err != nil {
-            return nil, err
-        }
-        if ofo != nil {
-            opts = append(opts, ofo...)
-        }
-    }
     ctx := cliCtx.Context
 
     beacon := &BeaconNode{
@@ -678,7 +668,6 @@ func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
         DB: b.db,
         StateGen: b.stateGen,
         ClockWaiter: b.ClockWaiter,
-        PartialDataColumns: b.cliCtx.Bool(flags.PartialDataColumns.Name),
     })
     if err != nil {
         return err
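The optFuncs parameter removed from node.New let callers defer option construction until the CLI context was available: each function received the cli.Context and returned extra Options to append before the node was built. A simplified, runnable sketch of the pattern, with stand-in types for cli.Context and Option:

    package main

    import "fmt"

    // cliContext and option are illustrative stand-ins for cli.Context and node.Option.
    type cliContext struct{ verbose bool }

    type node struct{ verbose bool }

    type option func(*node)

    func withVerbose() option { return func(n *node) { n.verbose = true } }

    // newNode mirrors the removed shape of node.New: expand late-bound option
    // functions against the CLI context, then apply all options.
    func newNode(cliCtx *cliContext, optFuncs []func(*cliContext) ([]option, error), opts ...option) (*node, error) {
        for _, of := range optFuncs {
            ofo, err := of(cliCtx)
            if err != nil {
                return nil, err
            }
            if ofo != nil {
                opts = append(opts, ofo...)
            }
        }
        n := &node{}
        for _, o := range opts {
            o(n)
        }
        return n, nil
    }

    func main() {
        fromFlags := func(c *cliContext) ([]option, error) {
            if c.verbose {
                return []option{withVerbose()}, nil
            }
            return nil, nil
        }
        n, err := newNode(&cliContext{verbose: true}, []func(*cliContext) ([]option, error){fromFlags})
        fmt.Println(n.verbose, err) // true <nil>
    }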
@@ -59,7 +59,7 @@ func TestNodeClose_OK(t *testing.T) {
         WithDataColumnStorage(filesystem.NewEphemeralDataColumnStorage(t)),
     }
 
-    node, err := New(ctx, cancel, nil, options...)
+    node, err := New(ctx, cancel, options...)
     require.NoError(t, err)
 
     node.Close()
@@ -87,7 +87,7 @@ func TestNodeStart_Ok(t *testing.T) {
         WithDataColumnStorage(filesystem.NewEphemeralDataColumnStorage(t)),
     }
 
-    node, err := New(ctx, cancel, nil, options...)
+    node, err := New(ctx, cancel, options...)
     require.NoError(t, err)
     require.NotNil(t, node.lcStore)
     node.services = &runtime.ServiceRegistry{}
@@ -116,7 +116,7 @@ func TestNodeStart_SyncChecker(t *testing.T) {
         WithDataColumnStorage(filesystem.NewEphemeralDataColumnStorage(t)),
     }
 
-    node, err := New(ctx, cancel, nil, options...)
+    node, err := New(ctx, cancel, options...)
     require.NoError(t, err)
     go func() {
        node.Start()
@@ -151,7 +151,7 @@ func TestClearDB(t *testing.T) {
         WithDataColumnStorage(filesystem.NewEphemeralDataColumnStorage(t)),
     }
 
-    _, err = New(context, cancel, nil, options...)
+    _, err = New(context, cancel, options...)
     require.NoError(t, err)
     require.LogsContain(t, hook, "Removing database")
 }
@@ -52,7 +52,6 @@ go_library(
         "//beacon-chain/db:go_default_library",
         "//beacon-chain/db/kv:go_default_library",
         "//beacon-chain/p2p/encoder:go_default_library",
-        "//beacon-chain/p2p/partialdatacolumnbroadcaster:go_default_library",
         "//beacon-chain/p2p/peers:go_default_library",
         "//beacon-chain/p2p/peers/peerdata:go_default_library",
         "//beacon-chain/p2p/peers/scorers:go_default_library",
@@ -343,7 +343,7 @@ func (s *Service) BroadcastLightClientFinalityUpdate(ctx context.Context, update
 // there is at least one peer in each needed subnet. If not, it will attempt to find one before broadcasting.
 // This function is non-blocking. It stops trying to broadcast a given sidecar when more than one slot has passed, or the context is
 // cancelled (whichever comes first).
-func (s *Service) BroadcastDataColumnSidecars(ctx context.Context, sidecars []blocks.VerifiedRODataColumn, partialColumns []blocks.PartialDataColumn) error {
+func (s *Service) BroadcastDataColumnSidecars(ctx context.Context, sidecars []blocks.VerifiedRODataColumn) error {
     // Increase the number of broadcast attempts.
     dataColumnSidecarBroadcastAttempts.Add(float64(len(sidecars)))
 
@@ -353,15 +353,16 @@ func (s *Service) BroadcastDataColumnSidecars(ctx context.Context, sidecars []bl
         return errors.Wrap(err, "current fork digest")
     }
 
-    go s.broadcastDataColumnSidecars(ctx, forkDigest, sidecars, partialColumns)
+    go s.broadcastDataColumnSidecars(ctx, forkDigest, sidecars)
 
     return nil
 }
 
-// broadcastDataColumnSidecars broadcasts multiple data column sidecars to the p2p network, after ensuring
-// there is at least one peer in each needed subnet. If not, it will attempt to find one before broadcasting.
-// It returns when all broadcasts are complete, or the context is cancelled (whichever comes first).
-func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [fieldparams.VersionLength]byte, sidecars []blocks.VerifiedRODataColumn, partialColumns []blocks.PartialDataColumn) {
+// broadcastDataColumnSidecars broadcasts multiple data column sidecars to the p2p network.
+// For sidecars with available peers, it uses batch publishing.
+// For sidecars without peers, it finds peers first and then publishes individually.
+// Both paths run in parallel. It returns when all broadcasts are complete, or the context is cancelled.
+func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [fieldparams.VersionLength]byte, sidecars []blocks.VerifiedRODataColumn) {
     type rootAndIndex struct {
         root  [fieldparams.RootLength]byte
         index uint64
@@ -371,8 +372,8 @@ func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [f
     logLevel := logrus.GetLevel()
     slotPerRoot := make(map[[fieldparams.RootLength]byte]primitives.Slot, 1)
 
-    topicFunc := func(dcIndex uint64) (topic string, wrappedSubIdx uint64, subnet uint64) {
-        subnet = peerdas.ComputeSubnetForDataColumnSidecar(dcIndex)
+    topicFunc := func(sidecar blocks.VerifiedRODataColumn) (topic string, wrappedSubIdx uint64, subnet uint64) {
+        subnet = peerdas.ComputeSubnetForDataColumnSidecar(sidecar.Index)
         topic = dataColumnSubnetToTopic(subnet, forkDigest)
         wrappedSubIdx = subnet + dataColumnSubnetVal
         return
@@ -385,7 +386,7 @@ func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [f
     for _, sidecar := range sidecars {
         slotPerRoot[sidecar.BlockRoot()] = sidecar.Slot()
 
-        topic, wrappedSubIdx, _ := topicFunc(sidecar.Index)
+        topic, wrappedSubIdx, _ := topicFunc(sidecar)
         // Check if we have a peer for this subnet (use RLock for read-only check).
         mu := s.subnetLocker(wrappedSubIdx)
         mu.RLock()
@@ -410,7 +411,7 @@ func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [f
             ctx := trace.NewContext(s.ctx, span)
             defer span.End()
 
-            topic, _, _ := topicFunc(sidecar.Index)
+            topic, _, _ := topicFunc(sidecar)
 
             if err := s.batchObject(ctx, &messageBatch, sidecar, topic); err != nil {
                 tracing.AnnotateError(span, err)
@@ -418,10 +419,6 @@ func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [f
                 return
             }
 
-            // Increase the number of successful broadcasts.
-            dataColumnSidecarBroadcasts.Inc()
-
-            // Record the timing for log purposes.
             if logLevel >= logrus.DebugLevel {
                 root := sidecar.BlockRoot()
                 timings.Store(rootAndIndex{root: root, index: sidecar.Index}, time.Now())
@@ -436,7 +433,7 @@ func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [f
             ctx := trace.NewContext(s.ctx, span)
             defer span.End()
 
-            topic, wrappedSubIdx, subnet := topicFunc(sidecar.Index)
+            topic, wrappedSubIdx, subnet := topicFunc(sidecar)
 
             // Find peers for this sidecar's subnet.
             if err := s.findPeersIfNeeded(ctx, wrappedSubIdx, DataColumnSubnetTopicFormat, forkDigest, subnet); err != nil {
@@ -461,32 +458,6 @@ func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [f
         })
     }
 
-    if s.partialColumnBroadcaster != nil {
-        // Note: There is not batch publish for partial columns.
-        for _, partialColumn := range partialColumns {
-            individualWg.Go(func() {
-                _, span := trace.StartSpan(ctx, "p2p.broadcastPartialDataColumn")
-                ctx := trace.NewContext(s.ctx, span)
-                defer span.End()
-
-                topic, wrappedSubIdx, subnet := topicFunc(partialColumn.Index)
-
-                // Find peers for this sidecar's subnet.
-                if err := s.findPeersIfNeeded(ctx, wrappedSubIdx, DataColumnSubnetTopicFormat, forkDigest, subnet); err != nil {
-                    tracing.AnnotateError(span, err)
-                    log.WithError(err).Error("Cannot find peers if needed")
-                    return
-                }
-
-                fullTopicStr := topic + s.Encoding().ProtocolSuffix()
-                if err := s.partialColumnBroadcaster.Publish(fullTopicStr, partialColumn); err != nil {
-                    tracing.AnnotateError(span, err)
-                    log.WithError(err).Error("Cannot partial broadcast data column sidecar")
-                }
-            })
-        }
-    }
-
     // Wait for batch to be populated, then publish.
     batchWg.Wait()
     if len(sidecarsWithPeers) > 0 {
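topicFunc, reworked above to take the sidecar rather than a bare column index, maps a data column to its gossip subnet and topic. A self-contained sketch of that mapping; the subnet count and topic format below are illustrative assumptions (per the Fulu spec the subnet is the column index modulo the subnet count, and Prysm derives both from its beacon config and DataColumnSubnetTopicFormat rather than local constants):

    package main

    import "fmt"

    // Illustrative constant; the spec value lives in the beacon chain config.
    const dataColumnSidecarSubnetCount = 128

    type sidecar struct{ Index uint64 }

    func main() {
        forkDigest := [4]byte{0xde, 0xad, 0xbe, 0xef}
        topicFunc := func(sc sidecar) (topic string, subnet uint64) {
            // Per the Fulu spec, the subnet is the column index modulo the subnet count.
            subnet = sc.Index % dataColumnSidecarSubnetCount
            topic = fmt.Sprintf("/eth2/%x/data_column_sidecar_%d", forkDigest, subnet)
            return topic, subnet
        }
        topic, subnet := topicFunc(sidecar{Index: 130})
        fmt.Println(topic, subnet) // /eth2/deadbeef/data_column_sidecar_2 2
    }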
@@ -803,7 +803,7 @@ func TestService_BroadcastDataColumn(t *testing.T) {
     }, 5*time.Second, 10*time.Millisecond, "libp2p mesh did not establish")
 
     // Broadcast to peers and wait.
-    err = service.BroadcastDataColumnSidecars(ctx, []blocks.VerifiedRODataColumn{verifiedRoSidecar}, nil)
+    err = service.BroadcastDataColumnSidecars(ctx, []blocks.VerifiedRODataColumn{verifiedRoSidecar})
     require.NoError(t, err)
 
     // Receive the message.
@@ -867,7 +867,7 @@ func (*rpcOrderTracer) DeliverMessage(*pubsub.Message) {}
 func (*rpcOrderTracer) RejectMessage(*pubsub.Message, string) {}
 func (*rpcOrderTracer) DuplicateMessage(*pubsub.Message) {}
 func (*rpcOrderTracer) ThrottlePeer(peer.ID) {}
-func (*rpcOrderTracer) RecvRPC(*pubsub.RPC, peer.ID) {}
+func (*rpcOrderTracer) RecvRPC(*pubsub.RPC) {}
 func (*rpcOrderTracer) DropRPC(*pubsub.RPC, peer.ID) {}
 func (*rpcOrderTracer) UndeliverableMessage(*pubsub.Message) {}
 
@@ -969,7 +969,7 @@ func TestService_BroadcastDataColumnRoundRobin(t *testing.T) {
     time.Sleep(100 * time.Millisecond)
 
     // Broadcast all sidecars.
-    err = service.BroadcastDataColumnSidecars(ctx, verifiedRoSidecars, nil)
+    err = service.BroadcastDataColumnSidecars(ctx, verifiedRoSidecars)
     require.NoError(t, err)
     // Give some time for messages to be sent.
     time.Sleep(100 * time.Millisecond)
@@ -26,7 +26,6 @@ const (
 // Config for the p2p service. These parameters are set from application level flags
 // to initialize the p2p service.
 type Config struct {
-    PartialDataColumns bool
     NoDiscovery bool
     EnableUPnP bool
     StaticPeerID bool
@@ -4,7 +4,6 @@ import (
     "context"
 
     "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/encoder"
-    "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/partialdatacolumnbroadcaster"
     "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers"
     fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
     "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
@@ -29,7 +28,6 @@ type (
         Broadcaster
         SetStreamHandler
         PubSubProvider
-        PartialColumnBroadcasterProvider
         PubSubTopicUser
         SenderEncoder
         PeerManager
@@ -54,7 +52,7 @@ type (
         BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.BlobSidecar) error
         BroadcastLightClientOptimisticUpdate(ctx context.Context, update interfaces.LightClientOptimisticUpdate) error
         BroadcastLightClientFinalityUpdate(ctx context.Context, update interfaces.LightClientFinalityUpdate) error
-        BroadcastDataColumnSidecars(ctx context.Context, sidecars []blocks.VerifiedRODataColumn, partialColumns []blocks.PartialDataColumn) error
+        BroadcastDataColumnSidecars(ctx context.Context, sidecars []blocks.VerifiedRODataColumn) error
     }
 
     // SetStreamHandler configures p2p to handle streams of a certain topic ID.
@@ -94,11 +92,6 @@ type (
         PubSub() *pubsub.PubSub
     }
 
-    // PubSubProvider provides the p2p pubsub protocol.
-    PartialColumnBroadcasterProvider interface {
-        PartialColumnBroadcaster() *partialdatacolumnbroadcaster.PartialColumnBroadcaster
-    }
-
     // PeerManager abstracts some peer management methods from libp2p.
     PeerManager interface {
         Disconnect(peer.ID) error
@@ -157,11 +157,6 @@ var (
         Help: "The number of publish messages received via rpc for a particular topic",
     },
         []string{"topic"})
-    pubsubRPCPubRecvSize = promauto.NewCounterVec(prometheus.CounterOpts{
-        Name: "p2p_pubsub_rpc_recv_pub_size_total",
-        Help: "The total size of publish messages received via rpc for a particular topic",
-    },
-        []string{"topic", "is_partial"})
     pubsubRPCDrop = promauto.NewCounterVec(prometheus.CounterOpts{
         Name: "p2p_pubsub_rpc_drop_total",
         Help: "The number of messages dropped via rpc for a particular control message",
@@ -176,11 +171,6 @@ var (
         Help: "The number of publish messages dropped via rpc for a particular topic",
     },
         []string{"topic"})
-    pubsubRPCPubDropSize = promauto.NewCounterVec(prometheus.CounterOpts{
-        Name: "p2p_pubsub_rpc_drop_pub_size_total",
-        Help: "The total size of publish messages dropped via rpc for a particular topic",
-    },
-        []string{"topic", "is_partial"})
     pubsubRPCSent = promauto.NewCounterVec(prometheus.CounterOpts{
         Name: "p2p_pubsub_rpc_sent_total",
         Help: "The number of messages sent via rpc for a particular control message",
@@ -195,16 +185,6 @@ var (
         Help: "The number of publish messages sent via rpc for a particular topic",
     },
         []string{"topic"})
-    pubsubRPCPubSentSize = promauto.NewCounterVec(prometheus.CounterOpts{
-        Name: "gossipsub_pubsub_rpc_sent_pub_size_total",
-        Help: "The total size of publish messages sent via rpc for a particular topic",
-    },
-        []string{"topic", "is_partial"})
-    pubsubMeshPeers = promauto.NewGaugeVec(prometheus.GaugeOpts{
-        Name: "gossipsub_mesh_peers",
-        Help: "The number of capable peers in mesh",
-    },
-        []string{"topic", "supports_partial"})
 )
 
 func (s *Service) updateMetrics() {
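All of the counters in this file, including the removed ones, follow the same promauto pattern: a CounterVec declared at package init with a fixed label set, then incremented per label combination at runtime. A minimal runnable sketch with an illustrative metric name (the removed counters simply added an "is_partial" label alongside "topic"):

    package main

    import (
        "fmt"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promauto"
    )

    // rpcPubSize registers itself with the default registry at init.
    // The metric name is illustrative, not one of Prysm's.
    var rpcPubSize = promauto.NewCounterVec(prometheus.CounterOpts{
        Name: "example_pubsub_rpc_pub_size_total",
        Help: "Total size of publish messages, by topic and partial flag",
    }, []string{"topic", "is_partial"})

    func main() {
        // Each distinct label combination gets its own time series.
        rpcPubSize.WithLabelValues("data_column_sidecar_3", "true").Add(2048)
        rpcPubSize.WithLabelValues("data_column_sidecar_3", "false").Add(512)
        fmt.Println("recorded")
    }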
@@ -1,28 +0,0 @@
-load("@prysm//tools/go:def.bzl", "go_library")
-
-go_library(
-    name = "go_default_library",
-    srcs = [
-        "log.go",
-        "metrics.go",
-        "partial.go",
-    ],
-    importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/partialdatacolumnbroadcaster",
-    visibility = ["//visibility:public"],
-    deps = [
-        "//config/params:go_default_library",
-        "//consensus-types/blocks:go_default_library",
-        "//internal/logrusadapter:go_default_library",
-        "//proto/prysm/v1alpha1:go_default_library",
-        "@com_github_libp2p_go_libp2p//core/peer:go_default_library",
-        "@com_github_libp2p_go_libp2p_pubsub//:go_default_library",
-        "@com_github_libp2p_go_libp2p_pubsub//partialmessages:go_default_library",
-        "@com_github_libp2p_go_libp2p_pubsub//partialmessages/bitmap:go_default_library",
-        "@com_github_libp2p_go_libp2p_pubsub//pb:go_default_library",
-        "@com_github_pkg_errors//:go_default_library",
-        "@com_github_prometheus_client_golang//prometheus:go_default_library",
-        "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
-        "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
-        "@com_github_sirupsen_logrus//:go_default_library",
-    ],
-)
@@ -1,25 +0,0 @@
-load("@prysm//tools/go:def.bzl", "go_test")
-
-go_test(
-    name = "go_default_test",
-    size = "medium",
-    srcs = ["two_node_test.go"],
-    deps = [
-        "//beacon-chain/core/peerdas:go_default_library",
-        "//beacon-chain/p2p:go_default_library",
-        "//beacon-chain/p2p/encoder:go_default_library",
-        "//beacon-chain/p2p/partialdatacolumnbroadcaster:go_default_library",
-        "//config/fieldparams:go_default_library",
-        "//config/params:go_default_library",
-        "//consensus-types/blocks:go_default_library",
-        "//proto/prysm/v1alpha1:go_default_library",
-        "//testing/assert:go_default_library",
-        "//testing/require:go_default_library",
-        "//testing/util:go_default_library",
-        "@com_github_libp2p_go_libp2p//core/peer:go_default_library",
-        "@com_github_libp2p_go_libp2p//x/simlibp2p:go_default_library",
-        "@com_github_libp2p_go_libp2p_pubsub//:go_default_library",
-        "@com_github_marcopolo_simnet//:go_default_library",
-        "@com_github_sirupsen_logrus//:go_default_library",
-    ],
-)
@@ -1,239 +0,0 @@
-package integrationtest
-
-import (
-    "context"
-    "crypto/rand"
-    "fmt"
-    "testing"
-    "testing/synctest"
-    "time"
-
-    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
-    "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
-    "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/encoder"
-    "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/partialdatacolumnbroadcaster"
-    fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
-    "github.com/OffchainLabs/prysm/v7/config/params"
-    "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
-    ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
-    "github.com/OffchainLabs/prysm/v7/testing/assert"
-    "github.com/OffchainLabs/prysm/v7/testing/require"
-    "github.com/OffchainLabs/prysm/v7/testing/util"
-    pubsub "github.com/libp2p/go-libp2p-pubsub"
-    "github.com/libp2p/go-libp2p/core/peer"
-    simlibp2p "github.com/libp2p/go-libp2p/x/simlibp2p"
-    "github.com/marcopolo/simnet"
-    "github.com/sirupsen/logrus"
-)
-
-// TestTwoNodePartialColumnExchange tests that two nodes can exchange partial columns
-// and reconstruct the complete column. Node 1 has cells 0-2, Node 2 has cells 3-5.
-// After exchange, both should have all cells.
-func TestTwoNodePartialColumnExchange(t *testing.T) {
-    synctest.Test(t, func(t *testing.T) {
-        // Create a simulated libp2p network
-        latency := time.Millisecond * 10
-        network, meta, err := simlibp2p.SimpleLibp2pNetwork([]simlibp2p.NodeLinkSettingsAndCount{
-            {LinkSettings: simnet.NodeBiDiLinkSettings{
-                Downlink: simnet.LinkSettings{BitsPerSecond: 20 * simlibp2p.OneMbps, Latency: latency / 2},
-                Uplink:   simnet.LinkSettings{BitsPerSecond: 20 * simlibp2p.OneMbps, Latency: latency / 2},
-            }, Count: 2},
-        }, simlibp2p.NetworkSettings{UseBlankHost: true})
-        require.NoError(t, err)
-        require.NoError(t, network.Start())
-        defer func() {
-            require.NoError(t, network.Close())
-        }()
-        defer func() {
-            for _, node := range meta.Nodes {
-                err := node.Close()
-                if err != nil {
-                    panic(err)
-                }
-            }
-        }()
-
-        h1 := meta.Nodes[0]
-        h2 := meta.Nodes[1]
-
-        logger := logrus.New()
-        logger.SetLevel(logrus.DebugLevel)
-        broadcaster1 := partialdatacolumnbroadcaster.NewBroadcaster(logger)
-        broadcaster2 := partialdatacolumnbroadcaster.NewBroadcaster(logger)
-
-        opts1 := broadcaster1.AppendPubSubOpts([]pubsub.Option{
-            pubsub.WithMessageSigning(false),
-            pubsub.WithStrictSignatureVerification(false),
-        })
-        opts2 := broadcaster2.AppendPubSubOpts([]pubsub.Option{
-            pubsub.WithMessageSigning(false),
-            pubsub.WithStrictSignatureVerification(false),
-        })
-
-        ctx, cancel := context.WithCancel(context.Background())
-        defer cancel()
-        ps1, err := pubsub.NewGossipSub(ctx, h1, opts1...)
-        require.NoError(t, err)
-        ps2, err := pubsub.NewGossipSub(ctx, h2, opts2...)
-        require.NoError(t, err)
-
-        go broadcaster1.Start()
-        go broadcaster2.Start()
-        defer func() {
-            broadcaster1.Stop()
-            broadcaster2.Stop()
-        }()
-
-        // Generate Test Data
-        var blockRoot [fieldparams.RootLength]byte
-        copy(blockRoot[:], []byte("test-block-root"))
-
-        numCells := 6
-        commitments := make([][]byte, numCells)
-        cells := make([][]byte, numCells)
-        proofs := make([][]byte, numCells)
-
-        for i := range numCells {
-            commitments[i] = make([]byte, 48)
-
-            cells[i] = make([]byte, 2048)
-            _, err := rand.Read(cells[i])
-            require.NoError(t, err)
-            proofs[i] = make([]byte, 48)
-            _ = fmt.Appendf(proofs[i][:0], "proof %d", i)
-        }
-
-        roDC, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{
-            {
-                BodyRoot:       blockRoot[:],
-                KzgCommitments: commitments,
-                Column:         cells,
-                KzgProofs:      proofs,
-            },
-        })
-
-        pc1, err := blocks.NewPartialDataColumn(roDC[0].DataColumnSidecar.SignedBlockHeader, roDC[0].Index, roDC[0].KzgCommitments, roDC[0].KzgCommitmentsInclusionProof)
-        require.NoError(t, err)
-        pc2, err := blocks.NewPartialDataColumn(roDC[0].DataColumnSidecar.SignedBlockHeader, roDC[0].Index, roDC[0].KzgCommitments, roDC[0].KzgCommitmentsInclusionProof)
-        require.NoError(t, err)
-
-        // Split data
-        for i := range numCells {
-            if i%2 == 0 {
-                pc1.ExtendFromVerfifiedCell(uint64(i), roDC[0].Column[i], roDC[0].KzgProofs[i])
-            } else {
-                pc2.ExtendFromVerfifiedCell(uint64(i), roDC[0].Column[i], roDC[0].KzgProofs[i])
-            }
-        }
-
-        // Setup Topic and Subscriptions
-        digest := params.ForkDigest(0)
-        columnIndex := uint64(12)
-        subnet := peerdas.ComputeSubnetForDataColumnSidecar(columnIndex)
-        topicStr := fmt.Sprintf(p2p.DataColumnSubnetTopicFormat, digest, subnet) +
-            encoder.SszNetworkEncoder{}.ProtocolSuffix()
-
-        time.Sleep(100 * time.Millisecond)
-
-        topic1, err := ps1.Join(topicStr, pubsub.RequestPartialMessages())
-        require.NoError(t, err)
-        topic2, err := ps2.Join(topicStr, pubsub.RequestPartialMessages())
-        require.NoError(t, err)
-
-        // Header validator that verifies the inclusion proof
-        headerValidator := func(header *ethpb.PartialDataColumnHeader) (reject bool, err error) {
-            if header == nil {
-                return false, fmt.Errorf("nil header")
-            }
-            if header.SignedBlockHeader == nil || header.SignedBlockHeader.Header == nil {
-                return true, fmt.Errorf("nil signed block header")
-            }
-            if len(header.KzgCommitments) == 0 {
-                return true, fmt.Errorf("empty kzg commitments")
-            }
-            // Verify inclusion proof
-            if err := peerdas.VerifyPartialDataColumnHeaderInclusionProof(header); err != nil {
-                return true, fmt.Errorf("invalid inclusion proof: %w", err)
-            }
-            t.Log("Header validation passed")
-            return false, nil
-        }
-
-        cellValidator := func(_ []blocks.CellProofBundle) error {
-            return nil
-        }
-
-        node1Complete := make(chan blocks.VerifiedRODataColumn, 1)
-        node2Complete := make(chan blocks.VerifiedRODataColumn, 1)
-
-        handler1 := func(topic string, col blocks.VerifiedRODataColumn) {
-            t.Logf("Node 1: Completed! Column has %d cells", len(col.Column))
-            node1Complete <- col
-        }
-
-        handler2 := func(topic string, col blocks.VerifiedRODataColumn) {
-            t.Logf("Node 2: Completed! Column has %d cells", len(col.Column))
-            node2Complete <- col
-        }
-
-        // Connect hosts
-        err = h1.Connect(context.Background(), peer.AddrInfo{
-            ID:    h2.ID(),
-            Addrs: h2.Addrs(),
-        })
-        require.NoError(t, err)
-        time.Sleep(300 * time.Millisecond)
-
-        // Subscribe to regular GossipSub (critical for partial message RPC exchange!)
-        sub1, err := topic1.Subscribe()
-        require.NoError(t, err)
-        defer sub1.Cancel()
-
-        sub2, err := topic2.Subscribe()
-        require.NoError(t, err)
-        defer sub2.Cancel()
-
-        err = broadcaster1.Subscribe(topic1, headerValidator, cellValidator, handler1)
-        require.NoError(t, err)
-        err = broadcaster2.Subscribe(topic2, headerValidator, cellValidator, handler2)
-        require.NoError(t, err)
-
-        // Wait for mesh to form
-        time.Sleep(2 * time.Second)
-
-        // Publish
-        t.Log("Publishing from Node 1")
-        err = broadcaster1.Publish(topicStr, pc1)
-        require.NoError(t, err)
-
-        time.Sleep(200 * time.Millisecond)
-
-        t.Log("Publishing from Node 2")
-        err = broadcaster2.Publish(topicStr, pc2)
-        require.NoError(t, err)
-
-        // Wait for Completion
-        timeout := time.After(10 * time.Second)
-        var col1, col2 blocks.VerifiedRODataColumn
-        receivedCount := 0
-
-        for receivedCount < 2 {
-            select {
-            case col1 = <-node1Complete:
-                t.Log("Node 1 completed reconstruction")
-                receivedCount++
-            case col2 = <-node2Complete:
-                t.Log("Node 2 completed reconstruction")
-                receivedCount++
-            case <-timeout:
-                t.Fatalf("Timeout: Only %d/2 nodes completed", receivedCount)
-            }
-        }
-
-        // Verify both columns have all cells
-        assert.Equal(t, numCells, len(col1.Column), "Node 1 should have all cells")
-        assert.Equal(t, numCells, len(col2.Column), "Node 2 should have all cells")
-        assert.DeepSSZEqual(t, cells, col1.Column, "Node 1 cell mismatch")
-        assert.DeepSSZEqual(t, cells, col2.Column, "Node 2 cell mismatch")
-    })
-}
@@ -1,9 +0,0 @@
-// Code generated by hack/gen-logs.sh; DO NOT EDIT.
-// This file is created and regenerated automatically. Anything added here might get removed.
-package partialdatacolumnbroadcaster
-
-import "github.com/sirupsen/logrus"
-
-// The prefix for logs from this package will be the text after the last slash in the package path.
-// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
-var log = logrus.WithField("package", "beacon-chain/p2p/partialdatacolumnbroadcaster")
@@ -1,18 +0,0 @@
-package partialdatacolumnbroadcaster
-
-import (
-    "github.com/prometheus/client_golang/prometheus"
-    "github.com/prometheus/client_golang/prometheus/promauto"
-)
-
-var (
-    partialMessageUsefulCellsTotal = promauto.NewCounterVec(prometheus.CounterOpts{
-        Name: "beacon_partial_message_useful_cells_total",
-        Help: "Number of useful cells received via a partial message",
-    }, []string{"column_index"})
-
-    partialMessageCellsReceivedTotal = promauto.NewCounterVec(prometheus.CounterOpts{
-        Name: "beacon_partial_message_cells_received_total",
-        Help: "Number of total cells received via a partial message",
-    }, []string{"column_index"})
-)
@@ -1,557 +0,0 @@
package partialdatacolumnbroadcaster

import (
	"bytes"
	"log/slog"
	"regexp"
	"strconv"
	"time"

	"github.com/OffchainLabs/go-bitfield"
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v7/internal/logrusadapter"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	pubsub "github.com/libp2p/go-libp2p-pubsub"
	"github.com/libp2p/go-libp2p-pubsub/partialmessages"
	"github.com/libp2p/go-libp2p-pubsub/partialmessages/bitmap"
	pubsub_pb "github.com/libp2p/go-libp2p-pubsub/pb"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

// TODOs:
// different eager push strategies:
// - no eager push
// - full column eager push
// - With debouncing - some factor of RTT
// - eager push missing cells

const TTLInSlots = 3
const maxConcurrentValidators = 128

var dataColumnTopicRegex = regexp.MustCompile(`data_column_sidecar_(\d+)`)

func extractColumnIndexFromTopic(topic string) (uint64, error) {
	matches := dataColumnTopicRegex.FindStringSubmatch(topic)
	if len(matches) < 2 {
		return 0, errors.New("could not extract column index from topic")
	}
	return strconv.ParseUint(matches[1], 10, 64)
}
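To make the regex concrete, a quick usage sketch (the topic string is a made-up example in the usual /eth2/<fork digest>/<name>/<encoding> gossip format):

	// FindStringSubmatch captures the digits after "data_column_sidecar_",
	// so this returns 17 with a nil error.
	idx, err := extractColumnIndexFromTopic("/eth2/abcd1234/data_column_sidecar_17/ssz_snappy")
	if err != nil {
		log.WithError(err).Error("not a data column topic")
	}
	_ = idx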
// HeaderValidator validates a PartialDataColumnHeader.
// Returns (reject, err) where:
// - reject=true, err!=nil: REJECT - peer should be penalized
// - reject=false, err!=nil: IGNORE - don't penalize, just ignore
// - reject=false, err=nil: valid header
type HeaderValidator func(header *ethpb.PartialDataColumnHeader) (reject bool, err error)
type HeaderHandler func(header *ethpb.PartialDataColumnHeader)
type ColumnValidator func(cells []blocks.CellProofBundle) error
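A sketch of a HeaderValidator obeying this contract; verifyHeaderSignature and haveParentBlock are hypothetical helpers, named here only to show which failures map to REJECT versus IGNORE:

	var exampleHeaderValidator HeaderValidator = func(h *ethpb.PartialDataColumnHeader) (bool, error) {
		if err := verifyHeaderSignature(h.SignedBlockHeader); err != nil {
			// Provably invalid: REJECT, so the sending peer is penalized.
			return true, err
		}
		if !haveParentBlock(h.SignedBlockHeader) {
			// Cannot be judged yet: IGNORE, no penalty.
			return false, errors.New("parent block unknown")
		}
		return false, nil // valid header
	}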
type PartialColumnBroadcaster struct {
	logger *logrus.Logger

	ps   *pubsub.PubSub
	stop chan struct{}

	// map topic -> headerValidators
	headerValidators map[string]HeaderValidator
	// map topic -> Validator
	validators map[string]ColumnValidator

	// map topic -> handler
	handlers map[string]SubHandler

	// map topic -> headerHandler
	headerHandlers map[string]HeaderHandler

	// map topic -> *pubsub.Topic
	topics map[string]*pubsub.Topic

	concurrentValidatorSemaphore chan struct{}

	// map topic -> map[groupID]PartialColumn
	partialMsgStore map[string]map[string]*blocks.PartialDataColumn

	groupTTL map[string]int8

	// validHeaderCache caches validated headers by group ID (works across topics)
	validHeaderCache map[string]*ethpb.PartialDataColumnHeader

	incomingReq chan request
}

type requestKind uint8

const (
	requestKindPublish requestKind = iota
	requestKindSubscribe
	requestKindUnsubscribe
	requestKindHandleIncomingRPC
	requestKindCellsValidated
)

type request struct {
	kind           requestKind
	response       chan error
	sub            subscribe
	unsub          unsubscribe
	publish        publish
	incomingRPC    rpcWithFrom
	cellsValidated *cellsValidated
}

type publish struct {
	topic string
	c     blocks.PartialDataColumn
}

type subscribe struct {
	t               *pubsub.Topic
	headerValidator HeaderValidator
	validator       ColumnValidator
	handler         SubHandler
	headerHandler   HeaderHandler
}

type unsubscribe struct {
	topic string
}

type rpcWithFrom struct {
	*pubsub_pb.PartialMessagesExtension
	from peer.ID
}

type cellsValidated struct {
	validationTook time.Duration
	topic          string
	group          []byte
	cellIndices    []uint64
	cells          []blocks.CellProofBundle
}

func NewBroadcaster(logger *logrus.Logger) *PartialColumnBroadcaster {
	return &PartialColumnBroadcaster{
		validators:       make(map[string]ColumnValidator),
		headerValidators: make(map[string]HeaderValidator),
		handlers:         make(map[string]SubHandler),
		headerHandlers:   make(map[string]HeaderHandler),
		topics:           make(map[string]*pubsub.Topic),
		partialMsgStore:  make(map[string]map[string]*blocks.PartialDataColumn),
		groupTTL:         make(map[string]int8),
		validHeaderCache: make(map[string]*ethpb.PartialDataColumnHeader),
		// GossipSub sends the messages to this channel. The buffer should be
		// big enough to avoid dropping messages. We don't want to block the gossipsub event loop for this.
		incomingReq: make(chan request, 128*16),
		logger:      logger,

		concurrentValidatorSemaphore: make(chan struct{}, maxConcurrentValidators),
	}
}

// AppendPubSubOpts adds the necessary pubsub options to enable partial messages.
func (p *PartialColumnBroadcaster) AppendPubSubOpts(opts []pubsub.Option) []pubsub.Option {
	slogger := slog.New(logrusadapter.Handler{Logger: p.logger})
	opts = append(opts,
		pubsub.WithPartialMessagesExtension(&partialmessages.PartialMessagesExtension{
			Logger: slogger,
			MergePartsMetadata: func(topic string, left, right partialmessages.PartsMetadata) partialmessages.PartsMetadata {
				if len(left) == 0 {
					return right
				}
				merged, err := bitfield.Bitlist(left).Or(bitfield.Bitlist(right))
				if err != nil {
					p.logger.Warn("Failed to merge bitfields", "err", err, "left", left, "right", right)
					return left
				}
				return partialmessages.PartsMetadata(merged)
			},
			ValidateRPC: func(from peer.ID, rpc *pubsub_pb.PartialMessagesExtension) error {
				// TODO. Add some basic and fast sanity checks
				return nil
			},
			OnIncomingRPC: func(from peer.ID, rpc *pubsub_pb.PartialMessagesExtension) error {
				select {
				case p.incomingReq <- request{
					kind:        requestKindHandleIncomingRPC,
					incomingRPC: rpcWithFrom{rpc, from},
				}:
				default:
					p.logger.Warn("Dropping incoming partial RPC", "rpc", rpc)
				}
				return nil
			},
		}),
		func(ps *pubsub.PubSub) error {
			p.ps = ps
			return nil
		},
	)
	return opts
}

// Start starts the event loop of the PartialColumnBroadcaster. Should be called
// within a goroutine (go p.Start())
func (p *PartialColumnBroadcaster) Start() {
	if p.stop != nil {
		return
	}
	p.stop = make(chan struct{})
	p.loop()
}

func (p *PartialColumnBroadcaster) loop() {
	cleanup := time.NewTicker(time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot))
	defer cleanup.Stop()
	for {
		select {
		case <-p.stop:
			return
		case <-cleanup.C:
			for groupID, ttl := range p.groupTTL {
				if ttl > 0 {
					p.groupTTL[groupID] = ttl - 1
					continue
				}

				delete(p.groupTTL, groupID)
				delete(p.validHeaderCache, groupID)
				for topic, msgStore := range p.partialMsgStore {
					delete(msgStore, groupID)
					if len(msgStore) == 0 {
						delete(p.partialMsgStore, topic)
					}
				}
			}
		case req := <-p.incomingReq:
			switch req.kind {
			case requestKindPublish:
				req.response <- p.publish(req.publish.topic, req.publish.c)
			case requestKindSubscribe:
				req.response <- p.subscribe(req.sub.t, req.sub.headerValidator, req.sub.validator, req.sub.handler, req.sub.headerHandler)
			case requestKindUnsubscribe:
				req.response <- p.unsubscribe(req.unsub.topic)
			case requestKindHandleIncomingRPC:
				err := p.handleIncomingRPC(req.incomingRPC)
				if err != nil {
					p.logger.Error("Failed to handle incoming partial RPC", "err", err)
				}
			case requestKindCellsValidated:
				err := p.handleCellsValidated(req.cellsValidated)
				if err != nil {
					p.logger.Error("Failed to handle cells validated", "err", err)
				}
			default:
				p.logger.Error("Unknown request kind", "kind", req.kind)
			}
		}
	}
}
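Every mutation of the broadcaster's maps funnels through incomingReq and is applied by the single loop goroutine, so none of the maps needs locking. A minimal sketch of how a node would wire this into gossipsub (host construction elided; ctx and h are assumed, and the option plumbing mirrors AppendPubSubOpts above):

	b := NewBroadcaster(logrus.New())
	opts := b.AppendPubSubOpts(nil) // or append to an existing []pubsub.Option slice
	ps, err := pubsub.NewGossipSub(ctx, h, opts...)
	if err != nil {
		return err
	}
	_ = ps // the broadcaster already captured its own reference via the option above
	go b.Start()
	defer b.Stop()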
func (p *PartialColumnBroadcaster) getDataColumn(topic string, group []byte) *blocks.PartialDataColumn {
	topicStore, ok := p.partialMsgStore[topic]
	if !ok {
		return nil
	}
	msg, ok := topicStore[string(group)]
	if !ok {
		return nil
	}
	return msg
}

func (p *PartialColumnBroadcaster) handleIncomingRPC(rpcWithFrom rpcWithFrom) error {
	if p.ps == nil {
		return errors.New("pubsub not initialized")
	}

	hasMessage := len(rpcWithFrom.PartialMessage) > 0

	var message ethpb.PartialDataColumnSidecar
	if hasMessage {
		err := message.UnmarshalSSZ(rpcWithFrom.PartialMessage)
		if err != nil {
			return errors.Wrap(err, "failed to unmarshal partial message data")
		}
	}

	topicID := rpcWithFrom.GetTopicID()
	groupID := rpcWithFrom.GroupID
	ourDataColumn := p.getDataColumn(topicID, groupID)
	var shouldRepublish bool

	if ourDataColumn == nil && hasMessage {
		var header *ethpb.PartialDataColumnHeader
		// Check cache first for this group
		if cachedHeader, ok := p.validHeaderCache[string(groupID)]; ok {
			header = cachedHeader
		} else {
			// We haven't seen this group before. Check if we have a valid header.
			if len(message.Header) == 0 {
				p.logger.Debug("No partial column found and no header in message, ignoring")
				return nil
			}

			header = message.Header[0]
			headerValidator, ok := p.headerValidators[topicID]
			if !ok || headerValidator == nil {
				p.logger.Debug("No header validator registered for topic")
				return nil
			}

			reject, err := headerValidator(header)
			if err != nil {
				p.logger.Debug("Header validation failed", "err", err, "reject", reject)
				if reject {
					// REJECT case: penalize the peer
					_ = p.ps.PeerFeedback(topicID, rpcWithFrom.from, pubsub.PeerFeedbackInvalidMessage)
				}
				// Both REJECT and IGNORE: don't process further
				return nil
			}
			// Cache the valid header
			p.validHeaderCache[string(groupID)] = header

			headerHandler, ok := p.headerHandlers[topicID]
			if !ok || headerHandler == nil {
				p.logger.Debug("No header handler registered for topic")
				return nil
			}
			headerHandler(header)
		}

		columnIndex, err := extractColumnIndexFromTopic(topicID)
		if err != nil {
			return err
		}

		newColumn, err := blocks.NewPartialDataColumn(
			header.SignedBlockHeader,
			columnIndex,
			header.KzgCommitments,
			header.KzgCommitmentsInclusionProof,
		)
		if err != nil {
			p.logger.WithError(err).WithFields(logrus.Fields{
				"topic":          topicID,
				"columnIndex":    columnIndex,
				"numCommitments": len(header.KzgCommitments),
			}).Error("Failed to create partial data column from header")
			return err
		}

		// Save to store
		topicStore, ok := p.partialMsgStore[topicID]
		if !ok {
			topicStore = make(map[string]*blocks.PartialDataColumn)
			p.partialMsgStore[topicID] = topicStore
		}
		topicStore[string(newColumn.GroupID())] = &newColumn
		p.groupTTL[string(newColumn.GroupID())] = TTLInSlots

		ourDataColumn = &newColumn
		shouldRepublish = true
	}

	if ourDataColumn == nil {
		// We don't have a partial column for this. Can happen if we got cells
		// without a header.
		return nil
	}

	logger := p.logger.WithFields(logrus.Fields{
		"from":  rpcWithFrom.from,
		"topic": topicID,
		"group": groupID,
	})

	validator, validatorOK := p.validators[topicID]
	if len(rpcWithFrom.PartialMessage) > 0 && validatorOK {
		// TODO: is there any penalty we want to consider for giving us data we didn't request?
		// Note that we need to be careful around race conditions and eager data.
		// Also note that protobufs by design allow extra data that we don't parse.
		// Marco's thoughts. No, we don't need to do anything else here.
		cellIndices, cellsToVerify, err := ourDataColumn.CellsToVerifyFromPartialMessage(&message)
		if err != nil {
			return err
		}
		// Track cells received via partial message
		if len(cellIndices) > 0 {
			columnIndexStr := strconv.FormatUint(ourDataColumn.Index, 10)
			partialMessageCellsReceivedTotal.WithLabelValues(columnIndexStr).Add(float64(len(cellIndices)))
		}
		if len(cellsToVerify) > 0 {
			p.concurrentValidatorSemaphore <- struct{}{}
			go func() {
				defer func() {
					<-p.concurrentValidatorSemaphore
				}()
				start := time.Now()
				err := validator(cellsToVerify)
				if err != nil {
					logger.Error("failed to validate cells", "err", err)
					_ = p.ps.PeerFeedback(topicID, rpcWithFrom.from, pubsub.PeerFeedbackInvalidMessage)
					return
				}
				_ = p.ps.PeerFeedback(topicID, rpcWithFrom.from, pubsub.PeerFeedbackUsefulMessage)
				p.incomingReq <- request{
					kind: requestKindCellsValidated,
					cellsValidated: &cellsValidated{
						validationTook: time.Since(start),
						topic:          topicID,
						group:          groupID,
						cells:          cellsToVerify,
						cellIndices:    cellIndices,
					},
				}
			}()
		}
	}

	peerHas := bitmap.Bitmap(rpcWithFrom.PartsMetadata)
	iHave := bitmap.Bitmap(ourDataColumn.PartsMetadata())
	if !shouldRepublish && len(peerHas) > 0 && !bytes.Equal(peerHas, iHave) {
		// Either we have something they don't or vice versa
		shouldRepublish = true
		logger.Debug("republishing due to parts metadata difference")
	}

	if shouldRepublish {
		err := p.ps.PublishPartialMessage(topicID, ourDataColumn, partialmessages.PublishOptions{})
		if err != nil {
			return err
		}
	}
	return nil
}
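Note how handleIncomingRPC bounds concurrent cell verification with a buffered-channel semaphore (maxConcurrentValidators slots): acquire by sending, release by receiving. The pattern in isolation, with hypothetical Job/process placeholders:

	sem := make(chan struct{}, 4) // at most 4 verifications in flight
	for _, job := range jobs {
		sem <- struct{}{} // acquire: blocks while all slots are taken
		go func(j Job) {
			defer func() { <-sem }() // release the slot when done
			process(j)
		}(job)
	}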
func (p *PartialColumnBroadcaster) handleCellsValidated(cells *cellsValidated) error {
	ourDataColumn := p.getDataColumn(cells.topic, cells.group)
	if ourDataColumn == nil {
		return errors.New("data column not found for verified cells")
	}
	extended := ourDataColumn.ExtendFromVerfifiedCells(cells.cellIndices, cells.cells)
	p.logger.Debug("Extended partial message", "duration", cells.validationTook, "extended", extended)

	columnIndexStr := strconv.FormatUint(ourDataColumn.Index, 10)
	if extended {
		// Track useful cells (cells that extended our data)
		partialMessageUsefulCellsTotal.WithLabelValues(columnIndexStr).Add(float64(len(cells.cells)))

		// TODO: we could use the heuristic here that if this data was
		// useful to us, it's likely useful to our peers and we should
		// republish eagerly

		if col, ok := ourDataColumn.Complete(p.logger); ok {
			p.logger.Info("Completed partial column", "topic", cells.topic, "group", cells.group)
			handler, handlerOK := p.handlers[cells.topic]

			if handlerOK {
				go handler(cells.topic, col)
			}
		} else {
			p.logger.Info("Extended partial column", "topic", cells.topic, "group", cells.group)
		}

		err := p.ps.PublishPartialMessage(cells.topic, ourDataColumn, partialmessages.PublishOptions{})
		if err != nil {
			return err
		}
	}
	return nil
}

func (p *PartialColumnBroadcaster) Stop() {
	if p.stop != nil {
		close(p.stop)
		p.stop = nil
	}
}

// Publish publishes the partial column.
func (p *PartialColumnBroadcaster) Publish(topic string, c blocks.PartialDataColumn) error {
	if p.ps == nil {
		return errors.New("pubsub not initialized")
	}
	respCh := make(chan error)
	p.incomingReq <- request{
		kind:     requestKindPublish,
		response: respCh,
		publish: publish{
			topic: topic,
			c:     c,
		},
	}
	return <-respCh
}

func (p *PartialColumnBroadcaster) publish(topic string, c blocks.PartialDataColumn) error {
	topicStore, ok := p.partialMsgStore[topic]
	if !ok {
		topicStore = make(map[string]*blocks.PartialDataColumn)
		p.partialMsgStore[topic] = topicStore
	}
	topicStore[string(c.GroupID())] = &c
	p.groupTTL[string(c.GroupID())] = TTLInSlots

	return p.ps.PublishPartialMessage(topic, &c, partialmessages.PublishOptions{})
}

type SubHandler func(topic string, col blocks.VerifiedRODataColumn)

func (p *PartialColumnBroadcaster) Subscribe(t *pubsub.Topic, headerValidator HeaderValidator, validator ColumnValidator, handler SubHandler, headerHandler HeaderHandler) error {
	respCh := make(chan error)
	p.incomingReq <- request{
		kind: requestKindSubscribe,
		sub: subscribe{
			t:               t,
			headerValidator: headerValidator,
			validator:       validator,
			handler:         handler,
			headerHandler:   headerHandler,
		},
		response: respCh,
	}
	return <-respCh
}

func (p *PartialColumnBroadcaster) subscribe(t *pubsub.Topic, headerValidator HeaderValidator, validator ColumnValidator, handler SubHandler, headerHandler HeaderHandler) error {
	topic := t.String()
	if _, ok := p.topics[topic]; ok {
		return errors.New("already subscribed")
	}

	p.topics[topic] = t
	p.headerValidators[topic] = headerValidator
	p.validators[topic] = validator
	p.handlers[topic] = handler
	p.headerHandlers[topic] = headerHandler
	return nil
}

func (p *PartialColumnBroadcaster) Unsubscribe(topic string) error {
	respCh := make(chan error)
	p.incomingReq <- request{
		kind: requestKindUnsubscribe,
		unsub: unsubscribe{
			topic: topic,
		},
		response: respCh,
	}
	return <-respCh
}

func (p *PartialColumnBroadcaster) unsubscribe(topic string) error {
	t, ok := p.topics[topic]
	if !ok {
		return errors.New("topic not found")
	}
	delete(p.topics, topic)
	delete(p.partialMsgStore, topic)
	delete(p.headerValidators, topic)
	delete(p.validators, topic)
	delete(p.handlers, topic)
	delete(p.headerHandlers, topic)
	return t.Close()
}
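Publish, Subscribe, and Unsubscribe all share one idiom: package the arguments into a request, hand it to the loop goroutine, and block on a per-call error channel. Stripped to its essentials (server, incoming, and Do are placeholder names):

	type req struct {
		arg      string
		response chan error
	}

	func (s *server) Do(arg string) error {
		respCh := make(chan error)
		s.incoming <- req{arg: arg, response: respCh}
		return <-respCh // the loop goroutine sends exactly one result per request
	}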
@@ -58,7 +58,7 @@ func TestPeerExplicitAdd(t *testing.T) {

 	resAddress, err := p.Address(id)
 	require.NoError(t, err)
-	assert.Equal(t, address.Equal(resAddress), true, "Unexpected address")
+	assert.Equal(t, address, resAddress, "Unexpected address")

 	resDirection, err := p.Direction(id)
 	require.NoError(t, err)
@@ -72,7 +72,7 @@ func TestPeerExplicitAdd(t *testing.T) {

 	resAddress2, err := p.Address(id)
 	require.NoError(t, err)
-	assert.Equal(t, address2.Equal(resAddress2), true, "Unexpected address")
+	assert.Equal(t, address2, resAddress2, "Unexpected address")

 	resDirection2, err := p.Direction(id)
 	require.NoError(t, err)
@@ -170,7 +170,7 @@ func (s *Service) pubsubOptions() []pubsub.Option {
 		pubsub.WithPeerScore(peerScoringParams(s.cfg.IPColocationWhitelist)),
 		pubsub.WithPeerScoreInspect(s.peerInspector, time.Minute),
 		pubsub.WithGossipSubParams(pubsubGossipParam()),
-		pubsub.WithRawTracer(&gossipTracer{host: s.host}),
+		pubsub.WithRawTracer(gossipTracer{host: s.host}),
 	}

 	if len(s.cfg.StaticPeers) > 0 {
@@ -181,9 +181,6 @@ func (s *Service) pubsubOptions() []pubsub.Option {
 		}
 		psOpts = append(psOpts, pubsub.WithDirectPeers(directPeersAddrInfos))
 	}
-	if s.partialColumnBroadcaster != nil {
-		psOpts = s.partialColumnBroadcaster.AppendPubSubOpts(psOpts)
-	}

 	return psOpts
 }
@@ -1,8 +1,6 @@
 package p2p

 import (
-	"sync"
-
 	pubsub "github.com/libp2p/go-libp2p-pubsub"
 	"github.com/libp2p/go-libp2p/core/host"
 	"github.com/libp2p/go-libp2p/core/peer"
@@ -10,7 +8,7 @@ import (
 	"github.com/prometheus/client_golang/prometheus"
 )

-var _ = pubsub.RawTracer(&gossipTracer{})
+var _ = pubsub.RawTracer(gossipTracer{})

 // Initializes the values for the pubsub rpc action.
 type action int
@@ -25,146 +23,85 @@ const (
 // and broadcasted through gossipsub.
 type gossipTracer struct {
 	host host.Host
-
-	mu sync.Mutex
-	// map topic -> Set(peerID). Peer is in set if it supports partial messages.
-	partialMessagePeers map[string]map[peer.ID]struct{}
-	// map topic -> Set(peerID). Peer is in set if in the mesh.
-	meshPeers map[string]map[peer.ID]struct{}
 }

 // AddPeer .
-func (g *gossipTracer) AddPeer(p peer.ID, proto protocol.ID) {
+func (g gossipTracer) AddPeer(p peer.ID, proto protocol.ID) {
 	// no-op
 }

 // RemovePeer .
-func (g *gossipTracer) RemovePeer(p peer.ID) {
-	g.mu.Lock()
-	defer g.mu.Unlock()
-	for _, peers := range g.partialMessagePeers {
-		delete(peers, p)
-	}
-	for topic, peers := range g.meshPeers {
-		if _, ok := peers[p]; ok {
-			delete(peers, p)
-			g.updateMeshPeersMetric(topic)
-		}
-	}
+func (g gossipTracer) RemovePeer(p peer.ID) {
+	// no-op
 }

 // Join .
-func (g *gossipTracer) Join(topic string) {
+func (g gossipTracer) Join(topic string) {
 	pubsubTopicsActive.WithLabelValues(topic).Set(1)
-	g.mu.Lock()
-	defer g.mu.Unlock()
-	if g.partialMessagePeers == nil {
-		g.partialMessagePeers = make(map[string]map[peer.ID]struct{})
-	}
-	if g.partialMessagePeers[topic] == nil {
-		g.partialMessagePeers[topic] = make(map[peer.ID]struct{})
-	}
-
-	if g.meshPeers == nil {
-		g.meshPeers = make(map[string]map[peer.ID]struct{})
-	}
-	if g.meshPeers[topic] == nil {
-		g.meshPeers[topic] = make(map[peer.ID]struct{})
-	}
 }

 // Leave .
-func (g *gossipTracer) Leave(topic string) {
+func (g gossipTracer) Leave(topic string) {
 	pubsubTopicsActive.WithLabelValues(topic).Set(0)
-	g.mu.Lock()
-	defer g.mu.Unlock()
-	delete(g.partialMessagePeers, topic)
-	delete(g.meshPeers, topic)
 }

 // Graft .
-func (g *gossipTracer) Graft(p peer.ID, topic string) {
+func (g gossipTracer) Graft(p peer.ID, topic string) {
 	pubsubTopicsGraft.WithLabelValues(topic).Inc()
-	g.mu.Lock()
-	defer g.mu.Unlock()
-	if m, ok := g.meshPeers[topic]; ok {
-		m[p] = struct{}{}
-	}
-	g.updateMeshPeersMetric(topic)
 }

 // Prune .
-func (g *gossipTracer) Prune(p peer.ID, topic string) {
+func (g gossipTracer) Prune(p peer.ID, topic string) {
 	pubsubTopicsPrune.WithLabelValues(topic).Inc()
-	g.mu.Lock()
-	defer g.mu.Unlock()
-	if m, ok := g.meshPeers[topic]; ok {
-		delete(m, p)
-	}
-	g.updateMeshPeersMetric(topic)
 }

 // ValidateMessage .
-func (g *gossipTracer) ValidateMessage(msg *pubsub.Message) {
+func (g gossipTracer) ValidateMessage(msg *pubsub.Message) {
 	pubsubMessageValidate.WithLabelValues(*msg.Topic).Inc()
 }

 // DeliverMessage .
-func (g *gossipTracer) DeliverMessage(msg *pubsub.Message) {
+func (g gossipTracer) DeliverMessage(msg *pubsub.Message) {
 	pubsubMessageDeliver.WithLabelValues(*msg.Topic).Inc()
 }

 // RejectMessage .
-func (g *gossipTracer) RejectMessage(msg *pubsub.Message, reason string) {
+func (g gossipTracer) RejectMessage(msg *pubsub.Message, reason string) {
 	pubsubMessageReject.WithLabelValues(*msg.Topic, reason).Inc()
 }

 // DuplicateMessage .
-func (g *gossipTracer) DuplicateMessage(msg *pubsub.Message) {
+func (g gossipTracer) DuplicateMessage(msg *pubsub.Message) {
 	pubsubMessageDuplicate.WithLabelValues(*msg.Topic).Inc()
 }

 // UndeliverableMessage .
-func (g *gossipTracer) UndeliverableMessage(msg *pubsub.Message) {
+func (g gossipTracer) UndeliverableMessage(msg *pubsub.Message) {
 	pubsubMessageUndeliverable.WithLabelValues(*msg.Topic).Inc()
 }

 // ThrottlePeer .
-func (g *gossipTracer) ThrottlePeer(p peer.ID) {
+func (g gossipTracer) ThrottlePeer(p peer.ID) {
 	agent := agentFromPid(p, g.host.Peerstore())
 	pubsubPeerThrottle.WithLabelValues(agent).Inc()
 }

 // RecvRPC .
-func (g *gossipTracer) RecvRPC(rpc *pubsub.RPC, from peer.ID) {
-	g.setMetricFromRPC(recv, pubsubRPCSubRecv, pubsubRPCPubRecv, pubsubRPCPubRecvSize, pubsubRPCRecv, rpc)
-
-	g.mu.Lock()
-	defer g.mu.Unlock()
-	for _, sub := range rpc.Subscriptions {
-		m, ok := g.partialMessagePeers[sub.GetTopicid()]
-		if !ok {
-			continue
-		}
-		if sub.GetSubscribe() && sub.GetRequestsPartial() {
-			m[from] = struct{}{}
-		} else {
-			delete(m, from)
-		}
-	}
+func (g gossipTracer) RecvRPC(rpc *pubsub.RPC) {
+	g.setMetricFromRPC(recv, pubsubRPCSubRecv, pubsubRPCPubRecv, pubsubRPCRecv, rpc)
 }

 // SendRPC .
-func (g *gossipTracer) SendRPC(rpc *pubsub.RPC, p peer.ID) {
-	g.setMetricFromRPC(send, pubsubRPCSubSent, pubsubRPCPubSent, pubsubRPCPubSentSize, pubsubRPCSent, rpc)
+func (g gossipTracer) SendRPC(rpc *pubsub.RPC, p peer.ID) {
+	g.setMetricFromRPC(send, pubsubRPCSubSent, pubsubRPCPubSent, pubsubRPCSent, rpc)
 }

 // DropRPC .
-func (g *gossipTracer) DropRPC(rpc *pubsub.RPC, p peer.ID) {
-	g.setMetricFromRPC(drop, pubsubRPCSubDrop, pubsubRPCPubDrop, pubsubRPCPubDropSize, pubsubRPCDrop, rpc)
+func (g gossipTracer) DropRPC(rpc *pubsub.RPC, p peer.ID) {
+	g.setMetricFromRPC(drop, pubsubRPCSubDrop, pubsubRPCPubDrop, pubsubRPCDrop, rpc)
 }

-func (g *gossipTracer) setMetricFromRPC(act action, subCtr prometheus.Counter, pubCtr, pubSizeCtr, ctrlCtr *prometheus.CounterVec, rpc *pubsub.RPC) {
+func (g gossipTracer) setMetricFromRPC(act action, subCtr prometheus.Counter, pubCtr, ctrlCtr *prometheus.CounterVec, rpc *pubsub.RPC) {
 	subCtr.Add(float64(len(rpc.Subscriptions)))
 	if rpc.Control != nil {
 		ctrlCtr.WithLabelValues("graft").Add(float64(len(rpc.Control.Graft)))
@@ -173,41 +110,12 @@ func (g *gossipTracer) setMetricFromRPC(act action, subCtr prometheus.Counter, p
 		ctrlCtr.WithLabelValues("iwant").Add(float64(len(rpc.Control.Iwant)))
 		ctrlCtr.WithLabelValues("idontwant").Add(float64(len(rpc.Control.Idontwant)))
 	}
-	// For incoming messages from pubsub, we do not record metrics for them as these values
-	// could be junk.
-	if act == recv {
-		return
-	}
 	for _, msg := range rpc.Publish {
-		pubCtr.WithLabelValues(msg.GetTopic()).Inc()
-		pubSizeCtr.WithLabelValues(msg.GetTopic(), "false").Add(float64(msg.Size()))
-	}
-	if rpc.Partial != nil {
-		pubCtr.WithLabelValues(rpc.Partial.GetTopicID()).Inc()
-		pubSizeCtr.WithLabelValues(rpc.Partial.GetTopicID(), "true").Add(float64(rpc.Partial.Size()))
-	}
-}
-
-// updateMeshPeersMetric requires the caller to hold the state mutex
-func (g *gossipTracer) updateMeshPeersMetric(topic string) {
-	meshPeers, ok := g.meshPeers[topic]
-	if !ok {
-		return
-	}
-	partialPeers, ok := g.partialMessagePeers[topic]
-	if !ok {
-		return
-	}
-
-	var supportsPartial, doesNotSupportPartial float64
-	for p := range meshPeers {
-		if _, ok := partialPeers[p]; ok {
-			supportsPartial++
-		} else {
-			doesNotSupportPartial++
-		}
+		// For incoming messages from pubsub, we do not record metrics for them as these values
+		// could be junk.
+		if act == recv {
+			continue
+		}
+		pubCtr.WithLabelValues(*msg.Topic).Inc()
 	}
-
-	pubsubMeshPeers.WithLabelValues(topic, "true").Set(supportsPartial)
-	pubsubMeshPeers.WithLabelValues(topic, "false").Set(doesNotSupportPartial)
 }
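The tracer's switch from pointer to value receivers is safe because the struct no longer carries a mutex or maps; a type whose whole method set uses value receivers satisfies an interface by value. In miniature (Tracer stands in for pubsub.RawTracer):

	type Tracer interface{ Join(topic string) }

	type tracer struct{ name string } // no mutable state left

	func (t tracer) Join(topic string) {} // value receiver

	var _ Tracer = tracer{} // compiles: the value itself implements Tracer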
@@ -11,7 +11,6 @@ import (
 	"github.com/OffchainLabs/prysm/v7/async"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/encoder"
-	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/partialdatacolumnbroadcaster"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers/scorers"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/types"
@@ -78,7 +77,6 @@ type Service struct {
 	privKey                  *ecdsa.PrivateKey
 	metaData                 metadata.Metadata
 	pubsub                   *pubsub.PubSub
-	partialColumnBroadcaster *partialdatacolumnbroadcaster.PartialColumnBroadcaster
 	joinedTopics             map[string]*pubsub.Topic
 	joinedTopicsLock         sync.RWMutex
 	subnetsLock              map[uint64]*sync.RWMutex
@@ -149,10 +147,6 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {
 		custodyInfoSet: make(chan struct{}),
 	}

-	if cfg.PartialDataColumns {
-		s.partialColumnBroadcaster = partialdatacolumnbroadcaster.NewBroadcaster(log.Logger)
-	}
-
 	ipAddr := prysmnetwork.IPAddr()

 	opts, err := s.buildOptions(ipAddr, s.privKey)
@@ -311,10 +305,6 @@ func (s *Service) Start() {
 		logExternalDNSAddr(s.host.ID(), p2pHostDNS, p2pTCPPort)
 	}
 	go s.forkWatcher()
-
-	if s.partialColumnBroadcaster != nil {
-		go s.partialColumnBroadcaster.Start()
-	}
 }

 // Stop the p2p service and terminate all peer connections.
@@ -324,10 +314,6 @@ func (s *Service) Stop() error {
 	if s.dv5Listener != nil {
 		s.dv5Listener.Close()
 	}
-
-	if s.partialColumnBroadcaster != nil {
-		s.partialColumnBroadcaster.Stop()
-	}
 	return nil
 }

@@ -364,10 +350,6 @@ func (s *Service) PubSub() *pubsub.PubSub {
 	return s.pubsub
 }

-func (s *Service) PartialColumnBroadcaster() *partialdatacolumnbroadcaster.PartialColumnBroadcaster {
-	return s.partialColumnBroadcaster
-}
-
 // Host returns the currently running libp2p
 // host of the service.
 func (s *Service) Host() host.Host {
@@ -21,7 +21,6 @@ go_library(
     deps = [
         "//beacon-chain/core/peerdas:go_default_library",
         "//beacon-chain/p2p/encoder:go_default_library",
-        "//beacon-chain/p2p/partialdatacolumnbroadcaster:go_default_library",
         "//beacon-chain/p2p/peers:go_default_library",
         "//beacon-chain/p2p/peers/scorers:go_default_library",
         "//config/fieldparams:go_default_library",
@@ -4,7 +4,6 @@ import (
 	"context"

 	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/encoder"
-	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/partialdatacolumnbroadcaster"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers"
 	fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
 	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
@@ -109,10 +108,6 @@ func (*FakeP2P) PubSub() *pubsub.PubSub {
 	return nil
 }

-func (*FakeP2P) PartialColumnBroadcaster() *partialdatacolumnbroadcaster.PartialColumnBroadcaster {
-	return nil
-}
-
 // MetadataSeq -- fake.
 func (*FakeP2P) MetadataSeq() uint64 {
 	return 0
@@ -174,7 +169,7 @@ func (*FakeP2P) BroadcastLightClientFinalityUpdate(_ context.Context, _ interfac
 }

 // BroadcastDataColumnSidecar -- fake.
-func (*FakeP2P) BroadcastDataColumnSidecars(_ context.Context, _ []blocks.VerifiedRODataColumn, _ []blocks.PartialDataColumn) error {
+func (*FakeP2P) BroadcastDataColumnSidecars(_ context.Context, _ []blocks.VerifiedRODataColumn) error {
 	return nil
 }

@@ -63,7 +63,7 @@ func (m *MockBroadcaster) BroadcastLightClientFinalityUpdate(_ context.Context,
 }

 // BroadcastDataColumnSidecar broadcasts a data column for mock.
-func (m *MockBroadcaster) BroadcastDataColumnSidecars(context.Context, []blocks.VerifiedRODataColumn, []blocks.PartialDataColumn) error {
+func (m *MockBroadcaster) BroadcastDataColumnSidecars(context.Context, []blocks.VerifiedRODataColumn) error {
 	m.BroadcastCalled.Store(true)
 	return nil
 }
@@ -13,7 +13,6 @@ import (
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/encoder"
-	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/partialdatacolumnbroadcaster"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers/scorers"
 	fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
@@ -243,7 +242,7 @@ func (p *TestP2P) BroadcastLightClientFinalityUpdate(_ context.Context, _ interf
 }

 // BroadcastDataColumnSidecar broadcasts a data column for mock.
-func (p *TestP2P) BroadcastDataColumnSidecars(context.Context, []blocks.VerifiedRODataColumn, []blocks.PartialDataColumn) error {
+func (p *TestP2P) BroadcastDataColumnSidecars(context.Context, []blocks.VerifiedRODataColumn) error {
 	p.BroadcastCalled.Store(true)
 	return nil
 }
@@ -309,10 +308,6 @@ func (p *TestP2P) PubSub() *pubsub.PubSub {
 	return p.pubsub
 }

-func (p *TestP2P) PartialColumnBroadcaster() *partialdatacolumnbroadcaster.PartialColumnBroadcaster {
-	return nil
-}
-
 // Disconnect from a peer.
 func (p *TestP2P) Disconnect(pid peer.ID) error {
 	return p.BHost.Network().ClosePeer(pid)
@@ -26,8 +26,8 @@ import (
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/voluntaryexits/mock"
 	p2pMock "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/core"
-	state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
 	mockSync "github.com/OffchainLabs/prysm/v7/beacon-chain/sync/initial-sync/testing"
+	state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
 	"github.com/OffchainLabs/prysm/v7/config/params"
 	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
 	"github.com/OffchainLabs/prysm/v7/crypto/bls"
@@ -86,6 +86,7 @@ func TestGetSpec(t *testing.T) {
 	config.GloasForkEpoch = 110
 	config.BLSWithdrawalPrefixByte = byte('b')
 	config.ETH1AddressWithdrawalPrefixByte = byte('c')
+	config.BuilderWithdrawalPrefixByte = byte('e')
 	config.GenesisDelay = 24
 	config.SecondsPerSlot = 25
 	config.SlotDurationMilliseconds = 120
@@ -48,7 +48,6 @@ go_test(
        "@com_github_ethereum_go_ethereum//crypto:go_default_library",
        "@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
        "@org_golang_google_grpc//:go_default_library",
-       "@org_golang_google_grpc//metadata:go_default_library",
        "@org_golang_google_grpc//reflection:go_default_library",
        "@org_golang_google_protobuf//types/known/emptypb:go_default_library",
        "@org_golang_google_protobuf//types/known/timestamppb:go_default_library",
@@ -35,19 +35,18 @@ import (
 // providing RPC endpoints for verifying a beacon node's sync status, genesis and
 // version information, and services the node implements and runs.
 type Server struct {
 	LogsStreamer          logs.Streamer
 	StreamLogsBufferSize  int
 	SyncChecker           sync.Checker
 	Server                *grpc.Server
 	BeaconDB              db.ReadOnlyDatabase
 	PeersFetcher          p2p.PeersProvider
 	PeerManager           p2p.PeerManager
 	GenesisTimeFetcher    blockchain.TimeFetcher
 	GenesisFetcher        blockchain.GenesisFetcher
 	POWChainInfoFetcher   execution.ChainInfoFetcher
 	BeaconMonitoringHost  string
 	BeaconMonitoringPort  int
-	OptimisticModeFetcher blockchain.OptimisticModeFetcher
 }

 // Deprecated: The gRPC API will remain the default and fully supported through v8 (expected in 2026) but will be eventually removed in favor of REST API.
@@ -62,28 +61,21 @@ func (ns *Server) GetHealth(ctx context.Context, request *ethpb.HealthRequest) (
 	ctx, cancel := context.WithTimeout(ctx, timeoutDuration)
 	defer cancel() // Important to avoid a context leak

-	// Check optimistic status - validators should not participate when optimistic
-	isOptimistic, err := ns.OptimisticModeFetcher.IsOptimistic(ctx)
-	if err != nil {
-		return &empty.Empty{}, status.Errorf(codes.Internal, "Could not check optimistic status: %v", err)
-	}
-
-	if ns.SyncChecker.Synced() && !isOptimistic {
+	if ns.SyncChecker.Synced() {
 		return &empty.Empty{}, nil
 	}
 	if ns.SyncChecker.Syncing() || ns.SyncChecker.Initialized() {
-		// Set header for REST API clients (via gRPC-gateway)
-		if err := grpc.SetHeader(ctx, metadata.Pairs("x-http-code", strconv.FormatUint(http.StatusPartialContent, 10))); err != nil {
-			return &empty.Empty{}, status.Errorf(codes.Internal, "Could not set status code header: %v", err)
-		}
-		return &empty.Empty{}, status.Error(codes.Unavailable, "node is syncing")
-	}
-	if isOptimistic {
-		// Set header for REST API clients (via gRPC-gateway)
+		if request.SyncingStatus != 0 {
+			// override the 200 success with the provided request status
+			if err := grpc.SetHeader(ctx, metadata.Pairs("x-http-code", strconv.FormatUint(request.SyncingStatus, 10))); err != nil {
+				return &empty.Empty{}, status.Errorf(codes.Internal, "Could not set custom success code header: %v", err)
+			}
+			return &empty.Empty{}, nil
+		}
 		if err := grpc.SetHeader(ctx, metadata.Pairs("x-http-code", strconv.FormatUint(http.StatusPartialContent, 10))); err != nil {
-			return &empty.Empty{}, status.Errorf(codes.Internal, "Could not set status code header: %v", err)
+			return &empty.Empty{}, status.Errorf(codes.Internal, "Could not set custom success code header: %v", err)
 		}
-		return &empty.Empty{}, status.Error(codes.Unavailable, "node is optimistic")
+		return &empty.Empty{}, nil
 	}
 	return &empty.Empty{}, status.Errorf(codes.Unavailable, "service unavailable")
 }
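The reworked handler reports success while syncing and lets callers pick the HTTP code the gateway surfaces. A direct call makes the contract concrete (this mirrors the request shape the test below uses; 206 is the Beacon-API-style "syncing but available" status):

	// While syncing, ask the gRPC-gateway to answer 206 instead of 200.
	_, err := ns.GetHealth(ctx, &ethpb.HealthRequest{SyncingStatus: 206})
	// err is nil when the node is synced or syncing; codes.Unavailable
	// is returned only when the node is neither.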
@@ -2,7 +2,6 @@ package node

 import (
 	"errors"
-	"maps"
 	"testing"
 	"time"

@@ -22,7 +21,6 @@ import (
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/p2p/enode"
 	"google.golang.org/grpc"
-	"google.golang.org/grpc/metadata"
 	"google.golang.org/grpc/reflection"
 	"google.golang.org/protobuf/types/known/emptypb"
 	"google.golang.org/protobuf/types/known/timestamppb"
@@ -189,71 +187,32 @@ func TestNodeServer_GetETH1ConnectionStatus(t *testing.T) {
 	assert.Equal(t, errStr, res.CurrentConnectionError)
 }

-// mockServerTransportStream implements grpc.ServerTransportStream for testing
-type mockServerTransportStream struct {
-	headers map[string][]string
-}
-
-func (m *mockServerTransportStream) Method() string { return "" }
-func (m *mockServerTransportStream) SetHeader(md metadata.MD) error {
-	maps.Copy(m.headers, md)
-	return nil
-}
-func (m *mockServerTransportStream) SendHeader(metadata.MD) error { return nil }
-func (m *mockServerTransportStream) SetTrailer(metadata.MD) error { return nil }
-
 func TestNodeServer_GetHealth(t *testing.T) {
 	tests := []struct {
 		name         string
 		input        *mockSync.Sync
-		isOptimistic bool
+		customStatus uint64
 		wantedErr    string
 	}{
 		{
-			name:         "happy path - synced and not optimistic",
-			input:        &mockSync.Sync{IsSyncing: false, IsSynced: true},
-			isOptimistic: false,
+			name:  "happy path",
+			input: &mockSync.Sync{IsSyncing: false, IsSynced: true},
 		},
 		{
-			name:         "returns error when not synced and not syncing",
-			input:        &mockSync.Sync{IsSyncing: false, IsSynced: false},
-			isOptimistic: false,
-			wantedErr:    "service unavailable",
-		},
-		{
-			name:         "returns error when syncing",
-			input:        &mockSync.Sync{IsSyncing: true, IsSynced: false},
-			isOptimistic: false,
-			wantedErr:    "node is syncing",
-		},
-		{
-			name:         "returns error when synced but optimistic",
-			input:        &mockSync.Sync{IsSyncing: false, IsSynced: true},
-			isOptimistic: true,
-			wantedErr:    "node is optimistic",
-		},
-		{
-			name:         "returns error when syncing and optimistic",
-			input:        &mockSync.Sync{IsSyncing: true, IsSynced: false},
-			isOptimistic: true,
-			wantedErr:    "node is syncing",
+			name:      "syncing",
+			input:     &mockSync.Sync{IsSyncing: false},
+			wantedErr: "service unavailable",
 		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			server := grpc.NewServer()
 			ns := &Server{
 				SyncChecker: tt.input,
-				OptimisticModeFetcher: &mock.ChainService{Optimistic: tt.isOptimistic},
 			}
 			ethpb.RegisterNodeServer(server, ns)
 			reflection.Register(server)
-
-			// Create context with mock transport stream so grpc.SetHeader works
-			stream := &mockServerTransportStream{headers: make(map[string][]string)}
-			ctx := grpc.NewContextWithServerTransportStream(t.Context(), stream)
-
-			_, err := ns.GetHealth(ctx, &ethpb.HealthRequest{})
+			_, err := ns.GetHealth(t.Context(), &ethpb.HealthRequest{SyncingStatus: tt.customStatus})
 			if tt.wantedErr == "" {
 				require.NoError(t, err)
 				return
@@ -7,7 +7,6 @@ import (
 	"sync"
 	"time"

-	"github.com/OffchainLabs/go-bitfield"
 	builderapi "github.com/OffchainLabs/prysm/v7/api/client/builder"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/builder"
@@ -309,7 +308,6 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
 	}

 	rob, err := blocks.NewROBlockWithRoot(block, root)
-	var partialColumns []blocks.PartialDataColumn
 	if block.IsBlinded() {
 		block, blobSidecars, err = vs.handleBlindedBlock(ctx, block)
 		if errors.Is(err, builderapi.ErrBadGateway) {
@@ -317,7 +315,7 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
 			return &ethpb.ProposeResponse{BlockRoot: root[:]}, nil
 		}
 	} else if block.Version() >= version.Deneb {
-		blobSidecars, dataColumnSidecars, partialColumns, err = vs.handleUnblindedBlock(rob, req)
+		blobSidecars, dataColumnSidecars, err = vs.handleUnblindedBlock(rob, req)
 	}
 	if err != nil {
 		return nil, status.Errorf(codes.Internal, "%s: %v", "handle block failed", err)
@@ -337,7 +335,7 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign

 	wg.Wait()

-	if err := vs.broadcastAndReceiveSidecars(ctx, block, root, blobSidecars, dataColumnSidecars, partialColumns); err != nil {
+	if err := vs.broadcastAndReceiveSidecars(ctx, block, root, blobSidecars, dataColumnSidecars); err != nil {
 		return nil, status.Errorf(codes.Internal, "Could not broadcast/receive sidecars: %v", err)
 	}
 	if err := <-errChan; err != nil {
@@ -354,10 +352,9 @@ func (vs *Server) broadcastAndReceiveSidecars(
 	root [fieldparams.RootLength]byte,
 	blobSidecars []*ethpb.BlobSidecar,
 	dataColumnSidecars []blocks.RODataColumn,
-	partialColumns []blocks.PartialDataColumn,
 ) error {
 	if block.Version() >= version.Fulu {
-		if err := vs.broadcastAndReceiveDataColumns(ctx, dataColumnSidecars, partialColumns); err != nil {
+		if err := vs.broadcastAndReceiveDataColumns(ctx, dataColumnSidecars); err != nil {
 			return errors.Wrap(err, "broadcast and receive data columns")
 		}
 		return nil
@@ -406,41 +403,34 @@ func (vs *Server) handleUnblindedBlock(
 func (vs *Server) handleUnblindedBlock(
 	block blocks.ROBlock,
 	req *ethpb.GenericSignedBeaconBlock,
-) ([]*ethpb.BlobSidecar, []blocks.RODataColumn, []blocks.PartialDataColumn, error) {
+) ([]*ethpb.BlobSidecar, []blocks.RODataColumn, error) {
 	rawBlobs, proofs, err := blobsAndProofs(req)
 	if err != nil {
-		return nil, nil, nil, err
+		return nil, nil, err
 	}

 	if block.Version() >= version.Fulu {
 		// Compute cells and proofs from the blobs and cell proofs.
 		cellsPerBlob, proofsPerBlob, err := peerdas.ComputeCellsAndProofsFromFlat(rawBlobs, proofs)
 		if err != nil {
-			return nil, nil, nil, errors.Wrap(err, "compute cells and proofs")
+			return nil, nil, errors.Wrap(err, "compute cells and proofs")
 		}

 		// Construct data column sidecars from the signed block and cells and proofs.
 		roDataColumnSidecars, err := peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, peerdas.PopulateFromBlock(block))
 		if err != nil {
-			return nil, nil, nil, errors.Wrap(err, "data column sidcars")
+			return nil, nil, errors.Wrap(err, "data column sidcars")
 		}

-		included := bitfield.NewBitlist(uint64(len(cellsPerBlob)))
-		included = included.Not() // all bits set to 1
-		partialColumns, err := peerdas.PartialColumns(included, cellsPerBlob, proofsPerBlob, peerdas.PopulateFromBlock(block))
-		if err != nil {
-			return nil, nil, nil, errors.Wrap(err, "data column sidcars")
-		}
-
-		return nil, roDataColumnSidecars, partialColumns, nil
+		return nil, roDataColumnSidecars, nil
 	}

 	blobSidecars, err := BuildBlobSidecars(block, rawBlobs, proofs)
 	if err != nil {
-		return nil, nil, nil, errors.Wrap(err, "build blob sidecars")
+		return nil, nil, errors.Wrap(err, "build blob sidecars")
 	}

-	return blobSidecars, nil, nil, nil
+	return blobSidecars, nil, nil
 }

 // broadcastReceiveBlock broadcasts a block and handles its reception.
@@ -507,7 +497,7 @@ func (vs *Server) broadcastAndReceiveBlobs(ctx context.Context, sidecars []*ethp
|
|||||||
}
|
}
|
||||||
|
|
||||||
// broadcastAndReceiveDataColumns handles the broadcasting and reception of data columns sidecars.
|
// broadcastAndReceiveDataColumns handles the broadcasting and reception of data columns sidecars.
|
||||||
func (vs *Server) broadcastAndReceiveDataColumns(ctx context.Context, roSidecars []blocks.RODataColumn, partialColumns []blocks.PartialDataColumn) error {
|
func (vs *Server) broadcastAndReceiveDataColumns(ctx context.Context, roSidecars []blocks.RODataColumn) error {
|
||||||
// We built this block ourselves, so we can upgrade the read only data column sidecar into a verified one.
|
// We built this block ourselves, so we can upgrade the read only data column sidecar into a verified one.
|
||||||
verifiedSidecars := make([]blocks.VerifiedRODataColumn, 0, len(roSidecars))
|
verifiedSidecars := make([]blocks.VerifiedRODataColumn, 0, len(roSidecars))
|
||||||
for _, sidecar := range roSidecars {
|
for _, sidecar := range roSidecars {
|
||||||
@@ -516,7 +506,7 @@ func (vs *Server) broadcastAndReceiveDataColumns(ctx context.Context, roSidecars
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Broadcast sidecars (non blocking).
|
// Broadcast sidecars (non blocking).
|
||||||
if err := vs.P2P.BroadcastDataColumnSidecars(ctx, verifiedSidecars, partialColumns); err != nil {
|
if err := vs.P2P.BroadcastDataColumnSidecars(ctx, verifiedSidecars); err != nil {
|
||||||
return errors.Wrap(err, "broadcast data column sidecars")
|
return errors.Wrap(err, "broadcast data column sidecars")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
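With partial columns removed, the Fulu branch of handleUnblindedBlock reduces to a two-step pipeline: expand the flat blobs and proofs into per-blob cells, then wrap them into data column sidecars. A minimal sketch of that retained path, reusing the peerdas helpers the diff itself calls (the standalone function and the [][]byte parameter types are assumptions; the real types come from blobsAndProofs):

// buildFuluColumns is a hypothetical standalone version of the Fulu path above.
// rawBlobs and proofs stand in for whatever blobsAndProofs actually returns.
func buildFuluColumns(block blocks.ROBlock, rawBlobs, proofs [][]byte) ([]blocks.RODataColumn, error) {
	// Expand flat blobs plus cell proofs into per-blob cells and proofs.
	cellsPerBlob, proofsPerBlob, err := peerdas.ComputeCellsAndProofsFromFlat(rawBlobs, proofs)
	if err != nil {
		return nil, errors.Wrap(err, "compute cells and proofs")
	}
	// Group the cells column-wise into read-only data column sidecars.
	return peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, peerdas.PopulateFromBlock(block))
}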
@@ -259,19 +259,18 @@ func NewService(ctx context.Context, cfg *Config) *Service {
 	}
 	s.validatorServer = validatorServer
 	nodeServer := &nodev1alpha1.Server{
 		LogsStreamer:         logs.NewStreamServer(),
 		StreamLogsBufferSize: 1000, // Enough to handle bursts of beacon node logs for gRPC streaming.
 		BeaconDB:             s.cfg.BeaconDB,
 		Server:               s.grpcServer,
 		SyncChecker:          s.cfg.SyncService,
 		GenesisTimeFetcher:   s.cfg.GenesisTimeFetcher,
 		PeersFetcher:         s.cfg.PeersFetcher,
 		PeerManager:          s.cfg.PeerManager,
 		GenesisFetcher:       s.cfg.GenesisFetcher,
 		POWChainInfoFetcher:  s.cfg.ExecutionChainInfoFetcher,
 		BeaconMonitoringHost: s.cfg.BeaconMonitoringHost,
 		BeaconMonitoringPort: s.cfg.BeaconMonitoringPort,
-		OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
 	}
 	beaconChainServer := &beaconv1alpha1.Server{
 		Ctx: s.ctx,
@@ -1,24 +1,51 @@
 package state
 
 import (
+	fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
 	"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
 	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
+	enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
 	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
 )
 
 type writeOnlyGloasFields interface {
+	// Bids.
 	SetExecutionPayloadBid(h interfaces.ROExecutionPayloadBid) error
 
+	// Builder pending payments / withdrawals.
 	SetBuilderPendingPayment(index primitives.Slot, payment *ethpb.BuilderPendingPayment) error
 	ClearBuilderPendingPayment(index primitives.Slot) error
+	QueueBuilderPayment() error
 	RotateBuilderPendingPayments() error
 	AppendBuilderPendingWithdrawals([]*ethpb.BuilderPendingWithdrawal) error
 
+	// Execution payload availability.
 	UpdateExecutionPayloadAvailabilityAtIndex(idx uint64, val byte) error
 
+	// Misc.
+	SetLatestBlockHash(hash [32]byte) error
+	SetExecutionPayloadAvailability(index primitives.Slot, available bool) error
+
+	// Builders.
+	IncreaseBuilderBalance(index primitives.BuilderIndex, amount uint64) error
+	AddBuilderFromDeposit(pubkey [fieldparams.BLSPubkeyLength]byte, withdrawalCredentials [fieldparams.RootLength]byte, amount uint64) error
 }
 
 type readOnlyGloasFields interface {
+	// Bids.
+	LatestExecutionPayloadBid() (interfaces.ROExecutionPayloadBid, error)
+
+	// Builder pending payments / withdrawals.
+	BuilderPendingPayments() ([]*ethpb.BuilderPendingPayment, error)
+	WithdrawalsMatchPayloadExpected(withdrawals []*enginev1.Withdrawal) (bool, error)
+
+	// Misc.
+	LatestBlockHash() ([32]byte, error)
+
+	// Builders.
+	Builder(index primitives.BuilderIndex) (*ethpb.Builder, error)
 	BuilderPubkey(primitives.BuilderIndex) ([48]byte, error)
+	BuilderIndexByPubkey(pubkey [fieldparams.BLSPubkeyLength]byte) (primitives.BuilderIndex, bool)
 	IsActiveBuilder(primitives.BuilderIndex) (bool, error)
 	CanBuilderCoverBid(primitives.BuilderIndex, primitives.Gwei) (bool, error)
-	LatestBlockHash() ([32]byte, error)
-	BuilderPendingPayments() ([]*ethpb.BuilderPendingPayment, error)
 }
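Taken together, the new accessors let a caller resolve a builder by BLS pubkey and then operate on it by index. A hypothetical usage sketch, assuming the combined state.BeaconState interface composes both the read-only and write-only Gloas field sets (the creditBuilder helper is illustrative and not part of this diff):

// creditBuilder resolves a builder by pubkey via the read-only interface,
// then credits its balance through the write-only interface.
func creditBuilder(st state.BeaconState, pubkey [fieldparams.BLSPubkeyLength]byte, amount uint64) error {
	idx, ok := st.BuilderIndexByPubkey(pubkey)
	if !ok {
		return fmt.Errorf("no builder registered for pubkey %x", pubkey)
	}
	return st.IncreaseBuilderBalance(idx, amount)
}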
@@ -1,11 +1,16 @@
 package state_native
 
 import (
+	"bytes"
 	"fmt"
 
 	fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
 	"github.com/OffchainLabs/prysm/v7/config/params"
+	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
+	"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
 	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
+	"github.com/OffchainLabs/prysm/v7/encoding/ssz"
+	enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
 	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
 	"github.com/OffchainLabs/prysm/v7/runtime/version"
 )
@@ -147,3 +152,78 @@ func (b *BeaconState) BuilderPendingPayments() ([]*ethpb.BuilderPendingPayment,
 
 	return b.builderPendingPaymentsVal(), nil
 }
+
+// LatestExecutionPayloadBid returns the cached latest execution payload bid for Gloas.
+func (b *BeaconState) LatestExecutionPayloadBid() (interfaces.ROExecutionPayloadBid, error) {
+	b.lock.RLock()
+	defer b.lock.RUnlock()
+
+	if b.latestExecutionPayloadBid == nil {
+		return nil, nil
+	}
+
+	return blocks.WrappedROExecutionPayloadBid(b.latestExecutionPayloadBid.Copy())
+}
+
+// WithdrawalsMatchPayloadExpected returns true if the given withdrawals root matches the state's
+// payload_expected_withdrawals root.
+func (b *BeaconState) WithdrawalsMatchPayloadExpected(withdrawals []*enginev1.Withdrawal) (bool, error) {
+	if b.version < version.Gloas {
+		return false, errNotSupported("WithdrawalsMatchPayloadExpected", b.version)
+	}
+
+	b.lock.RLock()
+	defer b.lock.RUnlock()
+
+	cfg := params.BeaconConfig()
+
+	withdrawalsRoot, err := ssz.WithdrawalSliceRoot(withdrawals, cfg.MaxWithdrawalsPerPayload)
+	if err != nil {
+		return false, fmt.Errorf("could not compute withdrawals root: %w", err)
+	}
+
+	expected := b.payloadExpectedWithdrawals
+	if expected == nil {
+		expected = []*enginev1.Withdrawal{}
+	}
+	expectedRoot, err := ssz.WithdrawalSliceRoot(expected, cfg.MaxWithdrawalsPerPayload)
+	if err != nil {
+		return false, fmt.Errorf("could not compute expected withdrawals root: %w", err)
+	}
+
+	return withdrawalsRoot == expectedRoot, nil
+}
+
+// Builder returns the builder at the given index.
+func (b *BeaconState) Builder(index primitives.BuilderIndex) (*ethpb.Builder, error) {
+	b.lock.RLock()
+	defer b.lock.RUnlock()
+
+	if b.builders == nil {
+		return nil, nil
+	}
+	if uint64(index) >= uint64(len(b.builders)) {
+		return nil, fmt.Errorf("builder index %d out of bounds", index)
+	}
+	if b.builders[index] == nil {
+		return nil, nil
+	}
+
+	return ethpb.CopyBuilder(b.builders[index]), nil
+}
+
+// BuilderIndexByPubkey returns the builder index for the given pubkey, if present.
+func (b *BeaconState) BuilderIndexByPubkey(pubkey [fieldparams.BLSPubkeyLength]byte) (primitives.BuilderIndex, bool) {
+	b.lock.RLock()
+	defer b.lock.RUnlock()
+
+	for i, builder := range b.builders {
+		if builder == nil {
+			continue
+		}
+		if bytes.Equal(builder.Pubkey, pubkey[:]) {
+			return primitives.BuilderIndex(i), true
+		}
+	}
+	return 0, false
+}
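WithdrawalsMatchPayloadExpected compares SSZ hash tree roots rather than walking both slices element by element, which is why a nil expected list is first normalized to an empty slice: both hash to the same root. A hedged usage sketch (the payload variable and its Withdrawals field are assumptions standing in for an execution payload):

// Check whether an incoming payload carries exactly the withdrawals the
// state expects; each side is reduced to a single 32-byte SSZ root.
ok, err := st.WithdrawalsMatchPayloadExpected(payload.Withdrawals)
if err != nil {
	return err
}
if !ok {
	return errors.New("payload withdrawals do not match expected withdrawals")
}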
@@ -5,8 +5,10 @@ import (
 	"testing"
 
 	state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
+	fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
 	"github.com/OffchainLabs/prysm/v7/config/params"
 	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
+	enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
 	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
 	"github.com/OffchainLabs/prysm/v7/testing/require"
 	"github.com/OffchainLabs/prysm/v7/testing/util"
@@ -166,3 +168,132 @@ func TestBuilderPendingPayments_UnsupportedVersion(t *testing.T) {
 	_, err = st.BuilderPendingPayments()
 	require.ErrorContains(t, "BuilderPendingPayments", err)
 }
+
+func TestWithdrawalsMatchPayloadExpected(t *testing.T) {
+	t.Run("returns error before gloas", func(t *testing.T) {
+		stIface, _ := util.DeterministicGenesisState(t, 1)
+		native, ok := stIface.(*state_native.BeaconState)
+		require.Equal(t, true, ok)
+
+		_, err := native.WithdrawalsMatchPayloadExpected(nil)
+		require.ErrorContains(t, "is not supported", err)
+	})
+
+	t.Run("returns true when roots match", func(t *testing.T) {
+		withdrawals := []*enginev1.Withdrawal{
+			{Index: 0, ValidatorIndex: 1, Address: bytes.Repeat([]byte{0x01}, 20), Amount: 10},
+		}
+		st, err := state_native.InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
+			PayloadExpectedWithdrawals: withdrawals,
+		})
+		require.NoError(t, err)
+
+		ok, err := st.WithdrawalsMatchPayloadExpected(withdrawals)
+		require.NoError(t, err)
+		require.Equal(t, true, ok)
+	})
+
+	t.Run("returns false when roots do not match", func(t *testing.T) {
+		expected := []*enginev1.Withdrawal{
+			{Index: 0, ValidatorIndex: 1, Address: bytes.Repeat([]byte{0x01}, 20), Amount: 10},
+		}
+		actual := []*enginev1.Withdrawal{
+			{Index: 0, ValidatorIndex: 1, Address: bytes.Repeat([]byte{0x01}, 20), Amount: 11},
+		}
+
+		st, err := state_native.InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
+			PayloadExpectedWithdrawals: expected,
+		})
+		require.NoError(t, err)
+
+		ok, err := st.WithdrawalsMatchPayloadExpected(actual)
+		require.NoError(t, err)
+		require.Equal(t, false, ok)
+	})
+}
+
+func TestBuilder(t *testing.T) {
+	t.Run("nil builders returns nil", func(t *testing.T) {
+		st, err := state_native.InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
+			Builders: nil,
+		})
+		require.NoError(t, err)
+
+		got, err := st.Builder(0)
+		require.NoError(t, err)
+		require.Equal(t, (*ethpb.Builder)(nil), got)
+	})
+
+	t.Run("out of bounds returns error", func(t *testing.T) {
+		st, err := state_native.InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
+			Builders: []*ethpb.Builder{{}},
+		})
+		require.NoError(t, err)
+
+		_, err = st.Builder(1)
+		require.ErrorContains(t, "out of bounds", err)
+	})
+
+	t.Run("returns copy", func(t *testing.T) {
+		pubkey := bytes.Repeat([]byte{0xAA}, fieldparams.BLSPubkeyLength)
+		st, err := state_native.InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
+			Builders: []*ethpb.Builder{
+				{
+					Pubkey:            pubkey,
+					Balance:           42,
+					DepositEpoch:      3,
+					WithdrawableEpoch: 4,
+				},
+			},
+		})
+		require.NoError(t, err)
+
+		got1, err := st.Builder(0)
+		require.NoError(t, err)
+		require.NotEqual(t, (*ethpb.Builder)(nil), got1)
+		require.Equal(t, primitives.Gwei(42), got1.Balance)
+		require.DeepEqual(t, pubkey, got1.Pubkey)
+
+		// Mutate returned builder; state should be unchanged.
+		got1.Pubkey[0] = 0xFF
+		got2, err := st.Builder(0)
+		require.NoError(t, err)
+		require.Equal(t, byte(0xAA), got2.Pubkey[0])
+	})
+}
+
+func TestBuilderIndexByPubkey(t *testing.T) {
+	t.Run("not found returns false", func(t *testing.T) {
+		st, err := state_native.InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
+			Builders: []*ethpb.Builder{
+				{Pubkey: bytes.Repeat([]byte{0x11}, fieldparams.BLSPubkeyLength)},
+			},
+		})
+		require.NoError(t, err)
+
+		var pk [fieldparams.BLSPubkeyLength]byte
+		copy(pk[:], bytes.Repeat([]byte{0x22}, fieldparams.BLSPubkeyLength))
+		idx, ok := st.BuilderIndexByPubkey(pk)
+		require.Equal(t, false, ok)
+		require.Equal(t, primitives.BuilderIndex(0), idx)
+	})
+
+	t.Run("skips nil entries and finds match", func(t *testing.T) {
+		wantIdx := primitives.BuilderIndex(1)
+		wantPkBytes := bytes.Repeat([]byte{0xAB}, fieldparams.BLSPubkeyLength)
+
+		st, err := state_native.InitializeFromProtoGloas(&ethpb.BeaconStateGloas{
+			Builders: []*ethpb.Builder{
+				nil,
+				{Pubkey: wantPkBytes},
+			},
+		})
+		require.NoError(t, err)
+
+		var pk [fieldparams.BLSPubkeyLength]byte
+		copy(pk[:], wantPkBytes)
+		idx, ok := st.BuilderIndexByPubkey(pk)
+		require.Equal(t, true, ok)
+		require.Equal(t, wantIdx, idx)
+	})
+}
@@ -5,11 +5,14 @@ import (
 
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native/types"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stateutil"
+	fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
 	"github.com/OffchainLabs/prysm/v7/config/params"
 	"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
 	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
+	"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
 	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
 	"github.com/OffchainLabs/prysm/v7/runtime/version"
+	"github.com/OffchainLabs/prysm/v7/time/slots"
 )
 
 // RotateBuilderPendingPayments rotates the queue by dropping slots per epoch payments from the
@@ -121,6 +124,41 @@ func (b *BeaconState) ClearBuilderPendingPayment(index primitives.Slot) error {
 	return nil
 }
 
+// QueueBuilderPayment implements the builder payment queuing logic for Gloas.
+// Spec v1.7.0-alpha.0 (pseudocode):
+//
+//	payment = state.builder_pending_payments[SLOTS_PER_EPOCH + state.slot % SLOTS_PER_EPOCH]
+//	amount = payment.withdrawal.amount
+//	if amount > 0:
+//	    state.builder_pending_withdrawals.append(payment.withdrawal)
+//	state.builder_pending_payments[SLOTS_PER_EPOCH + state.slot % SLOTS_PER_EPOCH] = BuilderPendingPayment()
+func (b *BeaconState) QueueBuilderPayment() error {
+	if b.version < version.Gloas {
+		return errNotSupported("QueueBuilderPayment", b.version)
+	}
+
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	slot := b.slot
+	slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
+	paymentIndex := slotsPerEpoch + (slot % slotsPerEpoch)
+	if uint64(paymentIndex) >= uint64(len(b.builderPendingPayments)) {
+		return fmt.Errorf("builder pending payments index %d out of range (len=%d)", paymentIndex, len(b.builderPendingPayments))
+	}
+
+	payment := b.builderPendingPayments[paymentIndex]
+	if payment != nil && payment.Withdrawal != nil && payment.Withdrawal.Amount > 0 {
+		b.builderPendingWithdrawals = append(b.builderPendingWithdrawals, ethpb.CopyBuilderPendingWithdrawal(payment.Withdrawal))
+		b.markFieldAsDirty(types.BuilderPendingWithdrawals)
+	}
+
+	b.builderPendingPayments[paymentIndex] = emptyBuilderPendingPayment
+	b.markFieldAsDirty(types.BuilderPendingPayments)
+	return nil
+}
+
 // SetBuilderPendingPayment sets a builder pending payment at the specified index.
 func (b *BeaconState) SetBuilderPendingPayment(index primitives.Slot, payment *ethpb.BuilderPendingPayment) error {
 	if b.version < version.Gloas {
@@ -161,3 +199,91 @@ func (b *BeaconState) UpdateExecutionPayloadAvailabilityAtIndex(idx uint64, val
 	b.markFieldAsDirty(types.ExecutionPayloadAvailability)
 	return nil
 }
+
+// SetLatestBlockHash sets the latest execution block hash.
+func (b *BeaconState) SetLatestBlockHash(hash [32]byte) error {
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	b.latestBlockHash = hash[:]
+	b.markFieldAsDirty(types.LatestBlockHash)
+	return nil
+}
+
+// SetExecutionPayloadAvailability sets the execution payload availability bit for a specific slot.
+func (b *BeaconState) SetExecutionPayloadAvailability(index primitives.Slot, available bool) error {
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	bitIndex := index % params.BeaconConfig().SlotsPerHistoricalRoot
+	byteIndex := bitIndex / 8
+	bitPosition := bitIndex % 8
+
+	// Set or clear the bit
+	if available {
+		b.executionPayloadAvailability[byteIndex] |= 1 << bitPosition
+	} else {
+		b.executionPayloadAvailability[byteIndex] &^= 1 << bitPosition
+	}
+
+	b.markFieldAsDirty(types.ExecutionPayloadAvailability)
+	return nil
+}
+
+// IncreaseBuilderBalance increases the balance of the builder at the given index.
+func (b *BeaconState) IncreaseBuilderBalance(index primitives.BuilderIndex, amount uint64) error {
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	if b.builders == nil || uint64(index) >= uint64(len(b.builders)) {
+		return fmt.Errorf("builder index %d out of bounds", index)
+	}
+	if b.builders[index] == nil {
+		return fmt.Errorf("builder at index %d is nil", index)
+	}
+
+	builder := ethpb.CopyBuilder(b.builders[index])
+	builder.Balance += primitives.Gwei(amount)
+	b.builders[index] = builder
+
+	b.markFieldAsDirty(types.Builders)
+	return nil
+}
+
+// AddBuilderFromDeposit creates or replaces a builder entry derived from a deposit.
+func (b *BeaconState) AddBuilderFromDeposit(pubkey [fieldparams.BLSPubkeyLength]byte, withdrawalCredentials [fieldparams.RootLength]byte, amount uint64) error {
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	currentEpoch := slots.ToEpoch(b.slot)
+	index := b.builderInsertionIndex(currentEpoch)
+
+	builder := &ethpb.Builder{
+		Pubkey:            bytesutil.SafeCopyBytes(pubkey[:]),
+		Version:           []byte{withdrawalCredentials[0]},
+		ExecutionAddress:  bytesutil.SafeCopyBytes(withdrawalCredentials[12:]),
+		Balance:           primitives.Gwei(amount),
+		DepositEpoch:      currentEpoch,
+		WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
+	}
+
+	if index < primitives.BuilderIndex(len(b.builders)) {
+		b.builders[index] = builder
+	} else {
+		gap := index - primitives.BuilderIndex(len(b.builders)) + 1
+		b.builders = append(b.builders, make([]*ethpb.Builder, gap)...)
+		b.builders[index] = builder
+	}
+
+	b.markFieldAsDirty(types.Builders)
+	return nil
+}
+
+func (b *BeaconState) builderInsertionIndex(currentEpoch primitives.Epoch) primitives.BuilderIndex {
+	for i, builder := range b.builders {
+		if builder.WithdrawableEpoch <= currentEpoch && builder.Balance == 0 {
+			return primitives.BuilderIndex(i)
+		}
+	}
+	return primitives.BuilderIndex(len(b.builders))
+}
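The index arithmetic in QueueBuilderPayment relies on the pending-payments buffer holding two epochs' worth of slots (the tests below size it as slotsPerEpoch*2). That the second half covers the current epoch is an inference from the spec pseudocode rather than something the diff states outright. A small sketch of the mapping under that assumption:

// For a buffer of length 2*SLOTS_PER_EPOCH, QueueBuilderPayment's mapping
// lands every current-epoch slot in the second half of the buffer.
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch // 32 on mainnet
slot := primitives.Slot(3)
paymentIndex := slotsPerEpoch + (slot % slotsPerEpoch) // 32 + 3 = 35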
@@ -181,6 +181,80 @@ func TestClearBuilderPendingPayment(t *testing.T) {
 	})
 }
 
+func TestQueueBuilderPayment(t *testing.T) {
+	t.Run("previous fork returns expected error", func(t *testing.T) {
+		st := &BeaconState{version: version.Fulu}
+		err := st.QueueBuilderPayment()
+		require.ErrorContains(t, "is not supported", err)
+	})
+
+	t.Run("appends withdrawal, clears payment, and marks dirty", func(t *testing.T) {
+		slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
+		slot := primitives.Slot(3)
+		paymentIndex := slotsPerEpoch + (slot % slotsPerEpoch)
+
+		st := &BeaconState{
+			version:                   version.Gloas,
+			slot:                      slot,
+			dirtyFields:               make(map[types.FieldIndex]bool),
+			rebuildTrie:               make(map[types.FieldIndex]bool),
+			sharedFieldReferences:     make(map[types.FieldIndex]*stateutil.Reference),
+			builderPendingPayments:    make([]*ethpb.BuilderPendingPayment, slotsPerEpoch*2),
+			builderPendingWithdrawals: []*ethpb.BuilderPendingWithdrawal{},
+		}
+		st.builderPendingPayments[paymentIndex] = &ethpb.BuilderPendingPayment{
+			Weight: 1,
+			Withdrawal: &ethpb.BuilderPendingWithdrawal{
+				FeeRecipient: bytes.Repeat([]byte{0xAB}, 20),
+				Amount:       99,
+				BuilderIndex: 1,
+			},
+		}
+
+		require.NoError(t, st.QueueBuilderPayment())
+		require.Equal(t, emptyBuilderPendingPayment, st.builderPendingPayments[paymentIndex])
+		require.Equal(t, true, st.dirtyFields[types.BuilderPendingPayments])
+		require.Equal(t, true, st.dirtyFields[types.BuilderPendingWithdrawals])
+		require.Equal(t, 1, len(st.builderPendingWithdrawals))
+		require.DeepEqual(t, bytes.Repeat([]byte{0xAB}, 20), st.builderPendingWithdrawals[0].FeeRecipient)
+		require.Equal(t, primitives.Gwei(99), st.builderPendingWithdrawals[0].Amount)
+
+		// Ensure copied withdrawal is not aliased.
+		st.builderPendingPayments[paymentIndex].Withdrawal.FeeRecipient[0] = 0x01
+		require.Equal(t, byte(0xAB), st.builderPendingWithdrawals[0].FeeRecipient[0])
+	})
+
+	t.Run("zero amount does not append withdrawal", func(t *testing.T) {
+		slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
+		slot := primitives.Slot(3)
+		paymentIndex := slotsPerEpoch + (slot % slotsPerEpoch)
+
+		st := &BeaconState{
+			version:                   version.Gloas,
+			slot:                      slot,
+			dirtyFields:               make(map[types.FieldIndex]bool),
+			rebuildTrie:               make(map[types.FieldIndex]bool),
+			sharedFieldReferences:     make(map[types.FieldIndex]*stateutil.Reference),
+			builderPendingPayments:    make([]*ethpb.BuilderPendingPayment, slotsPerEpoch*2),
+			builderPendingWithdrawals: []*ethpb.BuilderPendingWithdrawal{},
+		}
+		st.builderPendingPayments[paymentIndex] = &ethpb.BuilderPendingPayment{
+			Weight: 1,
+			Withdrawal: &ethpb.BuilderPendingWithdrawal{
+				FeeRecipient: bytes.Repeat([]byte{0xAB}, 20),
+				Amount:       0,
+				BuilderIndex: 1,
+			},
+		}
+
+		require.NoError(t, st.QueueBuilderPayment())
+		require.Equal(t, emptyBuilderPendingPayment, st.builderPendingPayments[paymentIndex])
+		require.Equal(t, true, st.dirtyFields[types.BuilderPendingPayments])
+		require.Equal(t, false, st.dirtyFields[types.BuilderPendingWithdrawals])
+		require.Equal(t, 0, len(st.builderPendingWithdrawals))
+	})
+}
+
 func TestRotateBuilderPendingPayments(t *testing.T) {
 	totalPayments := 2 * params.BeaconConfig().SlotsPerEpoch
 	payments := make([]*ethpb.BuilderPendingPayment, totalPayments)
@@ -328,3 +402,134 @@ func newGloasStateWithAvailability(t *testing.T, availability []byte) *BeaconSta
 
 	return st.(*BeaconState)
 }
+
+func TestSetLatestBlockHash(t *testing.T) {
+	var hash [32]byte
+	copy(hash[:], []byte("latest-block-hash"))
+
+	state := &BeaconState{
+		dirtyFields: make(map[types.FieldIndex]bool),
+	}
+
+	require.NoError(t, state.SetLatestBlockHash(hash))
+	require.Equal(t, true, state.dirtyFields[types.LatestBlockHash])
+	require.DeepEqual(t, hash[:], state.latestBlockHash)
+}
+
+func TestSetExecutionPayloadAvailability(t *testing.T) {
+	state := &BeaconState{
+		executionPayloadAvailability: make([]byte, params.BeaconConfig().SlotsPerHistoricalRoot/8),
+		dirtyFields:                  make(map[types.FieldIndex]bool),
+	}
+
+	slot := primitives.Slot(10)
+	bitIndex := slot % params.BeaconConfig().SlotsPerHistoricalRoot
+	byteIndex := bitIndex / 8
+	bitPosition := bitIndex % 8
+
+	require.NoError(t, state.SetExecutionPayloadAvailability(slot, true))
+	require.Equal(t, true, state.dirtyFields[types.ExecutionPayloadAvailability])
+	require.Equal(t, byte(1<<bitPosition), state.executionPayloadAvailability[byteIndex]&(1<<bitPosition))
+
+	require.NoError(t, state.SetExecutionPayloadAvailability(slot, false))
+	require.Equal(t, byte(0), state.executionPayloadAvailability[byteIndex]&(1<<bitPosition))
+}
+
+func TestIncreaseBuilderBalance(t *testing.T) {
+	t.Run("out of bounds returns error", func(t *testing.T) {
+		st := &BeaconState{
+			version:     version.Gloas,
+			dirtyFields: make(map[types.FieldIndex]bool),
+			builders:    []*ethpb.Builder{},
+		}
+
+		err := st.IncreaseBuilderBalance(0, 1)
+		require.ErrorContains(t, "out of bounds", err)
+		require.Equal(t, false, st.dirtyFields[types.Builders])
+	})
+
+	t.Run("nil builder returns error", func(t *testing.T) {
+		st := &BeaconState{
+			version:     version.Gloas,
+			dirtyFields: make(map[types.FieldIndex]bool),
+			builders:    []*ethpb.Builder{nil},
+		}
+
+		err := st.IncreaseBuilderBalance(0, 1)
+		require.ErrorContains(t, "is nil", err)
+		require.Equal(t, false, st.dirtyFields[types.Builders])
+	})
+
+	t.Run("increments and marks dirty", func(t *testing.T) {
+		orig := &ethpb.Builder{Balance: 10}
+		st := &BeaconState{
+			version:     version.Gloas,
+			dirtyFields: make(map[types.FieldIndex]bool),
+			builders:    []*ethpb.Builder{orig},
+		}
+
+		require.NoError(t, st.IncreaseBuilderBalance(0, 5))
+		require.Equal(t, primitives.Gwei(15), st.builders[0].Balance)
+		require.Equal(t, true, st.dirtyFields[types.Builders])
+		// Copy-on-write semantics: builder pointer replaced.
+		require.NotEqual(t, orig, st.builders[0])
+	})
+}
+
+func TestAddBuilderFromDeposit(t *testing.T) {
+	t.Run("reuses empty withdrawable slot", func(t *testing.T) {
+		var pubkey [48]byte
+		copy(pubkey[:], bytes.Repeat([]byte{0xAA}, 48))
+		var wc [32]byte
+		copy(wc[:], bytes.Repeat([]byte{0xBB}, 32))
+		wc[0] = 0x42 // version byte
+
+		st := &BeaconState{
+			version:     version.Gloas,
+			slot:        0, // epoch 0
+			dirtyFields: make(map[types.FieldIndex]bool),
+			builders: []*ethpb.Builder{
+				{
+					WithdrawableEpoch: 0,
+					Balance:           0,
+				},
+			},
+		}
+
+		require.NoError(t, st.AddBuilderFromDeposit(pubkey, wc, 123))
+		require.Equal(t, 1, len(st.builders))
+		got := st.builders[0]
+		require.NotNil(t, got)
+		require.DeepEqual(t, pubkey[:], got.Pubkey)
+		require.DeepEqual(t, []byte{0x42}, got.Version)
+		require.DeepEqual(t, wc[12:], got.ExecutionAddress)
+		require.Equal(t, primitives.Gwei(123), got.Balance)
+		require.Equal(t, primitives.Epoch(0), got.DepositEpoch)
+		require.Equal(t, params.BeaconConfig().FarFutureEpoch, got.WithdrawableEpoch)
+		require.Equal(t, true, st.dirtyFields[types.Builders])
+	})
+
+	t.Run("appends new builder when no reusable slot", func(t *testing.T) {
+		var pubkey [48]byte
+		copy(pubkey[:], bytes.Repeat([]byte{0xAA}, 48))
+		var wc [32]byte
+		copy(wc[:], bytes.Repeat([]byte{0xBB}, 32))
+
+		st := &BeaconState{
+			version:     version.Gloas,
+			slot:        0,
+			dirtyFields: make(map[types.FieldIndex]bool),
+			builders: []*ethpb.Builder{
+				{
+					WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
+					Balance:           1,
+				},
+			},
+		}
+
+		require.NoError(t, st.AddBuilderFromDeposit(pubkey, wc, 5))
+		require.Equal(t, 2, len(st.builders))
+		require.NotNil(t, st.builders[1])
+		require.Equal(t, primitives.Gwei(5), st.builders[1].Balance)
+	})
+}
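SetExecutionPayloadAvailability and its test derive the bit position the same way: index into the byte slice with bit/8 and select the bit with 1 << (bit%8). As a standalone sketch of that pattern (the setBit helper is illustrative and not part of the diff):

// setBit sets or clears bit i of a packed bitfield, using the same
// byte-index/bit-position split as SetExecutionPayloadAvailability.
func setBit(field []byte, i uint64, on bool) {
	byteIndex := i / 8
	mask := byte(1) << (i % 8)
	if on {
		field[byteIndex] |= mask
	} else {
		field[byteIndex] &^= mask // AND NOT clears the bit
	}
}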
@@ -58,7 +58,6 @@ go_library(
         "validate_bls_to_execution_change.go",
         "validate_data_column.go",
         "validate_light_client.go",
-        "validate_partial_header.go",
         "validate_proposer_slashing.go",
        "validate_sync_committee_message.go",
         "validate_sync_contribution_proof.go",
@@ -99,7 +98,6 @@ go_library(
         "//beacon-chain/operations/voluntaryexits:go_default_library",
         "//beacon-chain/p2p:go_default_library",
         "//beacon-chain/p2p/encoder:go_default_library",
-        "//beacon-chain/p2p/partialdatacolumnbroadcaster:go_default_library",
         "//beacon-chain/p2p/peers:go_default_library",
         "//beacon-chain/p2p/types:go_default_library",
         "//beacon-chain/slasher/types:go_default_library",
@@ -2,10 +2,8 @@ package sync
 
 import (
 	"context"
-	"iter"
 	"time"
 
-	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
 	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
 	"github.com/OffchainLabs/prysm/v7/crypto/bls"
 	"github.com/OffchainLabs/prysm/v7/monitoring/tracing"
@@ -21,16 +19,9 @@ type signatureVerifier struct {
 	resChan chan error
 }
 
-type errorWithSegment struct {
-	err error
-	// segment is only available if the batched verification failed
-	segment peerdas.CellProofBundleSegment
-}
-
 type kzgVerifier struct {
-	sizeHint   int
-	cellProofs iter.Seq[blocks.CellProofBundle]
-	resChan    chan errorWithSegment
+	dataColumns []blocks.RODataColumn
+	resChan     chan error
 }
 
 // A routine that runs in the background to perform batch
@@ -256,16 +256,6 @@ var (
 			Help: "Count the number of data column sidecars obtained via the execution layer.",
 		},
 	)
-
-	usefulFullColumnsReceivedTotal = promauto.NewCounterVec(prometheus.CounterOpts{
-		Name: "beacon_useful_full_columns_received_total",
-		Help: "Number of useful full columns (any cell being useful) received",
-	}, []string{"column_index"})
-
-	partialMessageColumnCompletionsTotal = promauto.NewCounterVec(prometheus.CounterOpts{
-		Name: "beacon_partial_message_column_completions_total",
-		Help: "How often the partial message first completed the column",
-	}, []string{"column_index"})
 )
 
 func (s *Service) updateMetrics() {
@@ -5,8 +5,6 @@ import (
 	"fmt"
 	"reflect"
 	"runtime/debug"
-	"slices"
-	"strconv"
 	"strings"
 	"sync"
 	"time"
@@ -16,13 +14,11 @@ import (
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
-	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/partialdatacolumnbroadcaster"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/startup"
 	"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
 	"github.com/OffchainLabs/prysm/v7/config/features"
 	"github.com/OffchainLabs/prysm/v7/config/params"
-	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
 	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
 	"github.com/OffchainLabs/prysm/v7/monitoring/tracing"
 	"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
@@ -65,16 +61,6 @@ type subscribeParameters struct {
 	// getSubnetsRequiringPeers is a function that returns all subnets that require peers to be found
 	// but for which no subscriptions are needed.
 	getSubnetsRequiringPeers func(currentSlot primitives.Slot) map[uint64]bool
-
-	partial *partialSubscribeParameters
-}
-
-type partialSubscribeParameters struct {
-	broadcaster    *partialdatacolumnbroadcaster.PartialColumnBroadcaster
-	validateHeader partialdatacolumnbroadcaster.HeaderValidator
-	validate       partialdatacolumnbroadcaster.ColumnValidator
-	handle         partialdatacolumnbroadcaster.SubHandler
-	handleHeader   partialdatacolumnbroadcaster.HeaderHandler
 }
 
 // shortTopic is a less verbose version of topic strings used for logging.
@@ -334,43 +320,6 @@ func (s *Service) registerSubscribers(nse params.NetworkScheduleEntry) bool {
 	// New gossip topic in Fulu.
 	if params.BeaconConfig().FuluForkEpoch <= nse.Epoch {
 		s.spawn(func() {
-			var ps *partialSubscribeParameters
-			broadcaster := s.cfg.p2p.PartialColumnBroadcaster()
-			if broadcaster != nil {
-				ps = &partialSubscribeParameters{
-					broadcaster: broadcaster,
-					validateHeader: func(header *ethpb.PartialDataColumnHeader) (bool, error) {
-						return s.validatePartialDataColumnHeader(context.TODO(), header)
-					},
-					validate: func(cellsToVerify []blocks.CellProofBundle) error {
-						return peerdas.VerifyDataColumnsCellsKZGProofs(len(cellsToVerify), slices.Values(cellsToVerify))
-					},
-					handle: func(topic string, col blocks.VerifiedRODataColumn) {
-						ctx, cancel := context.WithTimeout(s.ctx, pubsubMessageTimeout)
-						defer cancel()
-
-						slot := col.SignedBlockHeader.Header.Slot
-						proposerIndex := col.SignedBlockHeader.Header.ProposerIndex
-						if !s.hasSeenDataColumnIndex(slot, proposerIndex, col.Index) {
-							s.setSeenDataColumnIndex(slot, proposerIndex, col.Index)
-							// This column was completed from a partial message.
-							partialMessageColumnCompletionsTotal.WithLabelValues(strconv.FormatUint(col.Index, 10)).Inc()
-						}
-						err := s.verifiedRODataColumnSubscriber(ctx, col)
-						if err != nil {
-							log.WithError(err).Error("Failed to handle verified RO data column subscriber")
-						}
-					},
-					handleHeader: func(header *ethpb.PartialDataColumnHeader) {
-						ctx, cancel := context.WithTimeout(s.ctx, pubsubMessageTimeout)
-						defer cancel()
-						err := s.partialDataColumnHeaderSubscriber(ctx, header)
-						if err != nil {
-							log.WithError(err).Error("Failed to handle partial data column header")
-						}
-					},
-				}
-			}
 			s.subscribeWithParameters(subscribeParameters{
 				topicFormat: p2p.DataColumnSubnetTopicFormat,
 				validate:    s.validateDataColumn,
@@ -378,7 +327,6 @@ func (s *Service) registerSubscribers(nse params.NetworkScheduleEntry) bool {
 				nse:                      nse,
 				getSubnetsToJoin:         s.dataColumnSubnetIndices,
 				getSubnetsRequiringPeers: s.allDataColumnSubnets,
-				partial:                  ps,
 			})
 		})
 	}
@@ -417,10 +365,11 @@ func (s *Service) subscribe(topic string, validator wrappedVal, handle subHandle
 		// Impossible condition as it would mean topic does not exist.
 		panic(fmt.Sprintf("%s is not mapped to any message in GossipTopicMappings", topic)) // lint:nopanic -- Impossible condition.
 	}
-	s.subscribeWithBase(s.addDigestToTopic(topic, nse.ForkDigest)+s.cfg.p2p.Encoding().ProtocolSuffix(), validator, handle)
+	s.subscribeWithBase(s.addDigestToTopic(topic, nse.ForkDigest), validator, handle)
 }
 
 func (s *Service) subscribeWithBase(topic string, validator wrappedVal, handle subHandler) *pubsub.Subscription {
+	topic += s.cfg.p2p.Encoding().ProtocolSuffix()
 	log := log.WithField("topic", topic)
 
 	// Do not resubscribe already seen subscriptions.
@@ -583,11 +532,7 @@ func (s *Service) wrapAndReportValidation(topic string, v wrappedVal) (string, p
 func (s *Service) pruneNotWanted(t *subnetTracker, wantedSubnets map[uint64]bool) {
 	for _, subnet := range t.unwanted(wantedSubnets) {
 		t.cancelSubscription(subnet)
-		topic := t.fullTopic(subnet, s.cfg.p2p.Encoding().ProtocolSuffix())
-		if t.partial != nil {
-			_ = t.partial.broadcaster.Unsubscribe(topic)
-		}
-		s.unSubscribeFromTopic(topic)
+		s.unSubscribeFromTopic(t.fullTopic(subnet, s.cfg.p2p.Encoding().ProtocolSuffix()))
 	}
 }
 
@@ -634,34 +579,9 @@ func (s *Service) trySubscribeSubnets(t *subnetTracker) {
 	subnetsToJoin := t.getSubnetsToJoin(s.cfg.clock.CurrentSlot())
 	s.pruneNotWanted(t, subnetsToJoin)
 	for _, subnet := range t.missing(subnetsToJoin) {
-		topicStr := t.fullTopic(subnet, s.cfg.p2p.Encoding().ProtocolSuffix())
-		topicOpts := make([]pubsub.TopicOpt, 0, 2)
-
-		requestPartial := t.partial != nil
-		if requestPartial {
-			// TODO: do we want the ability to support partial messages without requesting them?
-			topicOpts = append(topicOpts, pubsub.RequestPartialMessages())
-		}
-
-		topic, err := s.cfg.p2p.JoinTopic(topicStr, topicOpts...)
-		if err != nil {
-			log.WithError(err).Error("Failed to join topic")
-			return
-		}
-
-		if requestPartial {
-			log.Info("Subscribing to partial columns on", topicStr)
-			err = t.partial.broadcaster.Subscribe(topic, t.partial.validateHeader, t.partial.validate, t.partial.handle, t.partial.handleHeader)
-			if err != nil {
-				log.WithError(err).Error("Failed to subscribe to partial column")
-			}
-		}
-
-		// We still need to subscribe to the full columns as well as partial in
-		// case our peers don't support partial messages.
-		t.track(subnet, s.subscribeWithBase(topicStr, t.validate, t.handle))
+		// TODO: subscribeWithBase appends the protocol suffix, other methods don't. Make this consistent.
+		topic := t.fullTopic(subnet, "")
+		t.track(subnet, s.subscribeWithBase(topic, t.validate, t.handle))
 	}
 }
 
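The suffix change above is easy to miss: subscribeWithBase now appends the encoding's protocol suffix itself, so callers must hand it a suffix-free topic, while pruneNotWanted, which builds the full topic string directly, still appends the suffix at the call site. A sketch of the resulting invariant (names are taken from the diff; the comparison itself is illustrative):

// Both forms must name the same gossipsub topic after this change:
withSuffix := t.fullTopic(subnet, s.cfg.p2p.Encoding().ProtocolSuffix()) // built directly, e.g. in pruneNotWanted
bare := t.fullTopic(subnet, "")                                          // handed to subscribeWithBase, which appends the suffix
// invariant: withSuffix == bare + s.cfg.p2p.Encoding().ProtocolSuffix()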
@@ -11,7 +11,6 @@ import (
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition/interop"
-	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
 	"github.com/OffchainLabs/prysm/v7/config/features"
 	fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
 	"github.com/OffchainLabs/prysm/v7/config/params"
@@ -202,16 +201,6 @@ func (s *Service) processDataColumnSidecarsFromExecution(ctx context.Context, so
 		return nil, errors.Wrap(err, "column indices to sample")
 	}
 
-	// TODO: the deadline here was removed in https://github.com/OffchainLabs/prysm/pull/16155/files
-	// make sure that reintroducing it does not cause issues.
-	secondsPerHalfSlot := time.Duration(params.BeaconConfig().SecondsPerSlot/2) * time.Second
-	ctx, cancel := context.WithTimeout(ctx, secondsPerHalfSlot)
-	defer cancel()
-
-	digest, err := s.currentForkDigest()
-	if err != nil {
-		return nil, err
-	}
 	log := log.WithFields(logrus.Fields{
 		"root": fmt.Sprintf("%#x", source.Root()),
 		"slot": source.Slot(),
@@ -242,30 +231,11 @@ func (s *Service) processDataColumnSidecarsFromExecution(ctx context.Context, so
 	}
 
 	// Try to reconstruct data column constructedSidecars from the execution client.
-	constructedSidecars, partialColumns, err := s.cfg.executionReconstructor.ConstructDataColumnSidecars(ctx, source)
+	constructedSidecars, err := s.cfg.executionReconstructor.ConstructDataColumnSidecars(ctx, source)
 	if err != nil {
 		return nil, errors.Wrap(err, "reconstruct data column sidecars")
 	}
 
-	partialBroadcaster := s.cfg.p2p.PartialColumnBroadcaster()
-	if partialBroadcaster != nil {
-		log.WithField("len(partialColumns)", len(partialColumns)).Debug("Publishing partial columns")
-		for i := range uint64(len(partialColumns)) {
-			if !columnIndicesToSample[i] {
-				continue
-			}
-			subnet := peerdas.ComputeSubnetForDataColumnSidecar(i)
-			topic := fmt.Sprintf(p2p.DataColumnSubnetTopicFormat, digest, subnet) + s.cfg.p2p.Encoding().ProtocolSuffix()
-			// Publish the partial column. This is idempotent if we republish the same data twice.
-			// Note, the "partial column" may indeed be complete. We still
-			// should publish to help our peers.
-			err = partialBroadcaster.Publish(topic, partialColumns[i])
-			if err != nil {
-				log.WithError(err).Warn("Failed to publish partial column")
-			}
-		}
-	}
-
 	// No sidecars are retrieved from the EL, retry later
 	constructedCount := uint64(len(constructedSidecars))
 
@@ -337,7 +307,7 @@ func (s *Service) broadcastAndReceiveUnseenDataColumnSidecars(
 	}
 
 	// Broadcast all the data column sidecars we reconstructed but did not see via gossip (non blocking).
-	if err := s.cfg.p2p.BroadcastDataColumnSidecars(ctx, unseenSidecars, nil); err != nil {
+	if err := s.cfg.p2p.BroadcastDataColumnSidecars(ctx, unseenSidecars); err != nil {
 		return nil, errors.Wrap(err, "broadcast data column sidecars")
 	}
 
@@ -3,7 +3,6 @@ package sync
 import (
 	"context"
 	"fmt"
-	"strconv"
 
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed"
 	opfeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
@@ -11,9 +10,7 @@ import (
 	"github.com/OffchainLabs/prysm/v7/config/params"
 	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
 	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
-	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 	"golang.org/x/sync/errgroup"
 	"google.golang.org/protobuf/proto"
 )
@@ -27,13 +24,6 @@ func (s *Service) dataColumnSubscriber(ctx context.Context, msg proto.Message) e
 		return fmt.Errorf("message was not type blocks.VerifiedRODataColumn, type=%T", msg)
 	}
 
-	// Track useful full columns received via gossip (not previously seen)
-	slot := sidecar.SignedBlockHeader.Header.Slot
-	proposerIndex := sidecar.SignedBlockHeader.Header.ProposerIndex
-	if !s.hasSeenDataColumnIndex(slot, proposerIndex, sidecar.Index) {
-		usefulFullColumnsReceivedTotal.WithLabelValues(strconv.FormatUint(sidecar.Index, 10)).Inc()
-	}
-
 	if err := s.receiveDataColumnSidecar(ctx, sidecar); err != nil {
 		return wrapDataColumnError(sidecar, "receive data column sidecar", err)
 	}
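For context on the metric being removed: usefulFullColumnsReceivedTotal is a Prometheus counter vector labeled by column index. The metric name and label usage are taken from the diff; its declaration is not shown in this compare view, so the sketch below is an assumption of the typical shape:

package sync

import (
	"strconv"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// Assumed declaration: a counter vector keyed by the data column index.
var usefulFullColumnsReceivedTotal = promauto.NewCounterVec(
	prometheus.CounterOpts{
		Name: "useful_full_columns_received_total",
		Help: "Count of full data columns received via gossip that were not already seen.",
	},
	[]string{"index"},
)

func recordUsefulColumn(index uint64) {
	// Each distinct label value gets its own counter series.
	usefulFullColumnsReceivedTotal.WithLabelValues(strconv.FormatUint(index, 10)).Inc()
}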
@@ -67,54 +57,6 @@ func (s *Service) dataColumnSubscriber(ctx context.Context, msg proto.Message) e
 	return nil
 }
 
-func (s *Service) partialDataColumnHeaderSubscriber(ctx context.Context, header *ethpb.PartialDataColumnHeader) error {
-	source := peerdas.PopulateFromPartialHeader(header)
-	log.WithField("slot", source.Slot()).Info("Received data column header")
-
-	go func() {
-		if err := s.processDataColumnSidecarsFromExecution(ctx, source); err != nil {
-			log.WithError(err).WithFields(logrus.Fields{
-				"root": fmt.Sprintf("%#x", source.Root()),
-				"slot": source.Slot(),
-			}).Error("Failed to process sidecars from execution for partial data column header")
-		}
-	}()
-
-	return nil
-}
-
-func (s *Service) verifiedRODataColumnSubscriber(ctx context.Context, sidecar blocks.VerifiedRODataColumn) error {
-	log.WithField("slot", sidecar.Slot()).WithField("column", sidecar.Index).Info("Received data column sidecar")
-
-	if err := s.receiveDataColumnSidecar(ctx, sidecar); err != nil {
-		return errors.Wrap(err, "receive data column sidecar")
-	}
-
-	var wg errgroup.Group
-	wg.Go(func() error {
-		if err := s.processDataColumnSidecarsFromReconstruction(ctx, sidecar); err != nil {
-			return errors.Wrap(err, "process data column sidecars from reconstruction")
-		}
-
-		return nil
-	})
-
-	wg.Go(func() error {
-		// Broadcast our complete column for peers that don't use partial messages
-		if err := s.cfg.p2p.BroadcastDataColumnSidecars(ctx, []blocks.VerifiedRODataColumn{sidecar}, nil); err != nil {
-			return errors.Wrap(err, "process data column sidecars from execution")
-		}
-
-		return nil
-	})
-
-	if err := wg.Wait(); err != nil {
-		return err
-	}
-
-	return nil
-}
-
 // receiveDataColumnSidecar receives a single data column sidecar: marks it as seen and saves it to the chain.
 // Do not loop over this function to receive multiple sidecars, use receiveDataColumnSidecars instead.
 func (s *Service) receiveDataColumnSidecar(ctx context.Context, sidecar blocks.VerifiedRODataColumn) error {
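A note on the errgroup pattern used in the removed subscriber: a zero-value errgroup.Group is ready to use, Go starts each task in its own goroutine, and Wait blocks until all tasks return, yielding the first non-nil error. A minimal standalone sketch (task bodies are illustrative):

package main

import (
	"errors"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	var g errgroup.Group

	// Both tasks run concurrently; Wait returns the first non-nil error.
	g.Go(func() error { return nil })
	g.Go(func() error { return errors.New("broadcast failed") })

	if err := g.Wait(); err != nil {
		fmt.Println("first failure:", err) // prints: first failure: broadcast failed
	}
}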
@@ -71,7 +71,6 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs
 	roDataColumns := []blocks.RODataColumn{roDataColumn}
 
 	// Create the verifier.
-	// Question(marco): Do we want the multiple columns verifier? Is batching used only for kzg proofs?
 	verifier := s.newColumnsVerifier(roDataColumns, verification.GossipDataColumnSidecarRequirements)
 
 	// Start the verification process.
@@ -1,143 +0,0 @@
-package sync
-
-import (
-	"context"
-
-	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
-	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
-	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
-	"github.com/OffchainLabs/prysm/v7/config/params"
-	"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
-	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
-	"github.com/OffchainLabs/prysm/v7/time/slots"
-	"github.com/pkg/errors"
-)
-
-var (
-	// REJECT errors - peer should be penalized
-	errHeaderEmptyCommitments       = errors.New("header has no kzg commitments")
-	errHeaderParentInvalid          = errors.New("header parent invalid")
-	errHeaderSlotNotAfterParent     = errors.New("header slot not after parent")
-	errHeaderNotFinalizedDescendant = errors.New("header not finalized descendant")
-	errHeaderInvalidInclusionProof  = errors.New("invalid inclusion proof")
-	errHeaderInvalidSignature       = errors.New("invalid proposer signature")
-	errHeaderUnexpectedProposer     = errors.New("unexpected proposer index")
-
-	// IGNORE errors - don't penalize peer
-	errHeaderNil               = errors.New("nil header")
-	errHeaderFromFuture        = errors.New("header is from future slot")
-	errHeaderNotAboveFinalized = errors.New("header slot not above finalized")
-	errHeaderParentNotSeen     = errors.New("header parent not seen")
-)
-
-// validatePartialDataColumnHeader validates a PartialDataColumnHeader per the consensus spec.
-// Returns (reject, err) where reject=true means the peer should be penalized.
-// TODO: we should consolidate this with the existing DataColumn validation pipeline.
-func (s *Service) validatePartialDataColumnHeader(ctx context.Context, header *ethpb.PartialDataColumnHeader) (reject bool, err error) {
-	if header == nil || header.SignedBlockHeader == nil || header.SignedBlockHeader.Header == nil {
-		return false, errHeaderNil // IGNORE
-	}
-
-	blockHeader := header.SignedBlockHeader.Header
-	headerSlot := blockHeader.Slot
-	parentRoot := bytesutil.ToBytes32(blockHeader.ParentRoot)
-
-	// [REJECT] kzg_commitments list is non-empty
-	if len(header.KzgCommitments) == 0 {
-		return true, errHeaderEmptyCommitments
-	}
-
-	// [IGNORE] Not from future slot (with MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance)
-	currentSlot := s.cfg.clock.CurrentSlot()
-	if headerSlot > currentSlot {
-		maxDisparity := params.BeaconConfig().MaximumGossipClockDisparityDuration()
-		slotStart, err := s.cfg.clock.SlotStart(headerSlot)
-		if err != nil {
-			return false, err
-		}
-		if s.cfg.clock.Now().Before(slotStart.Add(-maxDisparity)) {
-			return false, errHeaderFromFuture // IGNORE
-		}
-	}
-
-	// [IGNORE] Slot above finalized
-	finalizedCheckpoint := s.cfg.chain.FinalizedCheckpt()
-	startSlot, err := slots.EpochStart(finalizedCheckpoint.Epoch)
-	if err != nil {
-		return false, err
-	}
-	if headerSlot <= startSlot {
-		return false, errHeaderNotAboveFinalized // IGNORE
-	}
-
-	// [IGNORE] Parent has been seen
-	if !s.cfg.chain.HasBlock(ctx, parentRoot) {
-		return false, errHeaderParentNotSeen // IGNORE
-	}
-
-	// [REJECT] Parent passes validation (not a bad block)
-	if s.hasBadBlock(parentRoot) {
-		return true, errHeaderParentInvalid
-	}
-
-	// [REJECT] Header slot > parent slot
-	parentSlot, err := s.cfg.chain.RecentBlockSlot(parentRoot)
-	if err != nil {
-		return false, errors.Wrap(err, "get parent slot")
-	}
-	if headerSlot <= parentSlot {
-		return true, errHeaderSlotNotAfterParent
-	}
-
-	// [REJECT] Finalized checkpoint is ancestor (parent is in forkchoice)
-	if !s.cfg.chain.InForkchoice(parentRoot) {
-		return true, errHeaderNotFinalizedDescendant
-	}
-
-	// [REJECT] Inclusion proof valid
-	if err := peerdas.VerifyPartialDataColumnHeaderInclusionProof(header); err != nil {
-		return true, errHeaderInvalidInclusionProof
-	}
-
-	// [REJECT] Valid proposer signature
-	parentState, err := s.cfg.stateGen.StateByRoot(ctx, parentRoot)
-	if err != nil {
-		return false, errors.Wrap(err, "get parent state")
-	}
-
-	proposerIdx := blockHeader.ProposerIndex
-	proposer, err := parentState.ValidatorAtIndex(proposerIdx)
-	if err != nil {
-		return false, errors.Wrap(err, "get proposer")
-	}
-
-	domain, err := signing.Domain(
-		parentState.Fork(),
-		slots.ToEpoch(headerSlot),
-		params.BeaconConfig().DomainBeaconProposer,
-		parentState.GenesisValidatorsRoot(),
-	)
-	if err != nil {
-		return false, errors.Wrap(err, "get domain")
-	}
-
-	if err := signing.VerifyBlockHeaderSigningRoot(
-		blockHeader,
-		proposer.PublicKey,
-		header.SignedBlockHeader.Signature,
-		domain,
-	); err != nil {
-		return true, errHeaderInvalidSignature
-	}
-
-	// [REJECT] Expected proposer for slot
-	expectedProposer, err := helpers.BeaconProposerIndexAtSlot(ctx, parentState, headerSlot)
-	if err != nil {
-		return false, errors.Wrap(err, "compute expected proposer")
-	}
-	if expectedProposer != proposerIdx {
-		return true, errHeaderUnexpectedProposer
-	}
-
-	return false, nil // Valid header
-}
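The (reject, err) return convention in the deleted file mirrors gossipsub's three-way validation outcome. A sketch of how a caller would translate it into a pubsub verdict (the mapping is an assumption inferred from the REJECT/IGNORE comments above; the ValidationAccept/Ignore/Reject constants are real go-libp2p-pubsub API):

package sync

import (
	pubsub "github.com/libp2p/go-libp2p-pubsub"
)

// toValidationResult maps the (reject, err) convention onto gossipsub's
// three-way verdict: reject=true penalizes the sender, while other errors
// drop the message without penalty.
func toValidationResult(reject bool, err error) pubsub.ValidationResult {
	switch {
	case err == nil:
		return pubsub.ValidationAccept // valid: deliver and propagate
	case reject:
		return pubsub.ValidationReject // invalid: penalize the sending peer
	default:
		return pubsub.ValidationIgnore // unverifiable: drop without penalty
	}
}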
@@ -1027,10 +1027,10 @@ func TestGetVerifyingStateEdgeCases(t *testing.T) {
 			sc:  signatureCache,
 			sr:  &mockStateByRooter{sbr: sbrErrorIfCalled(t)}, // Should not be called
 			hsp: &mockHeadStateProvider{
 				headRoot:          parentRoot[:], // Same as parent
 				headSlot:          32, // Epoch 1
 				headState:         fuluState.Copy(), // HeadState (not ReadOnly) for ProcessSlots
 				headStateReadOnly: nil, // Should not use ReadOnly path
 			},
 			fc: &mockForkchoicer{
 				// Return same root for both to simulate same chain
@@ -1045,8 +1045,8 @@ func TestGetVerifyingStateEdgeCases(t *testing.T) {
 	// Wrap to detect HeadState call
 	originalHsp := initializer.shared.hsp.(*mockHeadStateProvider)
 	wrappedHsp := &mockHeadStateProvider{
 		headRoot:  originalHsp.headRoot,
 		headSlot:  originalHsp.headSlot,
 		headState: originalHsp.headState,
 	}
 	initializer.shared.hsp = &headStateCallTracker{
@@ -78,21 +78,11 @@ func (ini *Initializer) NewBlobVerifier(b blocks.ROBlob, reqs []Requirement) *RO
 // WARNING: The returned verifier is not thread-safe, and should not be used concurrently.
 func (ini *Initializer) NewDataColumnsVerifier(roDataColumns []blocks.RODataColumn, reqs []Requirement) *RODataColumnsVerifier {
 	return &RODataColumnsVerifier{
 		sharedResources: ini.shared,
 		dataColumns:     roDataColumns,
 		results:         newResults(reqs...),
-		verifyDataColumnsCommitment: func(rc []blocks.RODataColumn) error {
-			if len(rc) == 0 {
-				return nil
-			}
-			var sizeHint int
-			if len(rc) > 0 {
-				sizeHint = len(rc[0].Column)
-			}
-			sizeHint *= len(rc)
-			return peerdas.VerifyDataColumnsCellsKZGProofs(sizeHint, blocks.RODataColumnsToCellProofBundles(rc))
-		},
-		stateByRoot: make(map[[fieldparams.RootLength]byte]state.BeaconState),
+		verifyDataColumnsCommitment: peerdas.VerifyDataColumnsSidecarKZGProofs,
+		stateByRoot:                 make(map[[fieldparams.RootLength]byte]state.BeaconState),
 	}
 }
 
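The verifyDataColumnsCommitment field is a function value rather than a direct call, which keeps the KZG batch check swappable in tests; this change simply replaces an inline closure with a named function of the same shape. A minimal sketch of the injection pattern (types simplified and hypothetical, not the actual verifier):

package verification

import "errors"

type column struct{ index uint64 }

// The verifier holds its commitment check as a field so tests can stub it.
type columnsVerifier struct {
	verify func(columns []column) error
}

func newColumnsVerifier(verify func([]column) error) *columnsVerifier {
	return &columnsVerifier{verify: verify}
}

// In production the field points at the real KZG proof check; a test can
// inject a deterministic stub instead:
var errStub = errors.New("stubbed failure")

func stubbedVerifier() *columnsVerifier {
	return newColumnsVerifier(func([]column) error { return errStub })
}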
@@ -1,3 +0,0 @@
-### Added
-
-- Set beacon node options after reading the config file.
@@ -1,3 +0,0 @@
-### Fixed
-
-- Fix Bazel build failure on macOS x86_64 (darwin_amd64) (adds missing assembly stub to hashtree patch).
@@ -1,6 +0,0 @@
-### Added
-
-- Added new proofCollector type to ssz-query
-
-### Ignored
-- Added testing covering the production of Merkle proof from Phase0 beacon state and benchmarked against real Hoodi beacon state (Fulu version)
@@ -1,7 +0,0 @@
-### Changed
-
-- gRPC fallback now matches rest api implementation and will also check and connect to only synced nodes.
-
-### Removed
-
-- gRPC resolver for load balancing, the new implementation matches rest api's so we should remove the resolver so it's handled the same way for consistency.
@@ -1,3 +0,0 @@
-### Changed
-
-- gRPC health endpoint will now return an error on syncing or optimistic status showing that it's unavailable.
@@ -1,3 +0,0 @@
-### Added
-
-- Added README for maintaining specrefs.
@@ -1,3 +0,0 @@
-### Added
-
-- The ability to download the nightly reference tests from a specific day.
@@ -1,3 +0,0 @@
-### Added
-
-- Add support for partial columns
@@ -1,3 +0,0 @@
-### Ignored
-
-- Updated golangci to run lint on tests too.
@@ -1,3 +0,0 @@
-### Ignored
-
-- Add handy documentation for SSZ Query package (`encoding/ssz/query`).
@@ -0,0 +1,2 @@
+### Added
+- Add process execution payload for gloas
@@ -1,2 +0,0 @@
-### Changed
-- Sample PTC per committee to reduce allocations.
@@ -1,2 +0,0 @@
-### Ignored
-- Run go fmt
@@ -368,9 +368,4 @@ var (
 		Usage:  "Disables the engine_getBlobsV2 usage.",
 		Hidden: true,
 	}
-	// PartialDataColumns specifies the regex for enabling partial messages on datacolumns
-	PartialDataColumns = &cli.BoolFlag{
-		Name:  "partial-data-columns",
-		Usage: "Enable cell-level dissemination for PeerDAS data columns",
-	}
 )
@@ -162,7 +162,6 @@ var appFlags = []cli.Flag{
 	flags.BatchVerifierLimit,
 	flags.StateDiffExponents,
 	flags.DisableEphemeralLogFile,
-	flags.PartialDataColumns,
 }
 
 func init() {
@@ -368,8 +367,17 @@ func startNode(ctx *cli.Context, cancel context.CancelFunc) error {
 		backfill.BeaconNodeOptions,
 		das.BeaconNodeOptions,
 	}
+	for _, of := range optFuncs {
+		ofo, err := of(ctx)
+		if err != nil {
+			return err
+		}
+		if ofo != nil {
+			opts = append(opts, ofo...)
+		}
+	}
 
-	beacon, err := node.New(ctx, cancel, optFuncs, opts...)
+	beacon, err := node.New(ctx, cancel, opts...)
 	if err != nil {
 		return fmt.Errorf("unable to start beacon node: %w", err)
 	}
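This change resolves the option-producing functions inside startNode itself, after the CLI context and config file have been read, instead of handing them to node.New. A minimal sketch of the shape of that pattern (all types here are simplified and hypothetical; only the resolution loop mirrors the diff):

package main

import "fmt"

type cliCtx struct{ verbose bool }
type node struct{ verbose bool }
type nodeOption func(n *node)

// An option factory inspects the fully parsed CLI context and may return
// zero or more options, or an error.
func verboseOptions(ctx *cliCtx) ([]nodeOption, error) {
	if !ctx.verbose {
		return nil, nil // nothing to add
	}
	return []nodeOption{func(n *node) { n.verbose = true }}, nil
}

func main() {
	ctx := &cliCtx{verbose: true}
	optFuncs := []func(*cliCtx) ([]nodeOption, error){verboseOptions}

	// Resolve every factory up front so construction sees a flat option list.
	var opts []nodeOption
	for _, of := range optFuncs {
		ofo, err := of(ctx)
		if err != nil {
			panic(err)
		}
		opts = append(opts, ofo...) // append flattens; a nil slice is a no-op
	}

	n := &node{}
	for _, opt := range opts {
		opt(n)
	}
	fmt.Println("verbose:", n.verbose)
}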
@@ -75,7 +75,6 @@ var appHelpFlagGroups = []flagGroup{
 			flags.RPCPort,
 			flags.BatchVerifierLimit,
 			flags.StateDiffExponents,
-			flags.PartialDataColumns,
 		},
 	},
 	{
@@ -98,6 +98,7 @@ func compareConfigs(t *testing.T, expected, actual *BeaconChainConfig) {
 	require.DeepEqual(t, expected.EjectionBalance, actual.EjectionBalance)
 	require.DeepEqual(t, expected.EffectiveBalanceIncrement, actual.EffectiveBalanceIncrement)
 	require.DeepEqual(t, expected.BLSWithdrawalPrefixByte, actual.BLSWithdrawalPrefixByte)
+	require.DeepEqual(t, expected.BuilderWithdrawalPrefixByte, actual.BuilderWithdrawalPrefixByte)
 	require.DeepEqual(t, expected.ZeroHash, actual.ZeroHash)
 	require.DeepEqual(t, expected.GenesisDelay, actual.GenesisDelay)
 	require.DeepEqual(t, expected.MinAttestationInclusionDelay, actual.MinAttestationInclusionDelay)
@@ -119,6 +119,7 @@ func assertEqualConfigs(t *testing.T, name string, fields []string, expected, ac
 	// Initial values.
 	assert.DeepEqual(t, expected.GenesisForkVersion, actual.GenesisForkVersion, "%s: GenesisForkVersion", name)
 	assert.DeepEqual(t, expected.BLSWithdrawalPrefixByte, actual.BLSWithdrawalPrefixByte, "%s: BLSWithdrawalPrefixByte", name)
+	assert.DeepEqual(t, expected.BuilderWithdrawalPrefixByte, actual.BuilderWithdrawalPrefixByte, "%s: BuilderWithdrawalPrefixByte", name)
 	assert.DeepEqual(t, expected.ETH1AddressWithdrawalPrefixByte, actual.ETH1AddressWithdrawalPrefixByte, "%s: ETH1AddressWithdrawalPrefixByte", name)
 
 	// Time parameters.
@@ -31,6 +31,7 @@ func MinimalSpecConfig() *BeaconChainConfig {
 	// Initial values
 	minimalConfig.BLSWithdrawalPrefixByte = byte(0)
 	minimalConfig.ETH1AddressWithdrawalPrefixByte = byte(1)
+	minimalConfig.BuilderWithdrawalPrefixByte = byte(3)
 
 	// Time parameters
 	minimalConfig.SecondsPerSlot = 6
@@ -54,6 +54,7 @@ func compareConfigs(t *testing.T, expected, actual *params.BeaconChainConfig) {
 	require.DeepEqual(t, expected.EjectionBalance, actual.EjectionBalance)
 	require.DeepEqual(t, expected.EffectiveBalanceIncrement, actual.EffectiveBalanceIncrement)
 	require.DeepEqual(t, expected.BLSWithdrawalPrefixByte, actual.BLSWithdrawalPrefixByte)
+	require.DeepEqual(t, expected.BuilderWithdrawalPrefixByte, actual.BuilderWithdrawalPrefixByte)
 	require.DeepEqual(t, expected.ZeroHash, actual.ZeroHash)
 	require.DeepEqual(t, expected.GenesisDelay, actual.GenesisDelay)
 	require.DeepEqual(t, expected.MinAttestationInclusionDelay, actual.MinAttestationInclusionDelay)
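For orientation: withdrawal credential prefixes are single-byte tags at the front of a validator's 32-byte withdrawal credentials. The BLS and ETH1 values below come straight from this diff's MinimalSpecConfig, as does the new builder value; reading byte(3) as a builder tag for the Gloas/ePBS design is this sketch's assumption, and whether mainnet uses the same value is not shown in this compare view:

package params

const (
	blsWithdrawalPrefix         = byte(0x00) // BLS key withdrawal credentials
	eth1AddressWithdrawalPrefix = byte(0x01) // 0x01 + 11 zero bytes + execution address
	builderWithdrawalPrefix     = byte(0x03) // value set by this diff's MinimalSpecConfig
)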
@@ -4,12 +4,11 @@ go_library(
     name = "go_default_library",
     srcs = [
         "execution.go",
+        "execution_payload_envelope.go",
        "factory.go",
         "get_payload.go",
         "getters.go",
         "kzg.go",
-        "log.go",
-        "partialdatacolumn.go",
         "proofs.go",
         "proto.go",
         "roblob.go",
@@ -38,11 +37,9 @@ go_library(
         "//proto/prysm/v1alpha1:go_default_library",
         "//proto/prysm/v1alpha1/validator-client:go_default_library",
         "//runtime/version:go_default_library",
-        "@com_github_libp2p_go_libp2p_pubsub//partialmessages:go_default_library",
+        "@com_github_ethereum_go_ethereum//common:go_default_library",
         "@com_github_pkg_errors//:go_default_library",
         "@com_github_prysmaticlabs_fastssz//:go_default_library",
-        "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
-        "@com_github_sirupsen_logrus//:go_default_library",
         "@org_golang_google_protobuf//proto:go_default_library",
     ],
 )
@@ -50,11 +47,11 @@ go_library(
 go_test(
     name = "go_default_test",
     srcs = [
+        "execution_payload_envelope_test.go",
         "execution_test.go",
         "factory_test.go",
         "getters_test.go",
         "kzg_test.go",
-        "partialdatacolumn_invariants_test.go",
         "proofs_test.go",
         "proto_test.go",
         "roblob_test.go",
@@ -80,9 +77,6 @@ go_test(
         "//runtime/version:go_default_library",
         "//testing/assert:go_default_library",
         "//testing/require:go_default_library",
-        "//testing/util:go_default_library",
-        "@com_github_libp2p_go_libp2p//core/peer:go_default_library",
-        "@com_github_libp2p_go_libp2p_pubsub//partialmessages:go_default_library",
        "@com_github_prysmaticlabs_fastssz//:go_default_library",
         "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
     ],
153
consensus-types/blocks/execution_payload_envelope.go
Normal file
@@ -0,0 +1,153 @@
+package blocks
+
+import (
+	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
+	field_params "github.com/OffchainLabs/prysm/v7/config/fieldparams"
+	consensus_types "github.com/OffchainLabs/prysm/v7/consensus-types"
+	"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
+	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
+	"github.com/OffchainLabs/prysm/v7/encoding/ssz"
+	enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
+	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
+	"github.com/ethereum/go-ethereum/common"
+	"google.golang.org/protobuf/proto"
+)
+
+type signedExecutionPayloadEnvelope struct {
+	s *ethpb.SignedExecutionPayloadEnvelope
+}
+
+type executionPayloadEnvelope struct {
+	p *ethpb.ExecutionPayloadEnvelope
+}
+
+// WrappedROSignedExecutionPayloadEnvelope wraps a signed execution payload envelope proto in a read-only interface.
+func WrappedROSignedExecutionPayloadEnvelope(s *ethpb.SignedExecutionPayloadEnvelope) (interfaces.ROSignedExecutionPayloadEnvelope, error) {
+	w := signedExecutionPayloadEnvelope{s: s}
+	if w.IsNil() {
+		return nil, consensus_types.ErrNilObjectWrapped
+	}
+	return w, nil
+}
+
+// WrappedROExecutionPayloadEnvelope wraps an execution payload envelope proto in a read-only interface.
+func WrappedROExecutionPayloadEnvelope(p *ethpb.ExecutionPayloadEnvelope) (interfaces.ROExecutionPayloadEnvelope, error) {
+	w := &executionPayloadEnvelope{p: p}
+	if w.IsNil() {
+		return nil, consensus_types.ErrNilObjectWrapped
+	}
+	return w, nil
+}
+
+// Envelope returns the execution payload envelope as a read-only interface.
+func (s signedExecutionPayloadEnvelope) Envelope() (interfaces.ROExecutionPayloadEnvelope, error) {
+	return WrappedROExecutionPayloadEnvelope(s.s.Message)
+}
+
+// Signature returns the BLS signature as a 96-byte array.
+func (s signedExecutionPayloadEnvelope) Signature() [field_params.BLSSignatureLength]byte {
+	return [field_params.BLSSignatureLength]byte(s.s.Signature)
+}
+
+// IsNil reports whether the signed envelope or its contents are invalid.
+func (s signedExecutionPayloadEnvelope) IsNil() bool {
+	if s.s == nil {
+		return true
+	}
+	if len(s.s.Signature) != field_params.BLSSignatureLength {
+		return true
+	}
+	w := executionPayloadEnvelope{p: s.s.Message}
+	return w.IsNil()
+}
+
+// SigningRoot computes the signing root for the signed envelope with the provided domain.
+func (s signedExecutionPayloadEnvelope) SigningRoot(domain []byte) (root [32]byte, err error) {
+	return signing.ComputeSigningRoot(s.s.Message, domain)
+}
+
+// Proto returns the underlying protobuf message.
+func (s signedExecutionPayloadEnvelope) Proto() proto.Message {
+	return s.s
+}
+
+// IsNil reports whether the envelope or its required fields are invalid.
+func (p *executionPayloadEnvelope) IsNil() bool {
+	if p.p == nil {
+		return true
+	}
+	if p.p.Payload == nil {
+		return true
+	}
+	if len(p.p.BeaconBlockRoot) != field_params.RootLength {
+		return true
+	}
+	if p.p.BlobKzgCommitments == nil {
+		return true
+	}
+	return false
+}
+
+// IsBlinded reports whether the envelope contains a blinded payload.
+func (p *executionPayloadEnvelope) IsBlinded() bool {
+	return !p.IsNil() && p.p.Payload == nil
+}
+
+// Execution returns the execution payload as a read-only interface.
+func (p *executionPayloadEnvelope) Execution() (interfaces.ExecutionData, error) {
+	return WrappedExecutionPayloadDeneb(p.p.Payload)
+}
+
+// ExecutionRequests returns the execution requests attached to the envelope.
+func (p *executionPayloadEnvelope) ExecutionRequests() *enginev1.ExecutionRequests {
+	return p.p.ExecutionRequests
+}
+
+// BuilderIndex returns the proposer/builder index for the envelope.
+func (p *executionPayloadEnvelope) BuilderIndex() primitives.BuilderIndex {
+	return p.p.BuilderIndex
+}
+
+// BeaconBlockRoot returns the beacon block root referenced by the envelope.
+func (p *executionPayloadEnvelope) BeaconBlockRoot() [field_params.RootLength]byte {
+	return [field_params.RootLength]byte(p.p.BeaconBlockRoot)
+}
+
+// BlobKzgCommitments returns a copy of the envelope's KZG commitments.
+func (p *executionPayloadEnvelope) BlobKzgCommitments() [][]byte {
+	commitments := make([][]byte, len(p.p.BlobKzgCommitments))
+	for i, commit := range p.p.BlobKzgCommitments {
+		commitments[i] = make([]byte, len(commit))
+		copy(commitments[i], commit)
+	}
+	return commitments
+}
+
+// VersionedHashes returns versioned hashes derived from the KZG commitments.
+func (p *executionPayloadEnvelope) VersionedHashes() []common.Hash {
+	commitments := p.p.BlobKzgCommitments
+	versionedHashes := make([]common.Hash, len(commitments))
+	for i, commitment := range commitments {
+		versionedHashes[i] = primitives.ConvertKzgCommitmentToVersionedHash(commitment)
+	}
+	return versionedHashes
+}
+
+// BlobKzgCommitmentsRoot returns the SSZ root of the envelope's KZG commitments.
+func (p *executionPayloadEnvelope) BlobKzgCommitmentsRoot() ([field_params.RootLength]byte, error) {
+	if p.IsNil() || p.p.BlobKzgCommitments == nil {
+		return [field_params.RootLength]byte{}, consensus_types.ErrNilObjectWrapped
+	}
+
+	return ssz.KzgCommitmentsRoot(p.p.BlobKzgCommitments)
+}
+
+// Slot returns the slot of the envelope.
+func (p *executionPayloadEnvelope) Slot() primitives.Slot {
+	return p.p.Slot
+}
+
+// StateRoot returns the state root carried by the envelope.
+func (p *executionPayloadEnvelope) StateRoot() [field_params.RootLength]byte {
+	return [field_params.RootLength]byte(p.p.StateRoot)
+}
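A note on SigningRoot above: signing.ComputeSigningRoot follows the consensus spec's compute_signing_root, which merkleizes a two-field SigningData{object_root, domain} container. Because that container has exactly two 32-byte leaves, its hash tree root reduces to one SHA-256 over their concatenation. A minimal sketch of that reduction, given the object's hash tree root as input (illustrative, not the Prysm implementation):

package main

import (
	"crypto/sha256"
	"fmt"
)

// computeSigningRoot sketches compute_signing_root(object, domain):
// hash_tree_root(SigningData) for a container with two 32-byte fields
// is sha256(object_root || domain).
func computeSigningRoot(objectRoot, domain [32]byte) [32]byte {
	return sha256.Sum256(append(objectRoot[:], domain[:]...))
}

func main() {
	var objectRoot, domain [32]byte
	objectRoot[0], domain[0] = 0x01, 0x02
	fmt.Printf("%x\n", computeSigningRoot(objectRoot, domain))
}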
115
consensus-types/blocks/execution_payload_envelope_test.go
Normal file
@@ -0,0 +1,115 @@
+package blocks_test
+
+import (
+	"bytes"
+	"testing"
+
+	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
+	consensus_types "github.com/OffchainLabs/prysm/v7/consensus-types"
+	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
+	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
+	enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
+	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
+	"github.com/OffchainLabs/prysm/v7/testing/assert"
+	"github.com/OffchainLabs/prysm/v7/testing/require"
+)
+
+func validExecutionPayloadEnvelope() *ethpb.ExecutionPayloadEnvelope {
+	payload := &enginev1.ExecutionPayloadDeneb{
+		ParentHash:    bytes.Repeat([]byte{0x01}, 32),
+		FeeRecipient:  bytes.Repeat([]byte{0x02}, 20),
+		StateRoot:     bytes.Repeat([]byte{0x03}, 32),
+		ReceiptsRoot:  bytes.Repeat([]byte{0x04}, 32),
+		LogsBloom:     bytes.Repeat([]byte{0x05}, 256),
+		PrevRandao:    bytes.Repeat([]byte{0x06}, 32),
+		BlockNumber:   1,
+		GasLimit:      2,
+		GasUsed:       3,
+		Timestamp:     4,
+		BaseFeePerGas: bytes.Repeat([]byte{0x07}, 32),
+		BlockHash:     bytes.Repeat([]byte{0x08}, 32),
+		Transactions:  [][]byte{},
+		Withdrawals:   []*enginev1.Withdrawal{},
+		BlobGasUsed:   0,
+		ExcessBlobGas: 0,
+	}
+
+	return &ethpb.ExecutionPayloadEnvelope{
+		Payload:            payload,
+		ExecutionRequests:  &enginev1.ExecutionRequests{},
+		BuilderIndex:       10,
+		BeaconBlockRoot:    bytes.Repeat([]byte{0xAA}, 32),
+		Slot:               9,
+		BlobKzgCommitments: [][]byte{bytes.Repeat([]byte{0x0C}, 48)},
+		StateRoot:          bytes.Repeat([]byte{0xBB}, 32),
+	}
+}
+
+func TestWrappedROExecutionPayloadEnvelope(t *testing.T) {
+	t.Run("returns error on invalid beacon root length", func(t *testing.T) {
+		invalid := validExecutionPayloadEnvelope()
+		invalid.BeaconBlockRoot = []byte{0x01}
+		_, err := blocks.WrappedROExecutionPayloadEnvelope(invalid)
+		require.Equal(t, consensus_types.ErrNilObjectWrapped, err)
+	})
+
+	t.Run("wraps and exposes fields", func(t *testing.T) {
+		env := validExecutionPayloadEnvelope()
+		wrapped, err := blocks.WrappedROExecutionPayloadEnvelope(env)
+		require.NoError(t, err)
+
+		require.Equal(t, primitives.BuilderIndex(10), wrapped.BuilderIndex())
+		require.Equal(t, primitives.Slot(9), wrapped.Slot())
+		assert.DeepEqual(t, [32]byte(bytes.Repeat([]byte{0xAA}, 32)), wrapped.BeaconBlockRoot())
+		assert.DeepEqual(t, [32]byte(bytes.Repeat([]byte{0xBB}, 32)), wrapped.StateRoot())
+
+		commitments := wrapped.BlobKzgCommitments()
+		assert.DeepEqual(t, env.BlobKzgCommitments, commitments)
+
+		versioned := wrapped.VersionedHashes()
+		require.Equal(t, 1, len(versioned))
+
+		reqs := wrapped.ExecutionRequests()
+		require.NotNil(t, reqs)
+
+		exec, err := wrapped.Execution()
+		require.NoError(t, err)
+		assert.DeepEqual(t, env.Payload.ParentHash, exec.ParentHash())
+	})
+}
+
+func TestWrappedROSignedExecutionPayloadEnvelope(t *testing.T) {
+	t.Run("returns error for invalid signature length", func(t *testing.T) {
+		signed := &ethpb.SignedExecutionPayloadEnvelope{
+			Message:   validExecutionPayloadEnvelope(),
+			Signature: bytes.Repeat([]byte{0xAA}, 95),
+		}
+		_, err := blocks.WrappedROSignedExecutionPayloadEnvelope(signed)
+		require.Equal(t, consensus_types.ErrNilObjectWrapped, err)
+	})
+
+	t.Run("wraps and provides envelope/signing data", func(t *testing.T) {
+		sig := bytes.Repeat([]byte{0xAB}, 96)
+		signed := &ethpb.SignedExecutionPayloadEnvelope{
+			Message:   validExecutionPayloadEnvelope(),
+			Signature: sig,
+		}
+
+		wrapped, err := blocks.WrappedROSignedExecutionPayloadEnvelope(signed)
+		require.NoError(t, err)
+
+		gotSig := wrapped.Signature()
+		assert.DeepEqual(t, [96]byte(sig), gotSig)
+
+		env, err := wrapped.Envelope()
+		require.NoError(t, err)
+		assert.DeepEqual(t, [32]byte(bytes.Repeat([]byte{0xAA}, 32)), env.BeaconBlockRoot())
+
+		domain := bytes.Repeat([]byte{0xCC}, 32)
+		wantRoot, err := signing.ComputeSigningRoot(signed.Message, domain)
+		require.NoError(t, err)
+		gotRoot, err := wrapped.SigningRoot(domain)
+		require.NoError(t, err)
+		require.Equal(t, wantRoot, gotRoot)
+	})
+}
@@ -1,9 +0,0 @@
-// Code generated by hack/gen-logs.sh; DO NOT EDIT.
-// This file is created and regenerated automatically. Anything added here might get removed.
-package blocks
-
-import "github.com/sirupsen/logrus"
-
-// The prefix for logs from this package will be the text after the last slash in the package path.
-// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
-var log = logrus.WithField("package", "consensus-types/blocks")
Some files were not shown because too many files have changed in this diff.