Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-02-06 19:15:00 -05:00)

Compare commits: 46 commits, gloas-prop ... feat/add-r
Only the commit SHAs survived extraction; author and date columns were not captured.

| SHA1 |
|---|
| bcc0a89bf9 |
| 9fa36dc84e |
| 0d4f695a48 |
| b36bc1fe17 |
| a329a77037 |
| bfd9ff8651 |
| 79301d4db6 |
| fbfea6f753 |
| 562ef25527 |
| 488971f989 |
| b129eaaeb8 |
| 90302adbd2 |
| a6262ba07b |
| 7d5c8d6964 |
| 38cae3f8de |
| a5e6d0a3ac |
| c4a308e598 |
| ba720c1b4b |
| 5a3f45f91f |
| 29010edcd1 |
| 045c29ccce |
| 0d80bbe44f |
| 14f13ed902 |
| 308199c2e7 |
| 7a04c6b645 |
| 93cda45e18 |
| 1b4265ef3f |
| 90d1716fd7 |
| c4f1a9ac4f |
| 73d948a710 |
| 5e2985d36b |
| 7ac3f3cb68 |
| 0a759c3d15 |
| bf7ca00780 |
| efcb98bcaa |
| 7b0de5ad0e |
| 2e15cd2068 |
| 2e5192e496 |
| 729c54a300 |
| 4556aa266a |
| 2f1cac217d |
| 6bfc779ea1 |
| 90481d6aa8 |
| 9f64007dc1 |
| 879ea624ec |
| 79841f451c |
.github/workflows/check-specrefs.yml (8 changes, vendored)
@@ -12,11 +12,11 @@ jobs:
- name: Check version consistency
run: |
WORKSPACE_VERSION=$(grep 'consensus_spec_version = ' WORKSPACE | sed 's/.*"\(.*\)"/\1/')
ETHSPECIFY_VERSION=$(grep '^version:' .ethspecify.yml | sed 's/version: //')
ETHSPECIFY_VERSION=$(grep '^version:' specrefs/.ethspecify.yml | sed 's/version: //')
if [ "$WORKSPACE_VERSION" != "$ETHSPECIFY_VERSION" ]; then
echo "Version mismatch between WORKSPACE and ethspecify"
echo " WORKSPACE: $WORKSPACE_VERSION"
echo " .ethspecify.yml: $ETHSPECIFY_VERSION"
echo " specrefs/.ethspecify.yml: $ETHSPECIFY_VERSION"
exit 1
else
echo "Versions match: $WORKSPACE_VERSION"
@@ -26,7 +26,7 @@ jobs:
run: python3 -mpip install ethspecify

- name: Update spec references
run: ethspecify
run: ethspecify process --path=specrefs

- name: Check for differences
run: |
@@ -40,4 +40,4 @@ jobs:
fi

- name: Check spec references
run: ethspecify check
run: ethspecify check --path=specrefs
@@ -1,19 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
name = "go_default_library",
srcs = [
"fallback.go",
"log.go",
],
importpath = "github.com/OffchainLabs/prysm/v7/api/fallback",
visibility = ["//visibility:public"],
deps = ["@com_github_sirupsen_logrus//:go_default_library"],
)

go_test(
name = "go_default_test",
srcs = ["fallback_test.go"],
embed = [":go_default_library"],
deps = ["//testing/assert:go_default_library"],
)

@@ -1,66 +0,0 @@
package fallback

import (
"context"

"github.com/sirupsen/logrus"
)

// HostProvider is the subset of connection-provider methods that EnsureReady
// needs. Both grpc.GrpcConnectionProvider and rest.RestConnectionProvider
// satisfy this interface.
type HostProvider interface {
Hosts() []string
CurrentHost() string
SwitchHost(index int) error
}

// ReadyChecker can report whether the current endpoint is ready.
// iface.NodeClient satisfies this implicitly.
type ReadyChecker interface {
IsReady(ctx context.Context) bool
}

// EnsureReady iterates through the configured hosts and returns true as soon as
// one responds as ready. It starts from the provider's current host and wraps
// around using modular arithmetic, performing failover when a host is not ready.
func EnsureReady(ctx context.Context, provider HostProvider, checker ReadyChecker) bool {
hosts := provider.Hosts()
numHosts := len(hosts)
startingHost := provider.CurrentHost()
var attemptedHosts []string

// Find current index
currentIdx := 0
for i, h := range hosts {
if h == startingHost {
currentIdx = i
break
}
}

for i := range numHosts {
if checker.IsReady(ctx) {
if len(attemptedHosts) > 0 {
log.WithFields(logrus.Fields{
"previous": startingHost,
"current": provider.CurrentHost(),
"tried": attemptedHosts,
}).Info("Switched to responsive beacon node")
}
return true
}
attemptedHosts = append(attemptedHosts, provider.CurrentHost())

// Try next host if not the last iteration
if i < numHosts-1 {
nextIdx := (currentIdx + i + 1) % numHosts
if err := provider.SwitchHost(nextIdx); err != nil {
log.WithError(err).Error("Failed to switch host")
}
}
}

log.WithField("tried", attemptedHosts).Warn("No responsive beacon node found")
return false
}
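The removed helper above walks the host list with modular wrap-around. A minimal sketch of how a caller might have driven it, assuming the import path from the deleted BUILD file; the provider and checker arguments stand in for real implementations of the two interfaces:

```go
package example

import (
	"context"
	"errors"
	"time"

	// Import path taken from the deleted BUILD.bazel above.
	"github.com/OffchainLabs/prysm/v7/api/fallback"
)

// ensureBeaconReady is a hypothetical caller, not part of the diff.
func ensureBeaconReady(provider fallback.HostProvider, checker fallback.ReadyChecker) error {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// EnsureReady starts at provider.CurrentHost() and wraps around the
	// configured hosts, switching on every host that is not ready.
	if !fallback.EnsureReady(ctx, provider, checker) {
		return errors.New("no responsive beacon node")
	}
	// provider.CurrentHost() now points at a host that reported ready.
	return nil
}
```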
@@ -1,94 +0,0 @@
package fallback

import (
"context"
"testing"

"github.com/OffchainLabs/prysm/v7/testing/assert"
)

// mockHostProvider is a minimal HostProvider for unit tests.
type mockHostProvider struct {
hosts []string
hostIndex int
}

func (m *mockHostProvider) Hosts() []string { return m.hosts }
func (m *mockHostProvider) CurrentHost() string {
return m.hosts[m.hostIndex%len(m.hosts)]
}
func (m *mockHostProvider) SwitchHost(index int) error { m.hostIndex = index; return nil }

// mockReadyChecker records per-call IsReady results in sequence.
type mockReadyChecker struct {
results []bool
idx int
}

func (m *mockReadyChecker) IsReady(_ context.Context) bool {
if m.idx >= len(m.results) {
return false
}
r := m.results[m.idx]
m.idx++
return r
}

func TestEnsureReady_SingleHostReady(t *testing.T) {
provider := &mockHostProvider{hosts: []string{"http://host1:3500"}, hostIndex: 0}
checker := &mockReadyChecker{results: []bool{true}}
assert.Equal(t, true, EnsureReady(t.Context(), provider, checker))
assert.Equal(t, 0, provider.hostIndex)
}

func TestEnsureReady_SingleHostNotReady(t *testing.T) {
provider := &mockHostProvider{hosts: []string{"http://host1:3500"}, hostIndex: 0}
checker := &mockReadyChecker{results: []bool{false}}
assert.Equal(t, false, EnsureReady(t.Context(), provider, checker))
}

func TestEnsureReady_SingleHostError(t *testing.T) {
provider := &mockHostProvider{hosts: []string{"http://host1:3500"}, hostIndex: 0}
checker := &mockReadyChecker{results: []bool{false}}
assert.Equal(t, false, EnsureReady(t.Context(), provider, checker))
}

func TestEnsureReady_MultipleHostsFirstReady(t *testing.T) {
provider := &mockHostProvider{
hosts: []string{"http://host1:3500", "http://host2:3500"},
hostIndex: 0,
}
checker := &mockReadyChecker{results: []bool{true}}
assert.Equal(t, true, EnsureReady(t.Context(), provider, checker))
assert.Equal(t, 0, provider.hostIndex)
}

func TestEnsureReady_MultipleHostsFailoverToSecond(t *testing.T) {
provider := &mockHostProvider{
hosts: []string{"http://host1:3500", "http://host2:3500"},
hostIndex: 0,
}
checker := &mockReadyChecker{results: []bool{false, true}}
assert.Equal(t, true, EnsureReady(t.Context(), provider, checker))
assert.Equal(t, 1, provider.hostIndex)
}

func TestEnsureReady_MultipleHostsNoneReady(t *testing.T) {
provider := &mockHostProvider{
hosts: []string{"http://host1:3500", "http://host2:3500", "http://host3:3500"},
hostIndex: 0,
}
checker := &mockReadyChecker{results: []bool{false, false, false}}
assert.Equal(t, false, EnsureReady(t.Context(), provider, checker))
}

func TestEnsureReady_WrapAroundFromNonZeroIndex(t *testing.T) {
provider := &mockHostProvider{
hosts: []string{"http://host0:3500", "http://host1:3500", "http://host2:3500"},
hostIndex: 1,
}
// host1 (start) fails, host2 fails, host0 succeeds
checker := &mockReadyChecker{results: []bool{false, false, true}}
assert.Equal(t, true, EnsureReady(t.Context(), provider, checker))
assert.Equal(t, 0, provider.hostIndex)
}

@@ -25,11 +25,6 @@ type GrpcConnectionProvider interface {
// SwitchHost switches to the endpoint at the given index.
// The new connection is created lazily on next CurrentConn() call.
SwitchHost(index int) error
// ConnectionCounter returns a monotonically increasing counter that increments
// each time SwitchHost changes the active endpoint. This allows consumers to
// detect connection changes even when the host string returns to a previous value
// (e.g., host0 → host1 → host0).
ConnectionCounter() uint64
// Close closes the current connection.
Close()
}
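The ConnectionCounter comment being removed in this hunk describes a change-detection pattern. For context, a small hypothetical sketch of a consumer of that counter (names below are illustrative, not from the diff):

```go
// connWatcher caches the last observed counter and rebuilds any per-connection
// state whenever it changes, even if the host string is the same one seen
// earlier (host0 -> host1 -> host0).
type connWatcher struct {
	provider    GrpcConnectionProvider
	lastCounter uint64
}

// refreshIfSwitched reports whether the underlying connection changed since
// the last call, so callers can re-create streams or subscriptions.
func (w *connWatcher) refreshIfSwitched() bool {
	current := w.provider.ConnectionCounter()
	if current == w.lastCounter {
		return false // still on the same underlying connection
	}
	w.lastCounter = current
	return true
}
```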
@@ -43,7 +38,6 @@ type grpcConnectionProvider struct {
// Current connection state (protected by mutex)
currentIndex uint64
conn *grpc.ClientConn
connCounter uint64

mu sync.Mutex
closed bool
@@ -144,7 +138,6 @@ func (p *grpcConnectionProvider) SwitchHost(index int) error {

p.conn = nil // Clear immediately - new connection created lazily
p.currentIndex = uint64(index)
p.connCounter++

// Close old connection asynchronously to avoid blocking the caller
if oldConn != nil {
@@ -162,12 +155,6 @@ func (p *grpcConnectionProvider) SwitchHost(index int) error {
return nil
}

func (p *grpcConnectionProvider) ConnectionCounter() uint64 {
p.mu.Lock()
defer p.mu.Unlock()
return p.connCounter
}

func (p *grpcConnectionProvider) Close() {
p.mu.Lock()
defer p.mu.Unlock()

@@ -4,24 +4,17 @@ import "google.golang.org/grpc"

// MockGrpcProvider implements GrpcConnectionProvider for testing.
type MockGrpcProvider struct {
MockConn *grpc.ClientConn
MockHosts []string
CurrentIndex int
ConnCounter uint64
MockConn *grpc.ClientConn
MockHosts []string
}

func (m *MockGrpcProvider) CurrentConn() *grpc.ClientConn { return m.MockConn }
func (m *MockGrpcProvider) CurrentHost() string {
if len(m.MockHosts) > 0 {
return m.MockHosts[m.CurrentIndex]
return m.MockHosts[0]
}
return ""
}
func (m *MockGrpcProvider) Hosts() []string { return m.MockHosts }
func (m *MockGrpcProvider) SwitchHost(idx int) error {
m.CurrentIndex = idx
m.ConnCounter++
return nil
}
func (m *MockGrpcProvider) ConnectionCounter() uint64 { return m.ConnCounter }
func (m *MockGrpcProvider) Close() {}
func (m *MockGrpcProvider) Hosts() []string { return m.MockHosts }
func (m *MockGrpcProvider) SwitchHost(int) error { return nil }
func (m *MockGrpcProvider) Close() {}

@@ -9,13 +9,13 @@ import (
// MockRestProvider implements RestConnectionProvider for testing.
type MockRestProvider struct {
MockClient *http.Client
MockHandler Handler
MockHandler RestHandler
MockHosts []string
HostIndex int
}

func (m *MockRestProvider) HttpClient() *http.Client { return m.MockClient }
func (m *MockRestProvider) Handler() Handler { return m.MockHandler }
func (m *MockRestProvider) RestHandler() RestHandler { return m.MockHandler }
func (m *MockRestProvider) CurrentHost() string {
if len(m.MockHosts) > 0 {
return m.MockHosts[m.HostIndex%len(m.MockHosts)]
@@ -25,22 +25,25 @@ func (m *MockRestProvider) CurrentHost() string {
func (m *MockRestProvider) Hosts() []string { return m.MockHosts }
func (m *MockRestProvider) SwitchHost(index int) error { m.HostIndex = index; return nil }

// MockHandler implements Handler for testing.
type MockHandler struct {
MockHost string
// MockRestHandler implements RestHandler for testing.
type MockRestHandler struct {
MockHost string
MockClient *http.Client
}

func (m *MockHandler) Get(_ context.Context, _ string, _ any) error { return nil }
func (m *MockHandler) GetStatusCode(_ context.Context, _ string) (int, error) {
func (m *MockRestHandler) Get(_ context.Context, _ string, _ any) error { return nil }
func (m *MockRestHandler) GetStatusCode(_ context.Context, _ string) (int, error) {
return http.StatusOK, nil
}
func (m *MockHandler) GetSSZ(_ context.Context, _ string) ([]byte, http.Header, error) {
func (m *MockRestHandler) GetSSZ(_ context.Context, _ string) ([]byte, http.Header, error) {
return nil, nil, nil
}
func (m *MockHandler) Post(_ context.Context, _ string, _ map[string]string, _ *bytes.Buffer, _ any) error {
func (m *MockRestHandler) Post(_ context.Context, _ string, _ map[string]string, _ *bytes.Buffer, _ any) error {
return nil
}
func (m *MockHandler) PostSSZ(_ context.Context, _ string, _ map[string]string, _ *bytes.Buffer) ([]byte, http.Header, error) {
func (m *MockRestHandler) PostSSZ(_ context.Context, _ string, _ map[string]string, _ *bytes.Buffer) ([]byte, http.Header, error) {
return nil, nil, nil
}
func (m *MockHandler) Host() string { return m.MockHost }
func (m *MockRestHandler) HttpClient() *http.Client { return m.MockClient }
func (m *MockRestHandler) Host() string { return m.MockHost }
func (m *MockRestHandler) SwitchHost(host string) { m.MockHost = host }
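A hypothetical test sketch (not part of the diff) showing how the two mocks above fit together; it assumes the mocks live in the same `rest` package referenced elsewhere in this change:

```go
package rest

import (
	"net/http"
	"testing"
)

// TestMockProviderHandsBackHandler wires MockRestHandler into MockRestProvider
// and checks the provider returns the same handler it was given.
func TestMockProviderHandsBackHandler(t *testing.T) {
	handler := &MockRestHandler{MockHost: "http://host1:3500", MockClient: http.DefaultClient}
	provider := &MockRestProvider{
		MockClient:  http.DefaultClient,
		MockHandler: handler,
		MockHosts:   []string{"http://host1:3500", "http://host2:3500"},
	}

	if provider.RestHandler().Host() != "http://host1:3500" {
		t.Fatalf("unexpected host: %s", provider.RestHandler().Host())
	}
}
```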

@@ -17,8 +17,8 @@ import (
type RestConnectionProvider interface {
// HttpClient returns the configured HTTP client with headers, timeout, and optional tracing.
HttpClient() *http.Client
// Handler returns the REST handler for making API requests.
Handler() Handler
// RestHandler returns the REST handler for making API requests.
RestHandler() RestHandler
// CurrentHost returns the current REST API endpoint URL.
CurrentHost() string
// Hosts returns all configured REST API endpoint URLs.
@@ -54,7 +54,7 @@ func WithTracing() RestConnectionProviderOption {
type restConnectionProvider struct {
endpoints []string
httpClient *http.Client
restHandler *handler
restHandler RestHandler
currentIndex atomic.Uint64
timeout time.Duration
headers map[string][]string
@@ -96,7 +96,7 @@ func NewRestConnectionProvider(endpoint string, opts ...RestConnectionProviderOp
}

// Create the REST handler with the HTTP client and initial host
p.restHandler = newHandler(*p.httpClient, endpoints[0])
p.restHandler = newRestHandler(*p.httpClient, endpoints[0])

log.WithFields(logrus.Fields{
"endpoints": endpoints,
@@ -124,7 +124,7 @@ func (p *restConnectionProvider) HttpClient() *http.Client {
return p.httpClient
}

func (p *restConnectionProvider) Handler() Handler {
func (p *restConnectionProvider) RestHandler() RestHandler {
return p.restHandler
}

@@ -21,35 +21,32 @@ import (

type reqOption func(*http.Request)

// Handler defines the interface for making REST API requests.
type Handler interface {
// RestHandler defines the interface for making REST API requests.
type RestHandler interface {
Get(ctx context.Context, endpoint string, resp any) error
GetStatusCode(ctx context.Context, endpoint string) (int, error)
GetSSZ(ctx context.Context, endpoint string) ([]byte, http.Header, error)
Post(ctx context.Context, endpoint string, headers map[string]string, data *bytes.Buffer, resp any) error
PostSSZ(ctx context.Context, endpoint string, headers map[string]string, data *bytes.Buffer) ([]byte, http.Header, error)
HttpClient() *http.Client
Host() string
SwitchHost(host string)
}
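A short, hypothetical in-package fragment showing the renamed constructor and interface in use; the endpoint and response shape are illustrative only and the surrounding function is not part of the diff:

```go
// exampleGet builds a RestHandler via the exported constructor from this diff
// and issues a JSON GET against it.
func exampleGet(ctx context.Context) error {
	h := NewRestHandler(http.Client{Timeout: 10 * time.Second}, "http://localhost:3500")

	var resp struct {
		Data json.RawMessage `json:"data"`
	}
	// Get joins the handler's host with the endpoint and JSON-decodes the body.
	return h.Get(ctx, "/eth/v1/node/version", &resp)
}
```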

type handler struct {
type restHandler struct {
client http.Client
host string
reqOverrides []reqOption
}

// newHandler returns a *handler for internal use within the rest package.
func newHandler(client http.Client, host string) *handler {
rh := &handler{
client: client,
host: host,
}
rh.appendAcceptOverride()
return rh
// newRestHandler returns a RestHandler (internal use)
func newRestHandler(client http.Client, host string) RestHandler {
return NewRestHandler(client, host)
}

// NewHandler returns a Handler
func NewHandler(client http.Client, host string) Handler {
rh := &handler{
// NewRestHandler returns a RestHandler
func NewRestHandler(client http.Client, host string) RestHandler {
rh := &restHandler{
client: client,
host: host,
}
@@ -60,7 +57,7 @@ func NewHandler(client http.Client, host string) Handler {
// appendAcceptOverride enables the Accept header to be customized at runtime via an environment variable.
// This is specified as an env var because it is a niche option that prysm may use for performance testing or debugging
// bug which users are unlikely to need. Using an env var keeps the set of user-facing flags cleaner.
func (c *handler) appendAcceptOverride() {
func (c *restHandler) appendAcceptOverride() {
if accept := os.Getenv(params.EnvNameOverrideAccept); accept != "" {
c.reqOverrides = append(c.reqOverrides, func(req *http.Request) {
req.Header.Set("Accept", accept)
@@ -69,18 +66,18 @@ func (c *handler) appendAcceptOverride() {
}

// HttpClient returns the underlying HTTP client of the handler
func (c *handler) HttpClient() *http.Client {
func (c *restHandler) HttpClient() *http.Client {
return &c.client
}

// Host returns the underlying HTTP host
func (c *handler) Host() string {
func (c *restHandler) Host() string {
return c.host
}

// Get sends a GET request and decodes the response body as a JSON object into the passed in object.
// If an HTTP error is returned, the body is decoded as a DefaultJsonError JSON object and returned as the first return value.
func (c *handler) Get(ctx context.Context, endpoint string, resp any) error {
func (c *restHandler) Get(ctx context.Context, endpoint string, resp any) error {
url := c.host + endpoint
req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
if err != nil {
@@ -103,7 +100,7 @@ func (c *handler) Get(ctx context.Context, endpoint string, resp any) error {
// GetStatusCode sends a GET request and returns only the HTTP status code.
// This is useful for endpoints like /eth/v1/node/health that communicate status via HTTP codes
// (200 = ready, 206 = syncing, 503 = unavailable) rather than response bodies.
func (c *handler) GetStatusCode(ctx context.Context, endpoint string) (int, error) {
func (c *restHandler) GetStatusCode(ctx context.Context, endpoint string) (int, error) {
url := c.host + endpoint
req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
if err != nil {
@@ -122,7 +119,7 @@ func (c *handler) GetStatusCode(ctx context.Context, endpoint string) (int, erro
return httpResp.StatusCode, nil
}
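The GetStatusCode comment above spells out the /eth/v1/node/health semantics; a sketch of a readiness probe built on it (the surrounding function is hypothetical, the status-code meanings come from that comment):

```go
// isNodeReady treats 200 as ready, 206 as still syncing, and anything else
// (including 503) as unavailable.
func isNodeReady(ctx context.Context, h RestHandler) bool {
	code, err := h.GetStatusCode(ctx, "/eth/v1/node/health")
	if err != nil {
		return false
	}
	switch code {
	case http.StatusOK: // 200: synced and ready
		return true
	case http.StatusPartialContent: // 206: syncing
		return false
	default: // 503 or anything unexpected: unavailable
		return false
	}
}
```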

func (c *handler) GetSSZ(ctx context.Context, endpoint string) ([]byte, http.Header, error) {
func (c *restHandler) GetSSZ(ctx context.Context, endpoint string) ([]byte, http.Header, error) {
url := c.host + endpoint
req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
if err != nil {
@@ -177,7 +174,7 @@ func (c *handler) GetSSZ(ctx context.Context, endpoint string) ([]byte, http.Hea

// Post sends a POST request and decodes the response body as a JSON object into the passed in object.
// If an HTTP error is returned, the body is decoded as a DefaultJsonError JSON object and returned as the first return value.
func (c *handler) Post(
func (c *restHandler) Post(
ctx context.Context,
apiEndpoint string,
headers map[string]string,
@@ -213,7 +210,7 @@ func (c *handler) Post(
}

// PostSSZ sends a POST request and prefers an SSZ (application/octet-stream) response body.
func (c *handler) PostSSZ(
func (c *restHandler) PostSSZ(
ctx context.Context,
apiEndpoint string,
headers map[string]string,
@@ -314,6 +311,6 @@ func decodeResp(httpResp *http.Response, resp any) error {
return nil
}

func (c *handler) SwitchHost(host string) {
func (c *restHandler) SwitchHost(host string) {
c.host = host
}

@@ -150,36 +150,3 @@ type ActiveSetChanges struct {
EjectedPublicKeys []string `json:"ejected_public_keys"`
EjectedIndices []string `json:"ejected_indices"`
}

// =============================================================================
// GLOAS Fork Types - Execution Payload Envelope
// =============================================================================
// Note: Block-related GLOAS types (BeaconBlockGloas, ExecutionPayloadBid, etc.)
// are defined in block.go. This file contains only the envelope types used for
// the validator API endpoints.

// ExecutionPayloadEnvelope represents an execution payload envelope in the GLOAS fork.
// This wraps the full execution payload with builder metadata for separate signing
// and broadcasting by validators.
type ExecutionPayloadEnvelope struct {
Payload json.RawMessage `json:"payload"` // ExecutionPayloadDeneb
ExecutionRequests json.RawMessage `json:"execution_requests"` // ExecutionRequests
BuilderIndex string `json:"builder_index"` // uint64 as string
BeaconBlockRoot string `json:"beacon_block_root"` // hex encoded 32 bytes
Slot string `json:"slot"` // uint64 as string
BlobKzgCommitments []string `json:"blob_kzg_commitments"` // list of hex encoded 48-byte commitments
StateRoot string `json:"state_root"` // hex encoded 32 bytes
}

// SignedExecutionPayloadEnvelope wraps an execution payload envelope with a BLS signature.
// The signature is provided by the validator after retrieving the envelope from the beacon node.
type SignedExecutionPayloadEnvelope struct {
Message *ExecutionPayloadEnvelope `json:"message"`
Signature string `json:"signature"` // hex encoded 96-byte BLS signature
}

// GetExecutionPayloadEnvelopeResponse is the response for retrieving a cached execution payload envelope.
type GetExecutionPayloadEnvelopeResponse struct {
Version string `json:"version"`
Data *ExecutionPayloadEnvelope `json:"data"`
}
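The struct comments above note that numeric fields travel as decimal strings and roots as hex. A small, self-contained sketch of the encoding; the field values and the "gloas" version string are illustrative placeholders, and the types are re-declared locally so the snippet compiles on its own:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// Local mirrors of the API types shown in the diff above.
type ExecutionPayloadEnvelope struct {
	Payload            json.RawMessage `json:"payload"`
	ExecutionRequests  json.RawMessage `json:"execution_requests"`
	BuilderIndex       string          `json:"builder_index"`
	BeaconBlockRoot    string          `json:"beacon_block_root"`
	Slot               string          `json:"slot"`
	BlobKzgCommitments []string        `json:"blob_kzg_commitments"`
	StateRoot          string          `json:"state_root"`
}

type GetExecutionPayloadEnvelopeResponse struct {
	Version string                    `json:"version"`
	Data    *ExecutionPayloadEnvelope `json:"data"`
}

func main() {
	// Illustrative values: uint64 fields as decimal strings, roots as 0x-hex.
	resp := GetExecutionPayloadEnvelopeResponse{
		Version: "gloas",
		Data: &ExecutionPayloadEnvelope{
			Payload:         json.RawMessage(`{}`),
			BuilderIndex:    "12",
			Slot:            "123456",
			BeaconBlockRoot: "0x" + strings.Repeat("00", 32),
			StateRoot:       "0x" + strings.Repeat("00", 32),
		},
	}
	out, _ := json.MarshalIndent(resp, "", "  ")
	fmt.Println(string(out))
}
```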

@@ -85,7 +85,6 @@ go_library(
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//io/logs:go_default_library",
"//math:go_default_library",
"//monitoring/tracing:go_default_library",
"//monitoring/tracing/trace:go_default_library",

@@ -110,7 +110,7 @@ func VerifyCellKZGProofBatch(commitmentsBytes []Bytes48, cellIndices []uint64, c
ckzgCells := make([]ckzg4844.Cell, len(cells))

for i := range cells {
copy(ckzgCells[i][:], cells[i][:])
ckzgCells[i] = ckzg4844.Cell(cells[i])
}
return ckzg4844.VerifyCellKZGProofBatch(commitmentsBytes, cellIndices, ckzgCells, proofsBytes)
}
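The hunk above swaps an element-wise copy for a direct type conversion, which is valid in Go whenever the two named types share an identical underlying array type. A minimal illustration with stand-in types (not the real kzg/ckzg4844 cell types):

```go
package main

import "fmt"

// Two named types over the same underlying array type.
type CellA [4]byte
type CellB [4]byte

func main() {
	a := CellA{1, 2, 3, 4}

	// Before: copy the element bytes into the destination.
	var viaCopy CellB
	copy(viaCopy[:], a[:])

	// After: a direct type conversion, no explicit copy needed.
	viaConv := CellB(a)

	fmt.Println(viaCopy == viaConv) // true
}
```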

@@ -10,7 +10,6 @@ import (
consensus_types "github.com/OffchainLabs/prysm/v7/consensus-types"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v7/io/logs"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
prysmTime "github.com/OffchainLabs/prysm/v7/time"
@@ -88,45 +87,36 @@ func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte, justified, finalized *ethpb.Checkpoint, receivedTime time.Time, genesis time.Time, daWaitedTime time.Duration) error {
startTime, err := slots.StartTime(genesis, block.Slot())
if err != nil {
return errors.Wrap(err, "failed to get slot start time")
return err
}
parentRoot := block.ParentRoot()
blkRoot := fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8])
finalizedRoot := fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8])
sinceSlotStartTime := prysmTime.Now().Sub(startTime)

lessFields := logrus.Fields{
"slot": block.Slot(),
"block": blkRoot,
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": finalizedRoot,
"epoch": slots.ToEpoch(block.Slot()),
"sinceSlotStartTime": sinceSlotStartTime,
}
moreFields := logrus.Fields{
"slot": block.Slot(),
"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
"block": blkRoot,
"epoch": slots.ToEpoch(block.Slot()),
"justifiedEpoch": justified.Epoch,
"justifiedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": finalizedRoot,
"parentRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(parentRoot[:])[:8]),
"version": version.String(block.Version()),
"sinceSlotStartTime": sinceSlotStartTime,
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime) - daWaitedTime,
"dataAvailabilityWaitedTime": daWaitedTime,
}

level := logs.PackageVerbosity("beacon-chain/blockchain")
level := log.Logger.GetLevel()
if level >= logrus.DebugLevel {
log.WithFields(moreFields).Info("Synced new block")
return nil
parentRoot := block.ParentRoot()
lf := logrus.Fields{
"slot": block.Slot(),
"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"epoch": slots.ToEpoch(block.Slot()),
"justifiedEpoch": justified.Epoch,
"justifiedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
"parentRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(parentRoot[:])[:8]),
"version": version.String(block.Version()),
"sinceSlotStartTime": prysmTime.Now().Sub(startTime),
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime) - daWaitedTime,
"dataAvailabilityWaitedTime": daWaitedTime,
}
log.WithFields(lf).Debug("Synced new block")
} else {
log.WithFields(logrus.Fields{
"slot": block.Slot(),
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
"epoch": slots.ToEpoch(block.Slot()),
}).Info("Synced new block")
}

log.WithFields(lessFields).WithField(logs.LogTargetField, logs.LogTargetUser).Info("Synced new block")
log.WithFields(moreFields).WithField(logs.LogTargetField, logs.LogTargetEphemeral).Info("Synced new block")
return nil
}

@@ -89,7 +89,7 @@ func (mb *mockBroadcaster) BroadcastLightClientFinalityUpdate(_ context.Context,
return nil
}

func (mb *mockBroadcaster) BroadcastDataColumnSidecars(_ context.Context, _ []blocks.VerifiedRODataColumn) error {
func (mb *mockBroadcaster) BroadcastDataColumnSidecars(_ context.Context, _ []blocks.VerifiedRODataColumn, _ []blocks.PartialDataColumn) error {
mb.broadcastCalled = true
return nil
}
beacon-chain/cache/BUILD.bazel (4 changes, vendored)
@@ -15,7 +15,6 @@ go_library(
"common.go",
"doc.go",
"error.go",
"execution_payload_envelope.go",
"interfaces.go",
"log.go",
"payload_id.go",
@@ -52,7 +51,6 @@ go_library(
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation:go_default_library",
"//runtime/version:go_default_library",
@@ -78,7 +76,6 @@ go_test(
"checkpoint_state_test.go",
"committee_fuzz_test.go",
"committee_test.go",
"execution_payload_envelope_test.go",
"payload_id_test.go",
"private_access_test.go",
"proposer_indices_test.go",
@@ -100,7 +97,6 @@ go_test(
"//consensus-types/primitives:go_default_library",
"//crypto/bls/blst:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation:go_default_library",
"//testing/assert:go_default_library",
beacon-chain/cache/execution_payload_envelope.go (111 changes, vendored)
@@ -1,111 +0,0 @@
package cache

import (
"sync"

"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)

// ExecutionPayloadEnvelopeKey uniquely identifies a cached execution payload envelope.
type ExecutionPayloadEnvelopeKey struct {
Slot primitives.Slot
BuilderIndex primitives.BuilderIndex
}

// executionPayloadEnvelopeCacheEntry holds an execution payload envelope and
// the associated blobs bundle from the EL. The blobs bundle is needed later
// when proposing the block to build and broadcast blob sidecars.
type executionPayloadEnvelopeCacheEntry struct {
envelope *ethpb.ExecutionPayloadEnvelope
blobsBundle enginev1.BlobsBundler
}

// ExecutionPayloadEnvelopeCache stores execution payload envelopes produced during
// GLOAS block building for later retrieval by validators. When a beacon node
// produces a GLOAS block, it caches the execution payload envelope so the validator
// can retrieve it, sign it, and broadcast it separately from the beacon block.
// The blobs bundle from the EL is also cached alongside, since blobs are only
// persisted to the DB after they are broadcast as sidecars during block proposal.
type ExecutionPayloadEnvelopeCache struct {
cache map[ExecutionPayloadEnvelopeKey]*executionPayloadEnvelopeCacheEntry
sync.RWMutex
}

// NewExecutionPayloadEnvelopeCache creates a new execution payload envelope cache.
func NewExecutionPayloadEnvelopeCache() *ExecutionPayloadEnvelopeCache {
return &ExecutionPayloadEnvelopeCache{
cache: make(map[ExecutionPayloadEnvelopeKey]*executionPayloadEnvelopeCacheEntry),
}
}

// Get retrieves an execution payload envelope by slot and builder index.
// Returns the envelope and true if found, nil and false otherwise.
func (c *ExecutionPayloadEnvelopeCache) Get(slot primitives.Slot, builderIndex primitives.BuilderIndex) (*ethpb.ExecutionPayloadEnvelope, bool) {
c.RLock()
defer c.RUnlock()

key := ExecutionPayloadEnvelopeKey{
Slot: slot,
BuilderIndex: builderIndex,
}
entry, ok := c.cache[key]
if !ok {
return nil, false
}
return entry.envelope, true
}

// GetBlobsBundle retrieves a cached blobs bundle by slot and builder index.
// Returns the blobs bundle and true if found, nil and false otherwise.
func (c *ExecutionPayloadEnvelopeCache) GetBlobsBundle(slot primitives.Slot, builderIndex primitives.BuilderIndex) (enginev1.BlobsBundler, bool) {
c.RLock()
defer c.RUnlock()

key := ExecutionPayloadEnvelopeKey{
Slot: slot,
BuilderIndex: builderIndex,
}
entry, ok := c.cache[key]
if !ok {
return nil, false
}
return entry.blobsBundle, true
}

// Set stores an execution payload envelope and its associated blobs bundle in the cache.
// The envelope's slot and builder_index fields are used as the cache key.
// Old entries are automatically pruned to prevent unbounded growth.
func (c *ExecutionPayloadEnvelopeCache) Set(envelope *ethpb.ExecutionPayloadEnvelope, blobsBundle enginev1.BlobsBundler) {
if envelope == nil {
return
}

c.Lock()
defer c.Unlock()

slot := envelope.Slot
if slot > 2 {
c.prune(slot - 2)
}

key := ExecutionPayloadEnvelopeKey{
Slot: slot,
BuilderIndex: envelope.BuilderIndex,
}
c.cache[key] = &executionPayloadEnvelopeCacheEntry{
envelope: envelope,
blobsBundle: blobsBundle,
}
}

// prune removes all entries with slots older than the given slot.
// Must be called with the lock held.
func (c *ExecutionPayloadEnvelopeCache) prune(slot primitives.Slot) {
for key := range c.cache {
if key.Slot < slot {
delete(c.cache, key)
}
}
}
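A compact, hypothetical caller of the cache removed in this diff (placeholder envelopes; only Slot and BuilderIndex matter for the key). Set prunes everything older than the newest slot minus two, so earlier entries fall out, as the tests below also exercise:

```go
package main

import (
	"fmt"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)

func main() {
	c := cache.NewExecutionPayloadEnvelopeCache()

	c.Set(&ethpb.ExecutionPayloadEnvelope{Slot: 10, BuilderIndex: 0}, nil)
	c.Set(&ethpb.ExecutionPayloadEnvelope{Slot: 20, BuilderIndex: 0}, nil) // prunes entries with slot < 18

	_, ok := c.Get(10, 0)
	fmt.Println(ok) // false: slot 10 fell out of the ~2-slot window
	_, ok = c.Get(20, 0)
	fmt.Println(ok) // true: the freshest envelope is still cached
}
```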

@@ -1,146 +0,0 @@
package cache

import (
"testing"

"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/require"
)

func TestExecutionPayloadEnvelopeCache_GetSet(t *testing.T) {
cache := NewExecutionPayloadEnvelopeCache()

// Test empty cache returns false
_, ok := cache.Get(1, 0)
require.Equal(t, false, ok, "expected empty cache to return false")

// Create test envelope
envelope := &ethpb.ExecutionPayloadEnvelope{
Slot: primitives.Slot(100),
BuilderIndex: primitives.BuilderIndex(5),
StateRoot: make([]byte, 32),
}

// Set and retrieve
cache.Set(envelope, nil)
retrieved, ok := cache.Get(100, 5)
require.Equal(t, true, ok, "expected to find cached envelope")
require.Equal(t, envelope.Slot, retrieved.Slot)
require.Equal(t, envelope.BuilderIndex, retrieved.BuilderIndex)

// Different builder index should not find it
_, ok = cache.Get(100, 6)
require.Equal(t, false, ok, "expected different builder index to return false")

// Different slot should not find it
_, ok = cache.Get(101, 5)
require.Equal(t, false, ok, "expected different slot to return false")
}

func TestExecutionPayloadEnvelopeCache_Prune(t *testing.T) {
cache := NewExecutionPayloadEnvelopeCache()

// Add envelopes at slots 10, 11, 12 (close enough that none get pruned).
// Prune removes entries with slot < (new_slot - 2), so inserting 10, 11, 12
// keeps all three: slot 12 prunes < 10, but 10 is not < 10.
for i := primitives.Slot(10); i <= 12; i++ {
envelope := &ethpb.ExecutionPayloadEnvelope{
Slot: i,
BuilderIndex: 0,
StateRoot: make([]byte, 32),
}
cache.Set(envelope, nil)
}

// Verify all are present
for i := primitives.Slot(10); i <= 12; i++ {
_, ok := cache.Get(i, 0)
require.Equal(t, true, ok, "expected envelope at slot %d", i)
}

// Add envelope at slot 20, should prune slots < 18
envelope := &ethpb.ExecutionPayloadEnvelope{
Slot: 20,
BuilderIndex: 0,
StateRoot: make([]byte, 32),
}
cache.Set(envelope, nil)

// Slots 10-12 should be pruned (all < 18)
for i := primitives.Slot(10); i <= 12; i++ {
_, ok := cache.Get(i, 0)
require.Equal(t, false, ok, "expected envelope at slot %d to be pruned", i)
}

// Slot 20 should still be present
_, ok := cache.Get(20, 0)
require.Equal(t, true, ok, "expected envelope at slot 20 to be present")
}

func TestExecutionPayloadEnvelopeCache_NilEnvelope(t *testing.T) {
cache := NewExecutionPayloadEnvelopeCache()

// Setting nil should not panic or add entry
cache.Set(nil, nil)

// Cache should still be empty
require.Equal(t, 0, len(cache.cache))
}

func TestExecutionPayloadEnvelopeCache_MultipleBuilders(t *testing.T) {
cache := NewExecutionPayloadEnvelopeCache()

slot := primitives.Slot(100)

// Add envelopes from multiple builders at same slot
for i := range primitives.BuilderIndex(3) {
envelope := &ethpb.ExecutionPayloadEnvelope{
Slot: slot,
BuilderIndex: i,
StateRoot: make([]byte, 32),
}
cache.Set(envelope, nil)
}

// All should be retrievable
for i := range primitives.BuilderIndex(3) {
retrieved, ok := cache.Get(slot, i)
require.Equal(t, true, ok, "expected to find envelope for builder %d", i)
require.Equal(t, i, retrieved.BuilderIndex)
}
}

func TestExecutionPayloadEnvelopeCache_BlobsBundle(t *testing.T) {
cache := NewExecutionPayloadEnvelopeCache()

slot := primitives.Slot(100)
builderIndex := primitives.BuilderIndex(5)

envelope := &ethpb.ExecutionPayloadEnvelope{
Slot: slot,
BuilderIndex: builderIndex,
StateRoot: make([]byte, 32),
}
bundle := &enginev1.BlobsBundle{
KzgCommitments: [][]byte{{1, 2, 3}},
Proofs: [][]byte{{4, 5, 6}},
Blobs: [][]byte{{7, 8, 9}},
}

cache.Set(envelope, bundle)

// Retrieve blobs bundle
retrieved, ok := cache.GetBlobsBundle(slot, builderIndex)
require.Equal(t, true, ok, "expected to find cached blobs bundle")
require.NotNil(t, retrieved)
b, ok := retrieved.(*enginev1.BlobsBundle)
require.Equal(t, true, ok)
require.Equal(t, 1, len(b.KzgCommitments))
require.DeepEqual(t, []byte{1, 2, 3}, b.KzgCommitments[0])

// Nil blobs bundle for missing key
_, ok = cache.GetBlobsBundle(slot, 99)
require.Equal(t, false, ok, "expected missing key to return false")
}
@@ -17,50 +17,27 @@ import (
)

// ProcessExecutionPayloadBid processes a signed execution payload bid in the Gloas fork.
// Spec v1.7.0-alpha.0 (pseudocode):
// process_execution_payload_bid(state: BeaconState, block: BeaconBlock):
//
// <spec fn="process_execution_payload_bid" fork="gloas" hash="6dc696bb">
// def process_execution_payload_bid(state: BeaconState, block: BeaconBlock) -> None:
// signed_bid = block.body.signed_execution_payload_bid
// bid = signed_bid.message
// builder_index = bid.builder_index
// amount = bid.value
//
// # For self-builds, amount must be zero regardless of withdrawal credential prefix
// if builder_index == BUILDER_INDEX_SELF_BUILD:
// assert amount == 0
// assert signed_bid.signature == bls.G2_POINT_AT_INFINITY
// else:
// # Verify that the builder is active
// assert is_active_builder(state, builder_index)
// # Verify that the builder has funds to cover the bid
// assert can_builder_cover_bid(state, builder_index, amount)
// # Verify that the bid signature is valid
// assert verify_execution_payload_bid_signature(state, signed_bid)
//
// # Verify that the bid is for the current slot
// assert bid.slot == block.slot
// # Verify that the bid is for the right parent block
// assert bid.parent_block_hash == state.latest_block_hash
// assert bid.parent_block_root == block.parent_root
// assert bid.prev_randao == get_randao_mix(state, get_current_epoch(state))
//
// # Record the pending payment if there is some payment
// if amount > 0:
// pending_payment = BuilderPendingPayment(
// weight=0,
// withdrawal=BuilderPendingWithdrawal(
// fee_recipient=bid.fee_recipient,
// amount=amount,
// builder_index=builder_index,
// ),
// )
// state.builder_pending_payments[SLOTS_PER_EPOCH + bid.slot % SLOTS_PER_EPOCH] = (
// pending_payment
// )
//
// # Cache the signed execution payload bid
// state.latest_execution_payload_bid = bid
// </spec>
// signed_bid = block.body.signed_execution_payload_bid
// bid = signed_bid.message
// builder_index = bid.builder_index
// amount = bid.value
// if builder_index == BUILDER_INDEX_SELF_BUILD:
// assert amount == 0
// assert signed_bid.signature == G2_POINT_AT_INFINITY
// else:
// assert is_active_builder(state, builder_index)
// assert can_builder_cover_bid(state, builder_index, amount)
// assert verify_execution_payload_bid_signature(state, signed_bid)
// assert bid.slot == block.slot
// assert bid.parent_block_hash == state.latest_block_hash
// assert bid.parent_block_root == block.parent_root
// assert bid.prev_randao == get_randao_mix(state, get_current_epoch(state))
// if amount > 0:
// state.builder_pending_payments[...] = BuilderPendingPayment(weight=0, withdrawal=BuilderPendingWithdrawal(fee_recipient=bid.fee_recipient, amount=amount, builder_index=builder_index))
// state.latest_execution_payload_bid = bid
func ProcessExecutionPayloadBid(st state.BeaconState, block interfaces.ReadOnlyBeaconBlock) error {
signedBid, err := block.Body().SignedExecutionPayloadBid()
if err != nil {

@@ -24,21 +24,14 @@ import (
)

// ProcessPayloadAttestations validates payload attestations in a block body.
// Spec v1.7.0-alpha.0 (pseudocode):
// process_payload_attestation(state: BeaconState, payload_attestation: PayloadAttestation):
//
// <spec fn="process_payload_attestation" fork="gloas" hash="f46bf0b0">
// def process_payload_attestation(
// state: BeaconState, payload_attestation: PayloadAttestation
// ) -> None:
// data = payload_attestation.data
//
// # Check that the attestation is for the parent beacon block
// assert data.beacon_block_root == state.latest_block_header.parent_root
// # Check that the attestation is for the previous slot
// assert data.slot + 1 == state.slot
// # Verify signature
// indexed_payload_attestation = get_indexed_payload_attestation(state, payload_attestation)
// assert is_valid_indexed_payload_attestation(state, indexed_payload_attestation)
// </spec>
// data = payload_attestation.data
// assert data.beacon_block_root == state.latest_block_header.parent_root
// assert data.slot + 1 == state.slot
// indexed = get_indexed_payload_attestation(state, data.slot, payload_attestation)
// assert is_valid_indexed_payload_attestation(state, indexed)
func ProcessPayloadAttestations(ctx context.Context, st state.BeaconState, body interfaces.ReadOnlyBeaconBlockBody) error {
atts, err := body.PayloadAttestations()
if err != nil {
@@ -97,24 +90,17 @@ func indexedPayloadAttestation(ctx context.Context, st state.ReadOnlyBeaconState
}

// payloadCommittee returns the payload timeliness committee for a given slot for the state.
// Spec v1.7.0-alpha.0 (pseudocode):
// get_ptc(state: BeaconState, slot: Slot) -> Vector[ValidatorIndex, PTC_SIZE]:
//
// <spec fn="get_ptc" fork="gloas" hash="ae15f761">
// def get_ptc(state: BeaconState, slot: Slot) -> Vector[ValidatorIndex, PTC_SIZE]:
// """
// Get the payload timeliness committee for the given ``slot``.
// """
// epoch = compute_epoch_at_slot(slot)
// seed = hash(get_seed(state, epoch, DOMAIN_PTC_ATTESTER) + uint_to_bytes(slot))
// indices: List[ValidatorIndex] = []
// # Concatenate all committees for this slot in order
// committees_per_slot = get_committee_count_per_slot(state, epoch)
// for i in range(committees_per_slot):
// committee = get_beacon_committee(state, slot, CommitteeIndex(i))
// indices.extend(committee)
// return compute_balance_weighted_selection(
// state, indices, seed, size=PTC_SIZE, shuffle_indices=False
// )
// </spec>
// epoch = compute_epoch_at_slot(slot)
// seed = hash(get_seed(state, epoch, DOMAIN_PTC_ATTESTER) + uint_to_bytes(slot))
// indices = []
// committees_per_slot = get_committee_count_per_slot(state, epoch)
// for i in range(committees_per_slot):
// committee = get_beacon_committee(state, slot, CommitteeIndex(i))
// indices.extend(committee)
// return compute_balance_weighted_selection(state, indices, seed, size=PTC_SIZE, shuffle_indices=False)
func payloadCommittee(ctx context.Context, st state.ReadOnlyBeaconState, slot primitives.Slot) ([]primitives.ValidatorIndex, error) {
epoch := slots.ToEpoch(slot)
seed, err := ptcSeed(st, epoch, slot)
@@ -166,35 +152,17 @@ func ptcSeed(st state.ReadOnlyBeaconState, epoch primitives.Epoch, slot primitiv
}

// selectByBalance selects a balance-weighted subset of input candidates.
// Spec v1.7.0-alpha.0 (pseudocode):
// compute_balance_weighted_selection(state, indices, seed, size, shuffle_indices):
// Note: shuffle_indices is false for PTC.
//
// <spec fn="compute_balance_weighted_selection" fork="gloas" hash="2c9f1c23">
// def compute_balance_weighted_selection(
// state: BeaconState,
// indices: Sequence[ValidatorIndex],
// seed: Bytes32,
// size: uint64,
// shuffle_indices: bool,
// ) -> Sequence[ValidatorIndex]:
// """
// Return ``size`` indices sampled by effective balance, using ``indices``
// as candidates. If ``shuffle_indices`` is ``True``, candidate indices
// are themselves sampled from ``indices`` by shuffling it, otherwise
// ``indices`` is traversed in order.
// """
// total = uint64(len(indices))
// assert total > 0
// selected: List[ValidatorIndex] = []
// i = uint64(0)
// while len(selected) < size:
// next_index = i % total
// if shuffle_indices:
// next_index = compute_shuffled_index(next_index, total, seed)
// candidate_index = indices[next_index]
// if compute_balance_weighted_acceptance(state, candidate_index, seed, i):
// selected.append(candidate_index)
// i += 1
// return selected
// </spec>
// total = len(indices); selected = []; i = 0
// while len(selected) < size:
// next = i % total
// if shuffle_indices: next = compute_shuffled_index(next, total, seed)
// if compute_balance_weighted_acceptance(state, indices[next], seed, i):
// selected.append(indices[next])
// i += 1
func selectByBalanceFill(
ctx context.Context,
st state.ReadOnlyBeaconState,
@@ -231,22 +199,15 @@ func selectByBalanceFill(
}

// acceptByBalance determines if a validator is accepted based on its effective balance.
// Spec v1.7.0-alpha.0 (pseudocode):
// compute_balance_weighted_acceptance(state, index, seed, i):
//
// <spec fn="compute_balance_weighted_acceptance" fork="gloas" hash="9954dcd0">
// def compute_balance_weighted_acceptance(
// state: BeaconState, index: ValidatorIndex, seed: Bytes32, i: uint64
// ) -> bool:
// """
// Return whether to accept the selection of the validator ``index``, with probability
// proportional to its ``effective_balance``, and randomness given by ``seed`` and ``i``.
// """
// MAX_RANDOM_VALUE = 2**16 - 1
// random_bytes = hash(seed + uint_to_bytes(i // 16))
// offset = i % 16 * 2
// random_value = bytes_to_uint64(random_bytes[offset : offset + 2])
// effective_balance = state.validators[index].effective_balance
// return effective_balance * MAX_RANDOM_VALUE >= MAX_EFFECTIVE_BALANCE_ELECTRA * random_value
// </spec>
// MAX_RANDOM_VALUE = 2**16 - 1
// random_bytes = hash(seed + uint_to_bytes(i // 16))
// offset = i % 16 * 2
// random_value = bytes_to_uint64(random_bytes[offset:offset+2])
// effective_balance = state.validators[index].effective_balance
// return effective_balance * MAX_RANDOM_VALUE >= MAX_EFFECTIVE_BALANCE_ELECTRA * random_value
func acceptByBalance(st state.ReadOnlyBeaconState, idx primitives.ValidatorIndex, seedBuf []byte, hashFunc func([]byte) [32]byte, maxBalance uint64, round uint64) (bool, error) {
// Reuse the seed buffer by overwriting the last 8 bytes with the round counter.
binary.LittleEndian.PutUint64(seedBuf[len(seedBuf)-8:], round/16)
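The acceptance rule quoted in the spec comment above reduces to a fixed-point comparison. A standalone sketch of just that comparison; the constants are written out directly (MAX_EFFECTIVE_BALANCE_ELECTRA is 2048 ETH in Gwei) and the 16-bit random draw is a plain parameter rather than the seed-derived bytes the spec uses:

```go
package main

import "fmt"

const (
	maxRandomValue             = 1<<16 - 1         // 2**16 - 1
	maxEffectiveBalanceElectra = 2_048_000_000_000 // 2048 ETH in Gwei
)

// accept reports whether a validator with the given effective balance (Gwei)
// is selected for a 16-bit random draw r in [0, maxRandomValue].
func accept(effectiveBalance, r uint64) bool {
	return effectiveBalance*maxRandomValue >= maxEffectiveBalanceElectra*r
}

func main() {
	// A 32 ETH validator is accepted with probability roughly 32/2048.
	fmt.Println(accept(32_000_000_000, 0))     // true: a draw of 0 always accepts
	fmt.Println(accept(32_000_000_000, 65535)) // false: the max draw only accepts full-balance validators
}
```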
|
||||
@@ -263,26 +224,16 @@ func acceptByBalance(st state.ReadOnlyBeaconState, idx primitives.ValidatorIndex
|
||||
}
|
||||
|
||||
// validIndexedPayloadAttestation verifies the signature of an indexed payload attestation.
|
||||
// Spec v1.7.0-alpha.0 (pseudocode):
|
||||
// is_valid_indexed_payload_attestation(state: BeaconState, indexed_payload_attestation: IndexedPayloadAttestation) -> bool:
|
||||
//
|
||||
// <spec fn="is_valid_indexed_payload_attestation" fork="gloas" hash="cf1e65b5">
|
||||
// def is_valid_indexed_payload_attestation(
|
||||
// state: BeaconState, indexed_payload_attestation: IndexedPayloadAttestation
|
||||
// ) -> bool:
|
||||
// """
|
||||
// Check if ``indexed_payload_attestation`` is non-empty, has sorted indices, and has
|
||||
// a valid aggregate signature.
|
||||
// """
|
||||
// # Verify indices are non-empty and sorted
|
||||
// indices = indexed_payload_attestation.attesting_indices
|
||||
// if len(indices) == 0 or not indices == sorted(indices):
|
||||
// return False
|
||||
//
|
||||
// # Verify aggregate signature
|
||||
// pubkeys = [state.validators[i].pubkey for i in indices]
|
||||
// domain = get_domain(state, DOMAIN_PTC_ATTESTER, None)
|
||||
// signing_root = compute_signing_root(indexed_payload_attestation.data, domain)
|
||||
// return bls.FastAggregateVerify(pubkeys, signing_root, indexed_payload_attestation.signature)
|
||||
// </spec>
|
||||
// indices = indexed_payload_attestation.attesting_indices
|
||||
// return len(indices) > 0 and indices == sorted(indices) and
|
||||
// bls.FastAggregateVerify(
|
||||
// [state.validators[i].pubkey for i in indices],
|
||||
// compute_signing_root(indexed_payload_attestation.data, get_domain(state, DOMAIN_PTC_ATTESTER, compute_epoch_at_slot(attestation.data.slot)),
|
||||
// indexed_payload_attestation.signature,
|
||||
// )
|
||||
func validIndexedPayloadAttestation(st state.ReadOnlyBeaconState, att *consensus_types.IndexedPayloadAttestation) error {
|
||||
indices := att.AttestingIndices
|
||||
if len(indices) == 0 || !slices.IsSorted(indices) {
|
||||
|
||||
@@ -10,21 +10,17 @@ import (
|
||||
)
|
||||
|
||||
// ProcessBuilderPendingPayments processes the builder pending payments from the previous epoch.
|
||||
// Spec v1.7.0-alpha.0 (pseudocode):
|
||||
// def process_builder_pending_payments(state: BeaconState) -> None:
|
||||
//
|
||||
// <spec fn="process_builder_pending_payments" fork="gloas" hash="10da48dd">
|
||||
// def process_builder_pending_payments(state: BeaconState) -> None:
|
||||
// """
|
||||
// Processes the builder pending payments from the previous epoch.
|
||||
// """
|
||||
// quorum = get_builder_payment_quorum_threshold(state)
|
||||
// for payment in state.builder_pending_payments[:SLOTS_PER_EPOCH]:
|
||||
// if payment.weight >= quorum:
|
||||
// state.builder_pending_withdrawals.append(payment.withdrawal)
|
||||
// quorum = get_builder_payment_quorum_threshold(state)
|
||||
// for payment in state.builder_pending_payments[:SLOTS_PER_EPOCH]:
|
||||
// if payment.weight >= quorum:
|
||||
// state.builder_pending_withdrawals.append(payment.withdrawal)
|
||||
//
|
||||
// old_payments = state.builder_pending_payments[SLOTS_PER_EPOCH:]
|
||||
// new_payments = [BuilderPendingPayment() for _ in range(SLOTS_PER_EPOCH)]
|
||||
// state.builder_pending_payments = old_payments + new_payments
|
||||
// </spec>
|
||||
// old_payments = state.builder_pending_payments[SLOTS_PER_EPOCH:]
|
||||
// new_payments = [BuilderPendingPayment() for _ in range(SLOTS_PER_EPOCH)]
|
||||
// state.builder_pending_payments = old_payments + new_payments
|
||||
func ProcessBuilderPendingPayments(state state.BeaconState) error {
|
||||
quorum, err := builderQuorumThreshold(state)
|
||||
if err != nil {
|
||||
@@ -57,16 +53,12 @@ func ProcessBuilderPendingPayments(state state.BeaconState) error {
|
||||
}
|
||||
|
||||
// builderQuorumThreshold calculates the quorum threshold for builder payments.
|
||||
// Spec v1.7.0-alpha.0 (pseudocode):
|
||||
// def get_builder_payment_quorum_threshold(state: BeaconState) -> uint64:
|
||||
//
|
||||
// <spec fn="get_builder_payment_quorum_threshold" fork="gloas" hash="a64b7ffb">
|
||||
// def get_builder_payment_quorum_threshold(state: BeaconState) -> uint64:
|
||||
// """
|
||||
// Calculate the quorum threshold for builder payments.
|
||||
// """
|
||||
// per_slot_balance = get_total_active_balance(state) // SLOTS_PER_EPOCH
|
||||
// quorum = per_slot_balance * BUILDER_PAYMENT_THRESHOLD_NUMERATOR
|
||||
// return uint64(quorum // BUILDER_PAYMENT_THRESHOLD_DENOMINATOR)
|
||||
// </spec>
|
||||
// per_slot_balance = get_total_active_balance(state) // SLOTS_PER_EPOCH
|
||||
// quorum = per_slot_balance * BUILDER_PAYMENT_THRESHOLD_NUMERATOR
|
||||
// return uint64(quorum // BUILDER_PAYMENT_THRESHOLD_DENOMINATOR)
|
||||
func builderQuorumThreshold(state state.ReadOnlyBeaconState) (primitives.Gwei, error) {
|
||||
activeBalance, err := helpers.TotalActiveBalance(state)
|
||||
if err != nil {
|
||||
|
||||
@@ -11,20 +11,16 @@ import (
|
||||
)
|
||||
|
||||
// RemoveBuilderPendingPayment removes the pending builder payment for the proposal slot.
|
||||
// Spec v1.7.0 (pseudocode):
|
||||
//
|
||||
// <spec fn="process_proposer_slashing" fork="gloas" lines="22-32" hash="4da721ef">
|
||||
// # [New in Gloas:EIP7732]
|
||||
// # Remove the BuilderPendingPayment corresponding to
|
||||
// # this proposal if it is still in the 2-epoch window.
|
||||
// slot = header_1.slot
|
||||
// proposal_epoch = compute_epoch_at_slot(slot)
|
||||
// if proposal_epoch == get_current_epoch(state):
|
||||
// payment_index = SLOTS_PER_EPOCH + slot % SLOTS_PER_EPOCH
|
||||
// state.builder_pending_payments[payment_index] = BuilderPendingPayment()
|
||||
// payment_index = SLOTS_PER_EPOCH + slot % SLOTS_PER_EPOCH
|
||||
// state.builder_pending_payments[payment_index] = BuilderPendingPayment()
|
||||
// elif proposal_epoch == get_previous_epoch(state):
|
||||
// payment_index = slot % SLOTS_PER_EPOCH
|
||||
// state.builder_pending_payments[payment_index] = BuilderPendingPayment()
|
||||
// </spec>
|
||||
// payment_index = slot % SLOTS_PER_EPOCH
|
||||
// state.builder_pending_payments[payment_index] = BuilderPendingPayment()
|
||||
func RemoveBuilderPendingPayment(st state.BeaconState, header *eth.BeaconBlockHeader) error {
|
||||
proposalEpoch := slots.ToEpoch(header.Slot)
|
||||
currentEpoch := time.CurrentEpoch(st)
|
||||
|
||||
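The epoch check in the spec comment above decides which half of the two-epoch payment window to clear. A small sketch of just that index selection, with plain integers standing in for Prysm's slot and epoch types:

package main

import "fmt"

const slotsPerEpoch = 32

// paymentIndex returns the builder_pending_payments index to clear, or -1 if
// the slashed proposal is outside the two-epoch window.
func paymentIndex(proposalSlot, currentEpoch uint64) int {
	proposalEpoch := proposalSlot / slotsPerEpoch
	switch {
	case proposalEpoch == currentEpoch:
		return int(slotsPerEpoch + proposalSlot%slotsPerEpoch) // second half: current epoch
	case currentEpoch > 0 && proposalEpoch == currentEpoch-1:
		return int(proposalSlot % slotsPerEpoch) // first half: previous epoch
	default:
		return -1
	}
}

func main() {
	fmt.Println(paymentIndex(70, 2)) // slot 70 is in the current epoch 2: index 38
	fmt.Println(paymentIndex(40, 2)) // slot 40 is in the previous epoch 1: index 8
	fmt.Println(paymentIndex(5, 2))  // too old, outside the window: -1
}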
@@ -33,6 +33,7 @@ go_library(
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@org_golang_x_sync//errgroup:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -1,11 +1,15 @@
|
||||
package peerdas
|
||||
|
||||
import (
|
||||
stderrors "errors"
|
||||
"iter"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/container/trie"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
@@ -16,6 +20,7 @@ var (
|
||||
ErrIndexTooLarge = errors.New("column index is larger than the specified columns count")
|
||||
ErrNoKzgCommitments = errors.New("no KZG commitments found")
|
||||
ErrMismatchLength = errors.New("mismatch in the length of the column, commitments or proofs")
|
||||
ErrEmptySegment = errors.New("empty segment in batch")
|
||||
ErrInvalidKZGProof = errors.New("invalid KZG proof")
|
||||
ErrBadRootLength = errors.New("bad root length")
|
||||
ErrInvalidInclusionProof = errors.New("invalid inclusion proof")
|
||||
@@ -57,80 +62,113 @@ func VerifyDataColumnSidecar(sidecar blocks.RODataColumn) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// VerifyDataColumnsSidecarKZGProofs verifies if the KZG proofs are correct.
|
||||
// CellProofBundleSegment is returned when a batch fails. The caller can call
|
||||
// the `.Verify` method to verify just this segment.
|
||||
type CellProofBundleSegment struct {
|
||||
indices []uint64
|
||||
commitments []kzg.Bytes48
|
||||
cells []kzg.Cell
|
||||
proofs []kzg.Bytes48
|
||||
}
|
||||
|
||||
// Verify verifies this segment without batching.
|
||||
func (s CellProofBundleSegment) Verify() error {
|
||||
if len(s.cells) == 0 {
|
||||
return ErrEmptySegment
|
||||
}
|
||||
verified, err := kzg.VerifyCellKZGProofBatch(s.commitments, s.indices, s.cells, s.proofs)
|
||||
if err != nil {
|
||||
return stderrors.Join(err, ErrInvalidKZGProof)
|
||||
}
|
||||
if !verified {
|
||||
return ErrInvalidKZGProof
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func VerifyDataColumnsCellsKZGProofs(sizeHint int, cellProofsIter iter.Seq[blocks.CellProofBundle]) error {
|
||||
// ignore the failed segment list since we are just passing in one segment.
|
||||
_, err := BatchVerifyDataColumnsCellsKZGProofs(sizeHint, []iter.Seq[blocks.CellProofBundle]{cellProofsIter})
|
||||
return err
|
||||
}
|
||||
|
||||
// BatchVerifyDataColumnsCellsKZGProofs verifies if the KZG proofs are correct.
|
||||
// Note: We are slightly deviating from the specification here:
|
||||
// The specification verifies the KZG proofs for each sidecar separately,
|
||||
// while we are verifying all the KZG proofs from multiple sidecars in a batch.
|
||||
// This is done to improve performance since the internal KZG library is way more
|
||||
// efficient when verifying in batch.
|
||||
// efficient when verifying in batch. If the batch fails, the failed segments
|
||||
// are returned to the caller so that they may try segment by segment without
|
||||
// batching. On success the failed segment list is empty.
|
||||
//
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#verify_data_column_sidecar_kzg_proofs
|
||||
func VerifyDataColumnsSidecarKZGProofs(sidecars []blocks.RODataColumn) error {
|
||||
// Compute the total count.
|
||||
count := 0
|
||||
for _, sidecar := range sidecars {
|
||||
count += len(sidecar.Column)
|
||||
}
|
||||
func BatchVerifyDataColumnsCellsKZGProofs(sizeHint int, cellProofsIters []iter.Seq[blocks.CellProofBundle]) ( /* failed segment list */ []CellProofBundleSegment, error) {
|
||||
commitments := make([]kzg.Bytes48, 0, sizeHint)
|
||||
indices := make([]uint64, 0, sizeHint)
|
||||
cells := make([]kzg.Cell, 0, sizeHint)
|
||||
proofs := make([]kzg.Bytes48, 0, sizeHint)
|
||||
|
||||
commitments := make([]kzg.Bytes48, 0, count)
|
||||
indices := make([]uint64, 0, count)
|
||||
cells := make([]kzg.Cell, 0, count)
|
||||
proofs := make([]kzg.Bytes48, 0, count)
|
||||
|
||||
for _, sidecar := range sidecars {
|
||||
for i := range sidecar.Column {
|
||||
var anySegmentEmpty bool
|
||||
var segments []CellProofBundleSegment
|
||||
for _, cellProofsIter := range cellProofsIters {
|
||||
startIdx := len(cells)
|
||||
for bundle := range cellProofsIter {
|
||||
var (
|
||||
commitment kzg.Bytes48
|
||||
cell kzg.Cell
|
||||
proof kzg.Bytes48
|
||||
)
|
||||
|
||||
commitmentBytes := sidecar.KzgCommitments[i]
|
||||
cellBytes := sidecar.Column[i]
|
||||
proofBytes := sidecar.KzgProofs[i]
|
||||
|
||||
if len(commitmentBytes) != len(commitment) ||
|
||||
len(cellBytes) != len(cell) ||
|
||||
len(proofBytes) != len(proof) {
|
||||
return ErrMismatchLength
|
||||
if len(bundle.Commitment) != len(commitment) ||
|
||||
len(bundle.Cell) != len(cell) ||
|
||||
len(bundle.Proof) != len(proof) {
|
||||
return nil, ErrMismatchLength
|
||||
}
|
||||
|
||||
copy(commitment[:], commitmentBytes)
|
||||
copy(cell[:], cellBytes)
|
||||
copy(proof[:], proofBytes)
|
||||
copy(commitment[:], bundle.Commitment)
|
||||
copy(cell[:], bundle.Cell)
|
||||
copy(proof[:], bundle.Proof)
|
||||
|
||||
commitments = append(commitments, commitment)
|
||||
indices = append(indices, sidecar.Index)
|
||||
indices = append(indices, bundle.ColumnIndex)
|
||||
cells = append(cells, cell)
|
||||
proofs = append(proofs, proof)
|
||||
}
|
||||
if len(cells[startIdx:]) == 0 {
|
||||
anySegmentEmpty = true
|
||||
}
|
||||
segments = append(segments, CellProofBundleSegment{
|
||||
indices: indices[startIdx:],
|
||||
commitments: commitments[startIdx:],
|
||||
cells: cells[startIdx:],
|
||||
proofs: proofs[startIdx:],
|
||||
})
|
||||
}
|
||||
|
||||
if anySegmentEmpty {
|
||||
return segments, ErrEmptySegment
|
||||
}
|
||||
|
||||
// Batch verify that the cells match the corresponding commitments and proofs.
|
||||
verified, err := kzg.VerifyCellKZGProofBatch(commitments, indices, cells, proofs)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "verify cell KZG proof batch")
|
||||
return segments, stderrors.Join(err, ErrInvalidKZGProof)
|
||||
}
|
||||
|
||||
if !verified {
|
||||
return ErrInvalidKZGProof
|
||||
return segments, ErrInvalidKZGProof
|
||||
}
|
||||
|
||||
return nil
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
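Under the semantics described in the comment above, a caller that batches proofs from several sidecars might fall back to per-segment verification when the batch as a whole fails, so a single bad column does not condemn the rest. A hedged sketch, using only names that appear in this diff (one iterator per sidecar so each sidecar becomes its own segment; error handling simplified):

package example

import (
	"iter"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
)

// verifySidecarsWithFallback batch-verifies all sidecars first and, if the
// batch fails, re-verifies each returned segment individually. It returns the
// indices of the sidecars whose segments still fail on their own.
func verifySidecarsWithFallback(sidecars []blocks.RODataColumn) ([]int, error) {
	// One iterator (and therefore one segment) per sidecar, so failures can be isolated.
	iters := make([]iter.Seq[blocks.CellProofBundle], 0, len(sidecars))
	for _, sc := range sidecars {
		iters = append(iters, blocks.RODataColumnsToCellProofBundles([]blocks.RODataColumn{sc}))
	}
	segments, err := peerdas.BatchVerifyDataColumnsCellsKZGProofs(0, iters)
	if err == nil {
		return nil, nil // the whole batch verified
	}
	var bad []int
	for i, segment := range segments {
		if segErr := segment.Verify(); segErr != nil {
			bad = append(bad, i)
		}
	}
	return bad, err
}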
// VerifyDataColumnSidecarInclusionProof verifies if the given KZG commitments included in the given beacon block.
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#verify_data_column_sidecar_inclusion_proof
|
||||
func VerifyDataColumnSidecarInclusionProof(sidecar blocks.RODataColumn) error {
|
||||
if sidecar.SignedBlockHeader == nil || sidecar.SignedBlockHeader.Header == nil {
|
||||
return ErrNilBlockHeader
|
||||
}
|
||||
|
||||
root := sidecar.SignedBlockHeader.Header.BodyRoot
|
||||
if len(root) != fieldparams.RootLength {
|
||||
// verifyKzgCommitmentsInclusionProof is the shared implementation for inclusion proof verification.
|
||||
func verifyKzgCommitmentsInclusionProof(bodyRoot []byte, kzgCommitments [][]byte, inclusionProof [][]byte) error {
|
||||
if len(bodyRoot) != fieldparams.RootLength {
|
||||
return ErrBadRootLength
|
||||
}
|
||||
|
||||
leaves := blocks.LeavesFromCommitments(sidecar.KzgCommitments)
|
||||
leaves := blocks.LeavesFromCommitments(kzgCommitments)
|
||||
|
||||
sparse, err := trie.GenerateTrieFromItems(leaves, fieldparams.LogMaxBlobCommitments)
|
||||
if err != nil {
|
||||
@@ -142,7 +180,7 @@ func VerifyDataColumnSidecarInclusionProof(sidecar blocks.RODataColumn) error {
|
||||
return errors.Wrap(err, "hash tree root")
|
||||
}
|
||||
|
||||
verified := trie.VerifyMerkleProof(root, hashTreeRoot[:], kzgPosition, sidecar.KzgCommitmentsInclusionProof)
|
||||
verified := trie.VerifyMerkleProof(bodyRoot, hashTreeRoot[:], kzgPosition, inclusionProof)
|
||||
if !verified {
|
||||
return ErrInvalidInclusionProof
|
||||
}
|
||||
@@ -150,6 +188,31 @@ func VerifyDataColumnSidecarInclusionProof(sidecar blocks.RODataColumn) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// VerifyDataColumnSidecarInclusionProof verifies that the given KZG commitments are included in the given beacon block.
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#verify_data_column_sidecar_inclusion_proof
|
||||
func VerifyDataColumnSidecarInclusionProof(sidecar blocks.RODataColumn) error {
|
||||
if sidecar.SignedBlockHeader == nil || sidecar.SignedBlockHeader.Header == nil {
|
||||
return ErrNilBlockHeader
|
||||
}
|
||||
return verifyKzgCommitmentsInclusionProof(
|
||||
sidecar.SignedBlockHeader.Header.BodyRoot,
|
||||
sidecar.KzgCommitments,
|
||||
sidecar.KzgCommitmentsInclusionProof,
|
||||
)
|
||||
}
|
||||
|
||||
// VerifyPartialDataColumnHeaderInclusionProof verifies if the KZG commitments are included in the beacon block.
|
||||
func VerifyPartialDataColumnHeaderInclusionProof(header *ethpb.PartialDataColumnHeader) error {
|
||||
if header.SignedBlockHeader == nil || header.SignedBlockHeader.Header == nil {
|
||||
return ErrNilBlockHeader
|
||||
}
|
||||
return verifyKzgCommitmentsInclusionProof(
|
||||
header.SignedBlockHeader.Header.BodyRoot,
|
||||
header.KzgCommitments,
|
||||
header.KzgCommitmentsInclusionProof,
|
||||
)
|
||||
}
|
||||
|
||||
// ComputeSubnetForDataColumnSidecar computes the subnet for a data column sidecar.
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#compute_subnet_for_data_column_sidecar
|
||||
func ComputeSubnetForDataColumnSidecar(columnIndex uint64) uint64 {
|
||||
|
||||
@@ -3,6 +3,7 @@ package peerdas_test
|
||||
import (
|
||||
"crypto/rand"
|
||||
"fmt"
|
||||
"iter"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
|
||||
@@ -72,7 +73,7 @@ func TestVerifyDataColumnSidecarKZGProofs(t *testing.T) {
|
||||
sidecars := generateRandomSidecars(t, seed, blobCount)
|
||||
sidecars[0].Column[0] = sidecars[0].Column[0][:len(sidecars[0].Column[0])-1] // Remove one byte to create size mismatch
|
||||
|
||||
err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
|
||||
err := peerdas.VerifyDataColumnsCellsKZGProofs(0, blocks.RODataColumnsToCellProofBundles(sidecars))
|
||||
require.ErrorIs(t, err, peerdas.ErrMismatchLength)
|
||||
})
|
||||
|
||||
@@ -80,14 +81,15 @@ func TestVerifyDataColumnSidecarKZGProofs(t *testing.T) {
|
||||
sidecars := generateRandomSidecars(t, seed, blobCount)
|
||||
sidecars[0].Column[0][0]++ // It is OK to overflow
|
||||
|
||||
err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
|
||||
err := peerdas.VerifyDataColumnsCellsKZGProofs(0, blocks.RODataColumnsToCellProofBundles(sidecars))
|
||||
require.ErrorIs(t, err, peerdas.ErrInvalidKZGProof)
|
||||
})
|
||||
|
||||
t.Run("nominal", func(t *testing.T) {
|
||||
sidecars := generateRandomSidecars(t, seed, blobCount)
|
||||
err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
|
||||
failedSegments, err := peerdas.BatchVerifyDataColumnsCellsKZGProofs(blobCount, []iter.Seq[blocks.CellProofBundle]{blocks.RODataColumnsToCellProofBundles(sidecars)})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(failedSegments))
|
||||
})
|
||||
}
|
||||
|
||||
@@ -273,7 +275,7 @@ func BenchmarkVerifyDataColumnSidecarKZGProofs_SameCommitments_NoBatch(b *testin
|
||||
for _, sidecar := range sidecars {
|
||||
sidecars := []blocks.RODataColumn{sidecar}
|
||||
b.StartTimer()
|
||||
err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
|
||||
err := peerdas.VerifyDataColumnsCellsKZGProofs(0, blocks.RODataColumnsToCellProofBundles(sidecars))
|
||||
b.StopTimer()
|
||||
require.NoError(b, err)
|
||||
}
|
||||
@@ -308,7 +310,7 @@ func BenchmarkVerifyDataColumnSidecarKZGProofs_DiffCommitments_Batch(b *testing.
|
||||
}
|
||||
|
||||
b.StartTimer()
|
||||
err := peerdas.VerifyDataColumnsSidecarKZGProofs(allSidecars)
|
||||
err := peerdas.VerifyDataColumnsCellsKZGProofs(0, blocks.RODataColumnsToCellProofBundles(allSidecars))
|
||||
b.StopTimer()
|
||||
require.NoError(b, err)
|
||||
}
|
||||
@@ -341,7 +343,7 @@ func BenchmarkVerifyDataColumnSidecarKZGProofs_DiffCommitments_Batch4(b *testing
|
||||
|
||||
for _, sidecars := range allSidecars {
|
||||
b.StartTimer()
|
||||
err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
|
||||
err := peerdas.VerifyDataColumnsCellsKZGProofs(len(allSidecars), blocks.RODataColumnsToCellProofBundles(sidecars))
|
||||
b.StopTimer()
|
||||
require.NoError(b, err)
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/go-bitfield"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
@@ -339,7 +340,8 @@ func ComputeCellsAndProofsFromFlat(blobs [][]byte, cellProofs [][]byte) ([][]kzg
|
||||
}
|
||||
|
||||
// ComputeCellsAndProofsFromStructured computes the cells and proofs from blobs and cell proofs.
|
||||
func ComputeCellsAndProofsFromStructured(blobsAndProofs []*pb.BlobAndProofV2) ([][]kzg.Cell, [][]kzg.Proof, error) {
|
||||
// commitmentCount is required to return the correct sized bitlist even if we see a nil slice of blobsAndProofs.
|
||||
func ComputeCellsAndProofsFromStructured(commitmentCount uint64, blobsAndProofs []*pb.BlobAndProofV2) (bitfield.Bitlist /* parts included */, [][]kzg.Cell, [][]kzg.Proof, error) {
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
cellsAndProofsFromStructuredComputationTime.Observe(float64(time.Since(start).Milliseconds()))
|
||||
@@ -347,14 +349,24 @@ func ComputeCellsAndProofsFromStructured(blobsAndProofs []*pb.BlobAndProofV2) ([
|
||||
|
||||
var wg errgroup.Group
|
||||
|
||||
cellsPerBlob := make([][]kzg.Cell, len(blobsAndProofs))
|
||||
proofsPerBlob := make([][]kzg.Proof, len(blobsAndProofs))
|
||||
var blobsPresent int
|
||||
for _, blobAndProof := range blobsAndProofs {
|
||||
if blobAndProof != nil {
|
||||
blobsPresent++
|
||||
}
|
||||
}
|
||||
cellsPerBlob := make([][]kzg.Cell, blobsPresent)
|
||||
proofsPerBlob := make([][]kzg.Proof, blobsPresent)
|
||||
included := bitfield.NewBitlist(commitmentCount)
|
||||
|
||||
var j int
|
||||
for i, blobAndProof := range blobsAndProofs {
|
||||
if blobAndProof == nil {
|
||||
return nil, nil, ErrNilBlobAndProof
|
||||
continue
|
||||
}
|
||||
included.SetBitAt(uint64(i), true)
|
||||
|
||||
compactIndex := j
|
||||
wg.Go(func() error {
|
||||
var kzgBlob kzg.Blob
|
||||
if copy(kzgBlob[:], blobAndProof.Blob) != len(kzgBlob) {
|
||||
@@ -381,17 +393,18 @@ func ComputeCellsAndProofsFromStructured(blobsAndProofs []*pb.BlobAndProofV2) ([
|
||||
kzgProofs = append(kzgProofs, kzgProof)
|
||||
}
|
||||
|
||||
cellsPerBlob[i] = cells
|
||||
proofsPerBlob[i] = kzgProofs
|
||||
cellsPerBlob[compactIndex] = cells
|
||||
proofsPerBlob[compactIndex] = kzgProofs
|
||||
return nil
|
||||
})
|
||||
j++
|
||||
}
|
||||
|
||||
if err := wg.Wait(); err != nil {
|
||||
return nil, nil, err
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
|
||||
return cellsPerBlob, proofsPerBlob, nil
|
||||
return included, cellsPerBlob, proofsPerBlob, nil
|
||||
}
|
||||
|
||||
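The reworked ComputeCellsAndProofsFromStructured above returns a bitlist over commitment indices plus compact slices that hold only the blobs the execution client actually returned. A hedged sketch of how a caller can walk the two together; it uses the go-bitfield API as imported in this diff, and the data is fabricated for illustration.

package main

import (
	"fmt"

	"github.com/OffchainLabs/go-bitfield"
)

func main() {
	const commitmentCount = 4
	// Pretend the EL returned blobs 1 and 3 only.
	included := bitfield.NewBitlist(commitmentCount)
	included.SetBitAt(1, true)
	included.SetBitAt(3, true)
	compact := []string{"cells-for-blob-1", "cells-for-blob-3"} // stands in for the compact [][]kzg.Cell

	j := 0
	for i := uint64(0); i < commitmentCount; i++ {
		if !included.BitAt(i) {
			fmt.Printf("commitment %d: missing\n", i)
			continue
		}
		fmt.Printf("commitment %d: %s\n", i, compact[j])
		j++
	}
}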
// ReconstructBlobs reconstructs blobs from data column sidecars without computing KZG proofs or creating sidecars.
|
||||
|
||||
@@ -479,8 +479,9 @@ func TestComputeCellsAndProofsFromFlat(t *testing.T) {
|
||||
|
||||
func TestComputeCellsAndProofsFromStructured(t *testing.T) {
|
||||
t.Run("nil blob and proof", func(t *testing.T) {
|
||||
_, _, err := peerdas.ComputeCellsAndProofsFromStructured([]*pb.BlobAndProofV2{nil})
|
||||
require.ErrorIs(t, err, peerdas.ErrNilBlobAndProof)
|
||||
included, _, _, err := peerdas.ComputeCellsAndProofsFromStructured(0, []*pb.BlobAndProofV2{nil})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(0), included.Count())
|
||||
})
|
||||
|
||||
t.Run("nominal", func(t *testing.T) {
|
||||
@@ -533,7 +534,8 @@ func TestComputeCellsAndProofsFromStructured(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
// Test ComputeCellsAndProofs
|
||||
actualCellsPerBlob, actualProofsPerBlob, err := peerdas.ComputeCellsAndProofsFromStructured(blobsAndProofs)
|
||||
included, actualCellsPerBlob, actualProofsPerBlob, err := peerdas.ComputeCellsAndProofsFromStructured(uint64(len(blobsAndProofs)), blobsAndProofs)
|
||||
require.Equal(t, included.Count(), uint64(len(actualCellsPerBlob)))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, blobCount, len(actualCellsPerBlob))
|
||||
|
||||
|
||||
@@ -3,6 +3,7 @@ package peerdas
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/go-bitfield"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
|
||||
beaconState "github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
@@ -23,13 +24,11 @@ var (
|
||||
var (
|
||||
_ ConstructionPopulator = (*BlockReconstructionSource)(nil)
|
||||
_ ConstructionPopulator = (*SidecarReconstructionSource)(nil)
|
||||
_ ConstructionPopulator = (*EnvelopeReconstructionSource)(nil)
|
||||
)
|
||||
|
||||
const (
|
||||
BlockType = "BeaconBlock"
|
||||
SidecarType = "DataColumnSidecar"
|
||||
EnvelopeType = "ExecutionPayloadEnvelope"
|
||||
BlockType = "BeaconBlock"
|
||||
SidecarType = "DataColumnSidecar"
|
||||
)
|
||||
|
||||
type (
|
||||
@@ -51,20 +50,11 @@ type (
|
||||
blocks.ROBlock
|
||||
}
|
||||
|
||||
// SidecarReconstructionSource is a ConstructionPopulator that uses a data column sidecar as the source of data
|
||||
// DataColumnSidecar is a ConstructionPopulator that uses a data column sidecar as the source of data
|
||||
SidecarReconstructionSource struct {
|
||||
blocks.VerifiedRODataColumn
|
||||
}
|
||||
|
||||
// EnvelopeReconstructionSource is a ConstructionPopulator for GLOAS+ where
|
||||
// blob KZG commitments are in the execution payload envelope rather than the
|
||||
// block body. It uses the block for the header and root, but overrides
|
||||
// commitments with those from the envelope.
|
||||
EnvelopeReconstructionSource struct {
|
||||
blocks.ROBlock
|
||||
commitments [][]byte
|
||||
}
|
||||
|
||||
blockInfo struct {
|
||||
signedBlockHeader *ethpb.SignedBeaconBlockHeader
|
||||
kzgCommitments [][]byte
|
||||
@@ -82,13 +72,6 @@ func PopulateFromSidecar(sidecar blocks.VerifiedRODataColumn) *SidecarReconstruc
|
||||
return &SidecarReconstructionSource{VerifiedRODataColumn: sidecar}
|
||||
}
|
||||
|
||||
// PopulateFromEnvelope creates an EnvelopeReconstructionSource from a block and
|
||||
// the KZG commitments from the execution payload envelope. This is used for GLOAS+
|
||||
// where commitments are in the envelope rather than the block body.
|
||||
func PopulateFromEnvelope(block blocks.ROBlock, commitments [][]byte) *EnvelopeReconstructionSource {
|
||||
return &EnvelopeReconstructionSource{ROBlock: block, commitments: commitments}
|
||||
}
|
||||
|
||||
// ValidatorsCustodyRequirement returns the number of custody groups regarding the validator indices attached to the beacon node.
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#validator-custody
|
||||
func ValidatorsCustodyRequirement(state beaconState.ReadOnlyBeaconState, validatorsIndex map[primitives.ValidatorIndex]bool) (uint64, error) {
|
||||
@@ -161,6 +144,40 @@ func DataColumnSidecars(cellsPerBlob [][]kzg.Cell, proofsPerBlob [][]kzg.Proof,
|
||||
return roSidecars, nil
|
||||
}
|
||||
|
||||
func PartialColumns(included bitfield.Bitlist, cellsPerBlob [][]kzg.Cell, proofsPerBlob [][]kzg.Proof, src ConstructionPopulator) ([]blocks.PartialDataColumn, error) {
|
||||
start := time.Now()
|
||||
const numberOfColumns = uint64(fieldparams.NumberOfColumns)
|
||||
cells, proofs, err := rotateRowsToCols(cellsPerBlob, proofsPerBlob, numberOfColumns)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "rotate cells and proofs")
|
||||
}
|
||||
info, err := src.extract()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "extract block info")
|
||||
}
|
||||
|
||||
dataColumns := make([]blocks.PartialDataColumn, 0, numberOfColumns)
|
||||
for idx := range numberOfColumns {
|
||||
dc, err := blocks.NewPartialDataColumn(info.signedBlockHeader, idx, info.kzgCommitments, info.kzgInclusionProof)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "new ro data column")
|
||||
}
|
||||
|
||||
for i := range len(info.kzgCommitments) {
|
||||
if !included.BitAt(uint64(i)) {
|
||||
continue
|
||||
}
|
||||
dc.ExtendFromVerfifiedCell(uint64(i), cells[idx][0], proofs[idx][0])
|
||||
cells[idx] = cells[idx][1:]
|
||||
proofs[idx] = proofs[idx][1:]
|
||||
}
|
||||
dataColumns = append(dataColumns, dc)
|
||||
}
|
||||
|
||||
dataColumnComputationTime.Observe(float64(time.Since(start).Milliseconds()))
|
||||
return dataColumns, nil
|
||||
}
|
||||
|
||||
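PartialColumns above relies on rotateRowsToCols to turn per-blob rows of cells into per-column slices before extending each partial column. A minimal sketch of that transposition under the assumption of a fixed column count; the real helper also carries proofs and performs length checks.

package main

import "fmt"

// rotate turns rows[blob][column] into cols[column][blob].
func rotate(rows [][]string, numberOfColumns int) [][]string {
	cols := make([][]string, numberOfColumns)
	for _, row := range rows {
		for c := 0; c < numberOfColumns && c < len(row); c++ {
			cols[c] = append(cols[c], row[c])
		}
	}
	return cols
}

func main() {
	rows := [][]string{
		{"b0c0", "b0c1", "b0c2"},
		{"b1c0", "b1c1", "b1c2"},
	}
	fmt.Println(rotate(rows, 3)) // [[b0c0 b1c0] [b0c1 b1c1] [b0c2 b1c2]]
}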
// Slot returns the slot of the source
|
||||
func (s *BlockReconstructionSource) Slot() primitives.Slot {
|
||||
return s.Block().Slot()
|
||||
@@ -272,46 +289,3 @@ func (s *SidecarReconstructionSource) extract() (*blockInfo, error) {
|
||||
|
||||
return info, nil
|
||||
}
|
||||
|
||||
// Slot returns the slot of the source
|
||||
func (s *EnvelopeReconstructionSource) Slot() primitives.Slot {
|
||||
return s.Block().Slot()
|
||||
}
|
||||
|
||||
// ProposerIndex returns the proposer index of the source
|
||||
func (s *EnvelopeReconstructionSource) ProposerIndex() primitives.ValidatorIndex {
|
||||
return s.Block().ProposerIndex()
|
||||
}
|
||||
|
||||
// Commitments returns the blob KZG commitments from the envelope.
|
||||
func (s *EnvelopeReconstructionSource) Commitments() ([][]byte, error) {
|
||||
return s.commitments, nil
|
||||
}
|
||||
|
||||
// Type returns the type of the source
|
||||
func (s *EnvelopeReconstructionSource) Type() string {
|
||||
return EnvelopeType
|
||||
}
|
||||
|
||||
// extract extracts the block information from the source, using commitments
|
||||
// from the envelope rather than the block body.
|
||||
func (s *EnvelopeReconstructionSource) extract() (*blockInfo, error) {
|
||||
header, err := s.Header()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "header")
|
||||
}
|
||||
|
||||
// TODO: Implement proper merkle proof for envelope commitments.
|
||||
// In GLOAS, commitments are in the envelope not the block body,
|
||||
// so the inclusion proof structure differs from pre-GLOAS forks.
|
||||
inclusionProof := make([][]byte, 4)
|
||||
for i := range inclusionProof {
|
||||
inclusionProof[i] = make([]byte, 32)
|
||||
}
|
||||
|
||||
return &blockInfo{
|
||||
signedBlockHeader: header,
|
||||
kzgCommitments: s.commitments,
|
||||
kzgInclusionProof: inclusionProof,
|
||||
}, nil
|
||||
}
|
||||
|
||||
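A hedged usage sketch of the envelope-backed source added above: for GLOAS blocks the blob KZG commitments come from the execution payload envelope, and the resulting populator feeds the same reconstruction path as the block- and sidecar-backed sources. Only PopulateFromEnvelope and ConstructionPopulator are taken from this diff; how the caller obtains the envelope commitments is left open.

package example

import (
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
)

// envelopeSource wraps a block and the envelope's blob_kzg_commitments into a
// ConstructionPopulator, which is then handed to ConstructDataColumnSidecars
// on the execution service.
func envelopeSource(blk blocks.ROBlock, envelopeCommitments [][]byte) peerdas.ConstructionPopulator {
	return peerdas.PopulateFromEnvelope(blk, envelopeCommitments)
}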
@@ -143,11 +143,10 @@ func ProcessSlot(ctx context.Context, state state.BeaconState) (state.BeaconStat
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// <spec fn="process_slot" fork="gloas" lines="11-13" hash="62b28839">
|
||||
// Spec v1.6.1 (pseudocode):
|
||||
// # [New in Gloas:EIP7732]
|
||||
// # Unset the next payload availability
|
||||
// state.execution_payload_availability[(state.slot + 1) % SLOTS_PER_HISTORICAL_ROOT] = 0b0
|
||||
// </spec>
|
||||
if state.Version() >= version.Gloas {
|
||||
index := uint64((state.Slot() + 1) % params.BeaconConfig().SlotsPerHistoricalRoot)
|
||||
if err := state.UpdateExecutionPayloadAvailabilityAtIndex(index, 0x0); err != nil {
|
||||
|
||||
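The Gloas branch above clears the availability bit for the upcoming slot, wrapping modulo the historical-root window. A tiny sketch of just that index arithmetic, with the window size assumed to be the mainnet value:

package main

import "fmt"

func main() {
	const slotsPerHistoricalRoot = uint64(8192) // mainnet value, assumed here
	for _, slot := range []uint64{0, 100, 8191} {
		index := (slot + 1) % slotsPerHistoricalRoot
		fmt.Printf("slot %d -> clear availability index %d\n", slot, index)
	}
	// Slot 8191 wraps around: the bit at index 0 is cleared for the next slot.
}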
@@ -73,6 +73,7 @@ go_library(
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@io_k8s_client_go//tools/cache:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/go-bitfield"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/execution/types"
|
||||
@@ -58,6 +59,7 @@ var (
|
||||
fuluEngineEndpoints = []string{
|
||||
GetPayloadMethodV5,
|
||||
GetBlobsV2,
|
||||
GetBlobsV3,
|
||||
}
|
||||
)
|
||||
|
||||
@@ -99,6 +101,8 @@ const (
|
||||
GetBlobsV1 = "engine_getBlobsV1"
|
||||
// GetBlobsV2 request string for JSON-RPC.
|
||||
GetBlobsV2 = "engine_getBlobsV2"
|
||||
// GetBlobsV3 request string for JSON-RPC.
|
||||
GetBlobsV3 = "engine_getBlobsV3"
|
||||
// Defines the seconds before timing out engine endpoints with non-block execution semantics.
|
||||
defaultEngineTimeout = time.Second
|
||||
)
|
||||
@@ -122,7 +126,7 @@ type Reconstructor interface {
|
||||
ctx context.Context, blindedBlocks []interfaces.ReadOnlySignedBeaconBlock,
|
||||
) ([]interfaces.SignedBeaconBlock, error)
|
||||
ReconstructBlobSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [fieldparams.RootLength]byte, hi func(uint64) bool) ([]blocks.VerifiedROBlob, error)
|
||||
ConstructDataColumnSidecars(ctx context.Context, populator peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, error)
|
||||
ConstructDataColumnSidecars(ctx context.Context, populator peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, []blocks.PartialDataColumn, error)
|
||||
}
|
||||
|
||||
// EngineCaller defines a client that can interact with an Ethereum
|
||||
@@ -553,6 +557,22 @@ func (s *Service) GetBlobsV2(ctx context.Context, versionedHashes []common.Hash)
|
||||
return result, handleRPCError(err)
|
||||
}
|
||||
|
||||
func (s *Service) GetBlobsV3(ctx context.Context, versionedHashes []common.Hash) ([]*pb.BlobAndProofV2, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetBlobsV3")
|
||||
defer span.End()
|
||||
start := time.Now()
|
||||
|
||||
if !s.capabilityCache.has(GetBlobsV3) {
|
||||
return nil, errors.Errorf("%s is not supported", GetBlobsV3)
|
||||
}
|
||||
|
||||
getBlobsV3RequestsTotal.Inc()
|
||||
result := make([]*pb.BlobAndProofV2, len(versionedHashes))
|
||||
err := s.rpcClient.CallContext(ctx, &result, GetBlobsV3, versionedHashes)
|
||||
getBlobsV3Latency.Observe(time.Since(start).Seconds())
|
||||
return result, handleRPCError(err)
|
||||
}
|
||||
|
||||
// ReconstructFullBlock takes in a blinded beacon block and reconstructs
|
||||
// a beacon block with a full execution payload via the engine API.
|
||||
func (s *Service) ReconstructFullBlock(
|
||||
@@ -663,40 +683,47 @@ func (s *Service) ReconstructBlobSidecars(ctx context.Context, block interfaces.
|
||||
return verifiedBlobs, nil
|
||||
}
|
||||
|
||||
func (s *Service) ConstructDataColumnSidecars(ctx context.Context, populator peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, error) {
|
||||
func (s *Service) ConstructDataColumnSidecars(ctx context.Context, populator peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, []blocks.PartialDataColumn, error) {
|
||||
root := populator.Root()
|
||||
|
||||
// Fetch cells and proofs from the execution client using the KZG commitments from the sidecar.
|
||||
commitments, err := populator.Commitments()
|
||||
if err != nil {
|
||||
return nil, wrapWithBlockRoot(err, root, "commitments")
|
||||
return nil, nil, wrapWithBlockRoot(err, root, "commitments")
|
||||
}
|
||||
|
||||
cellsPerBlob, proofsPerBlob, err := s.fetchCellsAndProofsFromExecution(ctx, commitments)
|
||||
included, cellsPerBlob, proofsPerBlob, err := s.fetchCellsAndProofsFromExecution(ctx, commitments)
|
||||
log.Info("Received cells and proofs from execution client", "included", included, "cells count", len(cellsPerBlob), "err", err)
|
||||
if err != nil {
|
||||
return nil, wrapWithBlockRoot(err, root, "fetch cells and proofs from execution client")
|
||||
return nil, nil, wrapWithBlockRoot(err, root, "fetch cells and proofs from execution client")
|
||||
}
|
||||
|
||||
// Return early if nothing is returned from the EL.
|
||||
if len(cellsPerBlob) == 0 {
|
||||
return nil, nil
|
||||
partialColumns, err := peerdas.PartialColumns(included, cellsPerBlob, proofsPerBlob, populator)
|
||||
haveAllBlobs := included.Count() == uint64(len(commitments))
|
||||
log.Info("Constructed partial columns", "haveAllBlobs", haveAllBlobs)
|
||||
|
||||
if haveAllBlobs {
|
||||
// Construct data column sidecars from the signed block and cells and proofs.
|
||||
roSidecars, err := peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, populator)
|
||||
if err != nil {
|
||||
return nil, nil, wrapWithBlockRoot(err, populator.Root(), "data column sidecars from column sidecar")
|
||||
}
|
||||
|
||||
// Upgrade the sidecars to verified sidecars.
|
||||
// We trust the execution layer we are connected to, so we can upgrade the sidecar into a verified one.
|
||||
verifiedROSidecars := upgradeSidecarsToVerifiedSidecars(roSidecars)
|
||||
|
||||
return verifiedROSidecars, partialColumns, nil
|
||||
}
|
||||
|
||||
// Construct data column sidecars from the signed block and cells and proofs.
|
||||
roSidecars, err := peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, populator)
|
||||
if err != nil {
|
||||
return nil, wrapWithBlockRoot(err, populator.Root(), "data column sidcars from column sidecar")
|
||||
return nil, nil, wrapWithBlockRoot(err, populator.Root(), "partial columns from column sidecar")
|
||||
}
|
||||
|
||||
// Upgrade the sidecars to verified sidecars.
|
||||
// We trust the execution layer we are connected to, so we can upgrade the sidecar into a verified one.
|
||||
verifiedROSidecars := upgradeSidecarsToVerifiedSidecars(roSidecars)
|
||||
|
||||
return verifiedROSidecars, nil
|
||||
return nil, partialColumns, nil
|
||||
}
|
||||
|
||||
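Given the new return shape of ConstructDataColumnSidecars above (full verified sidecars only when every blob was available, partial columns otherwise), a caller might simply forward whichever it received to the broadcaster. This is a sketch only, wired to the BroadcastDataColumnSidecars signature changed later in this diff; the interface below is a stand-in for the concrete p2p service.

package example

import (
	"context"

	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
)

// columnBroadcaster mirrors the BroadcastDataColumnSidecars signature changed
// elsewhere in this diff; the concrete p2p service is assumed to satisfy it.
type columnBroadcaster interface {
	BroadcastDataColumnSidecars(context.Context, []blocks.VerifiedRODataColumn, []blocks.PartialDataColumn) error
}

// broadcastConstructedColumns forwards whatever ConstructDataColumnSidecars
// produced: full verified sidecars when every blob was available, partial
// columns otherwise, or nothing at all if the EL returned nothing.
func broadcastConstructedColumns(ctx context.Context, b columnBroadcaster, verified []blocks.VerifiedRODataColumn, partial []blocks.PartialDataColumn) error {
	if len(verified) == 0 && len(partial) == 0 {
		return nil
	}
	return b.BroadcastDataColumnSidecars(ctx, verified, partial)
}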
// fetchCellsAndProofsFromExecution fetches cells and proofs from the execution client (using engine_getBlobsV2 execution API method)
|
||||
func (s *Service) fetchCellsAndProofsFromExecution(ctx context.Context, kzgCommitments [][]byte) ([][]kzg.Cell, [][]kzg.Proof, error) {
|
||||
func (s *Service) fetchCellsAndProofsFromExecution(ctx context.Context, kzgCommitments [][]byte) (bitfield.Bitlist /* included parts */, [][]kzg.Cell, [][]kzg.Proof, error) {
|
||||
// Collect KZG hashes for all blobs.
|
||||
versionedHashes := make([]common.Hash, 0, len(kzgCommitments))
|
||||
for _, commitment := range kzgCommitments {
|
||||
@@ -704,24 +731,34 @@ func (s *Service) fetchCellsAndProofsFromExecution(ctx context.Context, kzgCommi
|
||||
versionedHashes = append(versionedHashes, versionedHash)
|
||||
}
|
||||
|
||||
var blobAndProofs []*pb.BlobAndProofV2
|
||||
|
||||
// Fetch all blobsAndCellsProofs from the execution client.
|
||||
blobAndProofV2s, err := s.GetBlobsV2(ctx, versionedHashes)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "get blobs V2")
|
||||
var err error
|
||||
useV3 := s.capabilityCache.has(GetBlobsV3)
|
||||
if useV3 {
|
||||
// v3 can return a partial response. V2 is all or nothing
|
||||
blobAndProofs, err = s.GetBlobsV3(ctx, versionedHashes)
|
||||
} else {
|
||||
blobAndProofs, err = s.GetBlobsV2(ctx, versionedHashes)
|
||||
}
|
||||
|
||||
// Return early if nothing is returned from the EL.
|
||||
if len(blobAndProofV2s) == 0 {
|
||||
return nil, nil, nil
|
||||
if err != nil {
|
||||
return nil, nil, nil, errors.Wrapf(err, "get blobs V2/3")
|
||||
}
|
||||
|
||||
// Compute cells and proofs from the blobs and cell proofs.
|
||||
cellsPerBlob, proofsPerBlob, err := peerdas.ComputeCellsAndProofsFromStructured(blobAndProofV2s)
|
||||
included, cellsPerBlob, proofsPerBlob, err := peerdas.ComputeCellsAndProofsFromStructured(uint64(len(kzgCommitments)), blobAndProofs)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "compute cells and proofs")
|
||||
return nil, nil, nil, errors.Wrap(err, "compute cells and proofs")
|
||||
}
|
||||
if included.Count() == uint64(len(kzgCommitments)) {
|
||||
getBlobsV3CompleteResponsesTotal.Inc()
|
||||
} else if included.Count() > 0 {
|
||||
getBlobsV3PartialResponsesTotal.Inc()
|
||||
}
|
||||
|
||||
return cellsPerBlob, proofsPerBlob, nil
|
||||
return included, cellsPerBlob, proofsPerBlob, nil
|
||||
}
|
||||
|
||||
// upgradeSidecarsToVerifiedSidecars upgrades a list of data column sidecars into verified data column sidecars.
|
||||
|
||||
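The fetch path above prefers engine_getBlobsV3 (which may return a partial list with nil gaps) and falls back to engine_getBlobsV2 (all or nothing) when the EL does not advertise V3. A hedged sketch of counting what came back, with the engine response type simplified to a slice of pointers:

package main

import "fmt"

type blobAndProof struct{ blob []byte } // simplified stand-in for pb.BlobAndProofV2

// countReturned reports how many requested blobs the EL actually returned.
// With getBlobsV3 semantics, nil entries mark blobs the EL did not have.
func countReturned(resp []*blobAndProof) (present, missing int) {
	for _, b := range resp {
		if b == nil {
			missing++
			continue
		}
		present++
	}
	return present, missing
}

func main() {
	resp := []*blobAndProof{{blob: []byte{1}}, nil, {blob: []byte{2}}}
	p, m := countReturned(resp)
	fmt.Printf("present=%d missing=%d (partial response)\n", p, m) // present=2 missing=1
}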
@@ -2587,7 +2587,7 @@ func TestConstructDataColumnSidecars(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
t.Run("GetBlobsV2 is not supported", func(t *testing.T) {
|
||||
_, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
|
||||
_, _, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
|
||||
require.ErrorContains(t, "engine_getBlobsV2 is not supported", err)
|
||||
})
|
||||
|
||||
@@ -2598,7 +2598,7 @@ func TestConstructDataColumnSidecars(t *testing.T) {
|
||||
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
|
||||
defer rpcClient.Close()
|
||||
|
||||
dataColumns, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
|
||||
dataColumns, _, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(dataColumns))
|
||||
})
|
||||
@@ -2611,7 +2611,7 @@ func TestConstructDataColumnSidecars(t *testing.T) {
|
||||
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
|
||||
defer rpcClient.Close()
|
||||
|
||||
dataColumns, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
|
||||
dataColumns, _, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 128, len(dataColumns))
|
||||
})
|
||||
|
||||
@@ -34,6 +34,25 @@ var (
|
||||
Buckets: []float64{25, 50, 100, 200, 500, 1000, 2000, 4000},
|
||||
},
|
||||
)
|
||||
getBlobsV3RequestsTotal = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "beacon_engine_getBlobsV3_requests_total",
|
||||
Help: "Total number of engine_getBlobsV3 requests sent",
|
||||
})
|
||||
getBlobsV3CompleteResponsesTotal = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "beacon_engine_getBlobsV3_complete_responses_total",
|
||||
Help: "Total number of complete engine_getBlobsV3 successful responses received",
|
||||
})
|
||||
getBlobsV3PartialResponsesTotal = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "beacon_engine_getBlobsV3_partial_responses_total",
|
||||
Help: "Total number of engine_getBlobsV3 partial responses received",
|
||||
})
|
||||
getBlobsV3Latency = promauto.NewHistogram(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "beacon_engine_getBlobsV3_request_duration_seconds",
|
||||
Help: "Duration of engine_getBlobsV3 requests in seconds",
|
||||
Buckets: []float64{0.025, 0.05, 0.1, 0.2, 0.5, 1, 2, 4},
|
||||
},
|
||||
)
|
||||
errParseCount = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "execution_parse_error_count",
|
||||
Help: "The number of errors that occurred while parsing execution payload",
|
||||
|
||||
@@ -118,8 +118,8 @@ func (e *EngineClient) ReconstructBlobSidecars(context.Context, interfaces.ReadO
|
||||
}
|
||||
|
||||
// ConstructDataColumnSidecars is a mock implementation of the ConstructDataColumnSidecars method.
|
||||
func (e *EngineClient) ConstructDataColumnSidecars(context.Context, peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, error) {
|
||||
return e.DataColumnSidecars, e.ErrorDataColumnSidecars
|
||||
func (e *EngineClient) ConstructDataColumnSidecars(context.Context, peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, []blocks.PartialDataColumn, error) {
|
||||
return e.DataColumnSidecars, nil, e.ErrorDataColumnSidecars
|
||||
}
|
||||
|
||||
// GetTerminalBlockHash --
|
||||
|
||||
@@ -6,6 +6,7 @@ go_library(
|
||||
"doc.go",
|
||||
"errors.go",
|
||||
"forkchoice.go",
|
||||
"last_root.go",
|
||||
"log.go",
|
||||
"metrics.go",
|
||||
"node.go",
|
||||
@@ -50,6 +51,7 @@ go_test(
|
||||
srcs = [
|
||||
"ffg_update_test.go",
|
||||
"forkchoice_test.go",
|
||||
"last_root_test.go",
|
||||
"no_vote_test.go",
|
||||
"node_test.go",
|
||||
"on_tick_test.go",
|
||||
|
||||
@@ -32,6 +32,7 @@ func New() *ForkChoice {
|
||||
finalizedCheckpoint: &forkchoicetypes.Checkpoint{},
|
||||
proposerBoostRoot: [32]byte{},
|
||||
nodeByRoot: make(map[[fieldparams.RootLength]byte]*Node),
|
||||
nodeByPayload: make(map[[fieldparams.RootLength]byte]*Node),
|
||||
slashedIndices: make(map[primitives.ValidatorIndex]bool),
|
||||
receivedBlocksLastEpoch: [fieldparams.SlotsPerEpoch]primitives.Slot{},
|
||||
}
|
||||
|
||||
beacon-chain/forkchoice/doubly-linked-tree/last_root.go (new file, 26 lines)
@@ -0,0 +1,26 @@
|
||||
package doublylinkedtree
|
||||
|
||||
import (
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
)
|
||||
|
||||
// LastRoot returns the last canonical block root in the given epoch
|
||||
func (f *ForkChoice) LastRoot(epoch primitives.Epoch) [32]byte {
|
||||
head := f.store.headNode
|
||||
headEpoch := slots.ToEpoch(head.slot)
|
||||
epochEnd, err := slots.EpochEnd(epoch)
|
||||
if err != nil {
|
||||
return [32]byte{}
|
||||
}
|
||||
if headEpoch <= epoch {
|
||||
return head.root
|
||||
}
|
||||
for head != nil && head.slot > epochEnd {
|
||||
head = head.parent
|
||||
}
|
||||
if head == nil {
|
||||
return [32]byte{}
|
||||
}
|
||||
return head.root
|
||||
}
|
||||
beacon-chain/forkchoice/doubly-linked-tree/last_root_test.go (new file, 38 lines)
@@ -0,0 +1,38 @@
|
||||
package doublylinkedtree
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
)
|
||||
|
||||
func TestLastRoot(t *testing.T) {
|
||||
f := setup(0, 0)
|
||||
ctx := t.Context()
|
||||
|
||||
st, root, err := prepareForkchoiceState(ctx, 1, [32]byte{'1'}, params.BeaconConfig().ZeroHash, [32]byte{'1'}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
st, root, err = prepareForkchoiceState(ctx, 2, [32]byte{'2'}, [32]byte{'1'}, [32]byte{'2'}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
st, root, err = prepareForkchoiceState(ctx, 3, [32]byte{'3'}, [32]byte{'1'}, [32]byte{'3'}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
st, root, err = prepareForkchoiceState(ctx, 32, [32]byte{'4'}, [32]byte{'3'}, [32]byte{'4'}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
st, root, err = prepareForkchoiceState(ctx, 33, [32]byte{'5'}, [32]byte{'2'}, [32]byte{'5'}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
st, root, err = prepareForkchoiceState(ctx, 34, [32]byte{'6'}, [32]byte{'5'}, [32]byte{'6'}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, root))
|
||||
headNode := f.store.nodeByRoot[[32]byte{'6'}]
|
||||
f.store.headNode = headNode
|
||||
require.Equal(t, [32]byte{'6'}, f.store.headNode.root)
|
||||
require.Equal(t, [32]byte{'2'}, f.LastRoot(0))
|
||||
require.Equal(t, [32]byte{'6'}, f.LastRoot(1))
|
||||
require.Equal(t, [32]byte{'6'}, f.LastRoot(2))
|
||||
}
|
||||
@@ -94,5 +94,6 @@ func (s *Store) removeNodeAndChildren(ctx context.Context, node *Node, invalidRo
|
||||
s.previousProposerBoostScore = 0
|
||||
}
|
||||
delete(s.nodeByRoot, node.root)
|
||||
delete(s.nodeByPayload, node.payloadHash)
|
||||
return invalidRoots, nil
|
||||
}
|
||||
|
||||
@@ -113,6 +113,7 @@ func (s *Store) insert(ctx context.Context,
|
||||
}
|
||||
}
|
||||
|
||||
s.nodeByPayload[payloadHash] = n
|
||||
s.nodeByRoot[root] = n
|
||||
if parent == nil {
|
||||
if s.treeRootNode == nil {
|
||||
@@ -121,6 +122,7 @@ func (s *Store) insert(ctx context.Context,
|
||||
s.highestReceivedNode = n
|
||||
} else {
|
||||
delete(s.nodeByRoot, root)
|
||||
delete(s.nodeByPayload, payloadHash)
|
||||
return nil, errInvalidParentRoot
|
||||
}
|
||||
} else {
|
||||
@@ -189,6 +191,7 @@ func (s *Store) pruneFinalizedNodeByRootMap(ctx context.Context, node, finalized
|
||||
|
||||
node.children = nil
|
||||
delete(s.nodeByRoot, node.root)
|
||||
delete(s.nodeByPayload, node.payloadHash)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -270,6 +273,21 @@ func (f *ForkChoice) HighestReceivedBlockSlot() primitives.Slot {
|
||||
return f.store.highestReceivedNode.slot
|
||||
}
|
||||
|
||||
// HighestReceivedBlockDelay returns the number of slots that the highest
|
||||
// received block was late when it was received. For example, if a block was 12 slots late,
// this method returns 12.
|
||||
func (f *ForkChoice) HighestReceivedBlockDelay() primitives.Slot {
|
||||
n := f.store.highestReceivedNode
|
||||
if n == nil {
|
||||
return 0
|
||||
}
|
||||
sss, err := slots.SinceSlotStart(n.slot, f.store.genesisTime, n.timestamp)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
return primitives.Slot(uint64(sss/time.Second) / params.BeaconConfig().SecondsPerSlot)
|
||||
}
|
||||
|
||||
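The delay computed above is just the time since the slot's scheduled start divided by the slot duration. A numeric sketch mirroring the unit test added further down in this diff, assuming the mainnet 12-second slot:

package main

import "fmt"

func main() {
	const secondsPerSlot = uint64(12) // mainnet value, assumed here
	genesis := uint64(0)              // genesis unix time
	slot := uint64(10)                // slot of the highest received block
	receivedAt := genesis + (slot+12)*secondsPerSlot // received 12 slots after its start

	slotStart := genesis + slot*secondsPerSlot
	delaySlots := (receivedAt - slotStart) / secondsPerSlot
	fmt.Println(delaySlots) // 12
}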
// ReceivedBlocksLastEpoch returns the number of blocks received in the last epoch
|
||||
func (f *ForkChoice) ReceivedBlocksLastEpoch() (uint64, error) {
|
||||
count := uint64(0)
|
||||
|
||||
@@ -128,9 +128,10 @@ func TestStore_Insert(t *testing.T) {
|
||||
// The new node does not have a parent.
|
||||
treeRootNode := &Node{slot: 0, root: indexToHash(0)}
|
||||
nodeByRoot := map[[32]byte]*Node{indexToHash(0): treeRootNode}
|
||||
nodeByPayload := map[[32]byte]*Node{indexToHash(0): treeRootNode}
|
||||
jc := &forkchoicetypes.Checkpoint{Epoch: 0}
|
||||
fc := &forkchoicetypes.Checkpoint{Epoch: 0}
|
||||
s := &Store{nodeByRoot: nodeByRoot, treeRootNode: treeRootNode, justifiedCheckpoint: jc, finalizedCheckpoint: fc, highestReceivedNode: &Node{}}
|
||||
s := &Store{nodeByRoot: nodeByRoot, treeRootNode: treeRootNode, nodeByPayload: nodeByPayload, justifiedCheckpoint: jc, finalizedCheckpoint: fc, highestReceivedNode: &Node{}}
|
||||
payloadHash := [32]byte{'a'}
|
||||
ctx := t.Context()
|
||||
_, blk, err := prepareForkchoiceState(ctx, 100, indexToHash(100), indexToHash(0), payloadHash, 1, 1)
|
||||
@@ -237,6 +238,7 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
|
||||
s.finalizedCheckpoint.Root = indexToHash(1)
|
||||
require.NoError(t, s.prune(t.Context()))
|
||||
require.Equal(t, len(s.nodeByRoot), 1)
|
||||
require.Equal(t, len(s.nodeByPayload), 1)
|
||||
}
|
||||
|
||||
// This test starts with the following branching diagram
|
||||
@@ -317,6 +319,8 @@ func TestStore_PruneMapsNodes(t *testing.T) {
|
||||
s.finalizedCheckpoint.Root = indexToHash(1)
|
||||
require.NoError(t, s.prune(t.Context()))
|
||||
require.Equal(t, len(s.nodeByRoot), 1)
|
||||
require.Equal(t, len(s.nodeByPayload), 1)
|
||||
|
||||
}
|
||||
|
||||
func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
|
||||
@@ -335,6 +339,7 @@ func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(1), count)
|
||||
require.Equal(t, primitives.Slot(1), f.HighestReceivedBlockSlot())
|
||||
require.Equal(t, primitives.Slot(0), f.HighestReceivedBlockDelay())
|
||||
|
||||
// 64
|
||||
// Received block last epoch is 1
|
||||
@@ -347,6 +352,7 @@ func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(1), count)
|
||||
require.Equal(t, primitives.Slot(64), f.HighestReceivedBlockSlot())
|
||||
require.Equal(t, primitives.Slot(0), f.HighestReceivedBlockDelay())
|
||||
|
||||
// 64 65
|
||||
// Received block last epoch is 2
|
||||
@@ -359,6 +365,7 @@ func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(2), count)
|
||||
require.Equal(t, primitives.Slot(65), f.HighestReceivedBlockSlot())
|
||||
require.Equal(t, primitives.Slot(1), f.HighestReceivedBlockDelay())
|
||||
|
||||
// 64 65 66
|
||||
// Received block last epoch is 3
|
||||
@@ -710,3 +717,17 @@ func TestStore_CleanupInserting(t *testing.T) {
|
||||
require.NotNil(t, f.InsertNode(ctx, st, blk))
|
||||
require.Equal(t, false, f.HasNode(blk.Root()))
|
||||
}
|
||||
|
||||
func TestStore_HighestReceivedBlockDelay(t *testing.T) {
|
||||
f := ForkChoice{
|
||||
store: &Store{
|
||||
genesisTime: time.Unix(0, 0),
|
||||
highestReceivedNode: &Node{
|
||||
slot: 10,
|
||||
timestamp: time.Unix(int64(((10 + 12) * params.BeaconConfig().SecondsPerSlot)), 0), // 12 slots late
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
require.Equal(t, primitives.Slot(12), f.HighestReceivedBlockDelay())
|
||||
}
|
||||
|
||||
@@ -36,6 +36,7 @@ type Store struct {
|
||||
treeRootNode *Node // the root node of the store tree.
|
||||
headNode *Node // last head Node
|
||||
nodeByRoot map[[fieldparams.RootLength]byte]*Node // nodes indexed by roots.
|
||||
nodeByPayload map[[fieldparams.RootLength]byte]*Node // nodes indexed by payload Hash
|
||||
slashedIndices map[primitives.ValidatorIndex]bool // the list of equivocating validator indices
|
||||
originRoot [fieldparams.RootLength]byte // The genesis block root
|
||||
genesisTime time.Time
|
||||
|
||||
@@ -67,11 +67,13 @@ type FastGetter interface {
|
||||
HasNode([32]byte) bool
|
||||
HighestReceivedBlockSlot() primitives.Slot
|
||||
HighestReceivedBlockRoot() [32]byte
|
||||
HighestReceivedBlockDelay() primitives.Slot
|
||||
IsCanonical(root [32]byte) bool
|
||||
IsOptimistic(root [32]byte) (bool, error)
|
||||
IsViableForCheckpoint(*forkchoicetypes.Checkpoint) (bool, error)
|
||||
JustifiedCheckpoint() *forkchoicetypes.Checkpoint
|
||||
JustifiedPayloadBlockHash() [32]byte
|
||||
LastRoot(primitives.Epoch) [32]byte
|
||||
NodeCount() int
|
||||
PreviousJustifiedCheckpoint() *forkchoicetypes.Checkpoint
|
||||
ProposerBoost() [fieldparams.RootLength]byte
|
||||
|
||||
@@ -121,6 +121,13 @@ func (ro *ROForkChoice) HighestReceivedBlockRoot() [32]byte {
|
||||
return ro.getter.HighestReceivedBlockRoot()
|
||||
}
|
||||
|
||||
// HighestReceivedBlockDelay delegates to the underlying forkchoice call, under a lock.
|
||||
func (ro *ROForkChoice) HighestReceivedBlockDelay() primitives.Slot {
|
||||
ro.l.RLock()
|
||||
defer ro.l.RUnlock()
|
||||
return ro.getter.HighestReceivedBlockDelay()
|
||||
}
|
||||
|
||||
// ReceivedBlocksLastEpoch delegates to the underlying forkchoice call, under a lock.
|
||||
func (ro *ROForkChoice) ReceivedBlocksLastEpoch() (uint64, error) {
|
||||
ro.l.RLock()
|
||||
@@ -156,6 +163,13 @@ func (ro *ROForkChoice) Slot(root [32]byte) (primitives.Slot, error) {
|
||||
return ro.getter.Slot(root)
|
||||
}
|
||||
|
||||
// LastRoot delegates to the underlying forkchoice call, under a lock.
|
||||
func (ro *ROForkChoice) LastRoot(e primitives.Epoch) [32]byte {
|
||||
ro.l.RLock()
|
||||
defer ro.l.RUnlock()
|
||||
return ro.getter.LastRoot(e)
|
||||
}
|
||||
|
||||
// DependentRoot delegates to the underlying forkchoice call, under a lock.
|
||||
func (ro *ROForkChoice) DependentRoot(epoch primitives.Epoch) ([32]byte, error) {
|
||||
ro.l.RLock()
|
||||
|
||||
@@ -30,6 +30,7 @@ const (
|
||||
nodeCountCalled
|
||||
highestReceivedBlockSlotCalled
|
||||
highestReceivedBlockRootCalled
|
||||
highestReceivedBlockDelayCalled
|
||||
receivedBlocksLastEpochCalled
|
||||
weightCalled
|
||||
isOptimisticCalled
|
||||
@@ -117,6 +118,11 @@ func TestROLocking(t *testing.T) {
|
||||
call: highestReceivedBlockSlotCalled,
|
||||
cb: func(g FastGetter) { g.HighestReceivedBlockSlot() },
|
||||
},
|
||||
{
|
||||
name: "highestReceivedBlockDelayCalled",
|
||||
call: highestReceivedBlockDelayCalled,
|
||||
cb: func(g FastGetter) { g.HighestReceivedBlockDelay() },
|
||||
},
|
||||
{
|
||||
name: "receivedBlocksLastEpochCalled",
|
||||
call: receivedBlocksLastEpochCalled,
|
||||
@@ -142,6 +148,11 @@ func TestROLocking(t *testing.T) {
|
||||
call: slotCalled,
|
||||
cb: func(g FastGetter) { _, err := g.Slot([32]byte{}); _discard(t, err) },
|
||||
},
|
||||
{
|
||||
name: "lastRootCalled",
|
||||
call: lastRootCalled,
|
||||
cb: func(g FastGetter) { g.LastRoot(0) },
|
||||
},
|
||||
{
|
||||
name: "targetRootForEpochCalled",
|
||||
call: targetRootForEpochCalled,
|
||||
@@ -254,6 +265,11 @@ func (ro *mockROForkchoice) HighestReceivedBlockRoot() [32]byte {
|
||||
return [32]byte{}
|
||||
}
|
||||
|
||||
func (ro *mockROForkchoice) HighestReceivedBlockDelay() primitives.Slot {
|
||||
ro.calls = append(ro.calls, highestReceivedBlockDelayCalled)
|
||||
return 0
|
||||
}
|
||||
|
||||
func (ro *mockROForkchoice) ReceivedBlocksLastEpoch() (uint64, error) {
|
||||
ro.calls = append(ro.calls, receivedBlocksLastEpochCalled)
|
||||
return 0, nil
|
||||
@@ -279,6 +295,11 @@ func (ro *mockROForkchoice) Slot(_ [32]byte) (primitives.Slot, error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func (ro *mockROForkchoice) LastRoot(_ primitives.Epoch) [32]byte {
|
||||
ro.calls = append(ro.calls, lastRootCalled)
|
||||
return [32]byte{}
|
||||
}
|
||||
|
||||
// DependentRoot implements FastGetter.
|
||||
func (ro *mockROForkchoice) DependentRoot(_ primitives.Epoch) ([32]byte, error) {
|
||||
ro.calls = append(ro.calls, dependentRootCalled)
|
||||
|
||||
@@ -678,6 +678,7 @@ func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
|
||||
DB: b.db,
|
||||
StateGen: b.stateGen,
|
||||
ClockWaiter: b.ClockWaiter,
|
||||
PartialDataColumns: b.cliCtx.Bool(flags.PartialDataColumns.Name),
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
@@ -52,6 +52,7 @@ go_library(
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/kv:go_default_library",
|
||||
"//beacon-chain/p2p/encoder:go_default_library",
|
||||
"//beacon-chain/p2p/partialdatacolumnbroadcaster:go_default_library",
|
||||
"//beacon-chain/p2p/peers:go_default_library",
|
||||
"//beacon-chain/p2p/peers/peerdata:go_default_library",
|
||||
"//beacon-chain/p2p/peers/scorers:go_default_library",
|
||||
|
||||
@@ -343,7 +343,7 @@ func (s *Service) BroadcastLightClientFinalityUpdate(ctx context.Context, update
|
||||
// there is at least one peer in each needed subnet. If not, it will attempt to find one before broadcasting.
|
||||
// This function is non-blocking. It stops trying to broadcast a given sidecar when more than one slot has passed, or the context is
|
||||
// cancelled (whichever comes first).
|
||||
func (s *Service) BroadcastDataColumnSidecars(ctx context.Context, sidecars []blocks.VerifiedRODataColumn) error {
|
||||
func (s *Service) BroadcastDataColumnSidecars(ctx context.Context, sidecars []blocks.VerifiedRODataColumn, partialColumns []blocks.PartialDataColumn) error {
|
||||
// Increase the number of broadcast attempts.
|
||||
dataColumnSidecarBroadcastAttempts.Add(float64(len(sidecars)))
|
||||
|
||||
@@ -353,16 +353,15 @@ func (s *Service) BroadcastDataColumnSidecars(ctx context.Context, sidecars []bl
|
||||
return errors.Wrap(err, "current fork digest")
|
||||
}
|
||||
|
||||
go s.broadcastDataColumnSidecars(ctx, forkDigest, sidecars)
|
||||
go s.broadcastDataColumnSidecars(ctx, forkDigest, sidecars, partialColumns)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// broadcastDataColumnSidecars broadcasts multiple data column sidecars to the p2p network.
|
||||
// For sidecars with available peers, it uses batch publishing.
|
||||
// For sidecars without peers, it finds peers first and then publishes individually.
|
||||
// Both paths run in parallel. It returns when all broadcasts are complete, or the context is cancelled.
|
||||
func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [fieldparams.VersionLength]byte, sidecars []blocks.VerifiedRODataColumn) {
|
||||
// broadcastDataColumnSidecars broadcasts multiple data column sidecars to the p2p network, after ensuring
|
||||
// there is at least one peer in each needed subnet. If not, it will attempt to find one before broadcasting.
|
||||
// It returns when all broadcasts are complete, or the context is cancelled (whichever comes first).
|
||||
func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [fieldparams.VersionLength]byte, sidecars []blocks.VerifiedRODataColumn, partialColumns []blocks.PartialDataColumn) {
|
||||
type rootAndIndex struct {
|
||||
root [fieldparams.RootLength]byte
|
||||
index uint64
|
||||
@@ -372,8 +371,8 @@ func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [f
|
||||
logLevel := logrus.GetLevel()
|
||||
slotPerRoot := make(map[[fieldparams.RootLength]byte]primitives.Slot, 1)
|
||||
|
||||
topicFunc := func(sidecar blocks.VerifiedRODataColumn) (topic string, wrappedSubIdx uint64, subnet uint64) {
|
||||
subnet = peerdas.ComputeSubnetForDataColumnSidecar(sidecar.Index)
|
||||
topicFunc := func(dcIndex uint64) (topic string, wrappedSubIdx uint64, subnet uint64) {
|
||||
subnet = peerdas.ComputeSubnetForDataColumnSidecar(dcIndex)
|
||||
topic = dataColumnSubnetToTopic(subnet, forkDigest)
|
||||
wrappedSubIdx = subnet + dataColumnSubnetVal
|
||||
return
|
||||
@@ -386,7 +385,7 @@ func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [f
|
||||
for _, sidecar := range sidecars {
|
||||
slotPerRoot[sidecar.BlockRoot()] = sidecar.Slot()
|
||||
|
||||
topic, wrappedSubIdx, _ := topicFunc(sidecar)
|
||||
topic, wrappedSubIdx, _ := topicFunc(sidecar.Index)
|
||||
// Check if we have a peer for this subnet (use RLock for read-only check).
|
||||
mu := s.subnetLocker(wrappedSubIdx)
|
||||
mu.RLock()
|
||||
@@ -411,7 +410,7 @@ func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [f
|
||||
ctx := trace.NewContext(s.ctx, span)
|
||||
defer span.End()
|
||||
|
||||
topic, _, _ := topicFunc(sidecar)
|
||||
topic, _, _ := topicFunc(sidecar.Index)
|
||||
|
||||
if err := s.batchObject(ctx, &messageBatch, sidecar, topic); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
@@ -419,6 +418,10 @@ func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [f
|
||||
return
|
||||
}
|
||||
|
||||
// Increase the number of successful broadcasts.
|
||||
dataColumnSidecarBroadcasts.Inc()
|
||||
|
||||
// Record the timing for log purposes.
|
||||
if logLevel >= logrus.DebugLevel {
|
||||
root := sidecar.BlockRoot()
|
||||
timings.Store(rootAndIndex{root: root, index: sidecar.Index}, time.Now())
|
||||
@@ -433,7 +436,7 @@ func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [f
|
||||
ctx := trace.NewContext(s.ctx, span)
|
||||
defer span.End()
|
||||
|
||||
topic, wrappedSubIdx, subnet := topicFunc(sidecar)
|
||||
topic, wrappedSubIdx, subnet := topicFunc(sidecar.Index)
|
||||
|
||||
// Find peers for this sidecar's subnet.
|
||||
if err := s.findPeersIfNeeded(ctx, wrappedSubIdx, DataColumnSubnetTopicFormat, forkDigest, subnet); err != nil {
|
||||
@@ -458,6 +461,32 @@ func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [f
|
||||
})
|
||||
}
|
||||
|
||||
if s.partialColumnBroadcaster != nil {
|
||||
// Note: There is no batch publishing for partial columns.
|
||||
for _, partialColumn := range partialColumns {
|
||||
individualWg.Go(func() {
|
||||
_, span := trace.StartSpan(ctx, "p2p.broadcastPartialDataColumn")
|
||||
ctx := trace.NewContext(s.ctx, span)
|
||||
defer span.End()
|
||||
|
||||
topic, wrappedSubIdx, subnet := topicFunc(partialColumn.Index)
|
||||
|
||||
// Find peers for this sidecar's subnet.
|
||||
if err := s.findPeersIfNeeded(ctx, wrappedSubIdx, DataColumnSubnetTopicFormat, forkDigest, subnet); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
log.WithError(err).Error("Cannot find peers if needed")
|
||||
return
|
||||
}
|
||||
|
||||
fullTopicStr := topic + s.Encoding().ProtocolSuffix()
|
||||
if err := s.partialColumnBroadcaster.Publish(fullTopicStr, partialColumn); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
log.WithError(err).Error("Cannot partial broadcast data column sidecar")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Wait for batch to be populated, then publish.
|
||||
batchWg.Wait()
|
||||
if len(sidecarsWithPeers) > 0 {
|
||||
|
||||
@@ -803,7 +803,7 @@ func TestService_BroadcastDataColumn(t *testing.T) {
|
||||
}, 5*time.Second, 10*time.Millisecond, "libp2p mesh did not establish")
|
||||
|
||||
// Broadcast to peers and wait.
|
||||
err = service.BroadcastDataColumnSidecars(ctx, []blocks.VerifiedRODataColumn{verifiedRoSidecar})
|
||||
err = service.BroadcastDataColumnSidecars(ctx, []blocks.VerifiedRODataColumn{verifiedRoSidecar}, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Receive the message.
|
||||
@@ -867,7 +867,7 @@ func (*rpcOrderTracer) DeliverMessage(*pubsub.Message) {}
|
||||
func (*rpcOrderTracer) RejectMessage(*pubsub.Message, string) {}
|
||||
func (*rpcOrderTracer) DuplicateMessage(*pubsub.Message) {}
|
||||
func (*rpcOrderTracer) ThrottlePeer(peer.ID) {}
|
||||
func (*rpcOrderTracer) RecvRPC(*pubsub.RPC) {}
|
||||
func (*rpcOrderTracer) RecvRPC(*pubsub.RPC, peer.ID) {}
|
||||
func (*rpcOrderTracer) DropRPC(*pubsub.RPC, peer.ID) {}
|
||||
func (*rpcOrderTracer) UndeliverableMessage(*pubsub.Message) {}
|
||||
|
||||
@@ -969,7 +969,7 @@ func TestService_BroadcastDataColumnRoundRobin(t *testing.T) {
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
// Broadcast all sidecars.
|
||||
err = service.BroadcastDataColumnSidecars(ctx, verifiedRoSidecars)
|
||||
err = service.BroadcastDataColumnSidecars(ctx, verifiedRoSidecars, nil)
|
||||
require.NoError(t, err)
|
||||
// Give some time for messages to be sent.
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
@@ -26,6 +26,7 @@ const (
|
||||
// Config for the p2p service. These parameters are set from application level flags
|
||||
// to initialize the p2p service.
|
||||
type Config struct {
|
||||
PartialDataColumns bool
|
||||
NoDiscovery bool
|
||||
EnableUPnP bool
|
||||
StaticPeerID bool
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/encoder"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/partialdatacolumnbroadcaster"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
@@ -28,6 +29,7 @@ type (
|
||||
Broadcaster
|
||||
SetStreamHandler
|
||||
PubSubProvider
|
||||
PartialColumnBroadcasterProvider
|
||||
PubSubTopicUser
|
||||
SenderEncoder
|
||||
PeerManager
|
||||
@@ -52,7 +54,7 @@ type (
|
||||
BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.BlobSidecar) error
|
||||
BroadcastLightClientOptimisticUpdate(ctx context.Context, update interfaces.LightClientOptimisticUpdate) error
|
||||
BroadcastLightClientFinalityUpdate(ctx context.Context, update interfaces.LightClientFinalityUpdate) error
|
||||
BroadcastDataColumnSidecars(ctx context.Context, sidecars []blocks.VerifiedRODataColumn) error
|
||||
BroadcastDataColumnSidecars(ctx context.Context, sidecars []blocks.VerifiedRODataColumn, partialColumns []blocks.PartialDataColumn) error
|
||||
}
|
||||
|
||||
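// Illustrative call site (not part of this diff) for the extended
// BroadcastDataColumnSidecars signature. b is any value satisfying the
// broadcaster interface in this hunk; passing nil partial columns keeps the
// previous behavior.
func exampleBroadcastColumns(
	ctx context.Context,
	b interface {
		BroadcastDataColumnSidecars(context.Context, []blocks.VerifiedRODataColumn, []blocks.PartialDataColumn) error
	},
	sidecars []blocks.VerifiedRODataColumn,
	partials []blocks.PartialDataColumn,
) error {
	return b.BroadcastDataColumnSidecars(ctx, sidecars, partials)
}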
// SetStreamHandler configures p2p to handle streams of a certain topic ID.
|
||||
@@ -92,6 +94,11 @@ type (
|
||||
PubSub() *pubsub.PubSub
|
||||
}
|
||||
|
||||
// PubSubProvider provides the p2p pubsub protocol.
|
||||
PartialColumnBroadcasterProvider interface {
|
||||
PartialColumnBroadcaster() *partialdatacolumnbroadcaster.PartialColumnBroadcaster
|
||||
}
|
||||
|
||||
// PeerManager abstracts some peer management methods from libp2p.
|
||||
PeerManager interface {
|
||||
Disconnect(peer.ID) error
|
||||
|
||||
@@ -157,6 +157,11 @@ var (
|
||||
Help: "The number of publish messages received via rpc for a particular topic",
|
||||
},
|
||||
[]string{"topic"})
|
||||
pubsubRPCPubRecvSize = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "p2p_pubsub_rpc_recv_pub_size_total",
|
||||
Help: "The total size of publish messages received via rpc for a particular topic",
|
||||
},
|
||||
[]string{"topic", "is_partial"})
|
||||
pubsubRPCDrop = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "p2p_pubsub_rpc_drop_total",
|
||||
Help: "The number of messages dropped via rpc for a particular control message",
|
||||
@@ -171,6 +176,11 @@ var (
|
||||
Help: "The number of publish messages dropped via rpc for a particular topic",
|
||||
},
|
||||
[]string{"topic"})
|
||||
pubsubRPCPubDropSize = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "p2p_pubsub_rpc_drop_pub_size_total",
|
||||
Help: "The total size of publish messages dropped via rpc for a particular topic",
|
||||
},
|
||||
[]string{"topic", "is_partial"})
|
||||
pubsubRPCSent = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "p2p_pubsub_rpc_sent_total",
|
||||
Help: "The number of messages sent via rpc for a particular control message",
|
||||
@@ -185,6 +195,16 @@ var (
|
||||
Help: "The number of publish messages sent via rpc for a particular topic",
|
||||
},
|
||||
[]string{"topic"})
|
||||
pubsubRPCPubSentSize = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "gossipsub_pubsub_rpc_sent_pub_size_total",
|
||||
Help: "The total size of publish messages sent via rpc for a particular topic",
|
||||
},
|
||||
[]string{"topic", "is_partial"})
|
||||
pubsubMeshPeers = promauto.NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "gossipsub_mesh_peers",
|
||||
Help: "The number of capable peers in mesh",
|
||||
},
|
||||
[]string{"topic", "supports_partial"})
|
||||
)
|
||||
|
||||
func (s *Service) updateMetrics() {
|
||||
|
||||
27 beacon-chain/p2p/partialdatacolumnbroadcaster/BUILD.bazel (new file)
@@ -0,0 +1,27 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"log.go",
|
||||
"metrics.go",
|
||||
"partial.go",
|
||||
],
|
||||
importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/partialdatacolumnbroadcaster",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//internal/logrusadapter:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p_pubsub//:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p_pubsub//partialmessages:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p_pubsub//partialmessages/bitmap:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p_pubsub//pb:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -0,0 +1,25 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_test")
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
size = "medium",
|
||||
srcs = ["two_node_test.go"],
|
||||
deps = [
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
"//beacon-chain/p2p/encoder:go_default_library",
|
||||
"//beacon-chain/p2p/partialdatacolumnbroadcaster:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//x/simlibp2p:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p_pubsub//:go_default_library",
|
||||
"@com_github_marcopolo_simnet//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -0,0 +1,239 @@
|
||||
package integrationtest
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"fmt"
|
||||
"testing"
|
||||
"testing/synctest"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/encoder"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/partialdatacolumnbroadcaster"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/util"
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
simlibp2p "github.com/libp2p/go-libp2p/x/simlibp2p"
|
||||
"github.com/marcopolo/simnet"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// TestTwoNodePartialColumnExchange tests that two nodes can exchange partial columns
|
||||
// and reconstruct the complete column. Node 1 starts with the even-indexed cells, Node 2 with the odd-indexed cells.
|
||||
// After exchange, both should have all cells.
|
||||
func TestTwoNodePartialColumnExchange(t *testing.T) {
|
||||
synctest.Test(t, func(t *testing.T) {
|
||||
// Create a simulated libp2p network
|
||||
latency := time.Millisecond * 10
|
||||
network, meta, err := simlibp2p.SimpleLibp2pNetwork([]simlibp2p.NodeLinkSettingsAndCount{
|
||||
{LinkSettings: simnet.NodeBiDiLinkSettings{
|
||||
Downlink: simnet.LinkSettings{BitsPerSecond: 20 * simlibp2p.OneMbps, Latency: latency / 2},
|
||||
Uplink: simnet.LinkSettings{BitsPerSecond: 20 * simlibp2p.OneMbps, Latency: latency / 2},
|
||||
}, Count: 2},
|
||||
}, simlibp2p.NetworkSettings{UseBlankHost: true})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, network.Start())
|
||||
defer func() {
|
||||
require.NoError(t, network.Close())
|
||||
}()
|
||||
defer func() {
|
||||
for _, node := range meta.Nodes {
|
||||
err := node.Close()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
h1 := meta.Nodes[0]
|
||||
h2 := meta.Nodes[1]
|
||||
|
||||
logger := logrus.New()
|
||||
logger.SetLevel(logrus.DebugLevel)
|
||||
broadcaster1 := partialdatacolumnbroadcaster.NewBroadcaster(logger)
|
||||
broadcaster2 := partialdatacolumnbroadcaster.NewBroadcaster(logger)
|
||||
|
||||
opts1 := broadcaster1.AppendPubSubOpts([]pubsub.Option{
|
||||
pubsub.WithMessageSigning(false),
|
||||
pubsub.WithStrictSignatureVerification(false),
|
||||
})
|
||||
opts2 := broadcaster2.AppendPubSubOpts([]pubsub.Option{
|
||||
pubsub.WithMessageSigning(false),
|
||||
pubsub.WithStrictSignatureVerification(false),
|
||||
})
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
ps1, err := pubsub.NewGossipSub(ctx, h1, opts1...)
|
||||
require.NoError(t, err)
|
||||
ps2, err := pubsub.NewGossipSub(ctx, h2, opts2...)
|
||||
require.NoError(t, err)
|
||||
|
||||
go broadcaster1.Start()
|
||||
go broadcaster2.Start()
|
||||
defer func() {
|
||||
broadcaster1.Stop()
|
||||
broadcaster2.Stop()
|
||||
}()
|
||||
|
||||
// Generate Test Data
|
||||
var blockRoot [fieldparams.RootLength]byte
|
||||
copy(blockRoot[:], []byte("test-block-root"))
|
||||
|
||||
numCells := 6
|
||||
commitments := make([][]byte, numCells)
|
||||
cells := make([][]byte, numCells)
|
||||
proofs := make([][]byte, numCells)
|
||||
|
||||
for i := range numCells {
|
||||
commitments[i] = make([]byte, 48)
|
||||
|
||||
cells[i] = make([]byte, 2048)
|
||||
_, err := rand.Read(cells[i])
|
||||
require.NoError(t, err)
|
||||
proofs[i] = make([]byte, 48)
|
||||
_ = fmt.Appendf(proofs[i][:0], "proof %d", i)
|
||||
}
|
||||
|
||||
roDC, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{
|
||||
{
|
||||
BodyRoot: blockRoot[:],
|
||||
KzgCommitments: commitments,
|
||||
Column: cells,
|
||||
KzgProofs: proofs,
|
||||
},
|
||||
})
|
||||
|
||||
pc1, err := blocks.NewPartialDataColumn(roDC[0].DataColumnSidecar.SignedBlockHeader, roDC[0].Index, roDC[0].KzgCommitments, roDC[0].KzgCommitmentsInclusionProof)
|
||||
require.NoError(t, err)
|
||||
pc2, err := blocks.NewPartialDataColumn(roDC[0].DataColumnSidecar.SignedBlockHeader, roDC[0].Index, roDC[0].KzgCommitments, roDC[0].KzgCommitmentsInclusionProof)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Split data
|
||||
for i := range numCells {
|
||||
if i%2 == 0 {
|
||||
pc1.ExtendFromVerfifiedCell(uint64(i), roDC[0].Column[i], roDC[0].KzgProofs[i])
|
||||
} else {
|
||||
pc2.ExtendFromVerfifiedCell(uint64(i), roDC[0].Column[i], roDC[0].KzgProofs[i])
|
||||
}
|
||||
}
|
||||
|
||||
// Setup Topic and Subscriptions
|
||||
digest := params.ForkDigest(0)
|
||||
columnIndex := uint64(12)
|
||||
subnet := peerdas.ComputeSubnetForDataColumnSidecar(columnIndex)
|
||||
topicStr := fmt.Sprintf(p2p.DataColumnSubnetTopicFormat, digest, subnet) +
|
||||
encoder.SszNetworkEncoder{}.ProtocolSuffix()
|
||||
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
topic1, err := ps1.Join(topicStr, pubsub.RequestPartialMessages())
|
||||
require.NoError(t, err)
|
||||
topic2, err := ps2.Join(topicStr, pubsub.RequestPartialMessages())
|
||||
require.NoError(t, err)
|
||||
|
||||
// Header validator that verifies the inclusion proof
|
||||
headerValidator := func(header *ethpb.PartialDataColumnHeader) (reject bool, err error) {
|
||||
if header == nil {
|
||||
return false, fmt.Errorf("nil header")
|
||||
}
|
||||
if header.SignedBlockHeader == nil || header.SignedBlockHeader.Header == nil {
|
||||
return true, fmt.Errorf("nil signed block header")
|
||||
}
|
||||
if len(header.KzgCommitments) == 0 {
|
||||
return true, fmt.Errorf("empty kzg commitments")
|
||||
}
|
||||
// Verify inclusion proof
|
||||
if err := peerdas.VerifyPartialDataColumnHeaderInclusionProof(header); err != nil {
|
||||
return true, fmt.Errorf("invalid inclusion proof: %w", err)
|
||||
}
|
||||
t.Log("Header validation passed")
|
||||
return false, nil
|
||||
}
|
||||
|
||||
cellValidator := func(_ []blocks.CellProofBundle) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
node1Complete := make(chan blocks.VerifiedRODataColumn, 1)
|
||||
node2Complete := make(chan blocks.VerifiedRODataColumn, 1)
|
||||
|
||||
handler1 := func(topic string, col blocks.VerifiedRODataColumn) {
|
||||
t.Logf("Node 1: Completed! Column has %d cells", len(col.Column))
|
||||
node1Complete <- col
|
||||
}
|
||||
|
||||
handler2 := func(topic string, col blocks.VerifiedRODataColumn) {
|
||||
t.Logf("Node 2: Completed! Column has %d cells", len(col.Column))
|
||||
node2Complete <- col
|
||||
}
|
||||
|
||||
// Connect hosts
|
||||
err = h1.Connect(context.Background(), peer.AddrInfo{
|
||||
ID: h2.ID(),
|
||||
Addrs: h2.Addrs(),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
time.Sleep(300 * time.Millisecond)
|
||||
|
||||
// Subscribe to regular GossipSub (critical for partial message RPC exchange!)
|
||||
sub1, err := topic1.Subscribe()
|
||||
require.NoError(t, err)
|
||||
defer sub1.Cancel()
|
||||
|
||||
sub2, err := topic2.Subscribe()
|
||||
require.NoError(t, err)
|
||||
defer sub2.Cancel()
|
||||
|
||||
err = broadcaster1.Subscribe(topic1, headerValidator, cellValidator, handler1)
|
||||
require.NoError(t, err)
|
||||
err = broadcaster2.Subscribe(topic2, headerValidator, cellValidator, handler2)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Wait for mesh to form
|
||||
time.Sleep(2 * time.Second)
|
||||
|
||||
// Publish
|
||||
t.Log("Publishing from Node 1")
|
||||
err = broadcaster1.Publish(topicStr, pc1)
|
||||
require.NoError(t, err)
|
||||
|
||||
time.Sleep(200 * time.Millisecond)
|
||||
|
||||
t.Log("Publishing from Node 2")
|
||||
err = broadcaster2.Publish(topicStr, pc2)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Wait for Completion
|
||||
timeout := time.After(10 * time.Second)
|
||||
var col1, col2 blocks.VerifiedRODataColumn
|
||||
receivedCount := 0
|
||||
|
||||
for receivedCount < 2 {
|
||||
select {
|
||||
case col1 = <-node1Complete:
|
||||
t.Log("Node 1 completed reconstruction")
|
||||
receivedCount++
|
||||
case col2 = <-node2Complete:
|
||||
t.Log("Node 2 completed reconstruction")
|
||||
receivedCount++
|
||||
case <-timeout:
|
||||
t.Fatalf("Timeout: Only %d/2 nodes completed", receivedCount)
|
||||
}
|
||||
}
|
||||
|
||||
// Verify both columns have all cells
|
||||
assert.Equal(t, numCells, len(col1.Column), "Node 1 should have all cells")
|
||||
assert.Equal(t, numCells, len(col2.Column), "Node 2 should have all cells")
|
||||
assert.DeepSSZEqual(t, cells, col1.Column, "Node 1 cell mismatch")
|
||||
assert.DeepSSZEqual(t, cells, col2.Column, "Node 2 cell mismatch")
|
||||
})
|
||||
}
|
||||
9 beacon-chain/p2p/partialdatacolumnbroadcaster/log.go (new file)
@@ -0,0 +1,9 @@
|
||||
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
|
||||
// This file is created and regenerated automatically. Anything added here might get removed.
|
||||
package partialdatacolumnbroadcaster
|
||||
|
||||
import "github.com/sirupsen/logrus"
|
||||
|
||||
// The prefix for logs from this package will be the text after the last slash in the package path.
|
||||
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
|
||||
var log = logrus.WithField("package", "beacon-chain/p2p/partialdatacolumnbroadcaster")
|
||||
18 beacon-chain/p2p/partialdatacolumnbroadcaster/metrics.go (new file)
@@ -0,0 +1,18 @@
|
||||
package partialdatacolumnbroadcaster
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
)
|
||||
|
||||
var (
|
||||
partialMessageUsefulCellsTotal = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "beacon_partial_message_useful_cells_total",
|
||||
Help: "Number of useful cells received via a partial message",
|
||||
}, []string{"column_index"})
|
||||
|
||||
partialMessageCellsReceivedTotal = promauto.NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "beacon_partial_message_cells_received_total",
|
||||
Help: "Number of total cells received via a partial message",
|
||||
}, []string{"column_index"})
|
||||
)
|
||||
540 beacon-chain/p2p/partialdatacolumnbroadcaster/partial.go (new file)
@@ -0,0 +1,540 @@
|
||||
package partialdatacolumnbroadcaster
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"log/slog"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/internal/logrusadapter"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
"github.com/libp2p/go-libp2p-pubsub/partialmessages"
|
||||
"github.com/libp2p/go-libp2p-pubsub/partialmessages/bitmap"
|
||||
pubsub_pb "github.com/libp2p/go-libp2p-pubsub/pb"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// TODOs:
|
||||
// different eager push strategies:
|
||||
// - no eager push
|
||||
// - full column eager push
|
||||
// - With debouncing - some factor of RTT
|
||||
// - eager push missing cells
|
||||
|
||||
const TTLInSlots = 3
|
||||
const maxConcurrentValidators = 128
|
||||
|
||||
var dataColumnTopicRegex = regexp.MustCompile(`data_column_sidecar_(\d+)`)
|
||||
|
||||
func extractColumnIndexFromTopic(topic string) (uint64, error) {
|
||||
matches := dataColumnTopicRegex.FindStringSubmatch(topic)
|
||||
if len(matches) < 2 {
|
||||
return 0, errors.New("could not extract column index from topic")
|
||||
}
|
||||
return strconv.ParseUint(matches[1], 10, 64)
|
||||
}
|
||||
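// Usage sketch (not part of this change). The full topic string below is
// illustrative; only the "data_column_sidecar_<n>" segment matters to the
// regex above.
func exampleExtractColumnIndex() {
	idx, err := extractColumnIndexFromTopic("/eth2/01020304/data_column_sidecar_42/ssz_snappy")
	if err != nil {
		// The topic did not contain a data_column_sidecar_<n> segment.
		return
	}
	_ = idx // 42
}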
|
||||
// HeaderValidator validates a PartialDataColumnHeader.
|
||||
// Returns (reject, err) where:
|
||||
// - reject=true, err!=nil: REJECT - peer should be penalized
|
||||
// - reject=false, err!=nil: IGNORE - don't penalize, just ignore
|
||||
// - reject=false, err=nil: valid header
|
||||
type HeaderValidator func(header *ethpb.PartialDataColumnHeader) (reject bool, err error)
|
||||
type ColumnValidator func(cells []blocks.CellProofBundle) error
|
||||
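// Minimal HeaderValidator sketch (not part of this change) following the
// (reject, err) contract above; it mirrors the validator used in the two-node
// test earlier in this diff.
var exampleHeaderValidator HeaderValidator = func(h *ethpb.PartialDataColumnHeader) (bool, error) {
	if h == nil {
		return false, errors.New("nil header") // IGNORE: nothing to attribute to the peer
	}
	if h.SignedBlockHeader == nil || len(h.KzgCommitments) == 0 {
		return true, errors.New("malformed header") // REJECT: peer sent an invalid header
	}
	return false, nil // valid header
}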
|
||||
type PartialColumnBroadcaster struct {
|
||||
logger *logrus.Logger
|
||||
|
||||
ps *pubsub.PubSub
|
||||
stop chan struct{}
|
||||
|
||||
// map topic -> headerValidators
|
||||
headerValidators map[string]HeaderValidator
|
||||
// map topic -> Validator
|
||||
validators map[string]ColumnValidator
|
||||
|
||||
// map topic -> handler
|
||||
handlers map[string]SubHandler
|
||||
|
||||
// map topic -> *pubsub.Topic
|
||||
topics map[string]*pubsub.Topic
|
||||
|
||||
concurrentValidatorSemaphore chan struct{}
|
||||
|
||||
// map topic -> map[groupID]PartialColumn
|
||||
partialMsgStore map[string]map[string]*blocks.PartialDataColumn
|
||||
|
||||
groupTTL map[string]int8
|
||||
|
||||
// validHeaderCache caches validated headers by group ID (works across topics)
|
||||
validHeaderCache map[string]*ethpb.PartialDataColumnHeader
|
||||
|
||||
incomingReq chan request
|
||||
}
|
||||
|
||||
type requestKind uint8
|
||||
|
||||
const (
|
||||
requestKindPublish requestKind = iota
|
||||
requestKindSubscribe
|
||||
requestKindUnsubscribe
|
||||
requestKindHandleIncomingRPC
|
||||
requestKindCellsValidated
|
||||
)
|
||||
|
||||
type request struct {
|
||||
kind requestKind
|
||||
response chan error
|
||||
sub subscribe
|
||||
unsub unsubscribe
|
||||
publish publish
|
||||
incomingRPC rpcWithFrom
|
||||
cellsValidated *cellsValidated
|
||||
}
|
||||
|
||||
type publish struct {
|
||||
topic string
|
||||
c blocks.PartialDataColumn
|
||||
}
|
||||
|
||||
type subscribe struct {
|
||||
t *pubsub.Topic
|
||||
headerValidator HeaderValidator
|
||||
validator ColumnValidator
|
||||
handler SubHandler
|
||||
}
|
||||
|
||||
type unsubscribe struct {
|
||||
topic string
|
||||
}
|
||||
|
||||
type rpcWithFrom struct {
|
||||
*pubsub_pb.PartialMessagesExtension
|
||||
from peer.ID
|
||||
}
|
||||
|
||||
type cellsValidated struct {
|
||||
validationTook time.Duration
|
||||
topic string
|
||||
group []byte
|
||||
cellIndices []uint64
|
||||
cells []blocks.CellProofBundle
|
||||
}
|
||||
|
||||
func NewBroadcaster(logger *logrus.Logger) *PartialColumnBroadcaster {
|
||||
return &PartialColumnBroadcaster{
|
||||
validators: make(map[string]ColumnValidator),
|
||||
headerValidators: make(map[string]HeaderValidator),
|
||||
handlers: make(map[string]SubHandler),
|
||||
topics: make(map[string]*pubsub.Topic),
|
||||
partialMsgStore: make(map[string]map[string]*blocks.PartialDataColumn),
|
||||
groupTTL: make(map[string]int8),
|
||||
validHeaderCache: make(map[string]*ethpb.PartialDataColumnHeader),
|
||||
// GossipSub sends the messages to this channel. The buffer should be
|
||||
// big enough to avoid dropping messages. We don't want to block the gossipsub event loop for this.
|
||||
incomingReq: make(chan request, 128*16),
|
||||
logger: logger,
|
||||
|
||||
concurrentValidatorSemaphore: make(chan struct{}, maxConcurrentValidators),
|
||||
}
|
||||
}
|
||||
|
||||
// AppendPubSubOpts adds the necessary pubsub options to enable partial messages.
|
||||
func (p *PartialColumnBroadcaster) AppendPubSubOpts(opts []pubsub.Option) []pubsub.Option {
|
||||
slogger := slog.New(logrusadapter.Handler{Logger: p.logger})
|
||||
opts = append(opts,
|
||||
pubsub.WithPartialMessagesExtension(&partialmessages.PartialMessagesExtension{
|
||||
Logger: slogger,
|
||||
MergePartsMetadata: func(topic string, left, right partialmessages.PartsMetadata) partialmessages.PartsMetadata {
|
||||
merged, err := blocks.MergePartsMetadata(left, right)
|
||||
if err != nil {
|
||||
p.logger.Warn("Failed to merge bitfields", "err", err, "left", left, "right", right)
|
||||
return left
|
||||
}
|
||||
return merged
|
||||
},
|
||||
ValidateRPC: func(from peer.ID, rpc *pubsub_pb.PartialMessagesExtension) error {
|
||||
// TODO. Add some basic and fast sanity checks
|
||||
return nil
|
||||
},
|
||||
OnIncomingRPC: func(from peer.ID, rpc *pubsub_pb.PartialMessagesExtension) error {
|
||||
select {
|
||||
case p.incomingReq <- request{
|
||||
kind: requestKindHandleIncomingRPC,
|
||||
incomingRPC: rpcWithFrom{rpc, from},
|
||||
}:
|
||||
default:
|
||||
p.logger.Warn("Dropping incoming partial RPC", "rpc", rpc)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}),
|
||||
func(ps *pubsub.PubSub) error {
|
||||
p.ps = ps
|
||||
return nil
|
||||
},
|
||||
)
|
||||
return opts
|
||||
}
|
||||
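// Wiring sketch (not part of this change), mirroring the two-node integration
// test earlier in this diff: the broadcaster's options must be appended before
// the gossipsub router is constructed, and the event loop runs separately.
// ctx, h and baseOpts are illustrative names; imports for context and the
// libp2p host are assumed.
func exampleWireBroadcaster(ctx context.Context, h host.Host, baseOpts []pubsub.Option) error {
	bc := NewBroadcaster(logrus.New())
	opts := bc.AppendPubSubOpts(baseOpts)
	ps, err := pubsub.NewGossipSub(ctx, h, opts...)
	if err != nil {
		return err
	}
	go bc.Start()
	// bc.Stop() should be called on shutdown; ps is now partial-message aware.
	_ = ps
	return nil
}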
|
||||
// Start starts the event loop of the PartialColumnBroadcaster. Should be called
|
||||
// within a goroutine (go p.Start())
|
||||
func (p *PartialColumnBroadcaster) Start() {
|
||||
if p.stop != nil {
|
||||
return
|
||||
}
|
||||
p.stop = make(chan struct{})
|
||||
p.loop()
|
||||
}
|
||||
|
||||
func (p *PartialColumnBroadcaster) loop() {
|
||||
cleanup := time.NewTicker(time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot))
|
||||
defer cleanup.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-p.stop:
|
||||
return
|
||||
case <-cleanup.C:
|
||||
for groupID, ttl := range p.groupTTL {
|
||||
if ttl > 0 {
|
||||
p.groupTTL[groupID] = ttl - 1
|
||||
continue
|
||||
}
|
||||
|
||||
delete(p.groupTTL, groupID)
|
||||
delete(p.validHeaderCache, groupID)
|
||||
for topic, msgStore := range p.partialMsgStore {
|
||||
delete(msgStore, groupID)
|
||||
if len(msgStore) == 0 {
|
||||
delete(p.partialMsgStore, topic)
|
||||
}
|
||||
}
|
||||
}
|
||||
case req := <-p.incomingReq:
|
||||
switch req.kind {
|
||||
case requestKindPublish:
|
||||
req.response <- p.publish(req.publish.topic, req.publish.c)
|
||||
case requestKindSubscribe:
|
||||
req.response <- p.subscribe(req.sub.t, req.sub.headerValidator, req.sub.validator, req.sub.handler)
|
||||
case requestKindUnsubscribe:
|
||||
req.response <- p.unsubscribe(req.unsub.topic)
|
||||
case requestKindHandleIncomingRPC:
|
||||
err := p.handleIncomingRPC(req.incomingRPC)
|
||||
if err != nil {
|
||||
p.logger.Error("Failed to handle incoming partial RPC", "err", err)
|
||||
}
|
||||
case requestKindCellsValidated:
|
||||
err := p.handleCellsValidated(req.cellsValidated)
|
||||
if err != nil {
|
||||
p.logger.Error("Failed to handle cells validated", "err", err)
|
||||
}
|
||||
default:
|
||||
p.logger.Error("Unknown request kind", "kind", req.kind)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (p *PartialColumnBroadcaster) getDataColumn(topic string, group []byte) *blocks.PartialDataColumn {
|
||||
topicStore, ok := p.partialMsgStore[topic]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
msg, ok := topicStore[string(group)]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
return msg
|
||||
}
|
||||
|
||||
func (p *PartialColumnBroadcaster) handleIncomingRPC(rpcWithFrom rpcWithFrom) error {
|
||||
if p.ps == nil {
|
||||
return errors.New("pubsub not initialized")
|
||||
}
|
||||
|
||||
hasMessage := len(rpcWithFrom.PartialMessage) > 0
|
||||
|
||||
var message ethpb.PartialDataColumnSidecar
|
||||
if hasMessage {
|
||||
err := message.UnmarshalSSZ(rpcWithFrom.PartialMessage)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to unmarshal partial message data")
|
||||
}
|
||||
}
|
||||
|
||||
topicID := rpcWithFrom.GetTopicID()
|
||||
groupID := rpcWithFrom.GroupID
|
||||
ourDataColumn := p.getDataColumn(topicID, groupID)
|
||||
var shouldRepublish bool
|
||||
|
||||
if ourDataColumn == nil && hasMessage {
|
||||
var header *ethpb.PartialDataColumnHeader
|
||||
// Check cache first for this group
|
||||
if cachedHeader, ok := p.validHeaderCache[string(groupID)]; ok {
|
||||
header = cachedHeader
|
||||
} else {
|
||||
// We haven't seen this group before. Check if we have a valid header.
|
||||
if len(message.Header) == 0 {
|
||||
p.logger.Debug("No partial column found and no header in message, ignoring")
|
||||
return nil
|
||||
}
|
||||
|
||||
header = message.Header[0]
|
||||
headerValidator, ok := p.headerValidators[topicID]
|
||||
if !ok || headerValidator == nil {
|
||||
p.logger.Debug("No header validator registered for topic")
|
||||
return nil
|
||||
}
|
||||
|
||||
reject, err := headerValidator(header)
|
||||
if err != nil {
|
||||
p.logger.Debug("Header validation failed", "err", err, "reject", reject)
|
||||
if reject {
|
||||
// REJECT case: penalize the peer
|
||||
_ = p.ps.PeerFeedback(topicID, rpcWithFrom.from, pubsub.PeerFeedbackInvalidMessage)
|
||||
}
|
||||
// Both REJECT and IGNORE: don't process further
|
||||
return nil
|
||||
}
|
||||
// Cache the valid header
|
||||
p.validHeaderCache[string(groupID)] = header
|
||||
|
||||
// TODO: We now have the information we need to call GetBlobsV3, we should do that to see what we have locally.
|
||||
}
|
||||
|
||||
columnIndex, err := extractColumnIndexFromTopic(topicID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
newColumn, err := blocks.NewPartialDataColumn(
|
||||
header.SignedBlockHeader,
|
||||
columnIndex,
|
||||
header.KzgCommitments,
|
||||
header.KzgCommitmentsInclusionProof,
|
||||
)
|
||||
if err != nil {
|
||||
p.logger.WithError(err).WithFields(logrus.Fields{
|
||||
"topic": topicID,
|
||||
"columnIndex": columnIndex,
|
||||
"numCommitments": len(header.KzgCommitments),
|
||||
}).Error("Failed to create partial data column from header")
|
||||
return err
|
||||
}
|
||||
|
||||
// Save to store
|
||||
topicStore, ok := p.partialMsgStore[topicID]
|
||||
if !ok {
|
||||
topicStore = make(map[string]*blocks.PartialDataColumn)
|
||||
p.partialMsgStore[topicID] = topicStore
|
||||
}
|
||||
topicStore[string(newColumn.GroupID())] = &newColumn
|
||||
p.groupTTL[string(newColumn.GroupID())] = TTLInSlots
|
||||
|
||||
ourDataColumn = &newColumn
|
||||
shouldRepublish = true
|
||||
}
|
||||
|
||||
if ourDataColumn == nil {
|
||||
// We don't have a partial column for this group. This can happen if we received cells
|
||||
// without a header.
|
||||
return nil
|
||||
}
|
||||
|
||||
logger := p.logger.WithFields(logrus.Fields{
|
||||
"from": rpcWithFrom.from,
|
||||
"topic": topicID,
|
||||
"group": groupID,
|
||||
})
|
||||
|
||||
validator, validatorOK := p.validators[topicID]
|
||||
if len(rpcWithFrom.PartialMessage) > 0 && validatorOK {
|
||||
// TODO: is there any penalty we want to consider for giving us data we didn't request?
|
||||
// Note that we need to be careful around race conditions and eager data.
|
||||
// Also note that protobufs by design allow extra data that we don't parse.
|
||||
// Marco's thoughts. No, we don't need to do anything else here.
|
||||
cellIndices, cellsToVerify, err := ourDataColumn.CellsToVerifyFromPartialMessage(&message)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Track cells received via partial message
|
||||
if len(cellIndices) > 0 {
|
||||
columnIndexStr := strconv.FormatUint(ourDataColumn.Index, 10)
|
||||
partialMessageCellsReceivedTotal.WithLabelValues(columnIndexStr).Add(float64(len(cellIndices)))
|
||||
}
|
||||
if len(cellsToVerify) > 0 {
|
||||
p.concurrentValidatorSemaphore <- struct{}{}
|
||||
go func() {
|
||||
defer func() {
|
||||
<-p.concurrentValidatorSemaphore
|
||||
}()
|
||||
start := time.Now()
|
||||
err := validator(cellsToVerify)
|
||||
if err != nil {
|
||||
logger.Error("failed to validate cells", "err", err)
|
||||
_ = p.ps.PeerFeedback(topicID, rpcWithFrom.from, pubsub.PeerFeedbackInvalidMessage)
|
||||
return
|
||||
}
|
||||
_ = p.ps.PeerFeedback(topicID, rpcWithFrom.from, pubsub.PeerFeedbackUsefulMessage)
|
||||
p.incomingReq <- request{
|
||||
kind: requestKindCellsValidated,
|
||||
cellsValidated: &cellsValidated{
|
||||
validationTook: time.Since(start),
|
||||
topic: topicID,
|
||||
group: groupID,
|
||||
cells: cellsToVerify,
|
||||
cellIndices: cellIndices,
|
||||
},
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
peerHas := bitmap.Bitmap(rpcWithFrom.PartsMetadata)
|
||||
iHave := bitmap.Bitmap(ourDataColumn.PartsMetadata())
|
||||
if !shouldRepublish && len(peerHas) > 0 && !bytes.Equal(peerHas, iHave) {
|
||||
// Either we have something they don't or vice versa
|
||||
shouldRepublish = true
|
||||
logger.Debug("republishing due to parts metadata difference")
|
||||
}
|
||||
|
||||
if shouldRepublish {
|
||||
err := p.ps.PublishPartialMessage(topicID, ourDataColumn, partialmessages.PublishOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *PartialColumnBroadcaster) handleCellsValidated(cells *cellsValidated) error {
|
||||
ourDataColumn := p.getDataColumn(cells.topic, cells.group)
|
||||
if ourDataColumn == nil {
|
||||
return errors.New("data column not found for verified cells")
|
||||
}
|
||||
extended := ourDataColumn.ExtendFromVerfifiedCells(cells.cellIndices, cells.cells)
|
||||
p.logger.Debug("Extended partial message", "duration", cells.validationTook, "extended", extended)
|
||||
|
||||
columnIndexStr := strconv.FormatUint(ourDataColumn.Index, 10)
|
||||
if extended {
|
||||
// Track useful cells (cells that extended our data)
|
||||
partialMessageUsefulCellsTotal.WithLabelValues(columnIndexStr).Add(float64(len(cells.cells)))
|
||||
|
||||
// TODO: we could use the heuristic here that if this data was
|
||||
// useful to us, it's likely useful to our peers and we should
|
||||
// republish eagerly
|
||||
|
||||
if col, ok := ourDataColumn.Complete(p.logger); ok {
|
||||
p.logger.Info("Completed partial column", "topic", cells.topic, "group", cells.group)
|
||||
handler, handlerOK := p.handlers[cells.topic]
|
||||
|
||||
if handlerOK {
|
||||
go handler(cells.topic, col)
|
||||
}
|
||||
} else {
|
||||
p.logger.Info("Extended partial column", "topic", cells.topic, "group", cells.group)
|
||||
}
|
||||
|
||||
err := p.ps.PublishPartialMessage(cells.topic, ourDataColumn, partialmessages.PublishOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *PartialColumnBroadcaster) Stop() {
|
||||
if p.stop != nil {
|
||||
close(p.stop)
|
||||
p.stop = nil
|
||||
}
|
||||
}
|
||||
|
||||
// Publish publishes the partial column.
|
||||
func (p *PartialColumnBroadcaster) Publish(topic string, c blocks.PartialDataColumn) error {
|
||||
if p.ps == nil {
|
||||
return errors.New("pubsub not initialized")
|
||||
}
|
||||
respCh := make(chan error)
|
||||
p.incomingReq <- request{
|
||||
kind: requestKindPublish,
|
||||
response: respCh,
|
||||
publish: publish{
|
||||
topic: topic,
|
||||
c: c,
|
||||
},
|
||||
}
|
||||
return <-respCh
|
||||
}
|
||||
|
||||
func (p *PartialColumnBroadcaster) publish(topic string, c blocks.PartialDataColumn) error {
|
||||
topicStore, ok := p.partialMsgStore[topic]
|
||||
if !ok {
|
||||
topicStore = make(map[string]*blocks.PartialDataColumn)
|
||||
p.partialMsgStore[topic] = topicStore
|
||||
}
|
||||
topicStore[string(c.GroupID())] = &c
|
||||
p.groupTTL[string(c.GroupID())] = TTLInSlots
|
||||
|
||||
return p.ps.PublishPartialMessage(topic, &c, partialmessages.PublishOptions{})
|
||||
}
|
||||
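// Publish-path sketch (not part of this change), mirroring the two-node test
// earlier in this diff. sidecar, localCells, localProofs and topicStr are
// illustrative names assumed to exist in the caller.
func examplePublishPartialColumn(bc *PartialColumnBroadcaster, sidecar blocks.VerifiedRODataColumn, localCells, localProofs [][]byte, topicStr string) error {
	pc, err := blocks.NewPartialDataColumn(
		sidecar.DataColumnSidecar.SignedBlockHeader,
		sidecar.Index,
		sidecar.KzgCommitments,
		sidecar.KzgCommitmentsInclusionProof,
	)
	if err != nil {
		return err
	}
	// Add the cells we already hold locally before publishing.
	for i, cell := range localCells {
		pc.ExtendFromVerfifiedCell(uint64(i), cell, localProofs[i])
	}
	return bc.Publish(topicStr, pc)
}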
|
||||
type SubHandler func(topic string, col blocks.VerifiedRODataColumn)
|
||||
|
||||
func (p *PartialColumnBroadcaster) Subscribe(t *pubsub.Topic, headerValidator HeaderValidator, validator ColumnValidator, handler SubHandler) error {
|
||||
respCh := make(chan error)
|
||||
p.incomingReq <- request{
|
||||
kind: requestKindSubscribe,
|
||||
sub: subscribe{
|
||||
t: t,
|
||||
headerValidator: headerValidator,
|
||||
validator: validator,
|
||||
handler: handler,
|
||||
},
|
||||
response: respCh,
|
||||
}
|
||||
return <-respCh
|
||||
}
|
||||
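// Subscription sketch (not part of this change), again mirroring the two-node
// test: the pubsub topic must be joined with partial messages requested before
// it is handed to the broadcaster. ps, topicStr, headerValidator and
// cellValidator are illustrative names assumed to exist in the caller.
func exampleSubscribePartialColumns(bc *PartialColumnBroadcaster, ps *pubsub.PubSub, topicStr string, headerValidator HeaderValidator, cellValidator ColumnValidator) error {
	topic, err := ps.Join(topicStr, pubsub.RequestPartialMessages())
	if err != nil {
		return err
	}
	handler := func(_ string, col blocks.VerifiedRODataColumn) {
		// col is the fully reconstructed and verified column.
		_ = col
	}
	return bc.Subscribe(topic, headerValidator, cellValidator, handler)
}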
func (p *PartialColumnBroadcaster) subscribe(t *pubsub.Topic, headerValidator HeaderValidator, validator ColumnValidator, handler SubHandler) error {
|
||||
topic := t.String()
|
||||
if _, ok := p.topics[topic]; ok {
|
||||
return errors.New("already subscribed")
|
||||
}
|
||||
|
||||
p.topics[topic] = t
|
||||
p.headerValidators[topic] = headerValidator
|
||||
p.validators[topic] = validator
|
||||
p.handlers[topic] = handler
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *PartialColumnBroadcaster) Unsubscribe(topic string) error {
|
||||
respCh := make(chan error)
|
||||
p.incomingReq <- request{
|
||||
kind: requestKindUnsubscribe,
|
||||
unsub: unsubscribe{
|
||||
topic: topic,
|
||||
},
|
||||
response: respCh,
|
||||
}
|
||||
return <-respCh
|
||||
}
|
||||
func (p *PartialColumnBroadcaster) unsubscribe(topic string) error {
|
||||
t, ok := p.topics[topic]
|
||||
if !ok {
|
||||
return errors.New("topic not found")
|
||||
}
|
||||
delete(p.topics, topic)
|
||||
delete(p.partialMsgStore, topic)
|
||||
delete(p.headerValidators, topic)
|
||||
delete(p.validators, topic)
|
||||
delete(p.handlers, topic)
|
||||
|
||||
return t.Close()
|
||||
}
|
||||
@@ -58,7 +58,7 @@ func TestPeerExplicitAdd(t *testing.T) {
|
||||
|
||||
resAddress, err := p.Address(id)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, address, resAddress, "Unexpected address")
|
||||
assert.Equal(t, address.Equal(resAddress), true, "Unexpected address")
|
||||
|
||||
resDirection, err := p.Direction(id)
|
||||
require.NoError(t, err)
|
||||
@@ -72,7 +72,7 @@ func TestPeerExplicitAdd(t *testing.T) {
|
||||
|
||||
resAddress2, err := p.Address(id)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, address2, resAddress2, "Unexpected address")
|
||||
assert.Equal(t, address2.Equal(resAddress2), true, "Unexpected address")
|
||||
|
||||
resDirection2, err := p.Direction(id)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -170,7 +170,7 @@ func (s *Service) pubsubOptions() []pubsub.Option {
|
||||
pubsub.WithPeerScore(peerScoringParams(s.cfg.IPColocationWhitelist)),
|
||||
pubsub.WithPeerScoreInspect(s.peerInspector, time.Minute),
|
||||
pubsub.WithGossipSubParams(pubsubGossipParam()),
|
||||
pubsub.WithRawTracer(gossipTracer{host: s.host}),
|
||||
pubsub.WithRawTracer(&gossipTracer{host: s.host}),
|
||||
}
|
||||
|
||||
if len(s.cfg.StaticPeers) > 0 {
|
||||
@@ -181,6 +181,9 @@ func (s *Service) pubsubOptions() []pubsub.Option {
|
||||
}
|
||||
psOpts = append(psOpts, pubsub.WithDirectPeers(directPeersAddrInfos))
|
||||
}
|
||||
if s.partialColumnBroadcaster != nil {
|
||||
psOpts = s.partialColumnBroadcaster.AppendPubSubOpts(psOpts)
|
||||
}
|
||||
|
||||
return psOpts
|
||||
}
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
package p2p
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
@@ -8,7 +10,7 @@ import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
var _ = pubsub.RawTracer(gossipTracer{})
|
||||
var _ = pubsub.RawTracer(&gossipTracer{})
|
||||
|
||||
// Initializes the values for the pubsub rpc action.
|
||||
type action int
|
||||
@@ -23,85 +25,146 @@ const (
|
||||
// and broadcasted through gossipsub.
|
||||
type gossipTracer struct {
|
||||
host host.Host
|
||||
|
||||
mu sync.Mutex
|
||||
// map topic -> Set(peerID). Peer is in set if it supports partial messages.
|
||||
partialMessagePeers map[string]map[peer.ID]struct{}
|
||||
// map topic -> Set(peerID). Peer is in set if in the mesh.
|
||||
meshPeers map[string]map[peer.ID]struct{}
|
||||
}
|
||||
|
||||
// AddPeer .
|
||||
func (g gossipTracer) AddPeer(p peer.ID, proto protocol.ID) {
|
||||
func (g *gossipTracer) AddPeer(p peer.ID, proto protocol.ID) {
|
||||
// no-op
|
||||
}
|
||||
|
||||
// RemovePeer .
|
||||
func (g gossipTracer) RemovePeer(p peer.ID) {
|
||||
// no-op
|
||||
func (g *gossipTracer) RemovePeer(p peer.ID) {
|
||||
g.mu.Lock()
|
||||
defer g.mu.Unlock()
|
||||
for _, peers := range g.partialMessagePeers {
|
||||
delete(peers, p)
|
||||
}
|
||||
for topic, peers := range g.meshPeers {
|
||||
if _, ok := peers[p]; ok {
|
||||
delete(peers, p)
|
||||
g.updateMeshPeersMetric(topic)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Join .
|
||||
func (g gossipTracer) Join(topic string) {
|
||||
func (g *gossipTracer) Join(topic string) {
|
||||
pubsubTopicsActive.WithLabelValues(topic).Set(1)
|
||||
g.mu.Lock()
|
||||
defer g.mu.Unlock()
|
||||
if g.partialMessagePeers == nil {
|
||||
g.partialMessagePeers = make(map[string]map[peer.ID]struct{})
|
||||
}
|
||||
if g.partialMessagePeers[topic] == nil {
|
||||
g.partialMessagePeers[topic] = make(map[peer.ID]struct{})
|
||||
}
|
||||
|
||||
if g.meshPeers == nil {
|
||||
g.meshPeers = make(map[string]map[peer.ID]struct{})
|
||||
}
|
||||
if g.meshPeers[topic] == nil {
|
||||
g.meshPeers[topic] = make(map[peer.ID]struct{})
|
||||
}
|
||||
}
|
||||
|
||||
// Leave .
|
||||
func (g gossipTracer) Leave(topic string) {
|
||||
func (g *gossipTracer) Leave(topic string) {
|
||||
pubsubTopicsActive.WithLabelValues(topic).Set(0)
|
||||
g.mu.Lock()
|
||||
defer g.mu.Unlock()
|
||||
delete(g.partialMessagePeers, topic)
|
||||
delete(g.meshPeers, topic)
|
||||
}
|
||||
|
||||
// Graft .
|
||||
func (g gossipTracer) Graft(p peer.ID, topic string) {
|
||||
func (g *gossipTracer) Graft(p peer.ID, topic string) {
|
||||
pubsubTopicsGraft.WithLabelValues(topic).Inc()
|
||||
g.mu.Lock()
|
||||
defer g.mu.Unlock()
|
||||
if m, ok := g.meshPeers[topic]; ok {
|
||||
m[p] = struct{}{}
|
||||
}
|
||||
g.updateMeshPeersMetric(topic)
|
||||
}
|
||||
|
||||
// Prune .
|
||||
func (g gossipTracer) Prune(p peer.ID, topic string) {
|
||||
func (g *gossipTracer) Prune(p peer.ID, topic string) {
|
||||
pubsubTopicsPrune.WithLabelValues(topic).Inc()
|
||||
g.mu.Lock()
|
||||
defer g.mu.Unlock()
|
||||
if m, ok := g.meshPeers[topic]; ok {
|
||||
delete(m, p)
|
||||
}
|
||||
g.updateMeshPeersMetric(topic)
|
||||
}
|
||||
|
||||
// ValidateMessage .
|
||||
func (g gossipTracer) ValidateMessage(msg *pubsub.Message) {
|
||||
func (g *gossipTracer) ValidateMessage(msg *pubsub.Message) {
|
||||
pubsubMessageValidate.WithLabelValues(*msg.Topic).Inc()
|
||||
}
|
||||
|
||||
// DeliverMessage .
|
||||
func (g gossipTracer) DeliverMessage(msg *pubsub.Message) {
|
||||
func (g *gossipTracer) DeliverMessage(msg *pubsub.Message) {
|
||||
pubsubMessageDeliver.WithLabelValues(*msg.Topic).Inc()
|
||||
}
|
||||
|
||||
// RejectMessage .
|
||||
func (g gossipTracer) RejectMessage(msg *pubsub.Message, reason string) {
|
||||
func (g *gossipTracer) RejectMessage(msg *pubsub.Message, reason string) {
|
||||
pubsubMessageReject.WithLabelValues(*msg.Topic, reason).Inc()
|
||||
}
|
||||
|
||||
// DuplicateMessage .
|
||||
func (g gossipTracer) DuplicateMessage(msg *pubsub.Message) {
|
||||
func (g *gossipTracer) DuplicateMessage(msg *pubsub.Message) {
|
||||
pubsubMessageDuplicate.WithLabelValues(*msg.Topic).Inc()
|
||||
}
|
||||
|
||||
// UndeliverableMessage .
|
||||
func (g gossipTracer) UndeliverableMessage(msg *pubsub.Message) {
|
||||
func (g *gossipTracer) UndeliverableMessage(msg *pubsub.Message) {
|
||||
pubsubMessageUndeliverable.WithLabelValues(*msg.Topic).Inc()
|
||||
}
|
||||
|
||||
// ThrottlePeer .
|
||||
func (g gossipTracer) ThrottlePeer(p peer.ID) {
|
||||
func (g *gossipTracer) ThrottlePeer(p peer.ID) {
|
||||
agent := agentFromPid(p, g.host.Peerstore())
|
||||
pubsubPeerThrottle.WithLabelValues(agent).Inc()
|
||||
}
|
||||
|
||||
// RecvRPC .
|
||||
func (g gossipTracer) RecvRPC(rpc *pubsub.RPC) {
|
||||
g.setMetricFromRPC(recv, pubsubRPCSubRecv, pubsubRPCPubRecv, pubsubRPCRecv, rpc)
|
||||
func (g *gossipTracer) RecvRPC(rpc *pubsub.RPC, from peer.ID) {
|
||||
g.setMetricFromRPC(recv, pubsubRPCSubRecv, pubsubRPCPubRecv, pubsubRPCPubRecvSize, pubsubRPCRecv, rpc)
|
||||
|
||||
g.mu.Lock()
|
||||
defer g.mu.Unlock()
|
||||
for _, sub := range rpc.Subscriptions {
|
||||
m, ok := g.partialMessagePeers[sub.GetTopicid()]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
if sub.GetSubscribe() && sub.GetRequestsPartial() {
|
||||
m[from] = struct{}{}
|
||||
} else {
|
||||
delete(m, from)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SendRPC .
|
||||
func (g gossipTracer) SendRPC(rpc *pubsub.RPC, p peer.ID) {
|
||||
g.setMetricFromRPC(send, pubsubRPCSubSent, pubsubRPCPubSent, pubsubRPCSent, rpc)
|
||||
func (g *gossipTracer) SendRPC(rpc *pubsub.RPC, p peer.ID) {
|
||||
g.setMetricFromRPC(send, pubsubRPCSubSent, pubsubRPCPubSent, pubsubRPCPubSentSize, pubsubRPCSent, rpc)
|
||||
}
|
||||
|
||||
// DropRPC .
|
||||
func (g gossipTracer) DropRPC(rpc *pubsub.RPC, p peer.ID) {
|
||||
g.setMetricFromRPC(drop, pubsubRPCSubDrop, pubsubRPCPubDrop, pubsubRPCDrop, rpc)
|
||||
func (g *gossipTracer) DropRPC(rpc *pubsub.RPC, p peer.ID) {
|
||||
g.setMetricFromRPC(drop, pubsubRPCSubDrop, pubsubRPCPubDrop, pubsubRPCPubDropSize, pubsubRPCDrop, rpc)
|
||||
}
|
||||
|
||||
func (g gossipTracer) setMetricFromRPC(act action, subCtr prometheus.Counter, pubCtr, ctrlCtr *prometheus.CounterVec, rpc *pubsub.RPC) {
|
||||
func (g *gossipTracer) setMetricFromRPC(act action, subCtr prometheus.Counter, pubCtr, pubSizeCtr, ctrlCtr *prometheus.CounterVec, rpc *pubsub.RPC) {
|
||||
subCtr.Add(float64(len(rpc.Subscriptions)))
|
||||
if rpc.Control != nil {
|
||||
ctrlCtr.WithLabelValues("graft").Add(float64(len(rpc.Control.Graft)))
|
||||
@@ -110,12 +173,41 @@ func (g gossipTracer) setMetricFromRPC(act action, subCtr prometheus.Counter, pu
|
||||
ctrlCtr.WithLabelValues("iwant").Add(float64(len(rpc.Control.Iwant)))
|
||||
ctrlCtr.WithLabelValues("idontwant").Add(float64(len(rpc.Control.Idontwant)))
|
||||
}
|
||||
// For incoming messages from pubsub, we do not record metrics for them as these values
|
||||
// could be junk.
|
||||
if act == recv {
|
||||
return
|
||||
}
|
||||
for _, msg := range rpc.Publish {
|
||||
// For incoming messages from pubsub, we do not record metrics for them as these values
|
||||
// could be junk.
|
||||
if act == recv {
|
||||
continue
|
||||
}
|
||||
pubCtr.WithLabelValues(*msg.Topic).Inc()
|
||||
pubCtr.WithLabelValues(msg.GetTopic()).Inc()
|
||||
pubSizeCtr.WithLabelValues(msg.GetTopic(), "false").Add(float64(msg.Size()))
|
||||
}
|
||||
if rpc.Partial != nil {
|
||||
pubCtr.WithLabelValues(rpc.Partial.GetTopicID()).Inc()
|
||||
pubSizeCtr.WithLabelValues(rpc.Partial.GetTopicID(), "true").Add(float64(rpc.Partial.Size()))
|
||||
}
|
||||
}
|
||||
|
||||
// updateMeshPeersMetric requires the caller to hold the state mutex
|
||||
func (g *gossipTracer) updateMeshPeersMetric(topic string) {
|
||||
meshPeers, ok := g.meshPeers[topic]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
partialPeers, ok := g.partialMessagePeers[topic]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
var supportsPartial, doesNotSupportPartial float64
|
||||
for p := range meshPeers {
|
||||
if _, ok := partialPeers[p]; ok {
|
||||
supportsPartial++
|
||||
} else {
|
||||
doesNotSupportPartial++
|
||||
}
|
||||
}
|
||||
|
||||
pubsubMeshPeers.WithLabelValues(topic, "true").Set(supportsPartial)
|
||||
pubsubMeshPeers.WithLabelValues(topic, "false").Set(doesNotSupportPartial)
|
||||
}
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/async"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/encoder"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/partialdatacolumnbroadcaster"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers/scorers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/types"
|
||||
@@ -77,6 +78,7 @@ type Service struct {
|
||||
privKey *ecdsa.PrivateKey
|
||||
metaData metadata.Metadata
|
||||
pubsub *pubsub.PubSub
|
||||
partialColumnBroadcaster *partialdatacolumnbroadcaster.PartialColumnBroadcaster
|
||||
joinedTopics map[string]*pubsub.Topic
|
||||
joinedTopicsLock sync.RWMutex
|
||||
subnetsLock map[uint64]*sync.RWMutex
|
||||
@@ -147,6 +149,10 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {
|
||||
custodyInfoSet: make(chan struct{}),
|
||||
}
|
||||
|
||||
if cfg.PartialDataColumns {
|
||||
s.partialColumnBroadcaster = partialdatacolumnbroadcaster.NewBroadcaster(log.Logger)
|
||||
}
|
||||
|
||||
ipAddr := prysmnetwork.IPAddr()
|
||||
|
||||
opts, err := s.buildOptions(ipAddr, s.privKey)
|
||||
@@ -305,6 +311,10 @@ func (s *Service) Start() {
|
||||
logExternalDNSAddr(s.host.ID(), p2pHostDNS, p2pTCPPort)
|
||||
}
|
||||
go s.forkWatcher()
|
||||
|
||||
if s.partialColumnBroadcaster != nil {
|
||||
go s.partialColumnBroadcaster.Start()
|
||||
}
|
||||
}
|
||||
|
||||
// Stop the p2p service and terminate all peer connections.
|
||||
@@ -314,6 +324,10 @@ func (s *Service) Stop() error {
|
||||
if s.dv5Listener != nil {
|
||||
s.dv5Listener.Close()
|
||||
}
|
||||
|
||||
if s.partialColumnBroadcaster != nil {
|
||||
s.partialColumnBroadcaster.Stop()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -350,6 +364,10 @@ func (s *Service) PubSub() *pubsub.PubSub {
|
||||
return s.pubsub
|
||||
}
|
||||
|
||||
func (s *Service) PartialColumnBroadcaster() *partialdatacolumnbroadcaster.PartialColumnBroadcaster {
|
||||
return s.partialColumnBroadcaster
|
||||
}
|
||||
|
||||
// Host returns the currently running libp2p
|
||||
// host of the service.
|
||||
func (s *Service) Host() host.Host {
|
||||
|
||||
@@ -21,6 +21,7 @@ go_library(
|
||||
deps = [
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/p2p/encoder:go_default_library",
|
||||
"//beacon-chain/p2p/partialdatacolumnbroadcaster:go_default_library",
|
||||
"//beacon-chain/p2p/peers:go_default_library",
|
||||
"//beacon-chain/p2p/peers/scorers:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/encoder"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/partialdatacolumnbroadcaster"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
@@ -108,6 +109,10 @@ func (*FakeP2P) PubSub() *pubsub.PubSub {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (*FakeP2P) PartialColumnBroadcaster() *partialdatacolumnbroadcaster.PartialColumnBroadcaster {
|
||||
return nil
|
||||
}
|
||||
|
||||
// MetadataSeq -- fake.
|
||||
func (*FakeP2P) MetadataSeq() uint64 {
|
||||
return 0
|
||||
@@ -169,7 +174,7 @@ func (*FakeP2P) BroadcastLightClientFinalityUpdate(_ context.Context, _ interfac
|
||||
}
|
||||
|
||||
// BroadcastDataColumnSidecar -- fake.
|
||||
func (*FakeP2P) BroadcastDataColumnSidecars(_ context.Context, _ []blocks.VerifiedRODataColumn) error {
|
||||
func (*FakeP2P) BroadcastDataColumnSidecars(_ context.Context, _ []blocks.VerifiedRODataColumn, _ []blocks.PartialDataColumn) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -63,7 +63,7 @@ func (m *MockBroadcaster) BroadcastLightClientFinalityUpdate(_ context.Context,
|
||||
}
|
||||
|
||||
// BroadcastDataColumnSidecar broadcasts a data column for mock.
|
||||
func (m *MockBroadcaster) BroadcastDataColumnSidecars(context.Context, []blocks.VerifiedRODataColumn) error {
|
||||
func (m *MockBroadcaster) BroadcastDataColumnSidecars(context.Context, []blocks.VerifiedRODataColumn, []blocks.PartialDataColumn) error {
|
||||
m.BroadcastCalled.Store(true)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/encoder"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/partialdatacolumnbroadcaster"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers/scorers"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
@@ -242,7 +243,7 @@ func (p *TestP2P) BroadcastLightClientFinalityUpdate(_ context.Context, _ interf
|
||||
}
|
||||
|
||||
// BroadcastDataColumnSidecar broadcasts a data column for mock.
|
||||
func (p *TestP2P) BroadcastDataColumnSidecars(context.Context, []blocks.VerifiedRODataColumn) error {
|
||||
func (p *TestP2P) BroadcastDataColumnSidecars(context.Context, []blocks.VerifiedRODataColumn, []blocks.PartialDataColumn) error {
|
||||
p.BroadcastCalled.Store(true)
|
||||
return nil
|
||||
}
|
||||
@@ -308,6 +309,10 @@ func (p *TestP2P) PubSub() *pubsub.PubSub {
|
||||
return p.pubsub
|
||||
}
|
||||
|
||||
func (p *TestP2P) PartialColumnBroadcaster() *partialdatacolumnbroadcaster.PartialColumnBroadcaster {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Disconnect from a peer.
|
||||
func (p *TestP2P) Disconnect(pid peer.ID) error {
|
||||
return p.BHost.Network().ClosePeer(pid)
|
||||
|
||||
@@ -5,7 +5,6 @@ go_library(
|
||||
srcs = [
|
||||
"handlers.go",
|
||||
"handlers_block.go",
|
||||
"handlers_gloas.go",
|
||||
"log.go",
|
||||
"server.go",
|
||||
],
|
||||
|
||||
@@ -1,276 +0,0 @@
|
||||
package validator
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/api/server/structs"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/eth/shared"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
|
||||
"github.com/OffchainLabs/prysm/v7/network/httputil"
|
||||
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
"github.com/pkg/errors"
|
||||
"google.golang.org/protobuf/types/known/wrapperspb"
|
||||
)
|
||||
|
||||
// ProduceBlockV4 requests a beacon node to produce a valid GLOAS block.
|
||||
// This is the GLOAS-specific block production endpoint that returns a block
|
||||
// containing a signed execution payload bid instead of the full payload.
|
||||
//
|
||||
// The execution payload envelope is cached by the beacon node and can be
|
||||
// retrieved via GetExecutionPayloadEnvelope.
|
||||
//
|
||||
// Endpoint: GET /eth/v4/validator/blocks/{slot}
|
||||
func (s *Server) ProduceBlockV4(w http.ResponseWriter, r *http.Request) {
|
||||
_, span := trace.StartSpan(r.Context(), "validator.ProduceBlockV4")
|
||||
defer span.End()
|
||||
|
||||
if shared.IsSyncing(r.Context(), w, s.SyncChecker, s.HeadFetcher, s.TimeFetcher, s.OptimisticModeFetcher) {
|
||||
return
|
||||
}
|
||||
|
||||
// Parse path parameters
|
||||
segments := strings.Split(r.URL.Path, "/")
|
||||
rawSlot := segments[len(segments)-1]
|
||||
|
||||
slot, valid := shared.ValidateUint(w, "slot", rawSlot)
|
||||
if !valid {
|
||||
return
|
||||
}
|
||||
|
||||
// Parse query parameters
|
||||
rawRandaoReveal := r.URL.Query().Get("randao_reveal")
|
||||
rawGraffiti := r.URL.Query().Get("graffiti")
|
||||
rawSkipRandaoVerification := r.URL.Query().Get("skip_randao_verification")
|
||||
|
||||
var bbFactor *wrapperspb.UInt64Value
|
||||
rawBbFactor, bbValue, ok := shared.UintFromQuery(w, r, "builder_boost_factor", false)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
if rawBbFactor != "" {
|
||||
bbFactor = &wrapperspb.UInt64Value{Value: bbValue}
|
||||
}
|
||||
|
||||
// Parse randao reveal
|
||||
var randaoReveal []byte
|
||||
if rawSkipRandaoVerification == "true" {
|
||||
// TODO: Use infinite signature constant
|
||||
randaoReveal = make([]byte, 96)
|
||||
} else {
|
||||
// TODO: Decode randao reveal from hex
|
||||
_ = rawRandaoReveal
|
||||
}
|
||||
|
||||
// Parse graffiti
|
||||
var graffiti []byte
|
||||
if rawGraffiti != "" {
|
||||
// TODO: Decode graffiti from hex
|
||||
}
|
||||
|
||||
// TODO: Implement GLOAS-specific block production
|
||||
//
|
||||
// This handler should:
|
||||
// 1. Verify the slot is in the GLOAS fork
|
||||
// 2. Call v1alpha1 server's getGloasBeaconBlock
|
||||
// 3. Format response with GLOAS-specific headers
|
||||
// 4. Return the block (the envelope is cached server-side)
|
||||
|
||||
_ = bbFactor
|
||||
_ = graffiti
|
||||
_ = randaoReveal
|
||||
_ = slot
|
||||
|
||||
httputil.HandleError(w, "ProduceBlockV4 not yet implemented", http.StatusNotImplemented)
|
||||
}
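The hex decoding left open by the TODOs in ProduceBlockV4 above is mechanical; a minimal sketch, assuming only the standard library (encoding/hex would need to be added to the imports) and using the hypothetical helper name decodeHexParam:

// decodeHexParam is a hypothetical helper: it strips an optional 0x prefix,
// hex-decodes the value, and enforces the expected byte length (96 for a BLS
// randao reveal, 32 for graffiti).
func decodeHexParam(raw string, wantLen int) ([]byte, error) {
	b, err := hex.DecodeString(strings.TrimPrefix(raw, "0x"))
	if err != nil {
		return nil, fmt.Errorf("invalid hex value: %w", err)
	}
	if len(b) != wantLen {
		return nil, fmt.Errorf("expected %d bytes, got %d", wantLen, len(b))
	}
	return b, nil
}

With that helper, the randao-reveal branch could call decodeHexParam(rawRandaoReveal, 96) and the graffiti branch decodeHexParam(rawGraffiti, 32), returning a 400 via httputil.HandleError on error.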
|
||||
|
||||
// handleProduceGloasV4 handles the response formatting for GLOAS blocks.
|
||||
func handleProduceGloasV4(w http.ResponseWriter, isSSZ bool, block *eth.BeaconBlockGloas, payloadValue, consensusBlockValue string) {
|
||||
// TODO: Implement GLOAS response handling
|
||||
//
|
||||
// Similar to handleProduceFuluV3 but for GLOAS blocks.
|
||||
// The response should NOT include the execution payload envelope,
|
||||
// as that is retrieved separately.
|
||||
|
||||
if isSSZ {
|
||||
// TODO: SSZ serialize the GLOAS block
|
||||
httputil.HandleError(w, "SSZ response not yet implemented for GLOAS", http.StatusNotImplemented)
|
||||
return
|
||||
}
|
||||
|
||||
// JSON response
|
||||
// TODO: Convert GLOAS block to JSON struct
|
||||
resp := &structs.ProduceBlockV3Response{
|
||||
Version: version.String(version.Gloas),
|
||||
ExecutionPayloadBlinded: false, // GLOAS blocks don't have the blinded/unblinded split in the same way
|
||||
ExecutionPayloadValue: payloadValue,
|
||||
ConsensusBlockValue: consensusBlockValue,
|
||||
Data: nil, // TODO: Marshal block to JSON
|
||||
}
|
||||
|
||||
httputil.WriteJson(w, resp)
|
||||
}
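For the SSZ branch of handleProduceGloasV4, the usual pattern in the neighboring V3 handlers is to marshal the block and write the raw bytes with the version header set. A sketch only, assuming the generated MarshalSSZ method exists on eth.BeaconBlockGloas and that the api package (for api.VersionHeader) is imported:

// Sketch of the SSZ response path; MarshalSSZ availability is an assumption.
sszData, err := block.MarshalSSZ()
if err != nil {
	httputil.HandleError(w, "Could not marshal block to SSZ: "+err.Error(), http.StatusInternalServerError)
	return
}
w.Header().Set(api.VersionHeader, version.String(version.Gloas))
w.Header().Set("Content-Type", "application/octet-stream")
if _, err := w.Write(sszData); err != nil {
	httputil.HandleError(w, err.Error(), http.StatusInternalServerError)
}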
|
||||
|
||||
// GetExecutionPayloadEnvelope retrieves a cached execution payload envelope.
|
||||
// Validators call this after receiving a GLOAS block to get the envelope
|
||||
// they need to sign and broadcast.
|
||||
//
|
||||
// Endpoint: GET /eth/v1/validator/execution_payload_envelope/{slot}/{builder_index}
|
||||
func (s *Server) GetExecutionPayloadEnvelope(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, span := trace.StartSpan(r.Context(), "validator.ExecutionPayloadEnvelope")
|
||||
defer span.End()
|
||||
|
||||
// Parse path parameters
|
||||
segments := strings.Split(r.URL.Path, "/")
|
||||
if len(segments) < 2 {
|
||||
httputil.HandleError(w, "missing slot and builder_index in path", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
rawSlot := segments[len(segments)-2]
|
||||
rawBuilderIndex := segments[len(segments)-1]
|
||||
|
||||
slot, valid := shared.ValidateUint(w, "slot", rawSlot)
|
||||
if !valid {
|
||||
return
|
||||
}
|
||||
|
||||
builderIndex, err := strconv.ParseUint(rawBuilderIndex, 10, 64)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, errors.Wrap(err, "invalid builder_index").Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
// Build gRPC request
|
||||
req := ð.ExecutionPayloadEnvelopeRequest{
|
||||
Slot: primitives.Slot(slot),
|
||||
BuilderIndex: primitives.BuilderIndex(builderIndex),
|
||||
}
|
||||
|
||||
// TODO: The V1Alpha1Server needs to implement the ExecutionPayloadEnvelope method
|
||||
// from the BeaconNodeValidatorServer interface. Currently it's defined but the
|
||||
// interface may need updating to include this method.
|
||||
//
|
||||
// Once implemented, uncomment:
|
||||
// resp, err := s.V1Alpha1Server.ExecutionPayloadEnvelope(ctx, req)
|
||||
// if err != nil {
|
||||
// // Map gRPC error codes to HTTP status codes
|
||||
// if status.Code(err) == codes.NotFound {
|
||||
// httputil.HandleError(w, err.Error(), http.StatusNotFound)
|
||||
// } else {
|
||||
// httputil.HandleError(w, err.Error(), http.StatusInternalServerError)
|
||||
// }
|
||||
// return
|
||||
// }
|
||||
//
|
||||
// // Format and return response
|
||||
// // - Support both JSON and SSZ based on Accept header
|
||||
// // - Set version header
|
||||
// w.Header().Set(api.VersionHeader, version.String(version.Gloas))
|
||||
// httputil.WriteJson(w, &structs.GetExecutionPayloadEnvelopeResponse{
|
||||
// Version: version.String(version.Gloas),
|
||||
// Data: envelopeProtoToJSON(resp.Envelope),
|
||||
// })
|
||||
|
||||
_ = ctx
|
||||
_ = req
|
||||
|
||||
httputil.HandleError(w, "ExecutionPayloadEnvelope not yet implemented", http.StatusNotImplemented)
|
||||
}
|
||||
|
||||
// PublishExecutionPayloadEnvelope broadcasts a signed execution payload envelope.
|
||||
// Validators call this after signing the envelope to broadcast it to the network.
|
||||
//
|
||||
// Endpoint: POST /eth/v1/beacon/execution_payload_envelope
|
||||
func (s *Server) PublishExecutionPayloadEnvelope(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, span := trace.StartSpan(r.Context(), "validator.PublishExecutionPayloadEnvelope")
|
||||
defer span.End()
|
||||
|
||||
// Parse request body
|
||||
var signedEnvelope structs.SignedExecutionPayloadEnvelope
|
||||
if err := json.NewDecoder(r.Body).Decode(&signedEnvelope); err != nil {
|
||||
httputil.HandleError(w, errors.Wrap(err, "failed to decode request body").Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
// TODO: Convert JSON struct to proto
|
||||
// protoEnvelope, err := signedEnvelope.ToProto()
|
||||
// if err != nil {
|
||||
// httputil.HandleError(w, err.Error(), http.StatusBadRequest)
|
||||
// return
|
||||
// }
|
||||
|
||||
// TODO: Call gRPC server
|
||||
// _, err = s.V1Alpha1Server.PublishExecutionPayloadEnvelope(ctx, protoEnvelope)
|
||||
// if err != nil {
|
||||
// // Handle different error types (validation errors vs internal errors)
|
||||
// httputil.HandleError(w, err.Error(), http.StatusBadRequest)
|
||||
// return
|
||||
// }
|
||||
|
||||
_ = ctx
|
||||
_ = signedEnvelope
|
||||
|
||||
httputil.HandleError(w, "PublishExecutionPayloadEnvelope not yet implemented", http.StatusNotImplemented)
|
||||
}
|
||||
|
||||
// ExecutionPayloadEnvelopeJSON represents the JSON structure for an execution payload envelope.
|
||||
// This is used for REST API serialization.
|
||||
type ExecutionPayloadEnvelopeJSON struct {
|
||||
Payload json.RawMessage `json:"payload"`
|
||||
ExecutionRequests json.RawMessage `json:"execution_requests"`
|
||||
BuilderIndex string `json:"builder_index"`
|
||||
BeaconBlockRoot string `json:"beacon_block_root"`
|
||||
Slot string `json:"slot"`
|
||||
BlobKzgCommitments []string `json:"blob_kzg_commitments"`
|
||||
StateRoot string `json:"state_root"`
|
||||
}
|
||||
|
||||
// SignedExecutionPayloadEnvelopeJSON represents the JSON structure for a signed envelope.
|
||||
type SignedExecutionPayloadEnvelopeJSON struct {
|
||||
Message *ExecutionPayloadEnvelopeJSON `json:"message"`
|
||||
Signature string `json:"signature"`
|
||||
}
|
||||
|
||||
// ExecutionPayloadEnvelopeResponseJSON is the response wrapper for envelope retrieval.
|
||||
type ExecutionPayloadEnvelopeResponseJSON struct {
|
||||
Version string `json:"version"`
|
||||
Data *ExecutionPayloadEnvelopeJSON `json:"data"`
|
||||
}
|
||||
|
||||
// envelopeProtoToJSON converts a proto envelope to JSON representation.
|
||||
func envelopeProtoToJSON(envelope *eth.ExecutionPayloadEnvelope) (*ExecutionPayloadEnvelopeJSON, error) {
|
||||
// TODO: Implement conversion
|
||||
//
|
||||
// Convert each field:
|
||||
// - payload: Marshal ExecutionPayloadDeneb to JSON
|
||||
// - execution_requests: Marshal to JSON
|
||||
// - builder_index: Convert uint64 to string
|
||||
// - beacon_block_root: Hex encode
|
||||
// - slot: Convert uint64 to string
|
||||
// - blob_kzg_commitments: Hex encode each
|
||||
// - state_root: Hex encode
|
||||
|
||||
return nil, fmt.Errorf("envelopeProtoToJSON not yet implemented")
|
||||
}
|
||||
|
||||
// envelopeJSONToProto converts a JSON envelope to proto representation.
|
||||
func envelopeJSONToProto(envelope *ExecutionPayloadEnvelopeJSON) (*eth.ExecutionPayloadEnvelope, error) {
|
||||
// TODO: Implement conversion
|
||||
//
|
||||
// Parse each field:
|
||||
// - payload: Unmarshal from JSON
|
||||
// - execution_requests: Unmarshal from JSON
|
||||
// - builder_index: Parse uint64 from string
|
||||
// - beacon_block_root: Hex decode
|
||||
// - slot: Parse uint64 from string
|
||||
// - blob_kzg_commitments: Hex decode each
|
||||
// - state_root: Hex decode
|
||||
|
||||
return nil, fmt.Errorf("envelopeJSONToProto not yet implemented")
|
||||
}
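The field conversions listed in the two TODO blocks above are mostly mechanical. A minimal sketch for the scalar and root fields only, assuming encoding/hex is added to the imports; the function name envelopeScalarsToJSON is hypothetical, and the payload and execution_requests fields would still need their own marshalling:

// envelopeScalarsToJSON converts the fixed-size and scalar fields of an
// envelope to their JSON string representations.
func envelopeScalarsToJSON(envelope *eth.ExecutionPayloadEnvelope) *ExecutionPayloadEnvelopeJSON {
	commitments := make([]string, 0, len(envelope.BlobKzgCommitments))
	for _, c := range envelope.BlobKzgCommitments {
		commitments = append(commitments, "0x"+hex.EncodeToString(c))
	}
	return &ExecutionPayloadEnvelopeJSON{
		BuilderIndex:       strconv.FormatUint(uint64(envelope.BuilderIndex), 10),
		Slot:               strconv.FormatUint(uint64(envelope.Slot), 10),
		BeaconBlockRoot:    "0x" + hex.EncodeToString(envelope.BeaconBlockRoot),
		StateRoot:          "0x" + hex.EncodeToString(envelope.StateRoot),
		BlobKzgCommitments: commitments,
	}
}

The reverse direction mirrors this with strconv.ParseUint and hex.DecodeString, validating lengths before assigning to the proto fields.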
|
||||
@@ -19,24 +19,23 @@ import (
|
||||
// Server defines a server implementation of the gRPC Validator service,
|
||||
// providing RPC endpoints intended for validator clients.
|
||||
type Server struct {
|
||||
HeadFetcher blockchain.HeadFetcher
|
||||
TimeFetcher blockchain.TimeFetcher
|
||||
SyncChecker sync.Checker
|
||||
AttestationCache *cache.AttestationCache
|
||||
AttestationsPool attestations.Pool
|
||||
PeerManager p2p.PeerManager
|
||||
Broadcaster p2p.Broadcaster
|
||||
Stater lookup.Stater
|
||||
OptimisticModeFetcher blockchain.OptimisticModeFetcher
|
||||
SyncCommitteePool synccommittee.Pool
|
||||
V1Alpha1Server eth.BeaconNodeValidatorServer
|
||||
ChainInfoFetcher blockchain.ChainInfoFetcher
|
||||
BeaconDB db.HeadAccessDatabase
|
||||
BlockBuilder builder.BlockBuilder
|
||||
OperationNotifier operation.Notifier
|
||||
CoreService *core.Service
|
||||
BlockRewardFetcher rewards.BlockRewardsFetcher
|
||||
TrackedValidatorsCache *cache.TrackedValidatorsCache
|
||||
PayloadIDCache *cache.PayloadIDCache
|
||||
ExecutionPayloadEnvelopeCache *cache.ExecutionPayloadEnvelopeCache // GLOAS: Cache for execution payload envelopes
|
||||
HeadFetcher blockchain.HeadFetcher
|
||||
TimeFetcher blockchain.TimeFetcher
|
||||
SyncChecker sync.Checker
|
||||
AttestationCache *cache.AttestationCache
|
||||
AttestationsPool attestations.Pool
|
||||
PeerManager p2p.PeerManager
|
||||
Broadcaster p2p.Broadcaster
|
||||
Stater lookup.Stater
|
||||
OptimisticModeFetcher blockchain.OptimisticModeFetcher
|
||||
SyncCommitteePool synccommittee.Pool
|
||||
V1Alpha1Server eth.BeaconNodeValidatorServer
|
||||
ChainInfoFetcher blockchain.ChainInfoFetcher
|
||||
BeaconDB db.HeadAccessDatabase
|
||||
BlockBuilder builder.BlockBuilder
|
||||
OperationNotifier operation.Notifier
|
||||
CoreService *core.Service
|
||||
BlockRewardFetcher rewards.BlockRewardsFetcher
|
||||
TrackedValidatorsCache *cache.TrackedValidatorsCache
|
||||
PayloadIDCache *cache.PayloadIDCache
|
||||
}
|
||||
|
||||
@@ -26,7 +26,6 @@ go_library(
|
||||
"proposer_eth1data.go",
|
||||
"proposer_execution_payload.go",
|
||||
"proposer_exits.go",
|
||||
"proposer_gloas.go",
|
||||
"proposer_slashings.go",
|
||||
"proposer_sync_aggregate.go",
|
||||
"server.go",
|
||||
|
||||
@@ -57,8 +57,6 @@ func (vs *Server) constructGenericBeaconBlock(
|
||||
return nil, fmt.Errorf("expected *BlobsBundleV2, got %T", blobsBundler)
|
||||
}
|
||||
return vs.constructFuluBlock(blockProto, isBlinded, bidStr, bundle), nil
|
||||
case version.Gloas:
|
||||
return vs.constructGloasBlock(blockProto), nil
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown block version: %d", sBlk.Version())
|
||||
}
|
||||
@@ -111,13 +109,6 @@ func (vs *Server) constructElectraBlock(blockProto proto.Message, isBlinded bool
|
||||
return ðpb.GenericBeaconBlock{Block: ðpb.GenericBeaconBlock_Electra{Electra: electraContents}, IsBlinded: false, PayloadValue: payloadValue}
|
||||
}
|
||||
|
||||
func (vs *Server) constructGloasBlock(blockProto proto.Message) *ethpb.GenericBeaconBlock {
|
||||
// GLOAS blocks do not carry a separate payload value — the bid is part of the block body.
|
||||
return ðpb.GenericBeaconBlock{
|
||||
Block: ðpb.GenericBeaconBlock_Gloas{Gloas: blockProto.(*ethpb.BeaconBlockGloas)},
|
||||
}
|
||||
}
|
||||
|
||||
func (vs *Server) constructFuluBlock(blockProto proto.Message, isBlinded bool, payloadValue string, bundle *enginev1.BlobsBundleV2) *ethpb.GenericBeaconBlock {
|
||||
if isBlinded {
|
||||
return ðpb.GenericBeaconBlock{Block: ðpb.GenericBeaconBlock_BlindedFulu{BlindedFulu: blockProto.(*ethpb.BlindedBeaconBlockFulu)}, IsBlinded: true, PayloadValue: payloadValue}
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/go-bitfield"
|
||||
builderapi "github.com/OffchainLabs/prysm/v7/api/client/builder"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/builder"
|
||||
@@ -231,13 +232,6 @@ func (vs *Server) BuildBlockParallel(ctx context.Context, sBlk interfaces.Signed
|
||||
|
||||
// Set bls to execution change. New in Capella.
|
||||
vs.setBlsToExecData(sBlk, head)
|
||||
|
||||
// Set payload attestations. New in GLOAS.
|
||||
if sBlk.Version() >= version.Gloas {
|
||||
if err := sBlk.SetPayloadAttestations(vs.getPayloadAttestations(ctx, head, sBlk.Block().Slot())); err != nil {
|
||||
log.WithError(err).Error("Could not set payload attestations")
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
winningBid := primitives.ZeroWei()
|
||||
@@ -248,31 +242,24 @@ func (vs *Server) BuildBlockParallel(ctx context.Context, sBlk interfaces.Signed
|
||||
return nil, status.Errorf(codes.Internal, "Could not get local payload: %v", err)
|
||||
}
|
||||
|
||||
switch {
|
||||
case sBlk.Version() >= version.Gloas:
|
||||
if err := vs.setGloasExecutionData(ctx, sBlk, local); err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not set GLOAS execution data: %v", err)
|
||||
}
|
||||
default:
|
||||
// There's no reason to try to get a builder bid if local override is true.
|
||||
var builderBid builderapi.Bid
|
||||
if !(local.OverrideBuilder || skipMevBoost) {
|
||||
latestHeader, err := head.LatestExecutionPayloadHeader()
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get latest execution payload header: %v", err)
|
||||
}
|
||||
parentGasLimit := latestHeader.GasLimit()
|
||||
builderBid, err = vs.getBuilderPayloadAndBlobs(ctx, sBlk.Block().Slot(), sBlk.Block().ProposerIndex(), parentGasLimit)
|
||||
if err != nil {
|
||||
builderGetPayloadMissCount.Inc()
|
||||
log.WithError(err).Error("Could not get builder payload")
|
||||
}
|
||||
}
|
||||
|
||||
winningBid, bundle, err = setExecutionData(ctx, sBlk, local, builderBid, builderBoostFactor)
|
||||
// There's no reason to try to get a builder bid if local override is true.
|
||||
var builderBid builderapi.Bid
|
||||
if !(local.OverrideBuilder || skipMevBoost) {
|
||||
latestHeader, err := head.LatestExecutionPayloadHeader()
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not set execution data: %v", err)
|
||||
return nil, status.Errorf(codes.Internal, "Could not get latest execution payload header: %v", err)
|
||||
}
|
||||
parentGasLimit := latestHeader.GasLimit()
|
||||
builderBid, err = vs.getBuilderPayloadAndBlobs(ctx, sBlk.Block().Slot(), sBlk.Block().ProposerIndex(), parentGasLimit)
|
||||
if err != nil {
|
||||
builderGetPayloadMissCount.Inc()
|
||||
log.WithError(err).Error("Could not get builder payload")
|
||||
}
|
||||
}
|
||||
|
||||
winningBid, bundle, err = setExecutionData(ctx, sBlk, local, builderBid, builderBoostFactor)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not set execution data: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -291,6 +278,11 @@ func (vs *Server) BuildBlockParallel(ctx context.Context, sBlk interfaces.Signed
|
||||
//
|
||||
// ProposeBeaconBlock handles the proposal of beacon blocks.
|
||||
func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSignedBeaconBlock) (*ethpb.ProposeResponse, error) {
|
||||
var (
|
||||
blobSidecars []*ethpb.BlobSidecar
|
||||
dataColumnSidecars []blocks.RODataColumn
|
||||
)
|
||||
|
||||
ctx, span := trace.StartSpan(ctx, "ProposerServer.ProposeBeaconBlock")
|
||||
defer span.End()
|
||||
|
||||
@@ -307,59 +299,17 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
|
||||
return nil, status.Errorf(codes.Internal, "Could not hash tree root: %v", err)
|
||||
}
|
||||
|
||||
if block.Version() < version.Gloas {
|
||||
// For post-Fulu blinded blocks, submit to relay and return early.
|
||||
if block.IsBlinded() && slots.ToEpoch(block.Block().Slot()) >= params.BeaconConfig().FuluForkEpoch {
|
||||
err := vs.BlockBuilder.SubmitBlindedBlockPostFulu(ctx, block)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not submit blinded block post-Fulu: %v", err)
|
||||
}
|
||||
return ðpb.ProposeResponse{BlockRoot: root[:]}, nil
|
||||
// For post-Fulu blinded blocks, submit to relay and return early
|
||||
if block.IsBlinded() && slots.ToEpoch(block.Block().Slot()) >= params.BeaconConfig().FuluForkEpoch {
|
||||
err := vs.BlockBuilder.SubmitBlindedBlockPostFulu(ctx, block)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not submit blinded block post-Fulu: %v", err)
|
||||
}
|
||||
return vs.proposeBlockWithSidecars(ctx, block, root, req)
|
||||
return ðpb.ProposeResponse{BlockRoot: root[:]}, nil
|
||||
}
|
||||
|
||||
return vs.proposeBlock(ctx, block, root)
|
||||
}
|
||||
|
||||
// proposeBlock broadcasts and receives a beacon block without sidecars.
|
||||
// Used for GLOAS and beyond where execution data is delivered via a separate envelope.
|
||||
func (vs *Server) proposeBlock(
|
||||
ctx context.Context,
|
||||
block interfaces.SignedBeaconBlock,
|
||||
root [fieldparams.RootLength]byte,
|
||||
) (*ethpb.ProposeResponse, error) {
|
||||
protoBlock, err := block.Proto()
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not convert block to proto: %v", err)
|
||||
}
|
||||
if err := vs.P2P.Broadcast(ctx, protoBlock); err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not broadcast block: %v", err)
|
||||
}
|
||||
vs.BlockNotifier.BlockFeed().Send(&feed.Event{
|
||||
Type: blockfeed.ReceivedBlock,
|
||||
Data: &blockfeed.ReceivedBlockData{SignedBlock: block},
|
||||
})
|
||||
if err := vs.BlockReceiver.ReceiveBlock(ctx, block, root, nil); err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not receive block: %v", err)
|
||||
}
|
||||
return ðpb.ProposeResponse{BlockRoot: root[:]}, nil
|
||||
}
|
||||
|
||||
// proposeBlockWithSidecars handles block proposal for forks that carry blob or
|
||||
// data column sidecars alongside the block (Bellatrix through Fulu).
|
||||
func (vs *Server) proposeBlockWithSidecars(
|
||||
ctx context.Context,
|
||||
block interfaces.SignedBeaconBlock,
|
||||
root [fieldparams.RootLength]byte,
|
||||
req *ethpb.GenericSignedBeaconBlock,
|
||||
) (*ethpb.ProposeResponse, error) {
|
||||
var (
|
||||
blobSidecars []*ethpb.BlobSidecar
|
||||
dataColumnSidecars []blocks.RODataColumn
|
||||
)
|
||||
|
||||
rob, err := blocks.NewROBlockWithRoot(block, root)
|
||||
var partialColumns []blocks.PartialDataColumn
|
||||
if block.IsBlinded() {
|
||||
block, blobSidecars, err = vs.handleBlindedBlock(ctx, block)
|
||||
if errors.Is(err, builderapi.ErrBadGateway) {
|
||||
@@ -367,7 +317,7 @@ func (vs *Server) proposeBlockWithSidecars(
|
||||
return ðpb.ProposeResponse{BlockRoot: root[:]}, nil
|
||||
}
|
||||
} else if block.Version() >= version.Deneb {
|
||||
blobSidecars, dataColumnSidecars, err = vs.handleUnblindedBlock(rob, req)
|
||||
blobSidecars, dataColumnSidecars, partialColumns, err = vs.handleUnblindedBlock(rob, req)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "%s: %v", "handle block failed", err)
|
||||
@@ -387,7 +337,7 @@ func (vs *Server) proposeBlockWithSidecars(
|
||||
|
||||
wg.Wait()
|
||||
|
||||
if err := vs.broadcastAndReceiveSidecars(ctx, block, root, blobSidecars, dataColumnSidecars); err != nil {
|
||||
if err := vs.broadcastAndReceiveSidecars(ctx, block, root, blobSidecars, dataColumnSidecars, partialColumns); err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not broadcast/receive sidecars: %v", err)
|
||||
}
|
||||
if err := <-errChan; err != nil {
|
||||
@@ -404,9 +354,10 @@ func (vs *Server) broadcastAndReceiveSidecars(
|
||||
root [fieldparams.RootLength]byte,
|
||||
blobSidecars []*ethpb.BlobSidecar,
|
||||
dataColumnSidecars []blocks.RODataColumn,
|
||||
partialColumns []blocks.PartialDataColumn,
|
||||
) error {
|
||||
if block.Version() >= version.Fulu {
|
||||
if err := vs.broadcastAndReceiveDataColumns(ctx, dataColumnSidecars); err != nil {
|
||||
if err := vs.broadcastAndReceiveDataColumns(ctx, dataColumnSidecars, partialColumns); err != nil {
|
||||
return errors.Wrap(err, "broadcast and receive data columns")
|
||||
}
|
||||
return nil
|
||||
@@ -455,42 +406,41 @@ func (vs *Server) handleBlindedBlock(ctx context.Context, block interfaces.Signe
|
||||
func (vs *Server) handleUnblindedBlock(
|
||||
block blocks.ROBlock,
|
||||
req *ethpb.GenericSignedBeaconBlock,
|
||||
) ([]*ethpb.BlobSidecar, []blocks.RODataColumn, error) {
|
||||
) ([]*ethpb.BlobSidecar, []blocks.RODataColumn, []blocks.PartialDataColumn, error) {
|
||||
rawBlobs, proofs, err := blobsAndProofs(req)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
|
||||
if block.Version() >= version.Fulu {
|
||||
roDataColumnSidecars, err := buildDataColumnSidecars(rawBlobs, proofs, peerdas.PopulateFromBlock(block))
|
||||
// Compute cells and proofs from the blobs and cell proofs.
|
||||
cellsPerBlob, proofsPerBlob, err := peerdas.ComputeCellsAndProofsFromFlat(rawBlobs, proofs)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return nil, nil, nil, errors.Wrap(err, "compute cells and proofs")
|
||||
}
|
||||
return nil, roDataColumnSidecars, nil
|
||||
|
||||
// Construct data column sidecars from the signed block and the computed cells and proofs.
|
||||
roDataColumnSidecars, err := peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, peerdas.PopulateFromBlock(block))
|
||||
if err != nil {
|
||||
return nil, nil, nil, errors.Wrap(err, "data column sidcars")
|
||||
}
|
||||
|
||||
included := bitfield.NewBitlist(uint64(len(cellsPerBlob)))
|
||||
included = included.Not() // all bits set to 1
|
||||
partialColumns, err := peerdas.PartialColumns(included, cellsPerBlob, proofsPerBlob, peerdas.PopulateFromBlock(block))
|
||||
if err != nil {
|
||||
return nil, nil, nil, errors.Wrap(err, "data column sidcars")
|
||||
}
|
||||
|
||||
return nil, roDataColumnSidecars, partialColumns, nil
|
||||
}
|
||||
|
||||
blobSidecars, err := BuildBlobSidecars(block, rawBlobs, proofs)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "build blob sidecars")
|
||||
return nil, nil, nil, errors.Wrap(err, "build blob sidecars")
|
||||
}
|
||||
|
||||
return blobSidecars, nil, nil
|
||||
}
|
||||
|
||||
// buildDataColumnSidecars computes cells and proofs from blobs and constructs
|
||||
// data column sidecars using the given ConstructionPopulator source.
|
||||
func buildDataColumnSidecars(blobs, proofs [][]byte, src peerdas.ConstructionPopulator) ([]blocks.RODataColumn, error) {
|
||||
cellsPerBlob, proofsPerBlob, err := peerdas.ComputeCellsAndProofsFromFlat(blobs, proofs)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "compute cells and proofs")
|
||||
}
|
||||
|
||||
roDataColumnSidecars, err := peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, src)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "data column sidecars")
|
||||
}
|
||||
|
||||
return roDataColumnSidecars, nil
|
||||
return blobSidecars, nil, nil, nil
|
||||
}
|
||||
|
||||
// broadcastReceiveBlock broadcasts a block and handles its reception.
|
||||
@@ -557,7 +507,7 @@ func (vs *Server) broadcastAndReceiveBlobs(ctx context.Context, sidecars []*ethp
|
||||
}
|
||||
|
||||
// broadcastAndReceiveDataColumns handles the broadcasting and reception of data columns sidecars.
|
||||
func (vs *Server) broadcastAndReceiveDataColumns(ctx context.Context, roSidecars []blocks.RODataColumn) error {
|
||||
func (vs *Server) broadcastAndReceiveDataColumns(ctx context.Context, roSidecars []blocks.RODataColumn, partialColumns []blocks.PartialDataColumn) error {
|
||||
// We built this block ourselves, so we can upgrade the read only data column sidecar into a verified one.
|
||||
verifiedSidecars := make([]blocks.VerifiedRODataColumn, 0, len(roSidecars))
|
||||
for _, sidecar := range roSidecars {
|
||||
@@ -566,7 +516,7 @@ func (vs *Server) broadcastAndReceiveDataColumns(ctx context.Context, roSidecars
|
||||
}
|
||||
|
||||
// Broadcast sidecars (non blocking).
|
||||
if err := vs.P2P.BroadcastDataColumnSidecars(ctx, verifiedSidecars); err != nil {
|
||||
if err := vs.P2P.BroadcastDataColumnSidecars(ctx, verifiedSidecars, partialColumns); err != nil {
|
||||
return errors.Wrap(err, "broadcast data column sidecars")
|
||||
}
|
||||
|
||||
|
||||
@@ -16,11 +16,6 @@ func getEmptyBlock(slot primitives.Slot) (interfaces.SignedBeaconBlock, error) {
|
||||
var err error
|
||||
epoch := slots.ToEpoch(slot)
|
||||
switch {
|
||||
case epoch >= params.BeaconConfig().GloasForkEpoch:
|
||||
sBlk, err = blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlockGloas{Block: ðpb.BeaconBlockGloas{Body: ðpb.BeaconBlockBodyGloas{}}})
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not initialize block for proposal: %v", err)
|
||||
}
|
||||
case epoch >= params.BeaconConfig().FuluForkEpoch:
|
||||
sBlk, err = blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlockFulu{Block: ðpb.BeaconBlockElectra{Body: ðpb.BeaconBlockBodyElectra{}}})
|
||||
if err != nil {
|
||||
|
||||
@@ -1,460 +0,0 @@
|
||||
package validator
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed"
|
||||
blockfeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/block"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
consensusblocks "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/container/trie"
|
||||
"github.com/OffchainLabs/prysm/v7/crypto/bls/common"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
|
||||
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/sync/errgroup"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/protobuf/types/known/emptypb"
|
||||
)
|
||||
|
||||
// setGloasExecutionData creates an execution payload bid from the local payload,
|
||||
// sets it on the block, and caches the execution payload envelope for later
|
||||
// retrieval by the validator client.
|
||||
func (vs *Server) setGloasExecutionData(
|
||||
ctx context.Context,
|
||||
sBlk interfaces.SignedBeaconBlock,
|
||||
local *consensusblocks.GetPayloadResponse,
|
||||
) error {
|
||||
_, span := trace.StartSpan(ctx, "ProposerServer.setGloasExecutionData")
|
||||
defer span.End()
|
||||
|
||||
if local == nil || local.ExecutionData == nil {
|
||||
return errors.New("local execution payload is nil")
|
||||
}
|
||||
|
||||
// Create execution payload bid from the local payload.
|
||||
parentRoot := sBlk.Block().ParentRoot()
|
||||
bid, err := vs.createSelfBuildExecutionPayloadBid(
|
||||
local.ExecutionData,
|
||||
primitives.BuilderIndex(sBlk.Block().ProposerIndex()),
|
||||
parentRoot[:],
|
||||
sBlk.Block().Slot(),
|
||||
local.BlobsBundler,
|
||||
)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not create execution payload bid")
|
||||
}
|
||||
|
||||
// Per spec, self-build bids must use G2 point-at-infinity as the signature.
|
||||
// Only the execution payload envelope requires a real signature from the proposer.
|
||||
signedBid := ðpb.SignedExecutionPayloadBid{
|
||||
Message: bid,
|
||||
Signature: common.InfiniteSignature[:],
|
||||
}
|
||||
if err := sBlk.SetSignedExecutionPayloadBid(signedBid); err != nil {
|
||||
return errors.Wrap(err, "could not set signed execution payload bid")
|
||||
}
|
||||
|
||||
// Cache the execution payload envelope and blobs bundle for later retrieval.
|
||||
// The envelope is retrieved by the VC to sign and broadcast.
|
||||
// The blobs bundle is needed during block proposal to build and broadcast blob sidecars.
|
||||
envelope := vs.createExecutionPayloadEnvelope(
|
||||
local.ExecutionData,
|
||||
local.ExecutionRequests,
|
||||
primitives.BuilderIndex(sBlk.Block().ProposerIndex()),
|
||||
sBlk.Block().Slot(),
|
||||
local.BlobsBundler,
|
||||
)
|
||||
vs.cacheExecutionPayloadEnvelope(envelope, local.BlobsBundler)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// getPayloadAttestations returns payload attestations for inclusion in a GLOAS block.
|
||||
// These attest to the payload timeliness from the previous slot's PTC.
|
||||
func (vs *Server) getPayloadAttestations(ctx context.Context, head state.BeaconState, slot primitives.Slot) []*ethpb.PayloadAttestation {
|
||||
// TODO: Implement payload attestation retrieval from pool.
|
||||
// This requires:
|
||||
// 1. A PayloadAttestationPool to collect PTC votes
|
||||
// 2. Aggregation of individual PayloadAttestationMessages into PayloadAttestations
|
||||
// For now, return empty - blocks are valid without payload attestations.
|
||||
return []*ethpb.PayloadAttestation{}
|
||||
}
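The pool-backed retrieval the TODO above describes could be shaped roughly as below; PayloadAttestationPool and PayloadAttestationsForSlot are hypothetical names used only to illustrate the wiring, not existing Prysm APIs:

// PayloadAttestationPool is a hypothetical interface for collecting and
// aggregating PTC votes; it does not exist in this branch.
type PayloadAttestationPool interface {
	// PayloadAttestationsForSlot returns aggregated payload attestations for
	// the PTC of the given slot.
	PayloadAttestationsForSlot(slot primitives.Slot) []*ethpb.PayloadAttestation
}

With such a pool on the Server, getPayloadAttestations would reduce to returning vs.PayloadAttestationPool.PayloadAttestationsForSlot(slot-1), guarding against a nil pool and slot 0, since the attestations cover the previous slot's payload.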
|
||||
|
||||
// createSelfBuildExecutionPayloadBid creates an ExecutionPayloadBid for self-building,
|
||||
// where the proposer acts as its own builder. Value and payment are zero, and the
|
||||
// bid fields are derived directly from the local execution payload.
|
||||
func (vs *Server) createSelfBuildExecutionPayloadBid(
|
||||
executionData interfaces.ExecutionData,
|
||||
builderIndex primitives.BuilderIndex,
|
||||
parentBlockRoot []byte,
|
||||
slot primitives.Slot,
|
||||
blobsBundler enginev1.BlobsBundler,
|
||||
) (*ethpb.ExecutionPayloadBid, error) {
|
||||
if executionData == nil || executionData.IsNil() {
|
||||
return nil, errors.New("execution data is nil")
|
||||
}
|
||||
|
||||
// Compute blob_kzg_commitments_root from the blobs bundle.
|
||||
// This is hash_tree_root(List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK]).
|
||||
kzgCommitmentsRoot := make([]byte, 32)
|
||||
if blobsBundler != nil {
|
||||
commitments := extractKzgCommitments(blobsBundler)
|
||||
if len(commitments) > 0 {
|
||||
leaves := consensusblocks.LeavesFromCommitments(commitments)
|
||||
commitmentsTree, err := trie.GenerateTrieFromItems(leaves, fieldparams.LogMaxBlobCommitments)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not generate kzg commitments trie")
|
||||
}
|
||||
root, err := commitmentsTree.HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not compute kzg commitments root")
|
||||
}
|
||||
kzgCommitmentsRoot = root[:]
|
||||
}
|
||||
}
|
||||
|
||||
return ðpb.ExecutionPayloadBid{
|
||||
ParentBlockHash: executionData.ParentHash(),
|
||||
ParentBlockRoot: bytesutil.SafeCopyBytes(parentBlockRoot),
|
||||
BlockHash: executionData.BlockHash(),
|
||||
PrevRandao: executionData.PrevRandao(),
|
||||
FeeRecipient: executionData.FeeRecipient(),
|
||||
GasLimit: executionData.GasLimit(),
|
||||
BuilderIndex: builderIndex,
|
||||
Slot: slot,
|
||||
Value: 0,
|
||||
ExecutionPayment: 0,
|
||||
BlobKzgCommitmentsRoot: kzgCommitmentsRoot,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// createExecutionPayloadEnvelope wraps a full execution payload with metadata.
|
||||
// The envelope is cached by the beacon node during block production for later
|
||||
// retrieval by the validator via GetExecutionPayloadEnvelope.
|
||||
func (vs *Server) createExecutionPayloadEnvelope(
|
||||
executionData interfaces.ExecutionData,
|
||||
executionRequests *enginev1.ExecutionRequests,
|
||||
builderIndex primitives.BuilderIndex,
|
||||
slot primitives.Slot,
|
||||
blobsBundler enginev1.BlobsBundler,
|
||||
) *ethpb.ExecutionPayloadEnvelope {
|
||||
// Extract the underlying ExecutionPayloadDeneb proto
|
||||
var payload *enginev1.ExecutionPayloadDeneb
|
||||
if executionData != nil && !executionData.IsNil() {
|
||||
if p, ok := executionData.Proto().(*enginev1.ExecutionPayloadDeneb); ok {
|
||||
payload = p
|
||||
}
|
||||
}
|
||||
|
||||
commitments := extractKzgCommitments(blobsBundler)
|
||||
|
||||
return ðpb.ExecutionPayloadEnvelope{
|
||||
Payload: payload,
|
||||
ExecutionRequests: executionRequests,
|
||||
BuilderIndex: builderIndex,
|
||||
BeaconBlockRoot: make([]byte, 32), // Populated later when block root is known
|
||||
Slot: slot,
|
||||
BlobKzgCommitments: commitments,
|
||||
StateRoot: make([]byte, 32), // Computed later in GetExecutionPayloadEnvelope
|
||||
}
|
||||
}
|
||||
|
||||
// extractKzgCommitments pulls KZG commitments from a blobs bundler.
|
||||
func extractKzgCommitments(blobsBundler enginev1.BlobsBundler) [][]byte {
|
||||
if blobsBundler == nil {
|
||||
return nil
|
||||
}
|
||||
switch b := blobsBundler.(type) {
|
||||
case *enginev1.BlobsBundle:
|
||||
if b != nil {
|
||||
return b.KzgCommitments
|
||||
}
|
||||
case *enginev1.BlobsBundleV2:
|
||||
if b != nil {
|
||||
return b.KzgCommitments
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// cacheExecutionPayloadEnvelope stores an envelope and its blobs bundle for later retrieval.
|
||||
// The blobs bundle is cached alongside the envelope because blobs from the EL are only
|
||||
// held in memory until they are broadcast as sidecars during block proposal.
|
||||
func (vs *Server) cacheExecutionPayloadEnvelope(envelope *ethpb.ExecutionPayloadEnvelope, blobsBundle enginev1.BlobsBundler) {
|
||||
if vs.ExecutionPayloadEnvelopeCache == nil {
|
||||
log.Warn("ExecutionPayloadEnvelopeCache is nil, envelope will not be cached")
|
||||
return
|
||||
}
|
||||
vs.ExecutionPayloadEnvelopeCache.Set(envelope, blobsBundle)
|
||||
}
|
||||
|
||||
// GetExecutionPayloadEnvelope retrieves a cached execution payload envelope.
|
||||
// This is called by validators after receiving a GLOAS block to get the envelope
|
||||
// they need to sign and broadcast.
|
||||
//
|
||||
// gRPC endpoint: /eth/v1alpha1/validator/execution_payload_envelope/{slot}/{builder_index}
|
||||
func (vs *Server) GetExecutionPayloadEnvelope(
|
||||
ctx context.Context,
|
||||
req *ethpb.ExecutionPayloadEnvelopeRequest,
|
||||
) (*ethpb.ExecutionPayloadEnvelopeResponse, error) {
|
||||
if req == nil {
|
||||
return nil, status.Error(codes.InvalidArgument, "request cannot be nil")
|
||||
}
|
||||
|
||||
if slots.ToEpoch(req.Slot) < params.BeaconConfig().GloasForkEpoch {
|
||||
return nil, status.Errorf(codes.InvalidArgument,
|
||||
"execution payload envelopes are not supported before GLOAS fork (slot %d)", req.Slot)
|
||||
}
|
||||
|
||||
if vs.ExecutionPayloadEnvelopeCache == nil {
|
||||
return nil, status.Error(codes.Internal, "execution payload envelope cache not initialized")
|
||||
}
|
||||
|
||||
envelope, found := vs.ExecutionPayloadEnvelopeCache.Get(req.Slot, req.BuilderIndex)
|
||||
if !found {
|
||||
return nil, status.Errorf(
|
||||
codes.NotFound,
|
||||
"execution payload envelope not found for slot %d builder %d",
|
||||
req.Slot,
|
||||
req.BuilderIndex,
|
||||
)
|
||||
}
|
||||
|
||||
// Compute state root if not already set.
|
||||
// Following the pattern from epbs-interop: compute post-payload state root.
|
||||
if len(envelope.StateRoot) == 0 || bytesutil.ZeroRoot(envelope.StateRoot) {
|
||||
stateRoot, err := vs.computePostPayloadStateRoot(ctx, envelope)
|
||||
if err != nil {
|
||||
log.WithError(err).Warn("Failed to compute post-payload state root")
|
||||
} else {
|
||||
envelope.StateRoot = stateRoot
|
||||
log.WithField("stateRoot", fmt.Sprintf("%#x", stateRoot)).Debug("Computed state root at execution stage")
|
||||
}
|
||||
}
|
||||
|
||||
return ðpb.ExecutionPayloadEnvelopeResponse{
|
||||
Envelope: envelope,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// computePostPayloadStateRoot computes the state root after an execution
|
||||
// payload envelope has been processed through a state transition.
|
||||
func (vs *Server) computePostPayloadStateRoot(ctx context.Context, envelope *ethpb.ExecutionPayloadEnvelope) ([]byte, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "ProposerServer.computePostPayloadStateRoot")
|
||||
defer span.End()
|
||||
|
||||
if len(envelope.BeaconBlockRoot) == 0 || bytesutil.ZeroRoot(envelope.BeaconBlockRoot) {
|
||||
return nil, errors.New("beacon block root not set on envelope")
|
||||
}
|
||||
|
||||
blockRoot := bytesutil.ToBytes32(envelope.BeaconBlockRoot)
|
||||
st, err := vs.StateGen.StateByRoot(ctx, blockRoot)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get state by block root")
|
||||
}
|
||||
if st == nil {
|
||||
return nil, errors.New("nil state for block root")
|
||||
}
|
||||
|
||||
// Copy the state to avoid mutating the original
|
||||
st = st.Copy()
|
||||
|
||||
// TODO: Process the execution payload envelope through state transition.
|
||||
// This requires implementing ProcessPayloadStateTransition in beacon-chain/core/gloas.
|
||||
// For now, use the state root from the beacon block state as a placeholder.
|
||||
// The correct implementation would:
|
||||
// 1. Call ProcessPayloadStateTransition(ctx, st, envelope) to apply payload effects
|
||||
// 2. Compute HashTreeRoot of the resulting state
|
||||
|
||||
root, err := st.HashTreeRoot(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not compute state root")
|
||||
}
|
||||
return root[:], nil
|
||||
}
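Once the state-transition helper the TODO mentions exists, the placeholder body above could shrink to the sketch below; gloas.ProcessPayloadStateTransition is a hypothetical name mirroring the TODO, not a function that exists in this branch:

// Sketch, under the assumption that a ProcessPayloadStateTransition helper
// applying the envelope's payload effects to a copied state is available.
st = st.Copy()
if err := gloas.ProcessPayloadStateTransition(ctx, st, envelope); err != nil {
	return nil, errors.Wrap(err, "could not process payload state transition")
}
root, err := st.HashTreeRoot(ctx)
if err != nil {
	return nil, errors.Wrap(err, "could not compute state root")
}
return root[:], nil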
|
||||
|
||||
// envelopeBlockWaitTimeout is the maximum time to wait for the associated beacon block
|
||||
// before giving up on publishing the execution payload envelope.
|
||||
const envelopeBlockWaitTimeout = 4 * time.Second
|
||||
|
||||
// envelopeBlockPollInterval is how often to check for the beacon block while waiting.
|
||||
const envelopeBlockPollInterval = 100 * time.Millisecond
|
||||
|
||||
// PublishExecutionPayloadEnvelope validates and broadcasts a signed execution payload envelope.
|
||||
// This is called by validators after signing the envelope retrieved from GetExecutionPayloadEnvelope.
|
||||
//
|
||||
// The function waits for the associated beacon block to be available before processing,
|
||||
// as the envelope references a beacon_block_root that must exist either from local
|
||||
// production or P2P gossip.
|
||||
//
|
||||
// gRPC endpoint: POST /eth/v1alpha1/validator/execution_payload_envelope
|
||||
func (vs *Server) PublishExecutionPayloadEnvelope(
|
||||
ctx context.Context,
|
||||
req *ethpb.SignedExecutionPayloadEnvelope,
|
||||
) (*emptypb.Empty, error) {
|
||||
if req == nil || req.Message == nil {
|
||||
return nil, status.Error(codes.InvalidArgument, "signed envelope cannot be nil")
|
||||
}
|
||||
|
||||
if slots.ToEpoch(req.Message.Slot) < params.BeaconConfig().GloasForkEpoch {
|
||||
return nil, status.Errorf(codes.InvalidArgument,
|
||||
"execution payload envelopes are not supported before GLOAS fork (slot %d)", req.Message.Slot)
|
||||
}
|
||||
|
||||
beaconBlockRoot := bytesutil.ToBytes32(req.Message.BeaconBlockRoot)
|
||||
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"slot": req.Message.Slot,
|
||||
"builderIndex": req.Message.BuilderIndex,
|
||||
"beaconBlockRoot": fmt.Sprintf("%#x", beaconBlockRoot[:8]),
|
||||
})
|
||||
log.Info("Publishing signed execution payload envelope")
|
||||
|
||||
// Wait for the associated beacon block to be available.
|
||||
// The block may come from local production or P2P gossip.
|
||||
if err := vs.waitForBeaconBlock(ctx, beaconBlockRoot); err != nil {
|
||||
return nil, status.Errorf(codes.FailedPrecondition,
|
||||
"beacon block %#x not available: %v", beaconBlockRoot[:8], err)
|
||||
}
|
||||
|
||||
// TODO: Validate envelope signature before broadcasting
|
||||
// if err := vs.validateEnvelopeSignature(ctx, req); err != nil {
|
||||
// return nil, status.Errorf(codes.InvalidArgument, "invalid envelope signature: %v", err)
|
||||
// }
|
||||
|
||||
// Build data column sidecars from the cached blobs bundle before broadcasting.
|
||||
// In GLOAS, blob data is delivered alongside the execution payload envelope
|
||||
// rather than with the beacon block (which only carries the bid).
|
||||
dataColumnSidecars, err := vs.buildEnvelopeDataColumns(ctx, req.Message, beaconBlockRoot)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "failed to build data column sidecars: %v", err)
|
||||
}
|
||||
|
||||
// Broadcast envelope and data column sidecars concurrently.
|
||||
eg, eCtx := errgroup.WithContext(ctx)
|
||||
eg.Go(func() error {
|
||||
if err := vs.P2P.Broadcast(eCtx, req); err != nil {
|
||||
return errors.Wrap(err, "broadcast signed execution payload envelope")
|
||||
}
|
||||
// TODO: Receive the envelope locally following the broadcastReceiveBlock pattern.
|
||||
// This requires:
|
||||
// 1. blocks.WrappedROSignedExecutionPayloadEnvelope wrapper
|
||||
// 2. BlockReceiver.ReceiveExecutionPayloadEnvelope method
|
||||
// See epbs branch's receive_execution_payload_envelope.go for reference.
|
||||
return nil
|
||||
})
|
||||
if len(dataColumnSidecars) > 0 {
|
||||
eg.Go(func() error {
|
||||
return vs.broadcastAndReceiveDataColumns(eCtx, dataColumnSidecars)
|
||||
})
|
||||
}
|
||||
if err := eg.Wait(); err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "failed to publish execution payload envelope: %v", err)
|
||||
}
|
||||
|
||||
log.Info("Successfully published execution payload envelope")
|
||||
|
||||
return &emptypb.Empty{}, nil
|
||||
}
|
||||
|
||||
// waitForBeaconBlock waits for the beacon block with the given root to be available.
|
||||
// It first checks if the block already exists, then subscribes to block notifications
|
||||
// and polls periodically until the block arrives or the timeout is reached.
|
||||
func (vs *Server) waitForBeaconBlock(ctx context.Context, blockRoot [32]byte) error {
|
||||
// Fast path: check if block already exists
|
||||
if vs.BlockReceiver.HasBlock(ctx, blockRoot) {
|
||||
return nil
|
||||
}
|
||||
|
||||
log.WithField("blockRoot", fmt.Sprintf("%#x", blockRoot[:8])).
|
||||
Debug("Waiting for beacon block to arrive")
|
||||
|
||||
waitCtx, cancel := context.WithTimeout(ctx, envelopeBlockWaitTimeout)
|
||||
defer cancel()
|
||||
|
||||
blocksChan := make(chan *feed.Event, 1)
|
||||
blockSub := vs.BlockNotifier.BlockFeed().Subscribe(blocksChan)
|
||||
defer blockSub.Unsubscribe()
|
||||
|
||||
ticker := time.NewTicker(envelopeBlockPollInterval)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-waitCtx.Done():
|
||||
return errors.Wrap(waitCtx.Err(), "timeout waiting for beacon block")
|
||||
|
||||
case blockEvent := <-blocksChan:
|
||||
if blockEvent.Type == blockfeed.ReceivedBlock {
|
||||
data, ok := blockEvent.Data.(*blockfeed.ReceivedBlockData)
|
||||
if ok && data != nil && data.SignedBlock != nil {
|
||||
root, err := data.SignedBlock.Block().HashTreeRoot()
|
||||
if err == nil && root == blockRoot {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
case <-ticker.C:
|
||||
if vs.BlockReceiver.HasBlock(ctx, blockRoot) {
|
||||
return nil
|
||||
}
|
||||
|
||||
case <-blockSub.Err():
|
||||
return errors.New("block subscription closed")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// buildEnvelopeDataColumns retrieves the cached blobs bundle for the envelope's
|
||||
// slot/builder and builds data column sidecars. Returns nil if no blobs to broadcast.
|
||||
func (vs *Server) buildEnvelopeDataColumns(
|
||||
ctx context.Context,
|
||||
envelope *ethpb.ExecutionPayloadEnvelope,
|
||||
blockRoot [32]byte,
|
||||
) ([]consensusblocks.RODataColumn, error) {
|
||||
if vs.ExecutionPayloadEnvelopeCache == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
blobsBundle, found := vs.ExecutionPayloadEnvelopeCache.GetBlobsBundle(envelope.Slot, envelope.BuilderIndex)
|
||||
if !found || blobsBundle == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
blobs := blobsBundle.GetBlobs()
|
||||
proofs := blobsBundle.GetProofs()
|
||||
commitments := envelope.BlobKzgCommitments
|
||||
if len(blobs) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Retrieve the beacon block to build the signed block header for sidecars.
|
||||
blk, err := vs.BeaconDB.Block(ctx, blockRoot)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get block for data column sidecars")
|
||||
}
|
||||
if blk == nil {
|
||||
return nil, errors.New("block not found for data column sidecars")
|
||||
}
|
||||
|
||||
roBlock, err := consensusblocks.NewROBlockWithRoot(blk, blockRoot)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not create ROBlock")
|
||||
}
|
||||
|
||||
return buildDataColumnSidecars(blobs, proofs, peerdas.PopulateFromEnvelope(roBlock, commitments))
|
||||
}
|
||||
@@ -1,104 +0,0 @@
|
||||
package validator
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
|
||||
consensusblocks "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/crypto/bls/common"
|
||||
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
)
|
||||
|
||||
func TestSetGloasExecutionData(t *testing.T) {
|
||||
parentRoot := [32]byte{1, 2, 3}
|
||||
slot := primitives.Slot(100)
|
||||
proposerIndex := primitives.ValidatorIndex(42)
|
||||
|
||||
sBlk, err := consensusblocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlockGloas{
|
||||
Block: ðpb.BeaconBlockGloas{
|
||||
Slot: slot,
|
||||
ProposerIndex: proposerIndex,
|
||||
ParentRoot: parentRoot[:],
|
||||
Body: ðpb.BeaconBlockBodyGloas{},
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
payload := &enginev1.ExecutionPayloadDeneb{
|
||||
ParentHash: make([]byte, 32),
|
||||
FeeRecipient: make([]byte, 20),
|
||||
StateRoot: make([]byte, 32),
|
||||
ReceiptsRoot: make([]byte, 32),
|
||||
LogsBloom: make([]byte, 256),
|
||||
PrevRandao: make([]byte, 32),
|
||||
BaseFeePerGas: make([]byte, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
ExtraData: make([]byte, 0),
|
||||
}
|
||||
ed, err := consensusblocks.WrappedExecutionPayloadDeneb(payload)
|
||||
require.NoError(t, err)
|
||||
|
||||
local := &consensusblocks.GetPayloadResponse{
|
||||
ExecutionData: ed,
|
||||
Bid: primitives.ZeroWei(),
|
||||
BlobsBundler: nil,
|
||||
ExecutionRequests: &enginev1.ExecutionRequests{},
|
||||
}
|
||||
|
||||
envelopeCache := cache.NewExecutionPayloadEnvelopeCache()
|
||||
vs := &Server{
|
||||
ExecutionPayloadEnvelopeCache: envelopeCache,
|
||||
}
|
||||
|
||||
err = vs.setGloasExecutionData(t.Context(), sBlk, local)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify the signed bid was set on the block.
|
||||
signedBid, err := sBlk.Block().Body().SignedExecutionPayloadBid()
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, signedBid)
|
||||
require.NotNil(t, signedBid.Message)
|
||||
|
||||
// Per spec (process_execution_payload_bid): for self-builds,
|
||||
// signature must be G2 point-at-infinity.
|
||||
require.DeepEqual(t, common.InfiniteSignature[:], signedBid.Signature)
|
||||
|
||||
// Verify bid fields.
|
||||
bid := signedBid.Message
|
||||
require.Equal(t, slot, bid.Slot)
|
||||
require.Equal(t, primitives.BuilderIndex(proposerIndex), bid.BuilderIndex)
|
||||
require.DeepEqual(t, parentRoot[:], bid.ParentBlockRoot)
|
||||
require.Equal(t, uint64(0), bid.Value)
|
||||
require.Equal(t, uint64(0), bid.ExecutionPayment)
|
||||
|
||||
// Verify the envelope was cached.
|
||||
envelope, found := envelopeCache.Get(slot, primitives.BuilderIndex(proposerIndex))
|
||||
require.Equal(t, true, found)
|
||||
require.NotNil(t, envelope)
|
||||
require.Equal(t, slot, envelope.Slot)
|
||||
require.Equal(t, primitives.BuilderIndex(proposerIndex), envelope.BuilderIndex)
|
||||
}
|
||||
|
||||
func TestSetGloasExecutionData_NilPayload(t *testing.T) {
|
||||
sBlk, err := consensusblocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlockGloas{
|
||||
Block: ðpb.BeaconBlockGloas{
|
||||
Slot: 1,
|
||||
ParentRoot: make([]byte, 32),
|
||||
Body: ðpb.BeaconBlockBodyGloas{},
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
vs := &Server{
|
||||
ExecutionPayloadEnvelopeCache: cache.NewExecutionPayloadEnvelopeCache(),
|
||||
}
|
||||
|
||||
err = vs.setGloasExecutionData(t.Context(), sBlk, nil)
|
||||
require.ErrorContains(t, "local execution payload is nil", err)
|
||||
|
||||
err = vs.setGloasExecutionData(t.Context(), sBlk, &consensusblocks.GetPayloadResponse{})
|
||||
require.ErrorContains(t, "local execution payload is nil", err)
|
||||
}
|
||||
@@ -44,46 +44,45 @@ import (
|
||||
// and committees in which particular validators need to perform their responsibilities,
|
||||
// and more.
|
||||
type Server struct {
|
||||
Ctx context.Context
|
||||
PayloadIDCache *cache.PayloadIDCache
|
||||
TrackedValidatorsCache *cache.TrackedValidatorsCache
|
||||
ExecutionPayloadEnvelopeCache *cache.ExecutionPayloadEnvelopeCache // GLOAS: Cache for execution payload envelopes
|
||||
HeadFetcher blockchain.HeadFetcher
|
||||
ForkFetcher blockchain.ForkFetcher
|
||||
ForkchoiceFetcher blockchain.ForkchoiceFetcher
|
||||
GenesisFetcher blockchain.GenesisFetcher
|
||||
FinalizationFetcher blockchain.FinalizationFetcher
|
||||
TimeFetcher blockchain.TimeFetcher
|
||||
BlockFetcher execution.POWBlockFetcher
|
||||
DepositFetcher cache.DepositFetcher
|
||||
ChainStartFetcher execution.ChainStartFetcher
|
||||
Eth1InfoFetcher execution.ChainInfoFetcher
|
||||
OptimisticModeFetcher blockchain.OptimisticModeFetcher
|
||||
SyncChecker sync.Checker
|
||||
StateNotifier statefeed.Notifier
|
||||
BlockNotifier blockfeed.Notifier
|
||||
P2P p2p.Broadcaster
|
||||
AttestationCache *cache.AttestationCache
|
||||
AttPool attestations.Pool
|
||||
SlashingsPool slashings.PoolManager
|
||||
ExitPool voluntaryexits.PoolManager
|
||||
SyncCommitteePool synccommittee.Pool
|
||||
BlockReceiver blockchain.BlockReceiver
|
||||
BlobReceiver blockchain.BlobReceiver
|
||||
DataColumnReceiver blockchain.DataColumnReceiver
|
||||
MockEth1Votes bool
|
||||
Eth1BlockFetcher execution.POWBlockFetcher
|
||||
PendingDepositsFetcher depositsnapshot.PendingDepositsFetcher
|
||||
OperationNotifier opfeed.Notifier
|
||||
StateGen stategen.StateManager
|
||||
ReplayerBuilder stategen.ReplayerBuilder
|
||||
BeaconDB db.HeadAccessDatabase
|
||||
ExecutionEngineCaller execution.EngineCaller
|
||||
BlockBuilder builder.BlockBuilder
|
||||
BLSChangesPool blstoexec.PoolManager
|
||||
ClockWaiter startup.ClockWaiter
|
||||
CoreService *core.Service
|
||||
AttestationStateFetcher blockchain.AttestationStateFetcher
|
||||
Ctx context.Context
|
||||
PayloadIDCache *cache.PayloadIDCache
|
||||
TrackedValidatorsCache *cache.TrackedValidatorsCache
|
||||
HeadFetcher blockchain.HeadFetcher
|
||||
ForkFetcher blockchain.ForkFetcher
|
||||
ForkchoiceFetcher blockchain.ForkchoiceFetcher
|
||||
GenesisFetcher blockchain.GenesisFetcher
|
||||
FinalizationFetcher blockchain.FinalizationFetcher
|
||||
TimeFetcher blockchain.TimeFetcher
|
||||
BlockFetcher execution.POWBlockFetcher
|
||||
DepositFetcher cache.DepositFetcher
|
||||
ChainStartFetcher execution.ChainStartFetcher
|
||||
Eth1InfoFetcher execution.ChainInfoFetcher
|
||||
OptimisticModeFetcher blockchain.OptimisticModeFetcher
|
||||
SyncChecker sync.Checker
|
||||
StateNotifier statefeed.Notifier
|
||||
BlockNotifier blockfeed.Notifier
|
||||
P2P p2p.Broadcaster
|
||||
AttestationCache *cache.AttestationCache
|
||||
AttPool attestations.Pool
|
||||
SlashingsPool slashings.PoolManager
|
||||
ExitPool voluntaryexits.PoolManager
|
||||
SyncCommitteePool synccommittee.Pool
|
||||
BlockReceiver blockchain.BlockReceiver
|
||||
BlobReceiver blockchain.BlobReceiver
|
||||
DataColumnReceiver blockchain.DataColumnReceiver
|
||||
MockEth1Votes bool
|
||||
Eth1BlockFetcher execution.POWBlockFetcher
|
||||
PendingDepositsFetcher depositsnapshot.PendingDepositsFetcher
|
||||
OperationNotifier opfeed.Notifier
|
||||
StateGen stategen.StateManager
|
||||
ReplayerBuilder stategen.ReplayerBuilder
|
||||
BeaconDB db.HeadAccessDatabase
|
||||
ExecutionEngineCaller execution.EngineCaller
|
||||
BlockBuilder builder.BlockBuilder
|
||||
BLSChangesPool blstoexec.PoolManager
|
||||
ClockWaiter startup.ClockWaiter
|
||||
CoreService *core.Service
|
||||
AttestationStateFetcher blockchain.AttestationStateFetcher
|
||||
}
|
||||
|
||||
// Deprecated: The gRPC API will remain the default and fully supported through v8 (expected in 2026) but will be eventually removed in favor of REST API.
|
||||
|
||||
@@ -46,20 +46,14 @@ func (b *BeaconState) BuilderPubkey(builderIndex primitives.BuilderIndex) ([fiel
|
||||
}
|
||||
|
||||
// IsActiveBuilder returns true if the builder placement is finalized and it has not initiated exit.
|
||||
// Spec v1.7.0-alpha.0 (pseudocode):
|
||||
// def is_active_builder(state: BeaconState, builder_index: BuilderIndex) -> bool:
|
||||
//
|
||||
// <spec fn="is_active_builder" fork="gloas" hash="1a599fb2">
|
||||
// def is_active_builder(state: BeaconState, builder_index: BuilderIndex) -> bool:
|
||||
// """
|
||||
// Check if the builder at ``builder_index`` is active for the given ``state``.
|
||||
// """
|
||||
// builder = state.builders[builder_index]
|
||||
// return (
|
||||
// # Placement in builder list is finalized
|
||||
// builder.deposit_epoch < state.finalized_checkpoint.epoch
|
||||
// # Has not initiated exit
|
||||
// and builder.withdrawable_epoch == FAR_FUTURE_EPOCH
|
||||
// )
|
||||
// </spec>
|
||||
// builder = state.builders[builder_index]
|
||||
// return (
|
||||
// builder.deposit_epoch < state.finalized_checkpoint.epoch
|
||||
// and builder.withdrawable_epoch == FAR_FUTURE_EPOCH
|
||||
// )
|
||||
func (b *BeaconState) IsActiveBuilder(builderIndex primitives.BuilderIndex) (bool, error) {
|
||||
if b.version < version.Gloas {
|
||||
return false, errNotSupported("IsActiveBuilder", b.version)
|
||||
@@ -78,18 +72,15 @@ func (b *BeaconState) IsActiveBuilder(builderIndex primitives.BuilderIndex) (boo
|
||||
}
|
||||
|
||||
// CanBuilderCoverBid returns true if the builder has enough balance to cover the given bid amount.
|
||||
// Spec v1.7.0-alpha.0 (pseudocode):
|
||||
// def can_builder_cover_bid(state: BeaconState, builder_index: BuilderIndex, bid_amount: Gwei) -> bool:
|
||||
//
|
||||
// <spec fn="can_builder_cover_bid" fork="gloas" hash="9e3f2d7c">
|
||||
// def can_builder_cover_bid(
|
||||
// state: BeaconState, builder_index: BuilderIndex, bid_amount: Gwei
|
||||
// ) -> bool:
|
||||
// builder_balance = state.builders[builder_index].balance
|
||||
// pending_withdrawals_amount = get_pending_balance_to_withdraw_for_builder(state, builder_index)
|
||||
// min_balance = MIN_DEPOSIT_AMOUNT + pending_withdrawals_amount
|
||||
// if builder_balance < min_balance:
|
||||
// return False
|
||||
// return builder_balance - min_balance >= bid_amount
|
||||
// </spec>
|
||||
// builder_balance = state.builders[builder_index].balance
|
||||
// pending_withdrawals_amount = get_pending_balance_to_withdraw_for_builder(state, builder_index)
|
||||
// min_balance = MIN_DEPOSIT_AMOUNT + pending_withdrawals_amount
|
||||
// if builder_balance < min_balance:
|
||||
// return False
|
||||
// return builder_balance - min_balance >= bid_amount
|
||||
func (b *BeaconState) CanBuilderCoverBid(builderIndex primitives.BuilderIndex, bidAmount primitives.Gwei) (bool, error) {
|
||||
if b.version < version.Gloas {
|
||||
return false, errNotSupported("CanBuilderCoverBid", b.version)
|
||||
|
||||
@@ -58,6 +58,7 @@ go_library(
"validate_bls_to_execution_change.go",
"validate_data_column.go",
"validate_light_client.go",
"validate_partial_header.go",
"validate_proposer_slashing.go",
"validate_sync_committee_message.go",
"validate_sync_contribution_proof.go",
@@ -98,6 +99,7 @@ go_library(
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/p2p/encoder:go_default_library",
"//beacon-chain/p2p/partialdatacolumnbroadcaster:go_default_library",
"//beacon-chain/p2p/peers:go_default_library",
"//beacon-chain/p2p/types:go_default_library",
"//beacon-chain/slasher/types:go_default_library",

@@ -2,8 +2,10 @@ package sync

import (
"context"
"iter"
"time"

"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/crypto/bls"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing"
@@ -19,9 +21,16 @@ type signatureVerifier struct {
resChan chan error
}

type errorWithSegment struct {
err error
// segment is only available if the batched verification failed
segment peerdas.CellProofBundleSegment
}

type kzgVerifier struct {
dataColumns []blocks.RODataColumn
resChan chan error
sizeHint int
cellProofs iter.Seq[blocks.CellProofBundle]
resChan chan errorWithSegment
}

// A routine that runs in the background to perform batch

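The new kzgVerifier/errorWithSegment shape above suggests a channel-based batching pattern: jobs carry a lazy sequence of cell proofs plus a result channel, and failures report which segment broke. The sketch below illustrates that pattern with hypothetical stand-in types (bundle, segment, verifyBatch); it is not the real peerdas API.

package main

import (
	"errors"
	"fmt"
	"iter"
	"slices"
)

// bundle and segment are hypothetical stand-ins for peerdas' cell-proof
// bundle and bundle-segment types; only the shape of the pattern matters.
type bundle struct{ id int }

type segment struct{ ids []int }

type errorWithSegment struct {
	err     error
	segment segment // only populated when the batched verification failed
}

type kzgJob struct {
	sizeHint   int
	cellProofs iter.Seq[bundle]
	resChan    chan errorWithSegment
}

// verifyBatch pretends to batch-verify the cell proofs; it fails when any
// bundle id is negative.
func verifyBatch(proofs iter.Seq[bundle]) error {
	for b := range proofs {
		if b.id < 0 {
			return errors.New("invalid proof")
		}
	}
	return nil
}

// worker drains jobs in the background and reports each result on the job's
// own channel, attaching the offending segment on failure so the caller can
// retry at a finer granularity.
func worker(jobs <-chan kzgJob) {
	for job := range jobs {
		res := errorWithSegment{err: verifyBatch(job.cellProofs)}
		if res.err != nil {
			ids := make([]int, 0, job.sizeHint)
			for b := range job.cellProofs {
				ids = append(ids, b.id)
			}
			res.segment = segment{ids: ids}
		}
		job.resChan <- res
	}
}

func main() {
	jobs := make(chan kzgJob)
	go worker(jobs)

	proofs := []bundle{{id: 1}, {id: 2}, {id: 3}}
	res := make(chan errorWithSegment, 1)
	jobs <- kzgJob{sizeHint: len(proofs), cellProofs: slices.Values(proofs), resChan: res}
	fmt.Println((<-res).err) // <nil>
	close(jobs)
}
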
@@ -256,6 +256,16 @@ var (
Help: "Count the number of data column sidecars obtained via the execution layer.",
},
)

usefulFullColumnsReceivedTotal = promauto.NewCounterVec(prometheus.CounterOpts{
Name: "beacon_useful_full_columns_received_total",
Help: "Number of useful full columns (any cell being useful) received",
}, []string{"column_index"})

partialMessageColumnCompletionsTotal = promauto.NewCounterVec(prometheus.CounterOpts{
Name: "beacon_partial_message_column_completions_total",
Help: "How often the partial message first completed the column",
}, []string{"column_index"})
)

func (s *Service) updateMetrics() {

@@ -5,6 +5,8 @@ import (
"fmt"
"reflect"
"runtime/debug"
"slices"
"strconv"
"strings"
"sync"
"time"
@@ -14,11 +16,13 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/partialdatacolumnbroadcaster"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
"github.com/OffchainLabs/prysm/v7/config/features"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
@@ -61,6 +65,15 @@ type subscribeParameters struct {
// getSubnetsRequiringPeers is a function that returns all subnets that require peers to be found
// but for which no subscriptions are needed.
getSubnetsRequiringPeers func(currentSlot primitives.Slot) map[uint64]bool

partial *partialSubscribeParameters
}

type partialSubscribeParameters struct {
broadcaster *partialdatacolumnbroadcaster.PartialColumnBroadcaster
validateHeader partialdatacolumnbroadcaster.HeaderValidator
validate partialdatacolumnbroadcaster.ColumnValidator
handle partialdatacolumnbroadcaster.SubHandler
}

// shortTopic is a less verbose version of topic strings used for logging.
@@ -320,6 +333,35 @@ func (s *Service) registerSubscribers(nse params.NetworkScheduleEntry) bool {
// New gossip topic in Fulu.
if params.BeaconConfig().FuluForkEpoch <= nse.Epoch {
s.spawn(func() {
var ps *partialSubscribeParameters
broadcaster := s.cfg.p2p.PartialColumnBroadcaster()
if broadcaster != nil {
ps = &partialSubscribeParameters{
broadcaster: broadcaster,
validateHeader: func(header *ethpb.PartialDataColumnHeader) (bool, error) {
return s.validatePartialDataColumnHeader(context.TODO(), header)
},
validate: func(cellsToVerify []blocks.CellProofBundle) error {
return peerdas.VerifyDataColumnsCellsKZGProofs(len(cellsToVerify), slices.Values(cellsToVerify))
},
handle: func(topic string, col blocks.VerifiedRODataColumn) {
ctx, cancel := context.WithTimeout(s.ctx, pubsubMessageTimeout)
defer cancel()

slot := col.SignedBlockHeader.Header.Slot
proposerIndex := col.SignedBlockHeader.Header.ProposerIndex
if !s.hasSeenDataColumnIndex(slot, proposerIndex, col.Index) {
s.setSeenDataColumnIndex(slot, proposerIndex, col.Index)
// This column was completed from a partial message.
partialMessageColumnCompletionsTotal.WithLabelValues(strconv.FormatUint(col.Index, 10)).Inc()
}
err := s.verifiedRODataColumnSubscriber(ctx, col)
if err != nil {
log.WithError(err).Error("Failed to handle verified RO data column subscriber")
}
},
}
}
s.subscribeWithParameters(subscribeParameters{
topicFormat: p2p.DataColumnSubnetTopicFormat,
validate: s.validateDataColumn,
@@ -327,6 +369,7 @@ func (s *Service) registerSubscribers(nse params.NetworkScheduleEntry) bool {
nse: nse,
getSubnetsToJoin: s.dataColumnSubnetIndices,
getSubnetsRequiringPeers: s.allDataColumnSubnets,
partial: ps,
})
})
}
@@ -365,11 +408,10 @@ func (s *Service) subscribe(topic string, validator wrappedVal, handle subHandle
// Impossible condition as it would mean topic does not exist.
panic(fmt.Sprintf("%s is not mapped to any message in GossipTopicMappings", topic)) // lint:nopanic -- Impossible condition.
}
s.subscribeWithBase(s.addDigestToTopic(topic, nse.ForkDigest), validator, handle)
s.subscribeWithBase(s.addDigestToTopic(topic, nse.ForkDigest)+s.cfg.p2p.Encoding().ProtocolSuffix(), validator, handle)
}

func (s *Service) subscribeWithBase(topic string, validator wrappedVal, handle subHandler) *pubsub.Subscription {
topic += s.cfg.p2p.Encoding().ProtocolSuffix()
log := log.WithField("topic", topic)

// Do not resubscribe already seen subscriptions.
@@ -532,7 +574,11 @@ func (s *Service) wrapAndReportValidation(topic string, v wrappedVal) (string, p
func (s *Service) pruneNotWanted(t *subnetTracker, wantedSubnets map[uint64]bool) {
for _, subnet := range t.unwanted(wantedSubnets) {
t.cancelSubscription(subnet)
s.unSubscribeFromTopic(t.fullTopic(subnet, s.cfg.p2p.Encoding().ProtocolSuffix()))
topic := t.fullTopic(subnet, s.cfg.p2p.Encoding().ProtocolSuffix())
if t.partial != nil {
_ = t.partial.broadcaster.Unsubscribe(topic)
}
s.unSubscribeFromTopic(topic)
}
}

@@ -579,9 +625,34 @@ func (s *Service) trySubscribeSubnets(t *subnetTracker) {
subnetsToJoin := t.getSubnetsToJoin(s.cfg.clock.CurrentSlot())
s.pruneNotWanted(t, subnetsToJoin)
for _, subnet := range t.missing(subnetsToJoin) {
// TODO: subscribeWithBase appends the protocol suffix, other methods don't. Make this consistent.
topic := t.fullTopic(subnet, "")
t.track(subnet, s.subscribeWithBase(topic, t.validate, t.handle))
topicStr := t.fullTopic(subnet, s.cfg.p2p.Encoding().ProtocolSuffix())
topicOpts := make([]pubsub.TopicOpt, 0, 2)

requestPartial := t.partial != nil

if requestPartial {
// TODO: do we want the ability to support partial messages without requesting them?
topicOpts = append(topicOpts, pubsub.RequestPartialMessages())
}

topic, err := s.cfg.p2p.JoinTopic(topicStr, topicOpts...)
if err != nil {
log.WithError(err).Error("Failed to join topic")
return
}

if requestPartial {
log.Info("Subscribing to partial columns on", topicStr)
err = t.partial.broadcaster.Subscribe(topic, t.partial.validateHeader, t.partial.validate, t.partial.handle)

if err != nil {
log.WithError(err).Error("Failed to subscribe to partial column")
}
}

// We still need to subscribe to the full columns as well as partial in
// case our peers don't support partial messages.
t.track(subnet, s.subscribeWithBase(topicStr, t.validate, t.handle))
}
}

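To summarize the subscription flow above: partial messages are requested only when a partial-column broadcaster is configured, and a full-column subscription is always kept as a fallback for peers that do not support partial messages. The sketch below restates that ordering with hypothetical stand-ins for the pubsub types (joiner, topicOpt); it is not the libp2p API.

package main

import "fmt"

// topicOpt and joiner are local stand-ins for the pubsub types used in the
// diff (pubsub.TopicOpt, JoinTopic, RequestPartialMessages); they only
// illustrate the subscription flow, not the real library.
type topicOpt string

type joiner struct{}

func (joiner) JoinTopic(name string, opts ...topicOpt) (string, error) {
	fmt.Printf("joined %s with opts %v\n", name, opts)
	return name, nil
}

// subscribeColumnSubnet mirrors the flow in trySubscribeSubnets: request
// partial messages only when enabled, and always keep the full-column
// subscription as a fallback.
func subscribeColumnSubnet(p joiner, topicStr string, partialEnabled bool) error {
	opts := make([]topicOpt, 0, 1)
	if partialEnabled {
		opts = append(opts, topicOpt("request-partial-messages"))
	}
	topic, err := p.JoinTopic(topicStr, opts...)
	if err != nil {
		return err
	}
	if partialEnabled {
		fmt.Println("subscribed to partial columns on", topic)
	}
	fmt.Println("subscribed to full columns on", topic)
	return nil
}

func main() {
	_ = subscribeColumnSubnet(joiner{}, "/eth2/abcd1234/data_column_sidecar_3/ssz_snappy", true)
}
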
@@ -11,6 +11,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition/interop"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v7/config/features"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
@@ -201,6 +202,16 @@ func (s *Service) processDataColumnSidecarsFromExecution(ctx context.Context, so
return nil, errors.Wrap(err, "column indices to sample")
}

// TODO: the deadline here was removed in https://github.com/OffchainLabs/prysm/pull/16155/files
// make sure that reintroducing it does not cause issues.
secondsPerHalfSlot := time.Duration(params.BeaconConfig().SecondsPerSlot/2) * time.Second
ctx, cancel := context.WithTimeout(ctx, secondsPerHalfSlot)
defer cancel()

digest, err := s.currentForkDigest()
if err != nil {
return nil, err
}
log := log.WithFields(logrus.Fields{
"root": fmt.Sprintf("%#x", source.Root()),
"slot": source.Slot(),
@@ -231,11 +242,30 @@ func (s *Service) processDataColumnSidecarsFromExecution(ctx context.Context, so
}

// Try to reconstruct data column constructedSidecars from the execution client.
constructedSidecars, err := s.cfg.executionReconstructor.ConstructDataColumnSidecars(ctx, source)
constructedSidecars, partialColumns, err := s.cfg.executionReconstructor.ConstructDataColumnSidecars(ctx, source)
if err != nil {
return nil, errors.Wrap(err, "reconstruct data column sidecars")
}

partialBroadcaster := s.cfg.p2p.PartialColumnBroadcaster()
if partialBroadcaster != nil {
log.WithField("len(partialColumns)", len(partialColumns)).Debug("Publishing partial columns")
for i := range uint64(len(partialColumns)) {
if !columnIndicesToSample[i] {
continue
}
subnet := peerdas.ComputeSubnetForDataColumnSidecar(i)
topic := fmt.Sprintf(p2p.DataColumnSubnetTopicFormat, digest, subnet) + s.cfg.p2p.Encoding().ProtocolSuffix()
// Publish the partial column. This is idempotent if we republish the same data twice.
// Note, the "partial column" may indeed be complete. We still
// should publish to help our peers.
err = partialBroadcaster.Publish(topic, partialColumns[i])
if err != nil {
log.WithError(err).Warn("Failed to publish partial column")
}
}
}

// No sidecars are retrieved from the EL, retry later
constructedCount := uint64(len(constructedSidecars))

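The publish loop above maps a sampled column index to its subnet and gossip topic before handing the partial column to the broadcaster. Below is a small sketch of that mapping, assuming an illustrative topic format, protocol suffix, and subnet count (check Prysm's p2p and peerdas packages for the real values).

package main

import "fmt"

// dataColumnSubnetTopicFormat and the subnet count below are illustrative
// stand-ins for Prysm's p2p.DataColumnSubnetTopicFormat and the PeerDAS
// subnet count; do not rely on either value without checking the config.
const (
	dataColumnSubnetTopicFormat  = "/eth2/%x/data_column_sidecar_%d"
	dataColumnSidecarSubnetCount = 128
)

// computeSubnet mirrors the index-to-subnet mapping used when selecting a
// gossip topic for a column (assumed to be a simple modulo here).
func computeSubnet(columnIndex uint64) uint64 {
	return columnIndex % dataColumnSidecarSubnetCount
}

func main() {
	digest := [4]byte{0xab, 0xcd, 0x12, 0x34}
	suffix := "/ssz_snappy" // assumed protocol suffix appended by the encoder
	sampled := map[uint64]bool{0: true, 2: true}

	for columnIndex := uint64(0); columnIndex < 4; columnIndex++ {
		if !sampled[columnIndex] {
			// Only columns we are responsible for sampling are published.
			continue
		}
		subnet := computeSubnet(columnIndex)
		topic := fmt.Sprintf(dataColumnSubnetTopicFormat, digest, subnet) + suffix
		fmt.Println("would publish partial column", columnIndex, "to", topic)
	}
}
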
@@ -307,7 +337,7 @@ func (s *Service) broadcastAndReceiveUnseenDataColumnSidecars(
}

// Broadcast all the data column sidecars we reconstructed but did not see via gossip (non blocking).
if err := s.cfg.p2p.BroadcastDataColumnSidecars(ctx, unseenSidecars); err != nil {
if err := s.cfg.p2p.BroadcastDataColumnSidecars(ctx, unseenSidecars, nil); err != nil {
return nil, errors.Wrap(err, "broadcast data column sidecars")
}

@@ -3,6 +3,7 @@ package sync
import (
"context"
"fmt"
"strconv"

"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed"
opfeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
@@ -24,6 +25,13 @@ func (s *Service) dataColumnSubscriber(ctx context.Context, msg proto.Message) e
return fmt.Errorf("message was not type blocks.VerifiedRODataColumn, type=%T", msg)
}

// Track useful full columns received via gossip (not previously seen)
slot := sidecar.SignedBlockHeader.Header.Slot
proposerIndex := sidecar.SignedBlockHeader.Header.ProposerIndex
if !s.hasSeenDataColumnIndex(slot, proposerIndex, sidecar.Index) {
usefulFullColumnsReceivedTotal.WithLabelValues(strconv.FormatUint(sidecar.Index, 10)).Inc()
}

if err := s.receiveDataColumnSidecar(ctx, sidecar); err != nil {
return wrapDataColumnError(sidecar, "receive data column sidecar", err)
}
@@ -57,6 +65,38 @@ func (s *Service) dataColumnSubscriber(ctx context.Context, msg proto.Message) e
return nil
}

func (s *Service) verifiedRODataColumnSubscriber(ctx context.Context, sidecar blocks.VerifiedRODataColumn) error {
log.WithField("slot", sidecar.Slot()).WithField("column", sidecar.Index).Info("Received data column sidecar")

if err := s.receiveDataColumnSidecar(ctx, sidecar); err != nil {
return errors.Wrap(err, "receive data column sidecar")
}

var wg errgroup.Group
wg.Go(func() error {
if err := s.processDataColumnSidecarsFromReconstruction(ctx, sidecar); err != nil {
return errors.Wrap(err, "process data column sidecars from reconstruction")
}

return nil
})

wg.Go(func() error {
// Broadcast our complete column for peers that don't use partial messages
if err := s.cfg.p2p.BroadcastDataColumnSidecars(ctx, []blocks.VerifiedRODataColumn{sidecar}, nil); err != nil {
return errors.Wrap(err, "process data column sidecars from execution")
}

return nil
})

if err := wg.Wait(); err != nil {
return err
}

return nil
}

// receiveDataColumnSidecar receives a single data column sidecar: marks it as seen and saves it to the chain.
// Do not loop over this function to receive multiple sidecars, use receiveDataColumnSidecars instead.
func (s *Service) receiveDataColumnSidecar(ctx context.Context, sidecar blocks.VerifiedRODataColumn) error {

@@ -71,6 +71,7 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs
roDataColumns := []blocks.RODataColumn{roDataColumn}

// Create the verifier.
// Question(marco): Do we want the multiple columns verifier? Is batching used only for kzg proofs?
verifier := s.newColumnsVerifier(roDataColumns, verification.GossipDataColumnSidecarRequirements)

// Start the verification process.

143
beacon-chain/sync/validate_partial_header.go
Normal file
@@ -0,0 +1,143 @@
package sync

import (
"context"

"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
)

var (
// REJECT errors - peer should be penalized
errHeaderEmptyCommitments = errors.New("header has no kzg commitments")
errHeaderParentInvalid = errors.New("header parent invalid")
errHeaderSlotNotAfterParent = errors.New("header slot not after parent")
errHeaderNotFinalizedDescendant = errors.New("header not finalized descendant")
errHeaderInvalidInclusionProof = errors.New("invalid inclusion proof")
errHeaderInvalidSignature = errors.New("invalid proposer signature")
errHeaderUnexpectedProposer = errors.New("unexpected proposer index")

// IGNORE errors - don't penalize peer
errHeaderNil = errors.New("nil header")
errHeaderFromFuture = errors.New("header is from future slot")
errHeaderNotAboveFinalized = errors.New("header slot not above finalized")
errHeaderParentNotSeen = errors.New("header parent not seen")
)

// validatePartialDataColumnHeader validates a PartialDataColumnHeader per the consensus spec.
// Returns (reject, err) where reject=true means the peer should be penalized.
// TODO: we should consolidate this with the existing DataColumn validation pipeline.
func (s *Service) validatePartialDataColumnHeader(ctx context.Context, header *ethpb.PartialDataColumnHeader) (reject bool, err error) {
if header == nil || header.SignedBlockHeader == nil || header.SignedBlockHeader.Header == nil {
return false, errHeaderNil // IGNORE
}

blockHeader := header.SignedBlockHeader.Header
headerSlot := blockHeader.Slot
parentRoot := bytesutil.ToBytes32(blockHeader.ParentRoot)

// [REJECT] kzg_commitments list is non-empty
if len(header.KzgCommitments) == 0 {
return true, errHeaderEmptyCommitments
}

// [IGNORE] Not from future slot (with MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance)
currentSlot := s.cfg.clock.CurrentSlot()
if headerSlot > currentSlot {
maxDisparity := params.BeaconConfig().MaximumGossipClockDisparityDuration()
slotStart, err := s.cfg.clock.SlotStart(headerSlot)
if err != nil {
return false, err
}
if s.cfg.clock.Now().Before(slotStart.Add(-maxDisparity)) {
return false, errHeaderFromFuture // IGNORE
}
}

// [IGNORE] Slot above finalized
finalizedCheckpoint := s.cfg.chain.FinalizedCheckpt()
startSlot, err := slots.EpochStart(finalizedCheckpoint.Epoch)
if err != nil {
return false, err
}
if headerSlot <= startSlot {
return false, errHeaderNotAboveFinalized // IGNORE
}

// [IGNORE] Parent has been seen
if !s.cfg.chain.HasBlock(ctx, parentRoot) {
return false, errHeaderParentNotSeen // IGNORE
}

// [REJECT] Parent passes validation (not a bad block)
if s.hasBadBlock(parentRoot) {
return true, errHeaderParentInvalid
}

// [REJECT] Header slot > parent slot
parentSlot, err := s.cfg.chain.RecentBlockSlot(parentRoot)
if err != nil {
return false, errors.Wrap(err, "get parent slot")
}
if headerSlot <= parentSlot {
return true, errHeaderSlotNotAfterParent
}

// [REJECT] Finalized checkpoint is ancestor (parent is in forkchoice)
if !s.cfg.chain.InForkchoice(parentRoot) {
return true, errHeaderNotFinalizedDescendant
}

// [REJECT] Inclusion proof valid
if err := peerdas.VerifyPartialDataColumnHeaderInclusionProof(header); err != nil {
return true, errHeaderInvalidInclusionProof
}

// [REJECT] Valid proposer signature
parentState, err := s.cfg.stateGen.StateByRoot(ctx, parentRoot)
if err != nil {
return false, errors.Wrap(err, "get parent state")
}

proposerIdx := blockHeader.ProposerIndex
proposer, err := parentState.ValidatorAtIndex(proposerIdx)
if err != nil {
return false, errors.Wrap(err, "get proposer")
}

domain, err := signing.Domain(
parentState.Fork(),
slots.ToEpoch(headerSlot),
params.BeaconConfig().DomainBeaconProposer,
parentState.GenesisValidatorsRoot(),
)
if err != nil {
return false, errors.Wrap(err, "get domain")
}

if err := signing.VerifyBlockHeaderSigningRoot(
blockHeader,
proposer.PublicKey,
header.SignedBlockHeader.Signature,
domain,
); err != nil {
return true, errHeaderInvalidSignature
}

// [REJECT] Expected proposer for slot
expectedProposer, err := helpers.BeaconProposerIndexAtSlot(ctx, parentState, headerSlot)
if err != nil {
return false, errors.Wrap(err, "compute expected proposer")
}
if expectedProposer != proposerIdx {
return true, errHeaderUnexpectedProposer
}

return false, nil // Valid header
}
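
The function above uses a (reject, err) pair to distinguish penalizable failures from benign ones. Below is a minimal sketch of how a caller might fold that pair into a gossip validation verdict; the result type here is a local stand-in for libp2p pubsub's validation constants, not the library API itself.

package main

import (
	"errors"
	"fmt"
)

// validationResult is a local stand-in for a pubsub ValidationResult. The
// mapping below reflects the (reject, err) convention of
// validatePartialDataColumnHeader: reject=true penalizes the peer, any other
// error only ignores the message, and no error accepts it.
type validationResult int

const (
	validationAccept validationResult = iota
	validationIgnore
	validationReject
)

func toValidationResult(reject bool, err error) validationResult {
	switch {
	case err == nil:
		return validationAccept
	case reject:
		return validationReject
	default:
		return validationIgnore
	}
}

func main() {
	fmt.Println(toValidationResult(false, nil))                                   // 0: accept
	fmt.Println(toValidationResult(false, errors.New("parent not seen")))         // 1: ignore
	fmt.Println(toValidationResult(true, errors.New("invalid inclusion proof")))  // 2: reject
}
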
@@ -78,11 +78,21 @@ func (ini *Initializer) NewBlobVerifier(b blocks.ROBlob, reqs []Requirement) *RO
// WARNING: The returned verifier is not thread-safe, and should not be used concurrently.
func (ini *Initializer) NewDataColumnsVerifier(roDataColumns []blocks.RODataColumn, reqs []Requirement) *RODataColumnsVerifier {
return &RODataColumnsVerifier{
sharedResources: ini.shared,
dataColumns: roDataColumns,
results: newResults(reqs...),
verifyDataColumnsCommitment: peerdas.VerifyDataColumnsSidecarKZGProofs,
stateByRoot: make(map[[fieldparams.RootLength]byte]state.BeaconState),
sharedResources: ini.shared,
dataColumns: roDataColumns,
results: newResults(reqs...),
verifyDataColumnsCommitment: func(rc []blocks.RODataColumn) error {
if len(rc) == 0 {
return nil
}
var sizeHint int
if len(rc) > 0 {
sizeHint = len(rc[0].Column)
}
sizeHint *= len(rc)
return peerdas.VerifyDataColumnsCellsKZGProofs(sizeHint, blocks.RODataColumnsToCellProofBundles(rc))
},
stateByRoot: make(map[[fieldparams.RootLength]byte]state.BeaconState),
}
}

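As a worked example of the size-hint arithmetic in the closure above (cells per column times number of columns), a tiny sketch with a stand-in column type and arbitrary cell counts:

package main

import "fmt"

// column is a stand-in for blocks.RODataColumn; only the cell count matters
// for the size hint passed to the batched KZG verification.
type column struct{ cells int }

// batchSizeHint reproduces the closure's arithmetic: cells-per-column of the
// first column times the number of columns, with an explicit empty-input guard.
func batchSizeHint(cols []column) int {
	if len(cols) == 0 {
		return 0
	}
	return cols[0].cells * len(cols)
}

func main() {
	cols := []column{{cells: 6}, {cells: 6}, {cells: 6}, {cells: 6}}
	fmt.Println(batchSizeHint(cols)) // 24 cell proofs expected in the batch
}
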
@@ -1,3 +0,0 @@
### Fixed

- Fixed a bug where `cmd/beacon-chain/execution` was being ignored by `hack/gen-logs.sh` due to a `.gitignore` rule.
@@ -1,3 +0,0 @@
### Changed

- Fixed the logging issue described in #16314.
@@ -1,11 +0,0 @@
### Ignored

- Moved the healthy-node discovery logic into the connection provider and cleaned up various naming.

### Changed

- Improved node fallback logs.

### Fixed

- Fixed a potential race condition when switching hosts quickly and reconnecting to the same host on an old connection.
@@ -1,3 +0,0 @@
### Changed

- Improved integrations with ethspecify so specrefs can be used throughout the codebase.
3
changelog/marco_add_partial_columns.md
Normal file
@@ -0,0 +1,3 @@
### Added

- Add support for partial columns
@@ -1,2 +0,0 @@
### Ignored
- Remove unused `HighestBlockDelay` method in forkchoice.
@@ -1,2 +0,0 @@
### Ignored
- Remove unused method in forkchoice.
@@ -1,2 +0,0 @@
### Ignored
- Remove unused map in forkchoice.
@@ -1,9 +1,5 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package execution

import "github.com/sirupsen/logrus"

// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "cmd/beacon-chain/execution")
var log = logrus.WithField("prefix", "execution")

@@ -368,4 +368,9 @@ var (
Usage: "Disables the engine_getBlobsV2 usage.",
Hidden: true,
}
// PartialDataColumns enables partial message dissemination for PeerDAS data columns.
PartialDataColumns = &cli.BoolFlag{
Name: "partial-data-columns",
Usage: "Enable cell-level dissemination for PeerDAS data columns",
}
)

@@ -162,6 +162,7 @@ var appFlags = []cli.Flag{
flags.BatchVerifierLimit,
flags.StateDiffExponents,
flags.DisableEphemeralLogFile,
flags.PartialDataColumns,
}

func init() {
@@ -188,8 +189,8 @@ func before(ctx *cli.Context) error {
return errors.Wrap(err, "failed to parse log vmodule")
}

// set the global logging level and data
logs.SetLoggingLevelAndData(verbosityLevel, vmodule, maxLevel, ctx.Bool(flags.DisableEphemeralLogFile.Name))
// set the global logging level to allow for the highest verbosity requested
logs.SetLoggingLevel(max(verbosityLevel, maxLevel))

format := ctx.String(cmd.LogFormat.Name)
switch format {
@@ -210,7 +211,6 @@ func before(ctx *cli.Context) error {
Formatter: formatter,
Writer: os.Stderr,
AllowedLevels: logrus.AllLevels[:max(verbosityLevel, maxLevel)+1],
Identifier: logs.LogTargetUser,
})
case "fluentd":
f := joonix.NewFormatter()

@@ -75,6 +75,7 @@ var appHelpFlagGroups = []flagGroup{
flags.RPCPort,
flags.BatchVerifierLimit,
flags.StateDiffExponents,
flags.PartialDataColumns,
},
},
{

@@ -164,8 +164,8 @@ func main() {
return errors.Wrap(err, "failed to parse log vmodule")
}

// set the global logging level and data
logs.SetLoggingLevelAndData(verbosityLevel, vmodule, maxLevel, ctx.Bool(flags.DisableEphemeralLogFile.Name))
// set the global logging level to allow for the highest verbosity requested
logs.SetLoggingLevel(max(maxLevel, verbosityLevel))

logFileName := ctx.String(cmd.LogFileName.Name)

@@ -188,7 +188,6 @@ func main() {
Formatter: formatter,
Writer: os.Stderr,
AllowedLevels: logrus.AllLevels[:max(verbosityLevel, maxLevel)+1],
Identifier: logs.LogTargetUser,
})
case "fluentd":
f := joonix.NewFormatter()

@@ -8,6 +8,8 @@ go_library(
"get_payload.go",
"getters.go",
"kzg.go",
"log.go",
"partialdatacolumn.go",
"proofs.go",
"proto.go",
"roblob.go",
@@ -36,8 +38,11 @@ go_library(
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/validator-client:go_default_library",
"//runtime/version:go_default_library",
"@com_github_libp2p_go_libp2p_pubsub//partialmessages:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)
@@ -49,6 +54,7 @@ go_test(
"factory_test.go",
"getters_test.go",
"kzg_test.go",
"partialdatacolumn_invariants_test.go",
"proofs_test.go",
"proto_test.go",
"roblob_test.go",
@@ -74,6 +80,9 @@ go_test(
"//runtime/version:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
"@com_github_libp2p_go_libp2p_pubsub//partialmessages:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
],

@@ -1,9 +1,9 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package fallback
package blocks

import "github.com/sirupsen/logrus"

// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "api/fallback")
var log = logrus.WithField("package", "consensus-types/blocks")
Some files were not shown because too many files have changed in this diff.