Compare commits


44 Commits

Author SHA1 Message Date
Marco Munizaga
80e0227bac Partial columns addendum (#16327)
See commits for details. Best reviewed commit by commit.
2026-02-10 12:07:27 +04:00
Aarsh Shah
b36bc1fe17 Merge branch 'develop' into rebased-partial-columns 2026-02-04 10:29:56 +04:00
Aarsh Shah
a329a77037 fix partial columns broadcast (#16321)
Fix the data column broadcast logic for partial and full columns.
2026-02-04 10:25:53 +04:00
Aarsh Shah
bfd9ff8651 fix lint 2026-02-03 18:10:35 +04:00
Aarsh Shah
79301d4db6 fix bazel 2026-02-03 18:03:26 +04:00
Aarsh Shah
fbfea6f753 fix log files CI error 2026-02-03 17:54:41 +04:00
Aarsh Shah
562ef25527 fix CI 2026-02-03 17:47:40 +04:00
Aarsh Shah
488971f989 fix CI 2026-02-03 17:44:33 +04:00
Aarsh Shah
b129eaaeb8 fix lint 2026-02-03 17:01:45 +04:00
Aarsh Shah
90302adbd2 fix CI 2026-02-03 16:17:36 +04:00
Aarsh Shah
a6262ba07b go mod tidy and partial columns 2026-02-03 15:23:59 +04:00
Aarsh Shah
7d5c8d6964 Merge branch 'develop' into rebased-partial-columns 2026-02-03 10:20:18 +04:00
Marco Munizaga
38cae3f8de add todos from call
This came from a call with Aarsh and Kasey.
2026-02-02 11:05:40 -06:00
Marco Munizaga
a5e6d0a3ac Implement gossipsub_mesh_peers metric 2026-02-02 11:05:40 -06:00
Marco Munizaga
c4a308e598 Track number of peers in mesh
along with tracking which peers request partial messages
2026-02-02 11:05:40 -06:00
Marco Munizaga
ba720c1b4b update go-libp2p-pubsub with new tracer
The new tracer interface provides the peer ID in tracer.RecvRPC.
2026-02-02 11:05:40 -06:00
Marco Munizaga
5a3f45f91f add todo 2026-02-02 11:05:40 -06:00
Marco Munizaga
29010edcd1 return valid if there are no datacolumns to validate 2026-02-02 11:05:40 -06:00
Marco Munizaga
045c29ccce cache partial data column header by group ID 2026-02-02 11:05:40 -06:00
Marco Munizaga
0d80bbe44f Add partial message metrics 2026-02-02 11:05:40 -06:00
Marco Munizaga
14f13ed902 more context around errors 2026-02-02 11:05:40 -06:00
Marco Munizaga
308199c2e7 fix test typo 2026-02-02 11:05:40 -06:00
Marco Munizaga
7a04c6b645 eagerly push the partial data column header 2026-02-02 11:05:40 -06:00
Marco Munizaga
93cda45e18 Include the version byte in the group ID 2026-02-02 11:05:40 -06:00
Marco Munizaga
1b4265ef3f add partial data column header 2026-02-02 11:05:40 -06:00
Marco Munizaga
90d1716fd7 Update go-libp2p-pubsub 2026-02-02 11:05:40 -06:00
Marco Munizaga
c4f1a9ac4f add todo 2026-02-02 11:05:40 -06:00
Marco Munizaga
73d948a710 add partial-data-columns flag 2026-02-02 11:05:38 -06:00
Marco Munizaga
5e2985d36b beacon-chain/sync: subscribe to partial columns 2026-02-02 11:04:53 -06:00
Marco Munizaga
7ac3f3cb68 publish partial columns when proposing a block 2026-02-02 11:04:53 -06:00
Marco Munizaga
0a759c3d15 beacon-chain/execution: return partial columns and use getBlobsV3
... if available
2026-02-02 11:04:53 -06:00
Marco Munizaga
bf7ca00780 core/peerdas: Add PartialColumns helper 2026-02-02 11:04:52 -06:00
Marco Munizaga
efcb98bcaa core/peerdas: support partial responses 2026-02-02 11:04:52 -06:00
Marco Munizaga
7b0de5ad0e beacon-chain/p2p: own and start PartialColumnBroadcaster 2026-02-02 11:04:52 -06:00
Marco Munizaga
2e15cd2068 Add metrics for gossipsub message sizes 2026-02-02 11:04:52 -06:00
Marco Munizaga
2e5192e496 Implement PartialColumnBroadcaster 2026-02-02 11:04:52 -06:00
Marco Munizaga
729c54a300 refactor DataColumn Cell KZG Proof verification 2026-02-02 11:04:52 -06:00
Marco Munizaga
4556aa266a avoid needless copy 2026-02-02 11:04:52 -06:00
Marco Munizaga
2f1cac217d Add PartialDataColumn type 2026-02-02 11:04:52 -06:00
Marco Munizaga
6bfc779ea1 proto: Add PartialDataColumnSidecar 2026-02-02 11:04:52 -06:00
Marco Munizaga
90481d6aa8 clone slice in testing util 2026-02-02 11:04:52 -06:00
Marco Munizaga
9f64007dc1 logrusadapter for slog 2026-02-02 11:04:52 -06:00
Marco Munizaga
879ea624ec fix multiaddr comparison 2026-02-02 11:04:52 -06:00
Marco Munizaga
79841f451c deps: update libp2p deps
for partial message support and simnet support
2026-02-02 11:04:52 -06:00
276 changed files with 7959 additions and 10306 deletions

View File

@@ -12,11 +12,11 @@ jobs:
- name: Check version consistency
run: |
WORKSPACE_VERSION=$(grep 'consensus_spec_version = ' WORKSPACE | sed 's/.*"\(.*\)"/\1/')
ETHSPECIFY_VERSION=$(grep '^version:' .ethspecify.yml | sed 's/version: //')
ETHSPECIFY_VERSION=$(grep '^version:' specrefs/.ethspecify.yml | sed 's/version: //')
if [ "$WORKSPACE_VERSION" != "$ETHSPECIFY_VERSION" ]; then
echo "Version mismatch between WORKSPACE and ethspecify"
echo " WORKSPACE: $WORKSPACE_VERSION"
echo " .ethspecify.yml: $ETHSPECIFY_VERSION"
echo " specrefs/.ethspecify.yml: $ETHSPECIFY_VERSION"
exit 1
else
echo "Versions match: $WORKSPACE_VERSION"
@@ -26,7 +26,7 @@ jobs:
run: python3 -mpip install ethspecify
- name: Update spec references
run: ethspecify
run: ethspecify process --path=specrefs
- name: Check for differences
run: |
@@ -40,4 +40,4 @@ jobs:
fi
- name: Check spec references
run: ethspecify check
run: ethspecify check --path=specrefs

View File

@@ -273,16 +273,16 @@ filegroup(
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)
consensus_spec_version = "v1.7.0-alpha.2"
consensus_spec_version = "v1.7.0-alpha.1"
load("@prysm//tools:download_spectests.bzl", "consensus_spec_tests")
consensus_spec_tests(
name = "consensus_spec_tests",
flavors = {
"general": "sha256-iGQsGZ1cHah+2CSod9jC3kN8Ku4n6KO0hIwfINrn/po=",
"minimal": "sha256-TgcYt8N8sXSttdHTGvOa+exUZ1zn1UzlAMz0V7i37xc=",
"mainnet": "sha256-LnXyiLoJtrvEvbqLDSAAqpLMdN/lXv92SAgYG8fNjCs=",
"general": "sha256-j5R3jA7Oo4OSDMTvpMuD+8RomaCByeFSwtfkq6fL0Zg=",
"minimal": "sha256-tdTqByoyswOS4r6OxFmo70y2BP7w1TgEok+gf4cbxB0=",
"mainnet": "sha256-5gB4dt6SnSDKzdBc06VedId3NkgvSYyv9n9FRxWKwYI=",
},
version = consensus_spec_version,
)
@@ -298,7 +298,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-Y/67Dg393PksZj5rTFNLntiJ6hNdB7Rxbu5gZE2gebY=",
integrity = "sha256-J+43DrK1pF658kTXTwMS6zGf4KDjvas++m8w2a8swpg=",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)

View File

@@ -1,19 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"fallback.go",
"log.go",
],
importpath = "github.com/OffchainLabs/prysm/v7/api/fallback",
visibility = ["//visibility:public"],
deps = ["@com_github_sirupsen_logrus//:go_default_library"],
)
go_test(
name = "go_default_test",
srcs = ["fallback_test.go"],
embed = [":go_default_library"],
deps = ["//testing/assert:go_default_library"],
)

View File

@@ -1,66 +0,0 @@
package fallback
import (
"context"
"github.com/sirupsen/logrus"
)
// HostProvider is the subset of connection-provider methods that EnsureReady
// needs. Both grpc.GrpcConnectionProvider and rest.RestConnectionProvider
// satisfy this interface.
type HostProvider interface {
Hosts() []string
CurrentHost() string
SwitchHost(index int) error
}
// ReadyChecker can report whether the current endpoint is ready.
// iface.NodeClient satisfies this implicitly.
type ReadyChecker interface {
IsReady(ctx context.Context) bool
}
// EnsureReady iterates through the configured hosts and returns true as soon as
// one responds as ready. It starts from the provider's current host and wraps
// around using modular arithmetic, performing failover when a host is not ready.
func EnsureReady(ctx context.Context, provider HostProvider, checker ReadyChecker) bool {
hosts := provider.Hosts()
numHosts := len(hosts)
startingHost := provider.CurrentHost()
var attemptedHosts []string
// Find current index
currentIdx := 0
for i, h := range hosts {
if h == startingHost {
currentIdx = i
break
}
}
for i := range numHosts {
if checker.IsReady(ctx) {
if len(attemptedHosts) > 0 {
log.WithFields(logrus.Fields{
"previous": startingHost,
"current": provider.CurrentHost(),
"tried": attemptedHosts,
}).Info("Switched to responsive beacon node")
}
return true
}
attemptedHosts = append(attemptedHosts, provider.CurrentHost())
// Try next host if not the last iteration
if i < numHosts-1 {
nextIdx := (currentIdx + i + 1) % numHosts
if err := provider.SwitchHost(nextIdx); err != nil {
log.WithError(err).Error("Failed to switch host")
}
}
}
log.WithField("tried", attemptedHosts).Warn("No responsive beacon node found")
return false
}
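
For reference, a tiny runnable sketch of the wrap-around order produced by the (currentIdx + i + 1) % numHosts step in the removed helper above: starting from a non-zero index, failover visits the remaining hosts in order and then wraps back to the front, as the wrap-around test below also exercises. The host URLs here are made up.

package main

import "fmt"

func main() {
	hosts := []string{"http://host0:3500", "http://host1:3500", "http://host2:3500"}
	numHosts := len(hosts)
	currentIdx := 1 // start from a non-zero index

	// The first readiness check targets the current host; each subsequent
	// attempt switches to (currentIdx + i + 1) % numHosts, wrapping to index 0.
	fmt.Println("attempt 0:", hosts[currentIdx])
	for i := 0; i < numHosts-1; i++ {
		next := (currentIdx + i + 1) % numHosts
		fmt.Printf("attempt %d: %s\n", i+1, hosts[next])
	}
	// Prints host1, then host2, then host0.
}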

View File

@@ -1,94 +0,0 @@
package fallback
import (
"context"
"testing"
"github.com/OffchainLabs/prysm/v7/testing/assert"
)
// mockHostProvider is a minimal HostProvider for unit tests.
type mockHostProvider struct {
hosts []string
hostIndex int
}
func (m *mockHostProvider) Hosts() []string { return m.hosts }
func (m *mockHostProvider) CurrentHost() string {
return m.hosts[m.hostIndex%len(m.hosts)]
}
func (m *mockHostProvider) SwitchHost(index int) error { m.hostIndex = index; return nil }
// mockReadyChecker records per-call IsReady results in sequence.
type mockReadyChecker struct {
results []bool
idx int
}
func (m *mockReadyChecker) IsReady(_ context.Context) bool {
if m.idx >= len(m.results) {
return false
}
r := m.results[m.idx]
m.idx++
return r
}
func TestEnsureReady_SingleHostReady(t *testing.T) {
provider := &mockHostProvider{hosts: []string{"http://host1:3500"}, hostIndex: 0}
checker := &mockReadyChecker{results: []bool{true}}
assert.Equal(t, true, EnsureReady(t.Context(), provider, checker))
assert.Equal(t, 0, provider.hostIndex)
}
func TestEnsureReady_SingleHostNotReady(t *testing.T) {
provider := &mockHostProvider{hosts: []string{"http://host1:3500"}, hostIndex: 0}
checker := &mockReadyChecker{results: []bool{false}}
assert.Equal(t, false, EnsureReady(t.Context(), provider, checker))
}
func TestEnsureReady_SingleHostError(t *testing.T) {
provider := &mockHostProvider{hosts: []string{"http://host1:3500"}, hostIndex: 0}
checker := &mockReadyChecker{results: []bool{false}}
assert.Equal(t, false, EnsureReady(t.Context(), provider, checker))
}
func TestEnsureReady_MultipleHostsFirstReady(t *testing.T) {
provider := &mockHostProvider{
hosts: []string{"http://host1:3500", "http://host2:3500"},
hostIndex: 0,
}
checker := &mockReadyChecker{results: []bool{true}}
assert.Equal(t, true, EnsureReady(t.Context(), provider, checker))
assert.Equal(t, 0, provider.hostIndex)
}
func TestEnsureReady_MultipleHostsFailoverToSecond(t *testing.T) {
provider := &mockHostProvider{
hosts: []string{"http://host1:3500", "http://host2:3500"},
hostIndex: 0,
}
checker := &mockReadyChecker{results: []bool{false, true}}
assert.Equal(t, true, EnsureReady(t.Context(), provider, checker))
assert.Equal(t, 1, provider.hostIndex)
}
func TestEnsureReady_MultipleHostsNoneReady(t *testing.T) {
provider := &mockHostProvider{
hosts: []string{"http://host1:3500", "http://host2:3500", "http://host3:3500"},
hostIndex: 0,
}
checker := &mockReadyChecker{results: []bool{false, false, false}}
assert.Equal(t, false, EnsureReady(t.Context(), provider, checker))
}
func TestEnsureReady_WrapAroundFromNonZeroIndex(t *testing.T) {
provider := &mockHostProvider{
hosts: []string{"http://host0:3500", "http://host1:3500", "http://host2:3500"},
hostIndex: 1,
}
// host1 (start) fails, host2 fails, host0 succeeds
checker := &mockReadyChecker{results: []bool{false, false, true}}
assert.Equal(t, true, EnsureReady(t.Context(), provider, checker))
assert.Equal(t, 0, provider.hostIndex)
}

View File

@@ -25,11 +25,6 @@ type GrpcConnectionProvider interface {
// SwitchHost switches to the endpoint at the given index.
// The new connection is created lazily on next CurrentConn() call.
SwitchHost(index int) error
// ConnectionCounter returns a monotonically increasing counter that increments
// each time SwitchHost changes the active endpoint. This allows consumers to
// detect connection changes even when the host string returns to a previous value
// (e.g., host0 → host1 → host0).
ConnectionCounter() uint64
// Close closes the current connection.
Close()
}
@@ -43,7 +38,6 @@ type grpcConnectionProvider struct {
// Current connection state (protected by mutex)
currentIndex uint64
conn *grpc.ClientConn
connCounter uint64
mu sync.Mutex
closed bool
@@ -144,7 +138,6 @@ func (p *grpcConnectionProvider) SwitchHost(index int) error {
p.conn = nil // Clear immediately - new connection created lazily
p.currentIndex = uint64(index)
p.connCounter++
// Close old connection asynchronously to avoid blocking the caller
if oldConn != nil {
@@ -162,12 +155,6 @@ func (p *grpcConnectionProvider) SwitchHost(index int) error {
return nil
}
func (p *grpcConnectionProvider) ConnectionCounter() uint64 {
p.mu.Lock()
defer p.mu.Unlock()
return p.connCounter
}
func (p *grpcConnectionProvider) Close() {
p.mu.Lock()
defer p.mu.Unlock()
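
The removed ConnectionCounter method was documented above as letting consumers detect endpoint switches even when the host string returns to a previous value; for context, a minimal sketch of that consumer pattern against the pre-change interface. The watcher type and the narrowed counterProvider interface are hypothetical stand-ins, written as if they sat alongside the provider package.

// counterProvider is a hypothetical narrowing of GrpcConnectionProvider to the
// single method this sketch needs.
type counterProvider interface {
	ConnectionCounter() uint64
}

// connWatcher remembers the last counter value and reports whether SwitchHost
// has changed the active endpoint since the previous check, even if the host
// string is back to an earlier value (e.g., host0 → host1 → host0).
type connWatcher struct {
	provider counterProvider
	lastSeen uint64
}

func (w *connWatcher) connectionChanged() bool {
	current := w.provider.ConnectionCounter()
	changed := current != w.lastSeen
	w.lastSeen = current
	return changed
}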

View File

@@ -4,24 +4,17 @@ import "google.golang.org/grpc"
// MockGrpcProvider implements GrpcConnectionProvider for testing.
type MockGrpcProvider struct {
MockConn *grpc.ClientConn
MockHosts []string
CurrentIndex int
ConnCounter uint64
MockConn *grpc.ClientConn
MockHosts []string
}
func (m *MockGrpcProvider) CurrentConn() *grpc.ClientConn { return m.MockConn }
func (m *MockGrpcProvider) CurrentHost() string {
if len(m.MockHosts) > 0 {
return m.MockHosts[m.CurrentIndex]
return m.MockHosts[0]
}
return ""
}
func (m *MockGrpcProvider) Hosts() []string { return m.MockHosts }
func (m *MockGrpcProvider) SwitchHost(idx int) error {
m.CurrentIndex = idx
m.ConnCounter++
return nil
}
func (m *MockGrpcProvider) ConnectionCounter() uint64 { return m.ConnCounter }
func (m *MockGrpcProvider) Close() {}
func (m *MockGrpcProvider) Hosts() []string { return m.MockHosts }
func (m *MockGrpcProvider) SwitchHost(int) error { return nil }
func (m *MockGrpcProvider) Close() {}

View File

@@ -9,13 +9,13 @@ import (
// MockRestProvider implements RestConnectionProvider for testing.
type MockRestProvider struct {
MockClient *http.Client
MockHandler Handler
MockHandler RestHandler
MockHosts []string
HostIndex int
}
func (m *MockRestProvider) HttpClient() *http.Client { return m.MockClient }
func (m *MockRestProvider) Handler() Handler { return m.MockHandler }
func (m *MockRestProvider) RestHandler() RestHandler { return m.MockHandler }
func (m *MockRestProvider) CurrentHost() string {
if len(m.MockHosts) > 0 {
return m.MockHosts[m.HostIndex%len(m.MockHosts)]
@@ -25,22 +25,25 @@ func (m *MockRestProvider) CurrentHost() string {
func (m *MockRestProvider) Hosts() []string { return m.MockHosts }
func (m *MockRestProvider) SwitchHost(index int) error { m.HostIndex = index; return nil }
// MockHandler implements Handler for testing.
type MockHandler struct {
MockHost string
// MockRestHandler implements RestHandler for testing.
type MockRestHandler struct {
MockHost string
MockClient *http.Client
}
func (m *MockHandler) Get(_ context.Context, _ string, _ any) error { return nil }
func (m *MockHandler) GetStatusCode(_ context.Context, _ string) (int, error) {
func (m *MockRestHandler) Get(_ context.Context, _ string, _ any) error { return nil }
func (m *MockRestHandler) GetStatusCode(_ context.Context, _ string) (int, error) {
return http.StatusOK, nil
}
func (m *MockHandler) GetSSZ(_ context.Context, _ string) ([]byte, http.Header, error) {
func (m *MockRestHandler) GetSSZ(_ context.Context, _ string) ([]byte, http.Header, error) {
return nil, nil, nil
}
func (m *MockHandler) Post(_ context.Context, _ string, _ map[string]string, _ *bytes.Buffer, _ any) error {
func (m *MockRestHandler) Post(_ context.Context, _ string, _ map[string]string, _ *bytes.Buffer, _ any) error {
return nil
}
func (m *MockHandler) PostSSZ(_ context.Context, _ string, _ map[string]string, _ *bytes.Buffer) ([]byte, http.Header, error) {
func (m *MockRestHandler) PostSSZ(_ context.Context, _ string, _ map[string]string, _ *bytes.Buffer) ([]byte, http.Header, error) {
return nil, nil, nil
}
func (m *MockHandler) Host() string { return m.MockHost }
func (m *MockRestHandler) HttpClient() *http.Client { return m.MockClient }
func (m *MockRestHandler) Host() string { return m.MockHost }
func (m *MockRestHandler) SwitchHost(host string) { m.MockHost = host }

View File

@@ -17,8 +17,8 @@ import (
type RestConnectionProvider interface {
// HttpClient returns the configured HTTP client with headers, timeout, and optional tracing.
HttpClient() *http.Client
// Handler returns the REST handler for making API requests.
Handler() Handler
// RestHandler returns the REST handler for making API requests.
RestHandler() RestHandler
// CurrentHost returns the current REST API endpoint URL.
CurrentHost() string
// Hosts returns all configured REST API endpoint URLs.
@@ -54,7 +54,7 @@ func WithTracing() RestConnectionProviderOption {
type restConnectionProvider struct {
endpoints []string
httpClient *http.Client
restHandler *handler
restHandler RestHandler
currentIndex atomic.Uint64
timeout time.Duration
headers map[string][]string
@@ -96,7 +96,7 @@ func NewRestConnectionProvider(endpoint string, opts ...RestConnectionProviderOp
}
// Create the REST handler with the HTTP client and initial host
p.restHandler = newHandler(*p.httpClient, endpoints[0])
p.restHandler = newRestHandler(*p.httpClient, endpoints[0])
log.WithFields(logrus.Fields{
"endpoints": endpoints,
@@ -124,7 +124,7 @@ func (p *restConnectionProvider) HttpClient() *http.Client {
return p.httpClient
}
func (p *restConnectionProvider) Handler() Handler {
func (p *restConnectionProvider) RestHandler() RestHandler {
return p.restHandler
}

View File

@@ -21,35 +21,32 @@ import (
type reqOption func(*http.Request)
// Handler defines the interface for making REST API requests.
type Handler interface {
// RestHandler defines the interface for making REST API requests.
type RestHandler interface {
Get(ctx context.Context, endpoint string, resp any) error
GetStatusCode(ctx context.Context, endpoint string) (int, error)
GetSSZ(ctx context.Context, endpoint string) ([]byte, http.Header, error)
Post(ctx context.Context, endpoint string, headers map[string]string, data *bytes.Buffer, resp any) error
PostSSZ(ctx context.Context, endpoint string, headers map[string]string, data *bytes.Buffer) ([]byte, http.Header, error)
HttpClient() *http.Client
Host() string
SwitchHost(host string)
}
type handler struct {
type restHandler struct {
client http.Client
host string
reqOverrides []reqOption
}
// newHandler returns a *handler for internal use within the rest package.
func newHandler(client http.Client, host string) *handler {
rh := &handler{
client: client,
host: host,
}
rh.appendAcceptOverride()
return rh
// newRestHandler returns a RestHandler (internal use)
func newRestHandler(client http.Client, host string) RestHandler {
return NewRestHandler(client, host)
}
// NewHandler returns a Handler
func NewHandler(client http.Client, host string) Handler {
rh := &handler{
// NewRestHandler returns a RestHandler
func NewRestHandler(client http.Client, host string) RestHandler {
rh := &restHandler{
client: client,
host: host,
}
@@ -60,7 +57,7 @@ func NewHandler(client http.Client, host string) Handler {
// appendAcceptOverride enables the Accept header to be customized at runtime via an environment variable.
// This is specified as an env var because it is a niche option that prysm may use for performance testing or debugging
// bugs, which users are unlikely to need. Using an env var keeps the set of user-facing flags cleaner.
func (c *handler) appendAcceptOverride() {
func (c *restHandler) appendAcceptOverride() {
if accept := os.Getenv(params.EnvNameOverrideAccept); accept != "" {
c.reqOverrides = append(c.reqOverrides, func(req *http.Request) {
req.Header.Set("Accept", accept)
@@ -69,18 +66,18 @@ func (c *handler) appendAcceptOverride() {
}
// HttpClient returns the underlying HTTP client of the handler
func (c *handler) HttpClient() *http.Client {
func (c *restHandler) HttpClient() *http.Client {
return &c.client
}
// Host returns the underlying HTTP host
func (c *handler) Host() string {
func (c *restHandler) Host() string {
return c.host
}
// Get sends a GET request and decodes the response body as a JSON object into the passed in object.
// If an HTTP error is returned, the body is decoded as a DefaultJsonError JSON object and returned as the first return value.
func (c *handler) Get(ctx context.Context, endpoint string, resp any) error {
func (c *restHandler) Get(ctx context.Context, endpoint string, resp any) error {
url := c.host + endpoint
req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
if err != nil {
@@ -103,7 +100,7 @@ func (c *handler) Get(ctx context.Context, endpoint string, resp any) error {
// GetStatusCode sends a GET request and returns only the HTTP status code.
// This is useful for endpoints like /eth/v1/node/health that communicate status via HTTP codes
// (200 = ready, 206 = syncing, 503 = unavailable) rather than response bodies.
func (c *handler) GetStatusCode(ctx context.Context, endpoint string) (int, error) {
func (c *restHandler) GetStatusCode(ctx context.Context, endpoint string) (int, error) {
url := c.host + endpoint
req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
if err != nil {
@@ -122,7 +119,7 @@ func (c *handler) GetStatusCode(ctx context.Context, endpoint string) (int, erro
return httpResp.StatusCode, nil
}
func (c *handler) GetSSZ(ctx context.Context, endpoint string) ([]byte, http.Header, error) {
func (c *restHandler) GetSSZ(ctx context.Context, endpoint string) ([]byte, http.Header, error) {
url := c.host + endpoint
req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
if err != nil {
@@ -177,7 +174,7 @@ func (c *handler) GetSSZ(ctx context.Context, endpoint string) ([]byte, http.Hea
// Post sends a POST request and decodes the response body as a JSON object into the passed in object.
// If an HTTP error is returned, the body is decoded as a DefaultJsonError JSON object and returned as the first return value.
func (c *handler) Post(
func (c *restHandler) Post(
ctx context.Context,
apiEndpoint string,
headers map[string]string,
@@ -213,7 +210,7 @@ func (c *handler) Post(
}
// PostSSZ sends a POST request and prefers an SSZ (application/octet-stream) response body.
func (c *handler) PostSSZ(
func (c *restHandler) PostSSZ(
ctx context.Context,
apiEndpoint string,
headers map[string]string,
@@ -314,6 +311,6 @@ func decodeResp(httpResp *http.Response, resp any) error {
return nil
}
func (c *handler) SwitchHost(host string) {
func (c *restHandler) SwitchHost(host string) {
c.host = host
}
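
As a usage sketch of the renamed handler: the snippet below is written as if it lived inside this rest package, builds a RestHandler with NewRestHandler, and uses GetStatusCode against the node health endpoint described in the doc comment above (200 = ready, 206 = syncing). The helper name, host URL, and timeout are only examples; the context, net/http, and time imports are assumed.

import (
	"context"
	"net/http"
	"time"
)

// checkNodeReady is a hypothetical helper: it reports whether the beacon node
// behind host answers the health endpoint with 200 OK.
func checkNodeReady(ctx context.Context, host string) (bool, error) {
	// NewRestHandler takes the HTTP client by value and the base host URL.
	h := NewRestHandler(http.Client{Timeout: 10 * time.Second}, host)

	// /eth/v1/node/health communicates status via HTTP codes only:
	// 200 = ready, 206 = syncing, 503 = unavailable.
	code, err := h.GetStatusCode(ctx, "/eth/v1/node/health")
	if err != nil {
		return false, err
	}
	return code == http.StatusOK, nil
}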

View File

@@ -509,17 +509,17 @@ func (s *SignedBlindedBeaconBlockFulu) SigString() string {
// ----------------------------------------------------------------------------
type ExecutionPayloadBid struct {
ParentBlockHash string `json:"parent_block_hash"`
ParentBlockRoot string `json:"parent_block_root"`
BlockHash string `json:"block_hash"`
PrevRandao string `json:"prev_randao"`
FeeRecipient string `json:"fee_recipient"`
GasLimit string `json:"gas_limit"`
BuilderIndex string `json:"builder_index"`
Slot string `json:"slot"`
Value string `json:"value"`
ExecutionPayment string `json:"execution_payment"`
BlobKzgCommitments []string `json:"blob_kzg_commitments"`
ParentBlockHash string `json:"parent_block_hash"`
ParentBlockRoot string `json:"parent_block_root"`
BlockHash string `json:"block_hash"`
PrevRandao string `json:"prev_randao"`
FeeRecipient string `json:"fee_recipient"`
GasLimit string `json:"gas_limit"`
BuilderIndex string `json:"builder_index"`
Slot string `json:"slot"`
Value string `json:"value"`
ExecutionPayment string `json:"execution_payment"`
BlobKzgCommitmentsRoot string `json:"blob_kzg_commitments_root"`
}
type SignedExecutionPayloadBid struct {

View File

@@ -2939,22 +2939,18 @@ func SignedExecutionPayloadBidFromConsensus(b *eth.SignedExecutionPayloadBid) *S
}
func ExecutionPayloadBidFromConsensus(b *eth.ExecutionPayloadBid) *ExecutionPayloadBid {
blobKzgCommitments := make([]string, len(b.BlobKzgCommitments))
for i := range b.BlobKzgCommitments {
blobKzgCommitments[i] = hexutil.Encode(b.BlobKzgCommitments[i])
}
return &ExecutionPayloadBid{
ParentBlockHash: hexutil.Encode(b.ParentBlockHash),
ParentBlockRoot: hexutil.Encode(b.ParentBlockRoot),
BlockHash: hexutil.Encode(b.BlockHash),
PrevRandao: hexutil.Encode(b.PrevRandao),
FeeRecipient: hexutil.Encode(b.FeeRecipient),
GasLimit: fmt.Sprintf("%d", b.GasLimit),
BuilderIndex: fmt.Sprintf("%d", b.BuilderIndex),
Slot: fmt.Sprintf("%d", b.Slot),
Value: fmt.Sprintf("%d", b.Value),
ExecutionPayment: fmt.Sprintf("%d", b.ExecutionPayment),
BlobKzgCommitments: blobKzgCommitments,
ParentBlockHash: hexutil.Encode(b.ParentBlockHash),
ParentBlockRoot: hexutil.Encode(b.ParentBlockRoot),
BlockHash: hexutil.Encode(b.BlockHash),
PrevRandao: hexutil.Encode(b.PrevRandao),
FeeRecipient: hexutil.Encode(b.FeeRecipient),
GasLimit: fmt.Sprintf("%d", b.GasLimit),
BuilderIndex: fmt.Sprintf("%d", b.BuilderIndex),
Slot: fmt.Sprintf("%d", b.Slot),
Value: fmt.Sprintf("%d", b.Value),
ExecutionPayment: fmt.Sprintf("%d", b.ExecutionPayment),
BlobKzgCommitmentsRoot: hexutil.Encode(b.BlobKzgCommitmentsRoot),
}
}
@@ -3191,30 +3187,22 @@ func (b *ExecutionPayloadBid) ToConsensus() (*eth.ExecutionPayloadBid, error) {
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayment")
}
err = slice.VerifyMaxLength(b.BlobKzgCommitments, fieldparams.MaxBlobCommitmentsPerBlock)
blobKzgCommitmentsRoot, err := bytesutil.DecodeHexWithLength(b.BlobKzgCommitmentsRoot, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "BlobKzgCommitments")
}
blobKzgCommitments := make([][]byte, len(b.BlobKzgCommitments))
for i, commitment := range b.BlobKzgCommitments {
kzg, err := bytesutil.DecodeHexWithLength(commitment, fieldparams.BLSPubkeyLength)
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("BlobKzgCommitments[%d]", i))
}
blobKzgCommitments[i] = kzg
return nil, server.NewDecodeError(err, "BlobKzgCommitmentsRoot")
}
return &eth.ExecutionPayloadBid{
ParentBlockHash: parentBlockHash,
ParentBlockRoot: parentBlockRoot,
BlockHash: blockHash,
PrevRandao: prevRandao,
FeeRecipient: feeRecipient,
GasLimit: gasLimit,
BuilderIndex: primitives.BuilderIndex(builderIndex),
Slot: primitives.Slot(slot),
Value: primitives.Gwei(value),
ExecutionPayment: primitives.Gwei(executionPayment),
BlobKzgCommitments: blobKzgCommitments,
ParentBlockHash: parentBlockHash,
ParentBlockRoot: parentBlockRoot,
BlockHash: blockHash,
PrevRandao: prevRandao,
FeeRecipient: feeRecipient,
GasLimit: gasLimit,
BuilderIndex: primitives.BuilderIndex(builderIndex),
Slot: primitives.Slot(slot),
Value: primitives.Gwei(value),
ExecutionPayment: primitives.Gwei(executionPayment),
BlobKzgCommitmentsRoot: blobKzgCommitmentsRoot,
}, nil
}

View File

@@ -27,7 +27,6 @@ go_library(
"receive_blob.go",
"receive_block.go",
"receive_data_column.go",
"receive_payload_attestation_message.go",
"service.go",
"setup_forkchoice.go",
"tracked_proposer.go",
@@ -86,7 +85,6 @@ go_library(
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//io/logs:go_default_library",
"//math:go_default_library",
"//monitoring/tracing:go_default_library",
"//monitoring/tracing/trace:go_default_library",

View File

@@ -46,7 +46,6 @@ type ForkchoiceFetcher interface {
HighestReceivedBlockSlot() primitives.Slot
ReceivedBlocksLastEpoch() (uint64, error)
InsertNode(context.Context, state.BeaconState, consensus_blocks.ROBlock) error
InsertPayload(context.Context, interfaces.ROExecutionPayloadEnvelope) error
ForkChoiceDump(context.Context) (*forkchoice.Dump, error)
NewSlot(context.Context, primitives.Slot) error
ProposerBoost() [32]byte

View File

@@ -7,7 +7,6 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
consensus_blocks "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/forkchoice"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v7/runtime/version"
@@ -56,13 +55,6 @@ func (s *Service) InsertNode(ctx context.Context, st state.BeaconState, block co
return s.cfg.ForkChoiceStore.InsertNode(ctx, st, block)
}
// InsertPayload is a wrapper for payload insertion which is self locked
func (s *Service) InsertPayload(ctx context.Context, pe interfaces.ROExecutionPayloadEnvelope) error {
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
return s.cfg.ForkChoiceStore.InsertPayload(ctx, pe)
}
// ForkChoiceDump returns the corresponding value from forkchoice
func (s *Service) ForkChoiceDump(ctx context.Context) (*forkchoice.Dump, error) {
s.cfg.ForkChoiceStore.RLock()

View File

@@ -101,16 +101,11 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
if len(lastValidHash) == 0 {
lastValidHash = defaultLatestValidHash
}
// this call has guaranteed to have the `headRoot` with its payload in forkchoice.
invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, headRoot, headBlk.ParentRoot(), bytesutil.ToBytes32(headPayload.ParentHash()), bytesutil.ToBytes32(lastValidHash))
invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, headRoot, headBlk.ParentRoot(), bytesutil.ToBytes32(lastValidHash))
if err != nil {
log.WithError(err).Error("Could not set head root to invalid")
return nil, nil
}
// TODO: Gloas, we should not include the head root in this call
if len(invalidRoots) == 0 || invalidRoots[0] != headRoot {
invalidRoots = append([][32]byte{headRoot}, invalidRoots...)
}
if err := s.removeInvalidBlockAndState(ctx, invalidRoots); err != nil {
log.WithError(err).Error("Could not remove invalid block and state")
return nil, nil
@@ -295,10 +290,10 @@ func (s *Service) notifyNewPayload(ctx context.Context, stVersion int, header in
return false, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
}
// pruneInvalidBlock deals with the event that an invalid block was detected by the execution layer
func (s *Service) pruneInvalidBlock(ctx context.Context, root, parentRoot, parentHash [32]byte, lvh [32]byte) error {
// reportInvalidBlock deals with the event that an invalid block was detected by the execution layer
func (s *Service) pruneInvalidBlock(ctx context.Context, root, parentRoot, lvh [32]byte) error {
newPayloadInvalidNodeCount.Inc()
invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, root, parentRoot, parentHash, lvh)
invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, root, parentRoot, lvh)
if err != nil {
return err
}

View File

@@ -465,9 +465,9 @@ func Test_NotifyForkchoiceUpdateRecursive_DoublyLinkedTree(t *testing.T) {
require.NoError(t, err)
require.Equal(t, brd, headRoot)
// Ensure F and G's full nodes were removed but their empty (consensus) nodes remain, as does E
require.Equal(t, true, fcs.HasNode(brf))
require.Equal(t, true, fcs.HasNode(brg))
// Ensure F and G were removed but their parent E wasn't
require.Equal(t, false, fcs.HasNode(brf))
require.Equal(t, false, fcs.HasNode(brg))
require.Equal(t, true, fcs.HasNode(bre))
}
@@ -703,13 +703,14 @@ func Test_reportInvalidBlock(t *testing.T) {
require.NoError(t, fcs.InsertNode(ctx, st, root))
require.NoError(t, fcs.SetOptimisticToValid(ctx, [32]byte{'A'}))
err = service.pruneInvalidBlock(ctx, [32]byte{'D'}, [32]byte{'C'}, [32]byte{'c'}, [32]byte{'a'})
err = service.pruneInvalidBlock(ctx, [32]byte{'D'}, [32]byte{'C'}, [32]byte{'a'})
require.Equal(t, IsInvalidBlock(err), true)
require.Equal(t, InvalidBlockLVH(err), [32]byte{'a'})
invalidRoots := InvalidAncestorRoots(err)
require.Equal(t, 2, len(invalidRoots))
require.Equal(t, 3, len(invalidRoots))
require.Equal(t, [32]byte{'D'}, invalidRoots[0])
require.Equal(t, [32]byte{'C'}, invalidRoots[1])
require.Equal(t, [32]byte{'B'}, invalidRoots[2])
}
func Test_GetPayloadAttribute(t *testing.T) {
@@ -784,7 +785,7 @@ func Test_GetPayloadAttributeV2(t *testing.T) {
}
func Test_GetPayloadAttributeV3(t *testing.T) {
testCases := []struct {
var testCases = []struct {
name string
st bstate.BeaconState
}{

View File

@@ -110,7 +110,7 @@ func VerifyCellKZGProofBatch(commitmentsBytes []Bytes48, cellIndices []uint64, c
ckzgCells := make([]ckzg4844.Cell, len(cells))
for i := range cells {
copy(ckzgCells[i][:], cells[i][:])
ckzgCells[i] = ckzg4844.Cell(cells[i])
}
return ckzg4844.VerifyCellKZGProofBatch(commitmentsBytes, cellIndices, ckzgCells, proofsBytes)
}
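
The one-line change above replaces an explicit byte copy with a direct conversion between the two Cell array types. For reference, a minimal self-contained sketch of the Go rule it relies on: named types with the same underlying fixed-size array convert directly, and the conversion assigns the whole value. The small 4-byte types here are hypothetical stand-ins for the real, much larger Cell types.

package main

import "fmt"

// Hypothetical stand-ins for the two Cell types; both share the underlying
// type [4]byte, so a direct conversion between them is allowed.
type srcCell [4]byte
type dstCell [4]byte

func main() {
	cells := []srcCell{{1, 2, 3, 4}, {5, 6, 7, 8}}

	// Element-wise copy, as in the copy form.
	copied := make([]dstCell, len(cells))
	for i := range cells {
		copy(copied[i][:], cells[i][:])
	}

	// Direct type conversion, as in the conversion form: one assignment per cell.
	converted := make([]dstCell, len(cells))
	for i := range cells {
		converted[i] = dstCell(cells[i])
	}

	fmt.Println(copied[0] == converted[0], copied[1] == converted[1]) // true true
}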

View File

@@ -10,7 +10,6 @@ import (
consensus_types "github.com/OffchainLabs/prysm/v7/consensus-types"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v7/io/logs"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
prysmTime "github.com/OffchainLabs/prysm/v7/time"
@@ -88,45 +87,36 @@ func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte, justified, finalized *ethpb.Checkpoint, receivedTime time.Time, genesis time.Time, daWaitedTime time.Duration) error {
startTime, err := slots.StartTime(genesis, block.Slot())
if err != nil {
return errors.Wrap(err, "failed to get slot start time")
return err
}
parentRoot := block.ParentRoot()
blkRoot := fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8])
finalizedRoot := fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8])
sinceSlotStartTime := prysmTime.Now().Sub(startTime)
lessFields := logrus.Fields{
"slot": block.Slot(),
"block": blkRoot,
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": finalizedRoot,
"epoch": slots.ToEpoch(block.Slot()),
"sinceSlotStartTime": sinceSlotStartTime,
}
moreFields := logrus.Fields{
"slot": block.Slot(),
"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
"block": blkRoot,
"epoch": slots.ToEpoch(block.Slot()),
"justifiedEpoch": justified.Epoch,
"justifiedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": finalizedRoot,
"parentRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(parentRoot[:])[:8]),
"version": version.String(block.Version()),
"sinceSlotStartTime": sinceSlotStartTime,
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime) - daWaitedTime,
"dataAvailabilityWaitedTime": daWaitedTime,
}
level := logs.PackageVerbosity("beacon-chain/blockchain")
level := log.Logger.GetLevel()
if level >= logrus.DebugLevel {
log.WithFields(moreFields).Info("Synced new block")
return nil
parentRoot := block.ParentRoot()
lf := logrus.Fields{
"slot": block.Slot(),
"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"epoch": slots.ToEpoch(block.Slot()),
"justifiedEpoch": justified.Epoch,
"justifiedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
"parentRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(parentRoot[:])[:8]),
"version": version.String(block.Version()),
"sinceSlotStartTime": prysmTime.Now().Sub(startTime),
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime) - daWaitedTime,
"dataAvailabilityWaitedTime": daWaitedTime,
}
log.WithFields(lf).Debug("Synced new block")
} else {
log.WithFields(logrus.Fields{
"slot": block.Slot(),
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
"epoch": slots.ToEpoch(block.Slot()),
}).Info("Synced new block")
}
log.WithFields(lessFields).WithField(logs.LogTargetField, logs.LogTargetUser).Info("Synced new block")
log.WithFields(moreFields).WithField(logs.LogTargetField, logs.LogTargetEphemeral).Info("Synced new block")
return nil
}

View File

@@ -232,8 +232,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
postVersionAndHeaders[i].version,
postVersionAndHeaders[i].header, b)
if err != nil {
// this call does not have the root in forkchoice yet.
return s.handleInvalidExecutionError(ctx, err, root, b.Block().ParentRoot(), [32]byte(postVersionAndHeaders[i].header.ParentHash()))
return s.handleInvalidExecutionError(ctx, err, root, b.Block().ParentRoot())
}
if isValidPayload {
if err := s.validateMergeTransitionBlock(ctx, preVersionAndHeaders[i].version,
@@ -993,9 +992,9 @@ func (s *Service) waitForSync() error {
}
}
func (s *Service) handleInvalidExecutionError(ctx context.Context, err error, blockRoot, parentRoot [32]byte, parentHash [32]byte) error {
func (s *Service) handleInvalidExecutionError(ctx context.Context, err error, blockRoot, parentRoot [fieldparams.RootLength]byte) error {
if IsInvalidBlock(err) && InvalidBlockLVH(err) != [32]byte{} {
return s.pruneInvalidBlock(ctx, blockRoot, parentRoot, parentHash, InvalidBlockLVH(err))
return s.pruneInvalidBlock(ctx, blockRoot, parentRoot, InvalidBlockLVH(err))
}
return err
}

View File

@@ -2006,7 +2006,6 @@ func TestNoViableHead_Reboot(t *testing.T) {
// Check that we have justified the second epoch
jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
require.Equal(t, primitives.Epoch(2), jc.Epoch)
time.Sleep(20 * time.Millisecond) // wait for async forkchoice update to be processed
// import block 19 to find out that the whole chain 13--18 was in fact
// invalid

View File

@@ -633,7 +633,7 @@ func (s *Service) validateExecutionOnBlock(ctx context.Context, ver int, header
isValidPayload, err := s.notifyNewPayload(ctx, ver, header, block)
if err != nil {
s.cfg.ForkChoiceStore.Lock()
err = s.handleInvalidExecutionError(ctx, err, block.Root(), block.Block().ParentRoot(), [32]byte(header.BlockHash()))
err = s.handleInvalidExecutionError(ctx, err, block.Root(), block.Block().ParentRoot())
s.cfg.ForkChoiceStore.Unlock()
return false, err
}

View File

@@ -1,19 +0,0 @@
package blockchain
import (
"context"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)
// PayloadAttestationReceiver interface defines the methods of chain service for receiving
// validated payload attestation messages.
type PayloadAttestationReceiver interface {
ReceivePayloadAttestationMessage(context.Context, *ethpb.PayloadAttestationMessage) error
}
// ReceivePayloadAttestationMessage accepts a payload attestation message.
func (s *Service) ReceivePayloadAttestationMessage(ctx context.Context, a *ethpb.PayloadAttestationMessage) error {
// TODO: Handle payload attestation message processing once Gloas is fully wired.
return nil
}

View File

@@ -89,7 +89,7 @@ func (mb *mockBroadcaster) BroadcastLightClientFinalityUpdate(_ context.Context,
return nil
}
func (mb *mockBroadcaster) BroadcastDataColumnSidecars(_ context.Context, _ []blocks.VerifiedRODataColumn) error {
func (mb *mockBroadcaster) BroadcastDataColumnSidecars(_ context.Context, _ []blocks.VerifiedRODataColumn, _ []blocks.PartialDataColumn) error {
mb.broadcastCalled = true
return nil
}

View File

@@ -700,14 +700,6 @@ func (s *ChainService) InsertNode(ctx context.Context, st state.BeaconState, blo
return nil
}
// InsertPayload mocks the same method in the chain service
func (s *ChainService) InsertPayload(ctx context.Context, pe interfaces.ROExecutionPayloadEnvelope) error {
if s.ForkChoiceStore != nil {
return s.ForkChoiceStore.InsertPayload(ctx, pe)
}
return nil
}
// ForkChoiceDump mocks the same method in the chain service
func (s *ChainService) ForkChoiceDump(ctx context.Context) (*forkchoice2.Dump, error) {
if s.ForkChoiceStore != nil {
@@ -765,11 +757,6 @@ func (c *ChainService) ReceiveDataColumns(dcs []blocks.VerifiedRODataColumn) err
return nil
}
// ReceivePayloadAttestationMessage implements the same method in the chain service.
func (c *ChainService) ReceivePayloadAttestationMessage(_ context.Context, _ *ethpb.PayloadAttestationMessage) error {
return nil
}
// DependentRootForEpoch mocks the same method in the chain service
func (c *ChainService) DependentRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]byte, error) {
return c.TargetRoot, nil

View File

@@ -17,7 +17,6 @@ go_library(
"error.go",
"interfaces.go",
"log.go",
"payload_attestation.go",
"payload_id.go",
"proposer_indices.go",
"proposer_indices_disabled.go", # keep
@@ -77,7 +76,6 @@ go_test(
"checkpoint_state_test.go",
"committee_fuzz_test.go",
"committee_test.go",
"payload_attestation_test.go",
"payload_id_test.go",
"private_access_test.go",
"proposer_indices_test.go",

View File

@@ -1,53 +0,0 @@
package cache
import (
"sync"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
)
// PayloadAttestationCache tracks seen payload attestation messages for a single slot.
type PayloadAttestationCache struct {
slot primitives.Slot
seen map[primitives.ValidatorIndex]struct{}
mu sync.RWMutex
}
// Seen returns true if a vote for the given slot has already been
// processed for this validator index.
func (p *PayloadAttestationCache) Seen(slot primitives.Slot, idx primitives.ValidatorIndex) bool {
p.mu.RLock()
defer p.mu.RUnlock()
if p.slot != slot {
return false
}
if p.seen == nil {
return false
}
_, ok := p.seen[idx]
return ok
}
// Add marks the given slot and validator index as seen.
// This function assumes that the message has already been validated.
func (p *PayloadAttestationCache) Add(slot primitives.Slot, idx primitives.ValidatorIndex) error {
p.mu.Lock()
defer p.mu.Unlock()
if p.slot != slot {
p.slot = slot
p.seen = make(map[primitives.ValidatorIndex]struct{})
}
if p.seen == nil {
p.seen = make(map[primitives.ValidatorIndex]struct{})
}
p.seen[idx] = struct{}{}
return nil
}
// Clear clears the internal cache.
func (p *PayloadAttestationCache) Clear() {
p.mu.Lock()
defer p.mu.Unlock()
p.slot = 0
p.seen = nil
}

View File

@@ -1,48 +0,0 @@
package cache_test
import (
"testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/stretchr/testify/require"
)
func TestPayloadAttestationCache_SeenAndAdd(t *testing.T) {
var c cache.PayloadAttestationCache
slot1 := primitives.Slot(1)
slot2 := primitives.Slot(2)
idx1 := primitives.ValidatorIndex(3)
idx2 := primitives.ValidatorIndex(4)
require.False(t, c.Seen(slot1, idx1))
require.NoError(t, c.Add(slot1, idx1))
require.True(t, c.Seen(slot1, idx1))
require.False(t, c.Seen(slot1, idx2))
require.False(t, c.Seen(slot2, idx1))
require.NoError(t, c.Add(slot1, idx2))
require.True(t, c.Seen(slot1, idx1))
require.True(t, c.Seen(slot1, idx2))
require.NoError(t, c.Add(slot2, idx1))
require.True(t, c.Seen(slot2, idx1))
require.False(t, c.Seen(slot1, idx1))
require.False(t, c.Seen(slot1, idx2))
}
func TestPayloadAttestationCache_Clear(t *testing.T) {
var c cache.PayloadAttestationCache
slot := primitives.Slot(10)
idx := primitives.ValidatorIndex(42)
require.NoError(t, c.Add(slot, idx))
require.True(t, c.Seen(slot, idx))
c.Clear()
require.False(t, c.Seen(slot, idx))
require.NoError(t, c.Add(slot, idx))
require.True(t, c.Seen(slot, idx))
}

View File

@@ -17,56 +17,27 @@ import (
)
// ProcessExecutionPayloadBid processes a signed execution payload bid in the Gloas fork.
// Spec v1.7.0-alpha.0 (pseudocode):
// process_execution_payload_bid(state: BeaconState, block: BeaconBlock):
//
// <spec fn="process_execution_payload_bid" fork="gloas" hash="823c9f3a">
// def process_execution_payload_bid(state: BeaconState, block: BeaconBlock) -> None:
// signed_bid = block.body.signed_execution_payload_bid
// bid = signed_bid.message
// builder_index = bid.builder_index
// amount = bid.value
//
// # For self-builds, amount must be zero regardless of withdrawal credential prefix
// if builder_index == BUILDER_INDEX_SELF_BUILD:
// assert amount == 0
// assert signed_bid.signature == bls.G2_POINT_AT_INFINITY
// else:
// # Verify that the builder is active
// assert is_active_builder(state, builder_index)
// # Verify that the builder has funds to cover the bid
// assert can_builder_cover_bid(state, builder_index, amount)
// # Verify that the bid signature is valid
// assert verify_execution_payload_bid_signature(state, signed_bid)
//
// # Verify commitments are under limit
// assert (
// len(bid.blob_kzg_commitments)
// <= get_blob_parameters(get_current_epoch(state)).max_blobs_per_block
// )
//
// # Verify that the bid is for the current slot
// assert bid.slot == block.slot
// # Verify that the bid is for the right parent block
// assert bid.parent_block_hash == state.latest_block_hash
// assert bid.parent_block_root == block.parent_root
// assert bid.prev_randao == get_randao_mix(state, get_current_epoch(state))
//
// # Record the pending payment if there is some payment
// if amount > 0:
// pending_payment = BuilderPendingPayment(
// weight=0,
// withdrawal=BuilderPendingWithdrawal(
// fee_recipient=bid.fee_recipient,
// amount=amount,
// builder_index=builder_index,
// ),
// )
// state.builder_pending_payments[SLOTS_PER_EPOCH + bid.slot % SLOTS_PER_EPOCH] = (
// pending_payment
// )
//
// # Cache the signed execution payload bid
// state.latest_execution_payload_bid = bid
// </spec>
// signed_bid = block.body.signed_execution_payload_bid
// bid = signed_bid.message
// builder_index = bid.builder_index
// amount = bid.value
// if builder_index == BUILDER_INDEX_SELF_BUILD:
// assert amount == 0
// assert signed_bid.signature == G2_POINT_AT_INFINITY
// else:
// assert is_active_builder(state, builder_index)
// assert can_builder_cover_bid(state, builder_index, amount)
// assert verify_execution_payload_bid_signature(state, signed_bid)
// assert bid.slot == block.slot
// assert bid.parent_block_hash == state.latest_block_hash
// assert bid.parent_block_root == block.parent_root
// assert bid.prev_randao == get_randao_mix(state, get_current_epoch(state))
// if amount > 0:
// state.builder_pending_payments[...] = BuilderPendingPayment(weight=0, withdrawal=BuilderPendingWithdrawal(fee_recipient=bid.fee_recipient, amount=amount, builder_index=builder_index))
// state.latest_execution_payload_bid = bid
func ProcessExecutionPayloadBid(st state.BeaconState, block interfaces.ReadOnlyBeaconBlock) error {
signedBid, err := block.Body().SignedExecutionPayloadBid()
if err != nil {
@@ -115,12 +86,6 @@ func ProcessExecutionPayloadBid(st state.BeaconState, block interfaces.ReadOnlyB
}
}
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlockAtEpoch(slots.ToEpoch(block.Slot()))
commitmentCount := bid.BlobKzgCommitmentCount()
if commitmentCount > uint64(maxBlobsPerBlock) {
return fmt.Errorf("bid has %d blob KZG commitments over max %d", commitmentCount, maxBlobsPerBlock)
}
if err := validateBidConsistency(st, bid, block); err != nil {
return errors.Wrap(err, "bid consistency validation failed")
}

View File

@@ -184,28 +184,6 @@ func signBid(t *testing.T, sk common.SecretKey, bid *ethpb.ExecutionPayloadBid,
return out
}
func blobCommitmentsForSlot(slot primitives.Slot, count int) [][]byte {
max := int(params.BeaconConfig().MaxBlobsPerBlockAtEpoch(slots.ToEpoch(slot)))
if count > max {
count = max
}
commitments := make([][]byte, count)
for i := range commitments {
commitments[i] = bytes.Repeat([]byte{0xEE}, 48)
}
return commitments
}
func tooManyBlobCommitmentsForSlot(slot primitives.Slot) [][]byte {
max := int(params.BeaconConfig().MaxBlobsPerBlockAtEpoch(slots.ToEpoch(slot)))
count := max + 1
commitments := make([][]byte, count)
for i := range commitments {
commitments[i] = bytes.Repeat([]byte{0xEE}, 48)
}
return commitments
}
func TestProcessExecutionPayloadBid_SelfBuildSuccess(t *testing.T) {
slot := primitives.Slot(12)
proposerIdx := primitives.ValidatorIndex(0)
@@ -216,17 +194,17 @@ func TestProcessExecutionPayloadBid_SelfBuildSuccess(t *testing.T) {
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinActivationBalance+1000, randao, latestHash, pubKey)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 0,
ExecutionPayment: 0,
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 0,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xEE}, 32),
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
}
signed := &ethpb.SignedExecutionPayloadBid{
Message: bid,
@@ -258,16 +236,16 @@ func TestProcessExecutionPayloadBid_SelfBuildNonZeroAmountFails(t *testing.T) {
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinActivationBalance+1000, randao, latestHash, [48]byte{})
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xAA}, 32),
BlockHash: bytes.Repeat([]byte{0xBB}, 32),
PrevRandao: randao[:],
BuilderIndex: builderIdx,
Slot: slot,
Value: 10,
ExecutionPayment: 0,
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
FeeRecipient: bytes.Repeat([]byte{0xDD}, 20),
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xAA}, 32),
BlockHash: bytes.Repeat([]byte{0xBB}, 32),
PrevRandao: randao[:],
BuilderIndex: builderIdx,
Slot: slot,
Value: 10,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xCC}, 32),
FeeRecipient: bytes.Repeat([]byte{0xDD}, 20),
}
signed := &ethpb.SignedExecutionPayloadBid{
Message: bid,
@@ -302,17 +280,17 @@ func TestProcessExecutionPayloadBid_PendingPaymentAndCacheBid(t *testing.T) {
state := buildGloasState(t, slot, proposerIdx, builderIdx, balance, randao, latestHash, pubKey)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 500_000,
ExecutionPayment: 1,
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 500_000,
ExecutionPayment: 1,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xEE}, 32),
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
}
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
@@ -363,17 +341,17 @@ func TestProcessExecutionPayloadBid_BuilderNotActive(t *testing.T) {
state = stateIface.(*state_native.BeaconState)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0x03}, 32),
BlockHash: bytes.Repeat([]byte{0x04}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 10,
ExecutionPayment: 0,
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
FeeRecipient: bytes.Repeat([]byte{0x06}, 20),
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0x03}, 32),
BlockHash: bytes.Repeat([]byte{0x04}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 10,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x05}, 32),
FeeRecipient: bytes.Repeat([]byte{0x06}, 20),
}
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
sig := signBid(t, sk, bid, state.Fork(), genesis)
@@ -416,17 +394,17 @@ func TestProcessExecutionPayloadBid_CannotCoverBid(t *testing.T) {
state = stateIface.(*state_native.BeaconState)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 25,
ExecutionPayment: 0,
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 25,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xEE}, 32),
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
}
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
sig := signBid(t, sk, bid, state.Fork(), genesis)
@@ -458,17 +436,17 @@ func TestProcessExecutionPayloadBid_InvalidSignature(t *testing.T) {
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 10,
ExecutionPayment: 0,
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 10,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xEE}, 32),
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
}
// Use an invalid signature.
invalidSig := [96]byte{1}
@@ -485,42 +463,6 @@ func TestProcessExecutionPayloadBid_InvalidSignature(t *testing.T) {
require.ErrorContains(t, "bid signature validation failed", err)
}
func TestProcessExecutionPayloadBid_TooManyBlobCommitments(t *testing.T) {
slot := primitives.Slot(9)
proposerIdx := primitives.ValidatorIndex(0)
builderIdx := params.BeaconConfig().BuilderIndexSelfBuild
randao := [32]byte(bytes.Repeat([]byte{0xAA}, 32))
latestHash := [32]byte(bytes.Repeat([]byte{0xBB}, 32))
pubKey := [48]byte{}
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinActivationBalance+1000, randao, latestHash, pubKey)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
PrevRandao: randao[:],
BuilderIndex: builderIdx,
Slot: slot,
BlobKzgCommitments: tooManyBlobCommitmentsForSlot(slot),
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
}
signed := &ethpb.SignedExecutionPayloadBid{
Message: bid,
Signature: common.InfiniteSignature[:],
}
block := stubBlock{
slot: slot,
proposer: proposerIdx,
parentRoot: bytesutil.ToBytes32(bid.ParentBlockRoot),
body: stubBlockBody{signedBid: signed},
v: version.Gloas,
}
err := ProcessExecutionPayloadBid(state, block)
require.ErrorContains(t, "blob KZG commitments over max", err)
}
func TestProcessExecutionPayloadBid_SlotMismatch(t *testing.T) {
slot := primitives.Slot(10)
builderIdx := primitives.BuilderIndex(1)
@@ -536,17 +478,17 @@ func TestProcessExecutionPayloadBid_SlotMismatch(t *testing.T) {
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xAA}, 32),
BlockHash: bytes.Repeat([]byte{0xBB}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot + 1, // mismatch
Value: 1,
ExecutionPayment: 0,
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
FeeRecipient: bytes.Repeat([]byte{0xDD}, 20),
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xAA}, 32),
BlockHash: bytes.Repeat([]byte{0xBB}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot + 1, // mismatch
Value: 1,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xCC}, 32),
FeeRecipient: bytes.Repeat([]byte{0xDD}, 20),
}
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
sig := signBid(t, sk, bid, state.Fork(), genesis)
@@ -578,17 +520,17 @@ func TestProcessExecutionPayloadBid_ParentHashMismatch(t *testing.T) {
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: bytes.Repeat([]byte{0x11}, 32), // mismatch
ParentBlockRoot: bytes.Repeat([]byte{0x22}, 32),
BlockHash: bytes.Repeat([]byte{0x33}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 1,
ExecutionPayment: 0,
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
FeeRecipient: bytes.Repeat([]byte{0x55}, 20),
ParentBlockHash: bytes.Repeat([]byte{0x11}, 32), // mismatch
ParentBlockRoot: bytes.Repeat([]byte{0x22}, 32),
BlockHash: bytes.Repeat([]byte{0x33}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 1,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x44}, 32),
FeeRecipient: bytes.Repeat([]byte{0x55}, 20),
}
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
sig := signBid(t, sk, bid, state.Fork(), genesis)
@@ -621,17 +563,17 @@ func TestProcessExecutionPayloadBid_ParentRootMismatch(t *testing.T) {
parentRoot := bytes.Repeat([]byte{0x22}, 32)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: parentRoot,
BlockHash: bytes.Repeat([]byte{0x33}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 1,
ExecutionPayment: 0,
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
FeeRecipient: bytes.Repeat([]byte{0x55}, 20),
ParentBlockHash: latestHash[:],
ParentBlockRoot: parentRoot,
BlockHash: bytes.Repeat([]byte{0x33}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 1,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x44}, 32),
FeeRecipient: bytes.Repeat([]byte{0x55}, 20),
}
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
sig := signBid(t, sk, bid, state.Fork(), genesis)
@@ -663,17 +605,17 @@ func TestProcessExecutionPayloadBid_PrevRandaoMismatch(t *testing.T) {
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0x22}, 32),
BlockHash: bytes.Repeat([]byte{0x33}, 32),
PrevRandao: bytes.Repeat([]byte{0x01}, 32), // mismatch
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 1,
ExecutionPayment: 0,
BlobKzgCommitments: blobCommitmentsForSlot(slot, 1),
FeeRecipient: bytes.Repeat([]byte{0x55}, 20),
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0x22}, 32),
BlockHash: bytes.Repeat([]byte{0x33}, 32),
PrevRandao: bytes.Repeat([]byte{0x01}, 32), // mismatch
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 1,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x44}, 32),
FeeRecipient: bytes.Repeat([]byte{0x55}, 20),
}
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
sig := signBid(t, sk, bid, state.Fork(), genesis)

View File

@@ -24,21 +24,14 @@ import (
)
// ProcessPayloadAttestations validates payload attestations in a block body.
// Spec v1.7.0-alpha.0 (pseudocode):
// process_payload_attestation(state: BeaconState, payload_attestation: PayloadAttestation):
//
// <spec fn="process_payload_attestation" fork="gloas" hash="f46bf0b0">
// def process_payload_attestation(
// state: BeaconState, payload_attestation: PayloadAttestation
// ) -> None:
// data = payload_attestation.data
//
// # Check that the attestation is for the parent beacon block
// assert data.beacon_block_root == state.latest_block_header.parent_root
// # Check that the attestation is for the previous slot
// assert data.slot + 1 == state.slot
// # Verify signature
// indexed_payload_attestation = get_indexed_payload_attestation(state, payload_attestation)
// assert is_valid_indexed_payload_attestation(state, indexed_payload_attestation)
// </spec>
// data = payload_attestation.data
// assert data.beacon_block_root == state.latest_block_header.parent_root
// assert data.slot + 1 == state.slot
// indexed = get_indexed_payload_attestation(state, data.slot, payload_attestation)
// assert is_valid_indexed_payload_attestation(state, indexed)
func ProcessPayloadAttestations(ctx context.Context, st state.BeaconState, body interfaces.ReadOnlyBeaconBlockBody) error {
atts, err := body.PayloadAttestations()
if err != nil {
@@ -77,7 +70,7 @@ func ProcessPayloadAttestations(ctx context.Context, st state.BeaconState, body
// indexedPayloadAttestation converts a payload attestation into its indexed form.
func indexedPayloadAttestation(ctx context.Context, st state.ReadOnlyBeaconState, att *eth.PayloadAttestation) (*consensus_types.IndexedPayloadAttestation, error) {
committee, err := PayloadCommittee(ctx, st, att.Data.Slot)
committee, err := payloadCommittee(ctx, st, att.Data.Slot)
if err != nil {
return nil, err
}
@@ -96,26 +89,19 @@ func indexedPayloadAttestation(ctx context.Context, st state.ReadOnlyBeaconState
}, nil
}
// PayloadCommittee returns the payload timeliness committee for a given slot for the state.
// payloadCommittee returns the payload timeliness committee for a given slot for the state.
// Spec v1.7.0-alpha.0 (pseudocode):
// get_ptc(state: BeaconState, slot: Slot) -> Vector[ValidatorIndex, PTC_SIZE]:
//
// <spec fn="get_ptc" fork="gloas" hash="ae15f761">
// def get_ptc(state: BeaconState, slot: Slot) -> Vector[ValidatorIndex, PTC_SIZE]:
// """
// Get the payload timeliness committee for the given ``slot``.
// """
// epoch = compute_epoch_at_slot(slot)
// seed = hash(get_seed(state, epoch, DOMAIN_PTC_ATTESTER) + uint_to_bytes(slot))
// indices: List[ValidatorIndex] = []
// # Concatenate all committees for this slot in order
// committees_per_slot = get_committee_count_per_slot(state, epoch)
// for i in range(committees_per_slot):
// committee = get_beacon_committee(state, slot, CommitteeIndex(i))
// indices.extend(committee)
// return compute_balance_weighted_selection(
// state, indices, seed, size=PTC_SIZE, shuffle_indices=False
// )
// </spec>
func PayloadCommittee(ctx context.Context, st state.ReadOnlyBeaconState, slot primitives.Slot) ([]primitives.ValidatorIndex, error) {
// epoch = compute_epoch_at_slot(slot)
// seed = hash(get_seed(state, epoch, DOMAIN_PTC_ATTESTER) + uint_to_bytes(slot))
// indices = []
// committees_per_slot = get_committee_count_per_slot(state, epoch)
// for i in range(committees_per_slot):
// committee = get_beacon_committee(state, slot, CommitteeIndex(i))
// indices.extend(committee)
// return compute_balance_weighted_selection(state, indices, seed, size=PTC_SIZE, shuffle_indices=False)
func payloadCommittee(ctx context.Context, st state.ReadOnlyBeaconState, slot primitives.Slot) ([]primitives.ValidatorIndex, error) {
epoch := slots.ToEpoch(slot)
seed, err := ptcSeed(st, epoch, slot)
if err != nil {
@@ -166,35 +152,17 @@ func ptcSeed(st state.ReadOnlyBeaconState, epoch primitives.Epoch, slot primitiv
}
// selectByBalance selects a balance-weighted subset of input candidates.
// Spec v1.7.0-alpha.0 (pseudocode):
// compute_balance_weighted_selection(state, indices, seed, size, shuffle_indices):
// Note: shuffle_indices is false for PTC.
//
// <spec fn="compute_balance_weighted_selection" fork="gloas" hash="2c9f1c23">
// def compute_balance_weighted_selection(
// state: BeaconState,
// indices: Sequence[ValidatorIndex],
// seed: Bytes32,
// size: uint64,
// shuffle_indices: bool,
// ) -> Sequence[ValidatorIndex]:
// """
// Return ``size`` indices sampled by effective balance, using ``indices``
// as candidates. If ``shuffle_indices`` is ``True``, candidate indices
// are themselves sampled from ``indices`` by shuffling it, otherwise
// ``indices`` is traversed in order.
// """
// total = uint64(len(indices))
// assert total > 0
// selected: List[ValidatorIndex] = []
// i = uint64(0)
// while len(selected) < size:
// next_index = i % total
// if shuffle_indices:
// next_index = compute_shuffled_index(next_index, total, seed)
// candidate_index = indices[next_index]
// if compute_balance_weighted_acceptance(state, candidate_index, seed, i):
// selected.append(candidate_index)
// i += 1
// return selected
// </spec>
// total = len(indices); selected = []; i = 0
// while len(selected) < size:
// next = i % total
// if shuffle_indices: next = compute_shuffled_index(next, total, seed)
// if compute_balance_weighted_acceptance(state, indices[next], seed, i):
// selected.append(indices[next])
// i += 1
func selectByBalanceFill(
ctx context.Context,
st state.ReadOnlyBeaconState,
@@ -231,22 +199,15 @@ func selectByBalanceFill(
}
// acceptByBalance determines if a validator is accepted based on its effective balance.
// Spec v1.7.0-alpha.0 (pseudocode):
// compute_balance_weighted_acceptance(state, index, seed, i):
//
// <spec fn="compute_balance_weighted_acceptance" fork="gloas" hash="9954dcd0">
// def compute_balance_weighted_acceptance(
// state: BeaconState, index: ValidatorIndex, seed: Bytes32, i: uint64
// ) -> bool:
// """
// Return whether to accept the selection of the validator ``index``, with probability
// proportional to its ``effective_balance``, and randomness given by ``seed`` and ``i``.
// """
// MAX_RANDOM_VALUE = 2**16 - 1
// random_bytes = hash(seed + uint_to_bytes(i // 16))
// offset = i % 16 * 2
// random_value = bytes_to_uint64(random_bytes[offset : offset + 2])
// effective_balance = state.validators[index].effective_balance
// return effective_balance * MAX_RANDOM_VALUE >= MAX_EFFECTIVE_BALANCE_ELECTRA * random_value
// </spec>
// MAX_RANDOM_VALUE = 2**16 - 1
// random_bytes = hash(seed + uint_to_bytes(i // 16))
// offset = i % 16 * 2
// random_value = bytes_to_uint64(random_bytes[offset:offset+2])
// effective_balance = state.validators[index].effective_balance
// return effective_balance * MAX_RANDOM_VALUE >= MAX_EFFECTIVE_BALANCE_ELECTRA * random_value
func acceptByBalance(st state.ReadOnlyBeaconState, idx primitives.ValidatorIndex, seedBuf []byte, hashFunc func([]byte) [32]byte, maxBalance uint64, round uint64) (bool, error) {
// Reuse the seed buffer by overwriting the last 8 bytes with the round counter.
binary.LittleEndian.PutUint64(seedBuf[len(seedBuf)-8:], round/16)
@@ -263,26 +224,16 @@ func acceptByBalance(st state.ReadOnlyBeaconState, idx primitives.ValidatorIndex
}
// validIndexedPayloadAttestation verifies the signature of an indexed payload attestation.
// Spec v1.7.0-alpha.0 (pseudocode):
// is_valid_indexed_payload_attestation(state: BeaconState, indexed_payload_attestation: IndexedPayloadAttestation) -> bool:
//
// <spec fn="is_valid_indexed_payload_attestation" fork="gloas" hash="d76e0f89">
// def is_valid_indexed_payload_attestation(
// state: BeaconState, attestation: IndexedPayloadAttestation
// ) -> bool:
// """
// Check if ``attestation`` is non-empty, has sorted indices, and has
// a valid aggregate signature.
// """
// # Verify indices are non-empty and sorted
// indices = attestation.attesting_indices
// if len(indices) == 0 or not indices == sorted(indices):
// return False
//
// # Verify aggregate signature
// pubkeys = [state.validators[i].pubkey for i in indices]
// domain = get_domain(state, DOMAIN_PTC_ATTESTER, compute_epoch_at_slot(attestation.data.slot))
// signing_root = compute_signing_root(attestation.data, domain)
// return bls.FastAggregateVerify(pubkeys, signing_root, attestation.signature)
// </spec>
// indices = indexed_payload_attestation.attesting_indices
// return len(indices) > 0 and indices == sorted(indices) and
// bls.FastAggregateVerify(
// [state.validators[i].pubkey for i in indices],
// compute_signing_root(indexed_payload_attestation.data, get_domain(state, DOMAIN_PTC_ATTESTER, compute_epoch_at_slot(attestation.data.slot)),
// indexed_payload_attestation.signature,
// )
func validIndexedPayloadAttestation(st state.ReadOnlyBeaconState, att *consensus_types.IndexedPayloadAttestation) error {
indices := att.AttestingIndices
if len(indices) == 0 || !slices.IsSorted(indices) {

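For intuition on the acceptance rule in compute_balance_weighted_acceptance above, here is a minimal standalone Go sketch; it assumes sha256 for hash(), little-endian uint_to_bytes, and MAX_EFFECTIVE_BALANCE_ELECTRA of 2048 ETH in Gwei, and acceptCandidate is an illustrative name rather than the repository's API.

package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

const (
	maxRandomValue      = 1<<16 - 1         // MAX_RANDOM_VALUE = 2**16 - 1
	maxEffectiveBalance = 2_048_000_000_000 // assumed MAX_EFFECTIVE_BALANCE_ELECTRA, in Gwei
)

// acceptCandidate accepts a validator with probability proportional to its
// effective balance, with randomness derived from (seed, i) as in the pseudocode.
func acceptCandidate(seed [32]byte, i uint64, effectiveBalance uint64) bool {
	// hash(seed + uint_to_bytes(i // 16)) yields sixteen 2-byte random values per hash.
	var preimage [40]byte
	copy(preimage[:32], seed[:])
	binary.LittleEndian.PutUint64(preimage[32:], i/16)
	randomBytes := sha256.Sum256(preimage[:])

	offset := (i % 16) * 2
	randomValue := uint64(binary.LittleEndian.Uint16(randomBytes[offset : offset+2]))

	return effectiveBalance*maxRandomValue >= maxEffectiveBalance*randomValue
}

func main() {
	seed := sha256.Sum256([]byte("example seed"))
	// A 32 ETH validator passes roughly 32/2048 of the rounds; a validator at the
	// maximum effective balance always satisfies the inequality.
	fmt.Println(acceptCandidate(seed, 0, 32_000_000_000))
	fmt.Println(acceptCandidate(seed, 0, maxEffectiveBalance))
}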
View File

@@ -10,21 +10,17 @@ import (
)
// ProcessBuilderPendingPayments processes the builder pending payments from the previous epoch.
// Spec v1.7.0-alpha.0 (pseudocode):
// def process_builder_pending_payments(state: BeaconState) -> None:
//
// <spec fn="process_builder_pending_payments" fork="gloas" hash="10da48dd">
// def process_builder_pending_payments(state: BeaconState) -> None:
// """
// Processes the builder pending payments from the previous epoch.
// """
// quorum = get_builder_payment_quorum_threshold(state)
// for payment in state.builder_pending_payments[:SLOTS_PER_EPOCH]:
// if payment.weight >= quorum:
// state.builder_pending_withdrawals.append(payment.withdrawal)
// quorum = get_builder_payment_quorum_threshold(state)
// for payment in state.builder_pending_payments[:SLOTS_PER_EPOCH]:
// if payment.weight >= quorum:
// state.builder_pending_withdrawals.append(payment.withdrawal)
//
// old_payments = state.builder_pending_payments[SLOTS_PER_EPOCH:]
// new_payments = [BuilderPendingPayment() for _ in range(SLOTS_PER_EPOCH)]
// state.builder_pending_payments = old_payments + new_payments
// </spec>
// old_payments = state.builder_pending_payments[SLOTS_PER_EPOCH:]
// new_payments = [BuilderPendingPayment() for _ in range(SLOTS_PER_EPOCH)]
// state.builder_pending_payments = old_payments + new_payments
func ProcessBuilderPendingPayments(state state.BeaconState) error {
quorum, err := builderQuorumThreshold(state)
if err != nil {
@@ -57,16 +53,12 @@ func ProcessBuilderPendingPayments(state state.BeaconState) error {
}
// builderQuorumThreshold calculates the quorum threshold for builder payments.
// Spec v1.7.0-alpha.0 (pseudocode):
// def get_builder_payment_quorum_threshold(state: BeaconState) -> uint64:
//
// <spec fn="get_builder_payment_quorum_threshold" fork="gloas" hash="a64b7ffb">
// def get_builder_payment_quorum_threshold(state: BeaconState) -> uint64:
// """
// Calculate the quorum threshold for builder payments.
// """
// per_slot_balance = get_total_active_balance(state) // SLOTS_PER_EPOCH
// quorum = per_slot_balance * BUILDER_PAYMENT_THRESHOLD_NUMERATOR
// return uint64(quorum // BUILDER_PAYMENT_THRESHOLD_DENOMINATOR)
// </spec>
// per_slot_balance = get_total_active_balance(state) // SLOTS_PER_EPOCH
// quorum = per_slot_balance * BUILDER_PAYMENT_THRESHOLD_NUMERATOR
// return uint64(quorum // BUILDER_PAYMENT_THRESHOLD_DENOMINATOR)
func builderQuorumThreshold(state state.ReadOnlyBeaconState) (primitives.Gwei, error) {
activeBalance, err := helpers.TotalActiveBalance(state)
if err != nil {

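As a hedged sketch of the quorum arithmetic in get_builder_payment_quorum_threshold above, with the threshold numerator and denominator left as parameters because their concrete values come from the beacon chain configuration rather than this file:

package sketch

// quorumThreshold is an illustrative stand-in for the spec function above, not the client's API.
func quorumThreshold(totalActiveBalance, slotsPerEpoch, thresholdNumerator, thresholdDenominator uint64) uint64 {
	perSlotBalance := totalActiveBalance / slotsPerEpoch // integer division, as in the spec
	return perSlotBalance * thresholdNumerator / thresholdDenominator
}

A payment in the first SLOTS_PER_EPOCH entries of builder_pending_payments is promoted to builder_pending_withdrawals only when its weight reaches this threshold; the remaining entries then become the new first half for the next epoch.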
View File

@@ -11,20 +11,16 @@ import (
)
// RemoveBuilderPendingPayment removes the pending builder payment for the proposal slot.
// Spec v1.7.0 (pseudocode):
//
// <spec fn="process_proposer_slashing" fork="gloas" lines="22-32" hash="4da721ef">
// # [New in Gloas:EIP7732]
// # Remove the BuilderPendingPayment corresponding to
// # this proposal if it is still in the 2-epoch window.
// slot = header_1.slot
// proposal_epoch = compute_epoch_at_slot(slot)
// if proposal_epoch == get_current_epoch(state):
// payment_index = SLOTS_PER_EPOCH + slot % SLOTS_PER_EPOCH
// state.builder_pending_payments[payment_index] = BuilderPendingPayment()
// payment_index = SLOTS_PER_EPOCH + slot % SLOTS_PER_EPOCH
// state.builder_pending_payments[payment_index] = BuilderPendingPayment()
// elif proposal_epoch == get_previous_epoch(state):
// payment_index = slot % SLOTS_PER_EPOCH
// state.builder_pending_payments[payment_index] = BuilderPendingPayment()
// </spec>
// payment_index = slot % SLOTS_PER_EPOCH
// state.builder_pending_payments[payment_index] = BuilderPendingPayment()
func RemoveBuilderPendingPayment(st state.BeaconState, header *eth.BeaconBlockHeader) error {
proposalEpoch := slots.ToEpoch(header.Slot)
currentEpoch := time.CurrentEpoch(st)

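A small sketch of the index arithmetic above, under the assumption that builder_pending_payments spans two epochs of slots with the previous epoch in the first half and the current epoch in the second; paymentIndex is an illustrative name, and slotsPerEpoch would come from the config in the client.

package sketch

// paymentIndex returns where the pending payment for a proposal slot lives in the
// two-epoch builder_pending_payments window; slashing that proposal zeroes the entry.
func paymentIndex(slot, slotsPerEpoch uint64, inCurrentEpoch bool) uint64 {
	if inCurrentEpoch {
		return slotsPerEpoch + slot%slotsPerEpoch // second half: current epoch
	}
	return slot % slotsPerEpoch // first half: previous epoch
}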
View File

@@ -33,6 +33,7 @@ go_library(
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@org_golang_x_sync//errgroup:go_default_library",
],
)

View File

@@ -1,11 +1,15 @@
package peerdas
import (
stderrors "errors"
"iter"
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/container/trie"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/pkg/errors"
)
@@ -16,6 +20,7 @@ var (
ErrIndexTooLarge = errors.New("column index is larger than the specified columns count")
ErrNoKzgCommitments = errors.New("no KZG commitments found")
ErrMismatchLength = errors.New("mismatch in the length of the column, commitments or proofs")
ErrEmptySegment = errors.New("empty segment in batch")
ErrInvalidKZGProof = errors.New("invalid KZG proof")
ErrBadRootLength = errors.New("bad root length")
ErrInvalidInclusionProof = errors.New("invalid inclusion proof")
@@ -57,80 +62,113 @@ func VerifyDataColumnSidecar(sidecar blocks.RODataColumn) error {
return nil
}
// VerifyDataColumnsSidecarKZGProofs verifies if the KZG proofs are correct.
// CellProofBundleSegment is returned when a batch fails. The caller can call
// the `.Verify` method to verify just this segment.
type CellProofBundleSegment struct {
indices []uint64
commitments []kzg.Bytes48
cells []kzg.Cell
proofs []kzg.Bytes48
}
// Verify verifies this segment without batching.
func (s CellProofBundleSegment) Verify() error {
if len(s.cells) == 0 {
return ErrEmptySegment
}
verified, err := kzg.VerifyCellKZGProofBatch(s.commitments, s.indices, s.cells, s.proofs)
if err != nil {
return stderrors.Join(err, ErrInvalidKZGProof)
}
if !verified {
return ErrInvalidKZGProof
}
return nil
}
func VerifyDataColumnsCellsKZGProofs(sizeHint int, cellProofsIter iter.Seq[blocks.CellProofBundle]) error {
// ignore the failed segment list since we are just passing in one segment.
_, err := BatchVerifyDataColumnsCellsKZGProofs(sizeHint, []iter.Seq[blocks.CellProofBundle]{cellProofsIter})
return err
}
// BatchVerifyDataColumnsCellsKZGProofs verifies if the KZG proofs are correct.
// Note: We are slightly deviating from the specification here:
// The specification verifies the KZG proofs for each sidecar separately,
// while we are verifying all the KZG proofs from multiple sidecars in a batch.
// This is done to improve performance since the internal KZG library is way more
// efficient when verifying in batch.
// efficient when verifying in batch. If the batch fails, the failed segments
// are returned to the caller so that it can retry them segment by segment without
// batching. On success, the failed segment list is empty.
//
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#verify_data_column_sidecar_kzg_proofs
func VerifyDataColumnsSidecarKZGProofs(sidecars []blocks.RODataColumn) error {
// Compute the total count.
count := 0
for _, sidecar := range sidecars {
count += len(sidecar.Column)
}
func BatchVerifyDataColumnsCellsKZGProofs(sizeHint int, cellProofsIters []iter.Seq[blocks.CellProofBundle]) ( /* failed segment list */ []CellProofBundleSegment, error) {
commitments := make([]kzg.Bytes48, 0, sizeHint)
indices := make([]uint64, 0, sizeHint)
cells := make([]kzg.Cell, 0, sizeHint)
proofs := make([]kzg.Bytes48, 0, sizeHint)
commitments := make([]kzg.Bytes48, 0, count)
indices := make([]uint64, 0, count)
cells := make([]kzg.Cell, 0, count)
proofs := make([]kzg.Bytes48, 0, count)
for _, sidecar := range sidecars {
for i := range sidecar.Column {
var anySegmentEmpty bool
var segments []CellProofBundleSegment
for _, cellProofsIter := range cellProofsIters {
startIdx := len(cells)
for bundle := range cellProofsIter {
var (
commitment kzg.Bytes48
cell kzg.Cell
proof kzg.Bytes48
)
commitmentBytes := sidecar.KzgCommitments[i]
cellBytes := sidecar.Column[i]
proofBytes := sidecar.KzgProofs[i]
if len(commitmentBytes) != len(commitment) ||
len(cellBytes) != len(cell) ||
len(proofBytes) != len(proof) {
return ErrMismatchLength
if len(bundle.Commitment) != len(commitment) ||
len(bundle.Cell) != len(cell) ||
len(bundle.Proof) != len(proof) {
return nil, ErrMismatchLength
}
copy(commitment[:], commitmentBytes)
copy(cell[:], cellBytes)
copy(proof[:], proofBytes)
copy(commitment[:], bundle.Commitment)
copy(cell[:], bundle.Cell)
copy(proof[:], bundle.Proof)
commitments = append(commitments, commitment)
indices = append(indices, sidecar.Index)
indices = append(indices, bundle.ColumnIndex)
cells = append(cells, cell)
proofs = append(proofs, proof)
}
if len(cells[startIdx:]) == 0 {
anySegmentEmpty = true
}
segments = append(segments, CellProofBundleSegment{
indices: indices[startIdx:],
commitments: commitments[startIdx:],
cells: cells[startIdx:],
proofs: proofs[startIdx:],
})
}
if anySegmentEmpty {
return segments, ErrEmptySegment
}
// Batch verify that the cells match the corresponding commitments and proofs.
verified, err := kzg.VerifyCellKZGProofBatch(commitments, indices, cells, proofs)
if err != nil {
return errors.Wrap(err, "verify cell KZG proof batch")
return segments, stderrors.Join(err, ErrInvalidKZGProof)
}
if !verified {
return ErrInvalidKZGProof
return segments, ErrInvalidKZGProof
}
return nil
return nil, nil
}
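A hedged caller-side sketch of the batch-then-fallback flow described above: verify several bundle groups in one batch, and only if the batch fails re-verify each returned segment on its own to isolate the offending group. The verifyGroups helper and its error handling are illustrative; BatchVerifyDataColumnsCellsKZGProofs, CellProofBundleSegment.Verify, and RODataColumnsToCellProofBundles are taken from this change.

package sketch

import (
	"fmt"
	"iter"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
)

func verifyGroups(groups [][]blocks.RODataColumn) error {
	iters := make([]iter.Seq[blocks.CellProofBundle], 0, len(groups))
	sizeHint := 0
	for _, group := range groups {
		iters = append(iters, blocks.RODataColumnsToCellProofBundles(group))
		for _, sidecar := range group {
			sizeHint += len(sidecar.Column)
		}
	}
	// Fast path: one batched verification across every group.
	segments, err := peerdas.BatchVerifyDataColumnsCellsKZGProofs(sizeHint, iters)
	if err == nil {
		return nil
	}
	// The batch failed: verify the returned segments one by one so the caller can
	// tell which group (for example, which peer's sidecars) is actually invalid.
	for i, segment := range segments {
		if segErr := segment.Verify(); segErr != nil {
			return fmt.Errorf("segment %d failed verification: %w", i, segErr)
		}
	}
	return err
}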
// VerifyDataColumnSidecarInclusionProof verifies whether the given KZG commitments are included in the given beacon block.
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#verify_data_column_sidecar_inclusion_proof
func VerifyDataColumnSidecarInclusionProof(sidecar blocks.RODataColumn) error {
if sidecar.SignedBlockHeader == nil || sidecar.SignedBlockHeader.Header == nil {
return ErrNilBlockHeader
}
root := sidecar.SignedBlockHeader.Header.BodyRoot
if len(root) != fieldparams.RootLength {
// verifyKzgCommitmentsInclusionProof is the shared implementation for inclusion proof verification.
func verifyKzgCommitmentsInclusionProof(bodyRoot []byte, kzgCommitments [][]byte, inclusionProof [][]byte) error {
if len(bodyRoot) != fieldparams.RootLength {
return ErrBadRootLength
}
leaves := blocks.LeavesFromCommitments(sidecar.KzgCommitments)
leaves := blocks.LeavesFromCommitments(kzgCommitments)
sparse, err := trie.GenerateTrieFromItems(leaves, fieldparams.LogMaxBlobCommitments)
if err != nil {
@@ -142,7 +180,7 @@ func VerifyDataColumnSidecarInclusionProof(sidecar blocks.RODataColumn) error {
return errors.Wrap(err, "hash tree root")
}
verified := trie.VerifyMerkleProof(root, hashTreeRoot[:], kzgPosition, sidecar.KzgCommitmentsInclusionProof)
verified := trie.VerifyMerkleProof(bodyRoot, hashTreeRoot[:], kzgPosition, inclusionProof)
if !verified {
return ErrInvalidInclusionProof
}
@@ -150,6 +188,31 @@ func VerifyDataColumnSidecarInclusionProof(sidecar blocks.RODataColumn) error {
return nil
}
// VerifyDataColumnSidecarInclusionProof verifies whether the given KZG commitments are included in the given beacon block.
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#verify_data_column_sidecar_inclusion_proof
func VerifyDataColumnSidecarInclusionProof(sidecar blocks.RODataColumn) error {
if sidecar.SignedBlockHeader == nil || sidecar.SignedBlockHeader.Header == nil {
return ErrNilBlockHeader
}
return verifyKzgCommitmentsInclusionProof(
sidecar.SignedBlockHeader.Header.BodyRoot,
sidecar.KzgCommitments,
sidecar.KzgCommitmentsInclusionProof,
)
}
// VerifyPartialDataColumnHeaderInclusionProof verifies if the KZG commitments are included in the beacon block.
func VerifyPartialDataColumnHeaderInclusionProof(header *ethpb.PartialDataColumnHeader) error {
if header.SignedBlockHeader == nil || header.SignedBlockHeader.Header == nil {
return ErrNilBlockHeader
}
return verifyKzgCommitmentsInclusionProof(
header.SignedBlockHeader.Header.BodyRoot,
header.KzgCommitments,
header.KzgCommitmentsInclusionProof,
)
}
// ComputeSubnetForDataColumnSidecar computes the subnet for a data column sidecar.
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#compute_subnet_for_data_column_sidecar
func ComputeSubnetForDataColumnSidecar(columnIndex uint64) uint64 {

View File

@@ -3,6 +3,7 @@ package peerdas_test
import (
"crypto/rand"
"fmt"
"iter"
"testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
@@ -72,7 +73,7 @@ func TestVerifyDataColumnSidecarKZGProofs(t *testing.T) {
sidecars := generateRandomSidecars(t, seed, blobCount)
sidecars[0].Column[0] = sidecars[0].Column[0][:len(sidecars[0].Column[0])-1] // Remove one byte to create size mismatch
err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
err := peerdas.VerifyDataColumnsCellsKZGProofs(0, blocks.RODataColumnsToCellProofBundles(sidecars))
require.ErrorIs(t, err, peerdas.ErrMismatchLength)
})
@@ -80,14 +81,15 @@ func TestVerifyDataColumnSidecarKZGProofs(t *testing.T) {
sidecars := generateRandomSidecars(t, seed, blobCount)
sidecars[0].Column[0][0]++ // It is OK to overflow
err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
err := peerdas.VerifyDataColumnsCellsKZGProofs(0, blocks.RODataColumnsToCellProofBundles(sidecars))
require.ErrorIs(t, err, peerdas.ErrInvalidKZGProof)
})
t.Run("nominal", func(t *testing.T) {
sidecars := generateRandomSidecars(t, seed, blobCount)
err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
failedSegments, err := peerdas.BatchVerifyDataColumnsCellsKZGProofs(blobCount, []iter.Seq[blocks.CellProofBundle]{blocks.RODataColumnsToCellProofBundles(sidecars)})
require.NoError(t, err)
require.Equal(t, 0, len(failedSegments))
})
}
@@ -273,7 +275,7 @@ func BenchmarkVerifyDataColumnSidecarKZGProofs_SameCommitments_NoBatch(b *testin
for _, sidecar := range sidecars {
sidecars := []blocks.RODataColumn{sidecar}
b.StartTimer()
err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
err := peerdas.VerifyDataColumnsCellsKZGProofs(0, blocks.RODataColumnsToCellProofBundles(sidecars))
b.StopTimer()
require.NoError(b, err)
}
@@ -308,7 +310,7 @@ func BenchmarkVerifyDataColumnSidecarKZGProofs_DiffCommitments_Batch(b *testing.
}
b.StartTimer()
err := peerdas.VerifyDataColumnsSidecarKZGProofs(allSidecars)
err := peerdas.VerifyDataColumnsCellsKZGProofs(0, blocks.RODataColumnsToCellProofBundles(allSidecars))
b.StopTimer()
require.NoError(b, err)
}
@@ -341,7 +343,7 @@ func BenchmarkVerifyDataColumnSidecarKZGProofs_DiffCommitments_Batch4(b *testing
for _, sidecars := range allSidecars {
b.StartTimer()
err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
err := peerdas.VerifyDataColumnsCellsKZGProofs(len(allSidecars), blocks.RODataColumnsToCellProofBundles(sidecars))
b.StopTimer()
require.NoError(b, err)
}

View File

@@ -5,6 +5,7 @@ import (
"sync"
"time"
"github.com/OffchainLabs/go-bitfield"
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
@@ -339,7 +340,8 @@ func ComputeCellsAndProofsFromFlat(blobs [][]byte, cellProofs [][]byte) ([][]kzg
}
// ComputeCellsAndProofsFromStructured computes the cells and proofs from blobs and cell proofs.
func ComputeCellsAndProofsFromStructured(blobsAndProofs []*pb.BlobAndProofV2) ([][]kzg.Cell, [][]kzg.Proof, error) {
// commitmentCount is required to return the correctly sized bitlist even if we see a nil slice of blobsAndProofs.
func ComputeCellsAndProofsFromStructured(commitmentCount uint64, blobsAndProofs []*pb.BlobAndProofV2) (bitfield.Bitlist /* parts included */, [][]kzg.Cell, [][]kzg.Proof, error) {
start := time.Now()
defer func() {
cellsAndProofsFromStructuredComputationTime.Observe(float64(time.Since(start).Milliseconds()))
@@ -347,14 +349,24 @@ func ComputeCellsAndProofsFromStructured(blobsAndProofs []*pb.BlobAndProofV2) ([
var wg errgroup.Group
cellsPerBlob := make([][]kzg.Cell, len(blobsAndProofs))
proofsPerBlob := make([][]kzg.Proof, len(blobsAndProofs))
var blobsPresent int
for _, blobAndProof := range blobsAndProofs {
if blobAndProof != nil {
blobsPresent++
}
}
cellsPerBlob := make([][]kzg.Cell, blobsPresent)
proofsPerBlob := make([][]kzg.Proof, blobsPresent)
included := bitfield.NewBitlist(commitmentCount)
var j int
for i, blobAndProof := range blobsAndProofs {
if blobAndProof == nil {
return nil, nil, ErrNilBlobAndProof
continue
}
included.SetBitAt(uint64(i), true)
compactIndex := j
wg.Go(func() error {
var kzgBlob kzg.Blob
if copy(kzgBlob[:], blobAndProof.Blob) != len(kzgBlob) {
@@ -381,17 +393,18 @@ func ComputeCellsAndProofsFromStructured(blobsAndProofs []*pb.BlobAndProofV2) ([
kzgProofs = append(kzgProofs, kzgProof)
}
cellsPerBlob[i] = cells
proofsPerBlob[i] = kzgProofs
cellsPerBlob[compactIndex] = cells
proofsPerBlob[compactIndex] = kzgProofs
return nil
})
j++
}
if err := wg.Wait(); err != nil {
return nil, nil, err
return nil, nil, nil, err
}
return cellsPerBlob, proofsPerBlob, nil
return included, cellsPerBlob, proofsPerBlob, nil
}
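For clarity on the included bitlist contract introduced above, a small standalone example using the go-bitfield API imported by this file; the concrete blob indices are made up. Bits are indexed by commitment position, while the returned cell and proof slices are compacted to the blobs that were actually returned.

package main

import (
	"fmt"

	"github.com/OffchainLabs/go-bitfield"
)

func main() {
	const commitmentCount = 4
	included := bitfield.NewBitlist(commitmentCount)

	// Suppose a getBlobsV3 response carried only the blobs for commitments 1 and 3.
	included.SetBitAt(1, true)
	included.SetBitAt(3, true)

	fmt.Println(included.Count())  // 2: two of the four blobs are available
	fmt.Println(included.BitAt(0)) // false: blob 0 must come from gossip or reconstruction

	// The compacted cellsPerBlob/proofsPerBlob slices would then have length 2:
	// element 0 corresponds to commitment 1 and element 1 to commitment 3.
}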
// ReconstructBlobs reconstructs blobs from data column sidecars without computing KZG proofs or creating sidecars.

View File

@@ -479,8 +479,9 @@ func TestComputeCellsAndProofsFromFlat(t *testing.T) {
func TestComputeCellsAndProofsFromStructured(t *testing.T) {
t.Run("nil blob and proof", func(t *testing.T) {
_, _, err := peerdas.ComputeCellsAndProofsFromStructured([]*pb.BlobAndProofV2{nil})
require.ErrorIs(t, err, peerdas.ErrNilBlobAndProof)
included, _, _, err := peerdas.ComputeCellsAndProofsFromStructured(0, []*pb.BlobAndProofV2{nil})
require.NoError(t, err)
require.Equal(t, uint64(0), included.Count())
})
t.Run("nominal", func(t *testing.T) {
@@ -533,7 +534,8 @@ func TestComputeCellsAndProofsFromStructured(t *testing.T) {
require.NoError(t, err)
// Test ComputeCellsAndProofs
actualCellsPerBlob, actualProofsPerBlob, err := peerdas.ComputeCellsAndProofsFromStructured(blobsAndProofs)
included, actualCellsPerBlob, actualProofsPerBlob, err := peerdas.ComputeCellsAndProofsFromStructured(uint64(len(blobsAndProofs)), blobsAndProofs)
require.Equal(t, included.Count(), uint64(len(actualCellsPerBlob)))
require.NoError(t, err)
require.Equal(t, blobCount, len(actualCellsPerBlob))

View File

@@ -3,6 +3,7 @@ package peerdas
import (
"time"
"github.com/OffchainLabs/go-bitfield"
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
beaconState "github.com/OffchainLabs/prysm/v7/beacon-chain/state"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
@@ -143,6 +144,40 @@ func DataColumnSidecars(cellsPerBlob [][]kzg.Cell, proofsPerBlob [][]kzg.Proof,
return roSidecars, nil
}
func PartialColumns(included bitfield.Bitlist, cellsPerBlob [][]kzg.Cell, proofsPerBlob [][]kzg.Proof, src ConstructionPopulator) ([]blocks.PartialDataColumn, error) {
start := time.Now()
const numberOfColumns = uint64(fieldparams.NumberOfColumns)
cells, proofs, err := rotateRowsToCols(cellsPerBlob, proofsPerBlob, numberOfColumns)
if err != nil {
return nil, errors.Wrap(err, "rotate cells and proofs")
}
info, err := src.extract()
if err != nil {
return nil, errors.Wrap(err, "extract block info")
}
dataColumns := make([]blocks.PartialDataColumn, 0, numberOfColumns)
for idx := range numberOfColumns {
dc, err := blocks.NewPartialDataColumn(info.signedBlockHeader, idx, info.kzgCommitments, info.kzgInclusionProof)
if err != nil {
return nil, errors.Wrap(err, "new ro data column")
}
for i := range len(info.kzgCommitments) {
if !included.BitAt(uint64(i)) {
continue
}
dc.ExtendFromVerfifiedCell(uint64(i), cells[idx][0], proofs[idx][0])
cells[idx] = cells[idx][1:]
proofs[idx] = proofs[idx][1:]
}
dataColumns = append(dataColumns, dc)
}
dataColumnComputationTime.Observe(float64(time.Since(start).Milliseconds()))
return dataColumns, nil
}
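For orientation, a standalone sketch of the row-to-column regrouping this function relies on: cells arrive grouped per blob (one row of cells, one per column), and each data column takes the cell at its own column index from every available blob. The string cell type and the rotate name are stand-ins chosen only to keep the sketch self-contained.

package sketch

// rotate regroups per-blob cell rows into per-column cell lists. cellsPerBlob[b][c]
// is the cell for blob b at column c; the result's [c][b] ordering is what a
// (partial) data column for column index c carries.
func rotate(cellsPerBlob [][]string, numberOfColumns int) [][]string {
	cellsPerColumn := make([][]string, numberOfColumns)
	for _, row := range cellsPerBlob {
		for c := 0; c < numberOfColumns && c < len(row); c++ {
			cellsPerColumn[c] = append(cellsPerColumn[c], row[c])
		}
	}
	return cellsPerColumn
}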
// Slot returns the slot of the source
func (s *BlockReconstructionSource) Slot() primitives.Slot {
return s.Block().Slot()

View File

@@ -143,11 +143,10 @@ func ProcessSlot(ctx context.Context, state state.BeaconState) (state.BeaconStat
return nil, err
}
// <spec fn="process_slot" fork="gloas" lines="11-13" hash="62b28839">
// Spec v1.6.1 (pseudocode):
// # [New in Gloas:EIP7732]
// # Unset the next payload availability
// state.execution_payload_availability[(state.slot + 1) % SLOTS_PER_HISTORICAL_ROOT] = 0b0
// </spec>
if state.Version() >= version.Gloas {
index := uint64((state.Slot() + 1) % params.BeaconConfig().SlotsPerHistoricalRoot)
if err := state.UpdateExecutionPayloadAvailabilityAtIndex(index, 0x0); err != nil {

View File

@@ -78,7 +78,7 @@ func newGloasState(t *testing.T, slot primitives.Slot, availability []byte) stat
BlockHash: make([]byte, 32),
PrevRandao: make([]byte, 32),
FeeRecipient: make([]byte, 20),
BlobKzgCommitments: [][]byte{make([]byte, 48)},
BlobKzgCommitmentsRoot: make([]byte, 32),
},
Eth1Data: &ethpb.Eth1Data{
DepositRoot: make([]byte, 32),

View File

@@ -2,7 +2,6 @@ package kv
import (
"context"
"slices"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
@@ -34,9 +33,6 @@ func (s *Store) LastArchivedRoot(ctx context.Context) [32]byte {
if err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(stateSlotIndicesBucket)
_, blockRoot = bkt.Cursor().Last()
if len(blockRoot) > 0 {
blockRoot = slices.Clone(blockRoot)
}
return nil
}); err != nil { // This view never returns an error, but we'll handle anyway for sanity.
panic(err) // lint:nopanic -- View never returns an error.
@@ -55,9 +51,6 @@ func (s *Store) ArchivedPointRoot(ctx context.Context, slot primitives.Slot) [32
if err := s.db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket(stateSlotIndicesBucket)
blockRoot = bucket.Get(bytesutil.SlotToBytesBigEndian(slot))
if len(blockRoot) > 0 {
blockRoot = slices.Clone(blockRoot)
}
return nil
}); err != nil { // This view never returns an error, but we'll handle anyway for sanity.
panic(err) // lint:nopanic -- View never returns an error.

View File

@@ -812,10 +812,7 @@ func (s *Store) FeeRecipientByValidatorID(ctx context.Context, id primitives.Val
var addr []byte
err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(feeRecipientBucket)
stored := bkt.Get(bytesutil.Uint64ToBytesBigEndian(uint64(id)))
if len(stored) > 0 {
addr = slices.Clone(stored)
}
addr = bkt.Get(bytesutil.Uint64ToBytesBigEndian(uint64(id)))
// If the fee recipient is not found in the standard fee recipient bucket, then
// check the registration bucket. The fee recipient may be there.
// This is to resolve incompatibility until we fully migrate to the registration bucket.
@@ -829,7 +826,7 @@ func (s *Store) FeeRecipientByValidatorID(ctx context.Context, id primitives.Val
if err := decode(ctx, enc, reg); err != nil {
return err
}
addr = slices.Clone(reg.FeeRecipient)
addr = reg.FeeRecipient
}
return nil
})

View File

@@ -3,7 +3,6 @@ package kv
import (
"context"
"fmt"
"slices"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
"github.com/ethereum/go-ethereum/common"
@@ -18,10 +17,7 @@ func (s *Store) DepositContractAddress(ctx context.Context) ([]byte, error) {
var addr []byte
if err := s.db.View(func(tx *bolt.Tx) error {
chainInfo := tx.Bucket(chainMetadataBucket)
stored := chainInfo.Get(depositContractAddressKey)
if len(stored) > 0 {
addr = slices.Clone(stored)
}
addr = chainInfo.Get(depositContractAddressKey)
return nil
}); err != nil { // This view never returns an error, but we'll handle anyway for sanity.
panic(err) // lint:nopanic -- View never returns an error.

View File

@@ -199,7 +199,7 @@ func performValidatorStateMigration(ctx context.Context, bar *progressbar.Progre
func stateBucketKeys(stateBucket *bolt.Bucket) ([][]byte, error) {
var keys [][]byte
if err := stateBucket.ForEach(func(pubKey, v []byte) error {
keys = append(keys, bytes.Clone(pubKey))
keys = append(keys, pubKey)
return nil
}); err != nil {
return nil, err

View File

@@ -2,7 +2,6 @@ package kv
import (
"context"
"slices"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
@@ -188,23 +187,20 @@ func (s *Store) getDiff(lvl int, slot uint64) (hdiff.HdiffBytes, error) {
return bolt.ErrBucketNotFound
}
buf := append(key, stateSuffix...)
rawStateDiff := bucket.Get(buf)
if len(rawStateDiff) == 0 {
stateDiff = bucket.Get(buf)
if stateDiff == nil {
return errors.New("state diff not found")
}
stateDiff = slices.Clone(rawStateDiff)
buf = append(key, validatorSuffix...)
rawValidatorDiff := bucket.Get(buf)
if len(rawValidatorDiff) == 0 {
validatorDiff = bucket.Get(buf)
if validatorDiff == nil {
return errors.New("validator diff not found")
}
validatorDiff = slices.Clone(rawValidatorDiff)
buf = append(key, balancesSuffix...)
rawBalancesDiff := bucket.Get(buf)
if len(rawBalancesDiff) == 0 {
balancesDiff = bucket.Get(buf)
if balancesDiff == nil {
return errors.New("balances diff not found")
}
balancesDiff = slices.Clone(rawBalancesDiff)
return nil
})
@@ -228,11 +224,10 @@ func (s *Store) getFullSnapshot(slot uint64) (state.BeaconState, error) {
if bucket == nil {
return bolt.ErrBucketNotFound
}
rawEnc := bucket.Get(key)
if rawEnc == nil {
enc = bucket.Get(key)
if enc == nil {
return errors.New("state not found")
}
enc = slices.Clone(rawEnc)
return nil
})

View File

@@ -2,7 +2,6 @@ package kv
import (
"context"
"slices"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
@@ -48,11 +47,7 @@ func (s *Store) StateSummary(ctx context.Context, blockRoot [32]byte) (*ethpb.St
}
var enc []byte
if err := s.db.View(func(tx *bolt.Tx) error {
rawEnc := tx.Bucket(stateSummaryBucket).Get(blockRoot[:])
if len(rawEnc) == 0 {
return nil
}
enc = slices.Clone(rawEnc)
enc = tx.Bucket(stateSummaryBucket).Get(blockRoot[:])
return nil
}); err != nil {
return nil, err

View File

@@ -73,6 +73,7 @@ go_library(
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_k8s_client_go//tools/cache:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",

View File

@@ -7,6 +7,7 @@ import (
"strings"
"time"
"github.com/OffchainLabs/go-bitfield"
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/execution/types"
@@ -58,6 +59,7 @@ var (
fuluEngineEndpoints = []string{
GetPayloadMethodV5,
GetBlobsV2,
GetBlobsV3,
}
)
@@ -99,6 +101,8 @@ const (
GetBlobsV1 = "engine_getBlobsV1"
// GetBlobsV2 request string for JSON-RPC.
GetBlobsV2 = "engine_getBlobsV2"
// GetBlobsV3 request string for JSON-RPC.
GetBlobsV3 = "engine_getBlobsV3"
// Defines the seconds before timing out engine endpoints with non-block execution semantics.
defaultEngineTimeout = time.Second
)
@@ -122,7 +126,7 @@ type Reconstructor interface {
ctx context.Context, blindedBlocks []interfaces.ReadOnlySignedBeaconBlock,
) ([]interfaces.SignedBeaconBlock, error)
ReconstructBlobSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [fieldparams.RootLength]byte, hi func(uint64) bool) ([]blocks.VerifiedROBlob, error)
ConstructDataColumnSidecars(ctx context.Context, populator peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, error)
ConstructDataColumnSidecars(ctx context.Context, populator peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, []blocks.PartialDataColumn, error)
}
// EngineCaller defines a client that can interact with an Ethereum
@@ -553,6 +557,22 @@ func (s *Service) GetBlobsV2(ctx context.Context, versionedHashes []common.Hash)
return result, handleRPCError(err)
}
func (s *Service) GetBlobsV3(ctx context.Context, versionedHashes []common.Hash) ([]*pb.BlobAndProofV2, error) {
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetBlobsV3")
defer span.End()
start := time.Now()
if !s.capabilityCache.has(GetBlobsV3) {
return nil, errors.Errorf("%s is not supported", GetBlobsV3)
}
getBlobsV3RequestsTotal.Inc()
result := make([]*pb.BlobAndProofV2, len(versionedHashes))
err := s.rpcClient.CallContext(ctx, &result, GetBlobsV3, versionedHashes)
getBlobsV3Latency.Observe(time.Since(start).Seconds())
return result, handleRPCError(err)
}
// ReconstructFullBlock takes in a blinded beacon block and reconstructs
// a beacon block with a full execution payload via the engine API.
func (s *Service) ReconstructFullBlock(
@@ -663,40 +683,47 @@ func (s *Service) ReconstructBlobSidecars(ctx context.Context, block interfaces.
return verifiedBlobs, nil
}
func (s *Service) ConstructDataColumnSidecars(ctx context.Context, populator peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, error) {
func (s *Service) ConstructDataColumnSidecars(ctx context.Context, populator peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, []blocks.PartialDataColumn, error) {
root := populator.Root()
// Fetch cells and proofs from the execution client using the KZG commitments from the sidecar.
commitments, err := populator.Commitments()
if err != nil {
return nil, wrapWithBlockRoot(err, root, "commitments")
return nil, nil, wrapWithBlockRoot(err, root, "commitments")
}
cellsPerBlob, proofsPerBlob, err := s.fetchCellsAndProofsFromExecution(ctx, commitments)
included, cellsPerBlob, proofsPerBlob, err := s.fetchCellsAndProofsFromExecution(ctx, commitments)
log.Info("Received cells and proofs from execution client", "included", included, "cells count", len(cellsPerBlob), "err", err)
if err != nil {
return nil, wrapWithBlockRoot(err, root, "fetch cells and proofs from execution client")
return nil, nil, wrapWithBlockRoot(err, root, "fetch cells and proofs from execution client")
}
// Return early if nothing is returned from the EL.
if len(cellsPerBlob) == 0 {
return nil, nil
partialColumns, err := peerdas.PartialColumns(included, cellsPerBlob, proofsPerBlob, populator)
haveAllBlobs := included.Count() == uint64(len(commitments))
log.Info("Constructed partial columns", "haveAllBlobs", haveAllBlobs)
if haveAllBlobs {
// Construct data column sidecars from the signed block and the cells and proofs.
roSidecars, err := peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, populator)
if err != nil {
return nil, nil, wrapWithBlockRoot(err, populator.Root(), "data column sidecars from column sidecar")
}
// Upgrade the sidecars to verified sidecars.
// We trust the execution layer we are connected to, so we can upgrade the sidecar into a verified one.
verifiedROSidecars := upgradeSidecarsToVerifiedSidecars(roSidecars)
return verifiedROSidecars, partialColumns, nil
}
// Construct data column sidecars from the signed block and the cells and proofs.
roSidecars, err := peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, populator)
if err != nil {
return nil, wrapWithBlockRoot(err, populator.Root(), "data column sidcars from column sidecar")
return nil, nil, wrapWithBlockRoot(err, populator.Root(), "partial columns from column sidecar")
}
// Upgrade the sidecars to verified sidecars.
// We trust the execution layer we are connected to, so we can upgrade the sidecar into a verified one.
verifiedROSidecars := upgradeSidecarsToVerifiedSidecars(roSidecars)
return verifiedROSidecars, nil
return nil, partialColumns, nil
}
// fetchCellsAndProofsFromExecution fetches cells and proofs from the execution client (using the engine_getBlobsV3 method when supported, falling back to engine_getBlobsV2)
func (s *Service) fetchCellsAndProofsFromExecution(ctx context.Context, kzgCommitments [][]byte) ([][]kzg.Cell, [][]kzg.Proof, error) {
func (s *Service) fetchCellsAndProofsFromExecution(ctx context.Context, kzgCommitments [][]byte) (bitfield.Bitlist /* included parts */, [][]kzg.Cell, [][]kzg.Proof, error) {
// Collect KZG hashes for all blobs.
versionedHashes := make([]common.Hash, 0, len(kzgCommitments))
for _, commitment := range kzgCommitments {
@@ -704,24 +731,34 @@ func (s *Service) fetchCellsAndProofsFromExecution(ctx context.Context, kzgCommi
versionedHashes = append(versionedHashes, versionedHash)
}
var blobAndProofs []*pb.BlobAndProofV2
// Fetch all blobsAndCellsProofs from the execution client.
blobAndProofV2s, err := s.GetBlobsV2(ctx, versionedHashes)
if err != nil {
return nil, nil, errors.Wrapf(err, "get blobs V2")
var err error
useV3 := s.capabilityCache.has(GetBlobsV3)
if useV3 {
// v3 can return a partial response. V2 is all or nothing
blobAndProofs, err = s.GetBlobsV3(ctx, versionedHashes)
} else {
blobAndProofs, err = s.GetBlobsV2(ctx, versionedHashes)
}
// Return early if nothing is returned from the EL.
if len(blobAndProofV2s) == 0 {
return nil, nil, nil
if err != nil {
return nil, nil, nil, errors.Wrapf(err, "get blobs V2/3")
}
// Compute cells and proofs from the blobs and cell proofs.
cellsPerBlob, proofsPerBlob, err := peerdas.ComputeCellsAndProofsFromStructured(blobAndProofV2s)
included, cellsPerBlob, proofsPerBlob, err := peerdas.ComputeCellsAndProofsFromStructured(uint64(len(kzgCommitments)), blobAndProofs)
if err != nil {
return nil, nil, errors.Wrap(err, "compute cells and proofs")
return nil, nil, nil, errors.Wrap(err, "compute cells and proofs")
}
if included.Count() == uint64(len(kzgCommitments)) {
getBlobsV3CompleteResponsesTotal.Inc()
} else if included.Count() > 0 {
getBlobsV3PartialResponsesTotal.Inc()
}
return cellsPerBlob, proofsPerBlob, nil
return included, cellsPerBlob, proofsPerBlob, nil
}
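A minimal sketch of how the counters incremented above classify a getBlobsV3 response; classifyResponse is illustrative and not part of this change.

package sketch

import "github.com/OffchainLabs/go-bitfield"

// classifyResponse mirrors the metric logic above: complete when every requested
// commitment has its blob, partial when only some do, empty otherwise.
func classifyResponse(included bitfield.Bitlist, commitmentCount uint64) string {
	switch {
	case included.Count() == commitmentCount:
		return "complete" // full data column sidecars can be built and broadcast
	case included.Count() > 0:
		return "partial" // only partial columns can be broadcast for the present blobs
	default:
		return "empty" // nothing usable came back from the EL
	}
}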
// upgradeSidecarsToVerifiedSidecars upgrades a list of data column sidecars into verified data column sidecars.

View File

@@ -2587,7 +2587,7 @@ func TestConstructDataColumnSidecars(t *testing.T) {
ctx := context.Background()
t.Run("GetBlobsV2 is not supported", func(t *testing.T) {
_, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
_, _, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
require.ErrorContains(t, "engine_getBlobsV2 is not supported", err)
})
@@ -2598,7 +2598,7 @@ func TestConstructDataColumnSidecars(t *testing.T) {
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
defer rpcClient.Close()
dataColumns, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
dataColumns, _, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
require.NoError(t, err)
require.Equal(t, 0, len(dataColumns))
})
@@ -2611,7 +2611,7 @@ func TestConstructDataColumnSidecars(t *testing.T) {
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
defer rpcClient.Close()
dataColumns, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
dataColumns, _, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
require.NoError(t, err)
require.Equal(t, 128, len(dataColumns))
})

View File

@@ -34,6 +34,25 @@ var (
Buckets: []float64{25, 50, 100, 200, 500, 1000, 2000, 4000},
},
)
getBlobsV3RequestsTotal = promauto.NewCounter(prometheus.CounterOpts{
Name: "beacon_engine_getBlobsV3_requests_total",
Help: "Total number of engine_getBlobsV3 requests sent",
})
getBlobsV3CompleteResponsesTotal = promauto.NewCounter(prometheus.CounterOpts{
Name: "beacon_engine_getBlobsV3_complete_responses_total",
Help: "Total number of complete engine_getBlobsV3 successful responses received",
})
getBlobsV3PartialResponsesTotal = promauto.NewCounter(prometheus.CounterOpts{
Name: "beacon_engine_getBlobsV3_partial_responses_total",
Help: "Total number of engine_getBlobsV3 partial responses received",
})
getBlobsV3Latency = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "beacon_engine_getBlobsV3_request_duration_seconds",
Help: "Duration of engine_getBlobsV3 requests in seconds",
Buckets: []float64{0.025, 0.05, 0.1, 0.2, 0.5, 1, 2, 4},
},
)
errParseCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "execution_parse_error_count",
Help: "The number of errors that occurred while parsing execution payload",

View File

@@ -118,8 +118,8 @@ func (e *EngineClient) ReconstructBlobSidecars(context.Context, interfaces.ReadO
}
// ConstructDataColumnSidecars is a mock implementation of the ConstructDataColumnSidecars method.
func (e *EngineClient) ConstructDataColumnSidecars(context.Context, peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, error) {
return e.DataColumnSidecars, e.ErrorDataColumnSidecars
func (e *EngineClient) ConstructDataColumnSidecars(context.Context, peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, []blocks.PartialDataColumn, error) {
return e.DataColumnSidecars, nil, e.ErrorDataColumnSidecars
}
// GetTerminalBlockHash --

View File

@@ -20,7 +20,6 @@ go_library(
"//config/fieldparams:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/forkchoice:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],

View File

@@ -6,7 +6,7 @@ go_library(
"doc.go",
"errors.go",
"forkchoice.go",
"gloas.go",
"last_root.go",
"log.go",
"metrics.go",
"node.go",
@@ -33,7 +33,6 @@ go_library(
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/forkchoice:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//monitoring/tracing/trace:go_default_library",
@@ -52,7 +51,7 @@ go_test(
srcs = [
"ffg_update_test.go",
"forkchoice_test.go",
"gloas_test.go",
"last_root_test.go",
"no_vote_test.go",
"node_test.go",
"on_tick_test.go",
@@ -72,7 +71,6 @@ go_test(
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/forkchoice:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",

View File

@@ -31,8 +31,8 @@ func New() *ForkChoice {
prevJustifiedCheckpoint: &forkchoicetypes.Checkpoint{},
finalizedCheckpoint: &forkchoicetypes.Checkpoint{},
proposerBoostRoot: [32]byte{},
emptyNodeByRoot: make(map[[fieldparams.RootLength]byte]*PayloadNode),
fullNodeByRoot: make(map[[fieldparams.RootLength]byte]*PayloadNode),
nodeByRoot: make(map[[fieldparams.RootLength]byte]*Node),
nodeByPayload: make(map[[fieldparams.RootLength]byte]*Node),
slashedIndices: make(map[primitives.ValidatorIndex]bool),
receivedBlocksLastEpoch: [fieldparams.SlotsPerEpoch]primitives.Slot{},
}
@@ -44,7 +44,7 @@ func New() *ForkChoice {
// NodeCount returns the current number of nodes in the Store.
func (f *ForkChoice) NodeCount() int {
return len(f.store.emptyNodeByRoot)
return len(f.store.nodeByRoot)
}
// Head returns the head root from fork choice store.
@@ -65,14 +65,14 @@ func (f *ForkChoice) Head(
return [32]byte{}, errors.Wrap(err, "could not apply proposer boost score")
}
if err := f.store.applyWeightChangesConsensusNode(ctx, f.store.treeRootNode); err != nil {
if err := f.store.treeRootNode.applyWeightChanges(ctx); err != nil {
return [32]byte{}, errors.Wrap(err, "could not apply weight changes")
}
jc := f.JustifiedCheckpoint()
fc := f.FinalizedCheckpoint()
currentEpoch := slots.EpochsSinceGenesis(f.store.genesisTime)
if err := f.store.updateBestDescendantConsensusNode(ctx, f.store.treeRootNode, jc.Epoch, fc.Epoch, currentEpoch); err != nil {
if err := f.store.treeRootNode.updateBestDescendant(ctx, jc.Epoch, fc.Epoch, currentEpoch); err != nil {
return [32]byte{}, errors.Wrap(err, "could not update best descendant")
}
return f.store.head(ctx)
@@ -119,14 +119,14 @@ func (f *ForkChoice) InsertNode(ctx context.Context, state state.BeaconState, ro
return errInvalidNilCheckpoint
}
finalizedEpoch := fc.Epoch
pn, err := f.store.insert(ctx, roblock, justifiedEpoch, finalizedEpoch)
node, err := f.store.insert(ctx, roblock, justifiedEpoch, finalizedEpoch)
if err != nil {
return err
}
jc, fc = f.store.pullTips(state, pn.node, jc, fc)
jc, fc = f.store.pullTips(state, node, jc, fc)
if err := f.updateCheckpoints(ctx, jc, fc); err != nil {
_, remErr := f.store.removeNode(ctx, pn)
_, remErr := f.store.removeNode(ctx, node)
if remErr != nil {
log.WithError(remErr).Error("Could not remove node")
}
@@ -149,63 +149,49 @@ func (f *ForkChoice) updateCheckpoints(ctx context.Context, jc, fc *ethpb.Checkp
if fc.Epoch <= f.store.finalizedCheckpoint.Epoch {
return nil
}
f.store.finalizedCheckpoint = &forkchoicetypes.Checkpoint{
Epoch: fc.Epoch,
Root: bytesutil.ToBytes32(fc.Root),
}
f.store.finalizedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: fc.Epoch,
Root: bytesutil.ToBytes32(fc.Root)}
return f.store.prune(ctx)
}
// HasNode returns true if the node exists in fork choice store,
// false else wise.
func (f *ForkChoice) HasNode(root [32]byte) bool {
_, ok := f.store.emptyNodeByRoot[root]
_, ok := f.store.nodeByRoot[root]
return ok
}
// IsCanonical returns true if the given root is part of the canonical chain.
func (f *ForkChoice) IsCanonical(root [32]byte) bool {
// It is fine to pick empty node here since we only check if the beacon block is canonical.
pn, ok := f.store.emptyNodeByRoot[root]
if !ok || pn == nil {
node, ok := f.store.nodeByRoot[root]
if !ok || node == nil {
return false
}
if pn.node.bestDescendant == nil {
// The node doesn't have any children
if node.bestDescendant == nil {
if f.store.headNode.bestDescendant == nil {
// headNode is itself head.
return pn.node == f.store.headNode
return node == f.store.headNode
}
// headNode is not actualized and there are some descendants
return pn.node == f.store.headNode.bestDescendant
return node == f.store.headNode.bestDescendant
}
// The node has children
if f.store.headNode.bestDescendant == nil {
return pn.node.bestDescendant == f.store.headNode
return node.bestDescendant == f.store.headNode
}
return pn.node.bestDescendant == f.store.headNode.bestDescendant
return node.bestDescendant == f.store.headNode.bestDescendant
}
// IsOptimistic returns true if the given root has been optimistically synced.
// TODO: Gloas, the current implementation uses the result of the full block for
// the given root. In gloas this would be incorrect and we should specify the
// payload content, thus we should expose a full/empty version of this call.
func (f *ForkChoice) IsOptimistic(root [32]byte) (bool, error) {
if f.store.allTipsAreInvalid {
return true, nil
}
en, ok := f.store.emptyNodeByRoot[root]
if !ok || en == nil {
node, ok := f.store.nodeByRoot[root]
if !ok || node == nil {
return true, ErrNilNode
}
fn := f.store.fullNodeByRoot[root]
if fn != nil {
return fn.optimistic, nil
}
return en.optimistic, nil
return node.optimistic, nil
}
// AncestorRoot returns the ancestor root of input block root at a given slot.
@@ -213,21 +199,17 @@ func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot primi
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.AncestorRoot")
defer span.End()
pn, ok := f.store.emptyNodeByRoot[root]
if !ok || pn == nil {
node, ok := f.store.nodeByRoot[root]
if !ok || node == nil {
return [32]byte{}, errors.Wrap(ErrNilNode, "could not determine ancestor root")
}
n := pn.node
for n.slot > slot {
n := node
for n != nil && n.slot > slot {
if ctx.Err() != nil {
return [32]byte{}, ctx.Err()
}
if n.parent == nil {
n = nil
break
}
n = n.parent.node
n = n.parent
}
if n == nil {
@@ -240,11 +222,10 @@ func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot primi
// IsViableForCheckpoint returns whether the root passed is a checkpoint root for any
// known chain in forkchoice.
func (f *ForkChoice) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool, error) {
pn, ok := f.store.emptyNodeByRoot[cp.Root]
if !ok || pn == nil {
node, ok := f.store.nodeByRoot[cp.Root]
if !ok || node == nil {
return false, nil
}
node := pn.node
epochStart, err := slots.EpochStart(cp.Epoch)
if err != nil {
return false, err
@@ -253,13 +234,10 @@ func (f *ForkChoice) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool
return false, nil
}
// If it's the start of the epoch, it is a checkpoint
if node.slot == epochStart {
if len(node.children) == 0 {
return true, nil
}
// If there are no descendants of this beacon block, it is viable as a checkpoint
children := f.store.allConsensusChildren(node)
if len(children) == 0 {
if node.slot == epochStart {
return true, nil
}
if !features.Get().IgnoreUnviableAttestations {
@@ -269,8 +247,7 @@ func (f *ForkChoice) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool
return true, nil
}
}
// If some child is after the start of the epoch, the checkpoint is viable.
for _, child := range children {
for _, child := range node.children {
if child.slot > epochStart {
return true, nil
}
@@ -311,7 +288,7 @@ func (f *ForkChoice) updateBalances() error {
if vote.currentRoot != vote.nextRoot || oldBalance != newBalance {
// Ignore the vote if the root is not in fork choice
// store, that means we have not seen the block before.
nextNode, ok := f.store.emptyNodeByRoot[vote.nextRoot]
nextNode, ok := f.store.nodeByRoot[vote.nextRoot]
if ok && vote.nextRoot != zHash {
// Protection against nil node
if nextNode == nil {
@@ -320,7 +297,7 @@ func (f *ForkChoice) updateBalances() error {
nextNode.balance += newBalance
}
currentNode, ok := f.store.emptyNodeByRoot[vote.currentRoot]
currentNode, ok := f.store.nodeByRoot[vote.currentRoot]
if ok && vote.currentRoot != zHash {
// Protection against nil node
if currentNode == nil {
@@ -361,13 +338,13 @@ func (f *ForkChoice) ProposerBoost() [fieldparams.RootLength]byte {
return f.store.proposerBoost()
}
// SetOptimisticToValid sets the node with the given root as a fully validated node. The payload for this root MUST have been processed.
// SetOptimisticToValid sets the node with the given root as a fully validated node
func (f *ForkChoice) SetOptimisticToValid(ctx context.Context, root [fieldparams.RootLength]byte) error {
fn, ok := f.store.fullNodeByRoot[root]
if !ok || fn == nil {
node, ok := f.store.nodeByRoot[root]
if !ok || node == nil {
return errors.Wrap(ErrNilNode, "could not set node to valid")
}
return f.store.setNodeAndParentValidated(ctx, fn)
return node.setNodeAndParentValidated(ctx)
}
// PreviousJustifiedCheckpoint of fork choice store.
@@ -386,8 +363,8 @@ func (f *ForkChoice) FinalizedCheckpoint() *forkchoicetypes.Checkpoint {
}
// SetOptimisticToInvalid removes a block with an invalid execution payload from fork choice store
func (f *ForkChoice) SetOptimisticToInvalid(ctx context.Context, root, parentRoot, parentHash, payloadHash [fieldparams.RootLength]byte) ([][32]byte, error) {
return f.store.setOptimisticToInvalid(ctx, root, parentRoot, parentHash, payloadHash)
func (f *ForkChoice) SetOptimisticToInvalid(ctx context.Context, root, parentRoot, payloadHash [fieldparams.RootLength]byte) ([][32]byte, error) {
return f.store.setOptimisticToInvalid(ctx, root, parentRoot, payloadHash)
}
// InsertSlashedIndex adds the given slashed validator index to the
@@ -410,7 +387,7 @@ func (f *ForkChoice) InsertSlashedIndex(_ context.Context, index primitives.Vali
return
}
node, ok := f.store.emptyNodeByRoot[f.votes[index].currentRoot]
node, ok := f.store.nodeByRoot[f.votes[index].currentRoot]
if !ok || node == nil {
return
}
@@ -445,30 +422,22 @@ func (f *ForkChoice) UpdateFinalizedCheckpoint(fc *forkchoicetypes.Checkpoint) e
}
// CommonAncestor returns the common ancestor root and slot between the two block roots r1 and r2.
// This is payload aware. Consider the following situation
// [A,full] <--- [B, full] <---[C,pending]
//
// \---------[B, empty] <--[D, pending]
//
// Then even though C and D both descend from the beacon block B, their common ancestor is A.
// Note that this function **requires** the two roots to be actual contending blocks; otherwise the
// behavior is undefined.
func (f *ForkChoice) CommonAncestor(ctx context.Context, r1 [32]byte, r2 [32]byte) ([32]byte, primitives.Slot, error) {
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.CommonAncestorRoot")
defer span.End()
en1, ok := f.store.emptyNodeByRoot[r1]
if !ok || en1 == nil {
n1, ok := f.store.nodeByRoot[r1]
if !ok || n1 == nil {
return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
}
// Do nothing if the input roots are the same.
if r1 == r2 {
return r1, en1.node.slot, nil
return r1, n1.slot, nil
}
en2, ok := f.store.emptyNodeByRoot[r2]
if !ok || en2 == nil {
n2, ok := f.store.nodeByRoot[r2]
if !ok || n2 == nil {
return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
}
@@ -476,23 +445,23 @@ func (f *ForkChoice) CommonAncestor(ctx context.Context, r1 [32]byte, r2 [32]byt
if ctx.Err() != nil {
return [32]byte{}, 0, ctx.Err()
}
if en1.node.slot > en2.node.slot {
en1 = en1.node.parent
if n1.slot > n2.slot {
n1 = n1.parent
// Reaches the end of the tree and unable to find common ancestor.
// This should not happen at runtime as the finalized
// node has to be a common ancestor
if en1 == nil {
if n1 == nil {
return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
}
} else {
en2 = en2.node.parent
n2 = n2.parent
// Reaches the end of the tree and unable to find common ancestor.
if en2 == nil {
if n2 == nil {
return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
}
}
if en1 == en2 {
return en1.node.root, en1.node.slot, nil
if n1 == n2 {
return n1.root, n1.slot, nil
}
}
}
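The loop above walks the deeper of the two nodes up its parent chain until both references coincide. A minimal, self-contained sketch of that walk, using a hypothetical simplified node type rather than the store's actual structs:
// toyNode is an illustrative stand-in for a forkchoice node.
type toyNode struct {
    slot   uint64
    parent *toyNode
}
// toyCommonAncestor repeatedly replaces the deeper node with its parent; it
// returns nil if the two chains never meet.
func toyCommonAncestor(a, b *toyNode) *toyNode {
    for a != nil && b != nil {
        if a == b {
            return a
        }
        if a.slot > b.slot {
            a = a.parent
        } else {
            b = b.parent
        }
    }
    return nil
}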
@@ -539,17 +508,35 @@ func (f *ForkChoice) CachedHeadRoot() [32]byte {
// FinalizedPayloadBlockHash returns the hash of the payload at the finalized checkpoint
func (f *ForkChoice) FinalizedPayloadBlockHash() [32]byte {
return f.store.latestHashForRoot(f.FinalizedCheckpoint().Root)
root := f.FinalizedCheckpoint().Root
node, ok := f.store.nodeByRoot[root]
if !ok || node == nil {
// This should not happen
return [32]byte{}
}
return node.payloadHash
}
// JustifiedPayloadBlockHash returns the hash of the payload at the justified checkpoint
func (f *ForkChoice) JustifiedPayloadBlockHash() [32]byte {
return f.store.latestHashForRoot(f.JustifiedCheckpoint().Root)
root := f.JustifiedCheckpoint().Root
node, ok := f.store.nodeByRoot[root]
if !ok || node == nil {
// This should not happen
return [32]byte{}
}
return node.payloadHash
}
// UnrealizedJustifiedPayloadBlockHash returns the hash of the payload at the unrealized justified checkpoint
func (f *ForkChoice) UnrealizedJustifiedPayloadBlockHash() [32]byte {
return f.store.latestHashForRoot(f.store.unrealizedJustifiedCheckpoint.Root)
root := f.store.unrealizedJustifiedCheckpoint.Root
node, ok := f.store.nodeByRoot[root]
if !ok || node == nil {
// This should not happen
return [32]byte{}
}
return node.payloadHash
}
// ForkChoiceDump returns a full dump of forkchoice.
@@ -573,7 +560,7 @@ func (f *ForkChoice) ForkChoiceDump(ctx context.Context) (*forkchoice2.Dump, err
nodes := make([]*forkchoice2.Node, 0, f.NodeCount())
var err error
if f.store.treeRootNode != nil {
nodes, err = f.store.nodeTreeDump(ctx, f.store.treeRootNode, nodes)
nodes, err = f.store.treeRootNode.nodeTreeDump(ctx, nodes)
if err != nil {
return nil, err
}
@@ -602,7 +589,7 @@ func (f *ForkChoice) SetBalancesByRooter(handler forkchoice.BalancesByRooter) {
// Weight returns the weight of the given root if found on the store
func (f *ForkChoice) Weight(root [32]byte) (uint64, error) {
n, ok := f.store.emptyNodeByRoot[root]
n, ok := f.store.nodeByRoot[root]
if !ok || n == nil {
return 0, ErrNilNode
}
@@ -630,11 +617,11 @@ func (f *ForkChoice) updateJustifiedBalances(ctx context.Context, root [32]byte)
// Slot returns the slot of the given root if it's known to forkchoice
func (f *ForkChoice) Slot(root [32]byte) (primitives.Slot, error) {
n, ok := f.store.emptyNodeByRoot[root]
n, ok := f.store.nodeByRoot[root]
if !ok || n == nil {
return 0, ErrNilNode
}
return n.node.slot, nil
return n.slot, nil
}
// DependentRoot returns the last root of the epoch prior to the requested epoch in the canonical chain.
@@ -642,7 +629,7 @@ func (f *ForkChoice) DependentRoot(epoch primitives.Epoch) ([32]byte, error) {
return f.DependentRootForEpoch(f.CachedHeadRoot(), epoch)
}
// DependentRootForEpoch return the last root of the epoch prior to the requested epoch for the given root.
// DependentRootForEpoch return the last root of the epoch prior to the requested ecoch for the given root.
func (f *ForkChoice) DependentRootForEpoch(root [32]byte, epoch primitives.Epoch) ([32]byte, error) {
tr, err := f.TargetRootForEpoch(root, epoch)
if err != nil {
@@ -651,18 +638,18 @@ func (f *ForkChoice) DependentRootForEpoch(root [32]byte, epoch primitives.Epoch
if tr == [32]byte{} {
return [32]byte{}, nil
}
en, ok := f.store.emptyNodeByRoot[tr]
if !ok || en == nil {
node, ok := f.store.nodeByRoot[tr]
if !ok || node == nil {
return [32]byte{}, ErrNilNode
}
if slots.ToEpoch(en.node.slot) >= epoch {
if en.node.parent != nil {
en = en.node.parent
if slots.ToEpoch(node.slot) >= epoch {
if node.parent != nil {
node = node.parent
} else {
return f.store.finalizedDependentRoot, nil
}
}
return en.node.root, nil
return node.root, nil
}
// TargetRootForEpoch returns the root of the target block for a given epoch.
@@ -674,48 +661,46 @@ func (f *ForkChoice) DependentRootForEpoch(root [32]byte, epoch primitives.Epoch
// which case we return the root of the checkpoint of the chain containing the
// passed root, at the given epoch
func (f *ForkChoice) TargetRootForEpoch(root [32]byte, epoch primitives.Epoch) ([32]byte, error) {
n, ok := f.store.emptyNodeByRoot[root]
n, ok := f.store.nodeByRoot[root]
if !ok || n == nil {
return [32]byte{}, ErrNilNode
}
node := n.node
nodeEpoch := slots.ToEpoch(node.slot)
nodeEpoch := slots.ToEpoch(n.slot)
if epoch > nodeEpoch {
return node.root, nil
return n.root, nil
}
if node.target == nil {
if n.target == nil {
return [32]byte{}, nil
}
targetRoot := node.target.root
targetRoot := n.target.root
if epoch == nodeEpoch {
return targetRoot, nil
}
targetNode, ok := f.store.emptyNodeByRoot[targetRoot]
targetNode, ok := f.store.nodeByRoot[targetRoot]
if !ok || targetNode == nil {
return [32]byte{}, ErrNilNode
}
// If slot 0 was not missed we consider a previous block to go back at least one epoch
if nodeEpoch == slots.ToEpoch(targetNode.node.slot) {
targetNode = targetNode.node.parent
if nodeEpoch == slots.ToEpoch(targetNode.slot) {
targetNode = targetNode.parent
if targetNode == nil {
return [32]byte{}, ErrNilNode
}
}
return f.TargetRootForEpoch(targetNode.node.root, epoch)
return f.TargetRootForEpoch(targetNode.root, epoch)
}
// ParentRoot returns the block root of the parent node if it is in forkchoice.
// The exception is the finalized checkpoint root, for which we return the zero
// hash.
func (f *ForkChoice) ParentRoot(root [32]byte) ([32]byte, error) {
n, ok := f.store.emptyNodeByRoot[root]
n, ok := f.store.nodeByRoot[root]
if !ok || n == nil {
return [32]byte{}, ErrNilNode
}
// Return the zero hash for the tree root
parent := n.node.parent
if parent == nil {
if n.parent == nil {
return [32]byte{}, nil
}
return parent.node.root, nil
return n.parent.root, nil
}

View File

@@ -3,6 +3,7 @@ package doublylinkedtree
import (
"context"
"encoding/binary"
"errors"
"testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice"
@@ -103,9 +104,9 @@ func TestForkChoice_UpdateBalancesPositiveChange(t *testing.T) {
f.justifiedBalances = []uint64{10, 20, 30}
require.NoError(t, f.updateBalances())
s := f.store
assert.Equal(t, uint64(10), s.emptyNodeByRoot[indexToHash(1)].balance)
assert.Equal(t, uint64(20), s.emptyNodeByRoot[indexToHash(2)].balance)
assert.Equal(t, uint64(30), s.emptyNodeByRoot[indexToHash(3)].balance)
assert.Equal(t, uint64(10), s.nodeByRoot[indexToHash(1)].balance)
assert.Equal(t, uint64(20), s.nodeByRoot[indexToHash(2)].balance)
assert.Equal(t, uint64(30), s.nodeByRoot[indexToHash(3)].balance)
}
func TestForkChoice_UpdateBalancesNegativeChange(t *testing.T) {
@@ -121,9 +122,9 @@ func TestForkChoice_UpdateBalancesNegativeChange(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, roblock))
s := f.store
s.emptyNodeByRoot[indexToHash(1)].balance = 100
s.emptyNodeByRoot[indexToHash(2)].balance = 100
s.emptyNodeByRoot[indexToHash(3)].balance = 100
s.nodeByRoot[indexToHash(1)].balance = 100
s.nodeByRoot[indexToHash(2)].balance = 100
s.nodeByRoot[indexToHash(3)].balance = 100
f.balances = []uint64{100, 100, 100}
f.votes = []Vote{
@@ -134,9 +135,9 @@ func TestForkChoice_UpdateBalancesNegativeChange(t *testing.T) {
f.justifiedBalances = []uint64{10, 20, 30}
require.NoError(t, f.updateBalances())
assert.Equal(t, uint64(10), s.emptyNodeByRoot[indexToHash(1)].balance)
assert.Equal(t, uint64(20), s.emptyNodeByRoot[indexToHash(2)].balance)
assert.Equal(t, uint64(30), s.emptyNodeByRoot[indexToHash(3)].balance)
assert.Equal(t, uint64(10), s.nodeByRoot[indexToHash(1)].balance)
assert.Equal(t, uint64(20), s.nodeByRoot[indexToHash(2)].balance)
assert.Equal(t, uint64(30), s.nodeByRoot[indexToHash(3)].balance)
}
func TestForkChoice_UpdateBalancesUnderflow(t *testing.T) {
@@ -152,9 +153,9 @@ func TestForkChoice_UpdateBalancesUnderflow(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, roblock))
s := f.store
s.emptyNodeByRoot[indexToHash(1)].balance = 100
s.emptyNodeByRoot[indexToHash(2)].balance = 100
s.emptyNodeByRoot[indexToHash(3)].balance = 100
s.nodeByRoot[indexToHash(1)].balance = 100
s.nodeByRoot[indexToHash(2)].balance = 100
s.nodeByRoot[indexToHash(3)].balance = 100
f.balances = []uint64{125, 125, 125}
f.votes = []Vote{
@@ -165,9 +166,9 @@ func TestForkChoice_UpdateBalancesUnderflow(t *testing.T) {
f.justifiedBalances = []uint64{10, 20, 30}
require.NoError(t, f.updateBalances())
assert.Equal(t, uint64(0), s.emptyNodeByRoot[indexToHash(1)].balance)
assert.Equal(t, uint64(0), s.emptyNodeByRoot[indexToHash(2)].balance)
assert.Equal(t, uint64(5), s.emptyNodeByRoot[indexToHash(3)].balance)
assert.Equal(t, uint64(0), s.nodeByRoot[indexToHash(1)].balance)
assert.Equal(t, uint64(0), s.nodeByRoot[indexToHash(2)].balance)
assert.Equal(t, uint64(5), s.nodeByRoot[indexToHash(3)].balance)
}
func TestForkChoice_IsCanonical(t *testing.T) {
@@ -223,12 +224,12 @@ func TestForkChoice_IsCanonicalReorg(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, roblock))
f.store.emptyNodeByRoot[[32]byte{'3'}].balance = 10
require.NoError(t, f.store.applyWeightChangesConsensusNode(ctx, f.store.treeRootNode))
require.Equal(t, uint64(10), f.store.emptyNodeByRoot[[32]byte{'1'}].node.weight)
require.Equal(t, uint64(0), f.store.emptyNodeByRoot[[32]byte{'2'}].node.weight)
f.store.nodeByRoot[[32]byte{'3'}].balance = 10
require.NoError(t, f.store.treeRootNode.applyWeightChanges(ctx))
require.Equal(t, uint64(10), f.store.nodeByRoot[[32]byte{'1'}].weight)
require.Equal(t, uint64(0), f.store.nodeByRoot[[32]byte{'2'}].weight)
require.NoError(t, f.store.updateBestDescendantConsensusNode(ctx, f.store.treeRootNode, 1, 1, 1))
require.NoError(t, f.store.treeRootNode.updateBestDescendant(ctx, 1, 1, 1))
require.DeepEqual(t, [32]byte{'3'}, f.store.treeRootNode.bestDescendant.root)
r1 := [32]byte{'1'}
@@ -259,7 +260,7 @@ func TestForkChoice_AncestorRoot(t *testing.T) {
st, roblock, err = prepareForkchoiceState(ctx, 5, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, roblock))
f.store.treeRootNode = f.store.emptyNodeByRoot[indexToHash(1)].node
f.store.treeRootNode = f.store.nodeByRoot[indexToHash(1)]
f.store.treeRootNode.parent = nil
r, err := f.AncestorRoot(ctx, indexToHash(3), 6)
@@ -341,21 +342,21 @@ func TestForkChoice_RemoveEquivocating(t *testing.T) {
// Process b's slashing, c is now head
f.InsertSlashedIndex(ctx, 1)
require.Equal(t, uint64(200), f.store.emptyNodeByRoot[[32]byte{'b'}].balance)
require.Equal(t, uint64(200), f.store.nodeByRoot[[32]byte{'b'}].balance)
f.justifiedBalances = []uint64{100, 200, 200, 300}
head, err = f.Head(ctx)
require.Equal(t, uint64(200), f.store.emptyNodeByRoot[[32]byte{'b'}].weight)
require.Equal(t, uint64(300), f.store.emptyNodeByRoot[[32]byte{'c'}].weight)
require.Equal(t, uint64(200), f.store.nodeByRoot[[32]byte{'b'}].weight)
require.Equal(t, uint64(300), f.store.nodeByRoot[[32]byte{'c'}].weight)
require.NoError(t, err)
require.Equal(t, [32]byte{'c'}, head)
// Process b's slashing again, should be a noop
f.InsertSlashedIndex(ctx, 1)
require.Equal(t, uint64(200), f.store.emptyNodeByRoot[[32]byte{'b'}].balance)
require.Equal(t, uint64(200), f.store.nodeByRoot[[32]byte{'b'}].balance)
f.justifiedBalances = []uint64{100, 200, 200, 300}
head, err = f.Head(ctx)
require.Equal(t, uint64(200), f.store.emptyNodeByRoot[[32]byte{'b'}].weight)
require.Equal(t, uint64(300), f.store.emptyNodeByRoot[[32]byte{'c'}].weight)
require.Equal(t, uint64(200), f.store.nodeByRoot[[32]byte{'b'}].weight)
require.Equal(t, uint64(300), f.store.nodeByRoot[[32]byte{'c'}].weight)
require.NoError(t, err)
require.Equal(t, [32]byte{'c'}, head)
@@ -513,6 +514,58 @@ func TestStore_CommonAncestor(t *testing.T) {
})
}
// a -- b -- c -- d
f = setup(0, 0)
st, roblock, err = prepareForkchoiceState(ctx, 0, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 1, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 2, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, roblock))
st, roblock, err = prepareForkchoiceState(ctx, 3, [32]byte{'d'}, [32]byte{'c'}, [32]byte{}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, roblock))
tests = []struct {
name string
r1 [32]byte
r2 [32]byte
wantRoot [32]byte
wantSlot primitives.Slot
}{
{
name: "Common ancestor between a and b is a",
r1: [32]byte{'a'},
r2: [32]byte{'b'},
wantRoot: [32]byte{'a'},
wantSlot: 0,
},
{
name: "Common ancestor between b and d is b",
r1: [32]byte{'d'},
r2: [32]byte{'b'},
wantRoot: [32]byte{'b'},
wantSlot: 1,
},
{
name: "Common ancestor between d and a is a",
r1: [32]byte{'d'},
r2: [32]byte{'a'},
wantRoot: [32]byte{'a'},
wantSlot: 0,
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
gotRoot, gotSlot, err := f.CommonAncestor(ctx, tc.r1, tc.r2)
require.NoError(t, err)
require.Equal(t, tc.wantRoot, gotRoot)
require.Equal(t, tc.wantSlot, gotSlot)
})
}
// Equal inputs should return the same root.
r, s, err := f.CommonAncestor(ctx, [32]byte{'b'}, [32]byte{'b'})
require.NoError(t, err)
@@ -535,9 +588,10 @@ func TestStore_CommonAncestor(t *testing.T) {
unrealizedJustifiedEpoch: 1,
finalizedEpoch: 1,
unrealizedFinalizedEpoch: 1,
optimistic: true,
}
f.store.emptyNodeByRoot[[32]byte{'y'}] = &PayloadNode{node: n, optimistic: true}
f.store.nodeByRoot[[32]byte{'y'}] = n
// broken link
_, _, err = f.CommonAncestor(ctx, [32]byte{'y'}, [32]byte{'a'})
require.ErrorIs(t, err, forkchoice.ErrUnknownCommonAncestor)
@@ -556,8 +610,7 @@ func TestStore_InsertChain(t *testing.T) {
require.NoError(t, err)
roblock, err := blocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
blks = append(blks, &forkchoicetypes.BlockAndCheckpoints{
Block: roblock,
blks = append(blks, &forkchoicetypes.BlockAndCheckpoints{Block: roblock,
JustifiedCheckpoint: &ethpb.Checkpoint{Epoch: 1, Root: params.BeaconConfig().ZeroHash[:]},
FinalizedCheckpoint: &ethpb.Checkpoint{Epoch: 1, Root: params.BeaconConfig().ZeroHash[:]},
})
@@ -572,8 +625,7 @@ func TestStore_InsertChain(t *testing.T) {
require.NoError(t, err)
roblock, err := blocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
blks = append(blks, &forkchoicetypes.BlockAndCheckpoints{
Block: roblock,
blks = append(blks, &forkchoicetypes.BlockAndCheckpoints{Block: roblock,
JustifiedCheckpoint: &ethpb.Checkpoint{Epoch: 1, Root: params.BeaconConfig().ZeroHash[:]},
FinalizedCheckpoint: &ethpb.Checkpoint{Epoch: 1, Root: params.BeaconConfig().ZeroHash[:]},
})
@@ -690,7 +742,7 @@ func TestWeight(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, roblock))
n, ok := f.store.emptyNodeByRoot[root]
n, ok := f.store.nodeByRoot[root]
require.Equal(t, true, ok)
n.weight = 10
w, err := f.Weight(root)
@@ -862,3 +914,16 @@ func TestForkchoiceParentRoot(t *testing.T) {
require.NoError(t, err)
require.Equal(t, zeroHash, root)
}
func TestForkChoice_CleanupInserting(t *testing.T) {
f := setup(0, 0)
ctx := t.Context()
st, roblock, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 2, 2)
f.SetBalancesByRooter(func(_ context.Context, _ [32]byte) ([]uint64, error) {
return f.justifiedBalances, errors.New("mock err")
})
require.NoError(t, err)
require.NotNil(t, f.InsertNode(ctx, st, roblock))
require.Equal(t, false, f.HasNode(roblock.Root()))
}

View File

@@ -1,327 +0,0 @@
package doublylinkedtree
import (
"bytes"
"context"
"slices"
"time"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
forkchoice2 "github.com/OffchainLabs/prysm/v7/consensus-types/forkchoice"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/pkg/errors"
)
func (s *Store) resolveParentPayloadStatus(block interfaces.ReadOnlyBeaconBlock, parent **PayloadNode, blockHash *[32]byte) error {
sb, err := block.Body().SignedExecutionPayloadBid()
if err != nil {
return err
}
wb, err := blocks.WrappedROSignedExecutionPayloadBid(sb)
if err != nil {
return errors.Wrap(err, "failed to wrap signed bid")
}
bid, err := wb.Bid()
if err != nil {
return errors.Wrap(err, "failed to get bid from wrapped bid")
}
*blockHash = bid.BlockHash()
parentRoot := block.ParentRoot()
*parent = s.emptyNodeByRoot[parentRoot]
if *parent == nil {
// This is the tree root node.
return nil
}
if bid.ParentBlockHash() == (*parent).node.blockHash {
// block builds on full
*parent = s.fullNodeByRoot[(*parent).node.root]
}
return nil
}
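resolveParentPayloadStatus above decides whether a child block attaches to the full or the empty version of its parent by comparing the bid's parent block hash with the parent's payload block hash. A hedged restatement of that rule as a standalone helper (hypothetical name, not part of the store's API):
// buildsOnFullParent reports whether a child whose bid commits to
// bidParentHash extends the parent's full node (payload present) rather than
// its empty node.
func buildsOnFullParent(bidParentHash, parentBlockHash [32]byte) bool {
    return bidParentHash == parentBlockHash
}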
// applyWeightChangesConsensusNode recomputes the weight of the node passed as an argument and all of its descendants,
// using the current balance stored in each node.
func (s *Store) applyWeightChangesConsensusNode(ctx context.Context, n *Node) error {
// Recursively calling the children to sum their weights.
en := s.emptyNodeByRoot[n.root]
if err := s.applyWeightChangesPayloadNode(ctx, en); err != nil {
return err
}
childrenWeight := en.weight
fn := s.fullNodeByRoot[n.root]
if fn != nil {
if err := s.applyWeightChangesPayloadNode(ctx, fn); err != nil {
return err
}
childrenWeight += fn.weight
}
if n.root == params.BeaconConfig().ZeroHash {
return nil
}
n.weight = n.balance + childrenWeight
return nil
}
// applyWeightChangesPayloadNode recomputes the weight of the node passed as an argument and all of its descendants,
// using the current balance stored in each node.
func (s *Store) applyWeightChangesPayloadNode(ctx context.Context, n *PayloadNode) error {
// Recursively calling the children to sum their weights.
childrenWeight := uint64(0)
for _, child := range n.children {
if ctx.Err() != nil {
return ctx.Err()
}
if err := s.applyWeightChangesConsensusNode(ctx, child); err != nil {
return err
}
childrenWeight += child.weight
}
n.weight = n.balance + childrenWeight
return nil
}
// allConsensusChildren returns the list of all consensus blocks that build on the given node.
func (s *Store) allConsensusChildren(n *Node) []*Node {
en := s.emptyNodeByRoot[n.root]
fn, ok := s.fullNodeByRoot[n.root]
if ok {
return append(slices.Clone(en.children), fn.children...)
}
return en.children
}
// setNodeAndParentValidated sets the current node and all the ancestors as validated (i.e. non-optimistic).
func (s *Store) setNodeAndParentValidated(ctx context.Context, pn *PayloadNode) error {
if ctx.Err() != nil {
return ctx.Err()
}
if !pn.optimistic {
return nil
}
pn.optimistic = false
if pn.full {
// set the empty node as valid as well
en := s.emptyNodeByRoot[pn.node.root]
en.optimistic = false
}
if pn.node.parent == nil {
return nil
}
return s.setNodeAndParentValidated(ctx, pn.node.parent)
}
// fullAncestor returns the highest ancestor with a full payload that a block with the
// given root has. If there is a payload for the passed root, it returns that full
// node. Otherwise it returns the nearest full ancestor of the given root.
func (s *Store) fullAncestor(root [32]byte) *PayloadNode {
fn, ok := s.fullNodeByRoot[root]
if ok {
return fn
}
en := s.emptyNodeByRoot[root]
if en == nil {
return nil
}
return s.fullParent(en)
}
// fullParent returns the latest full node that this block builds on.
func (s *Store) fullParent(pn *PayloadNode) *PayloadNode {
parent := pn.node.parent
for ; parent != nil && !parent.full; parent = parent.node.parent {
}
return parent
}
// parentHash returns the payload hash of the latest full node that this block builds on.
func (s *Store) parentHash(pn *PayloadNode) [32]byte {
fullParent := s.fullParent(pn)
if fullParent == nil {
return [32]byte{}
}
return fullParent.node.blockHash
}
// latestHashForRoot returns the latest payload hash for the given block root.
func (s *Store) latestHashForRoot(root [32]byte) [32]byte {
// try to get the full node first
fn, ok := s.fullNodeByRoot[root]
if ok && fn != nil {
return fn.node.blockHash
}
en := s.emptyNodeByRoot[root]
if !ok || en == nil {
// This should not happen
return [32]byte{}
}
return s.parentHash(en)
}
// updateBestDescendantPayloadNode updates the best descendant of this node and its
// children.
func (s *Store) updateBestDescendantPayloadNode(ctx context.Context, n *PayloadNode, justifiedEpoch, finalizedEpoch, currentEpoch primitives.Epoch) error {
if ctx.Err() != nil {
return ctx.Err()
}
var bestChild *Node
bestWeight := uint64(0)
for _, child := range n.children {
if child == nil {
return errors.Wrap(ErrNilNode, "could not update best descendant")
}
if err := s.updateBestDescendantConsensusNode(ctx, child, justifiedEpoch, finalizedEpoch, currentEpoch); err != nil {
return err
}
childLeadsToViableHead := child.leadsToViableHead(justifiedEpoch, currentEpoch)
if childLeadsToViableHead && bestChild == nil {
// The child leads to a viable head, but the current
// parent's best child doesn't.
bestWeight = child.weight
bestChild = child
} else if childLeadsToViableHead {
// If both are viable, compare their weights.
if child.weight == bestWeight {
// Tie-breaker of equal weights by root.
if bytes.Compare(child.root[:], bestChild.root[:]) > 0 {
bestChild = child
}
} else if child.weight > bestWeight {
bestChild = child
bestWeight = child.weight
}
}
}
if bestChild == nil {
n.bestDescendant = nil
} else {
if bestChild.bestDescendant == nil {
n.bestDescendant = bestChild
} else {
n.bestDescendant = bestChild.bestDescendant
}
}
return nil
}
// updateBestDescendantConsensusNode updates the best descendant of this node and its
// children.
func (s *Store) updateBestDescendantConsensusNode(ctx context.Context, n *Node, justifiedEpoch, finalizedEpoch, currentEpoch primitives.Epoch) error {
if ctx.Err() != nil {
return ctx.Err()
}
if len(s.allConsensusChildren(n)) == 0 {
n.bestDescendant = nil
return nil
}
en := s.emptyNodeByRoot[n.root]
if err := s.updateBestDescendantPayloadNode(ctx, en, justifiedEpoch, finalizedEpoch, currentEpoch); err != nil {
return err
}
fn := s.fullNodeByRoot[n.root]
if fn == nil {
n.bestDescendant = en.bestDescendant
return nil
}
// TODO GLOAS: pick between full or empty
if err := s.updateBestDescendantPayloadNode(ctx, fn, justifiedEpoch, finalizedEpoch, currentEpoch); err != nil {
return err
}
n.bestDescendant = fn.bestDescendant
return nil
}
// choosePayloadContent chooses between the empty and full node for the passed consensus node. TODO Gloas: use PTC to choose.
func (s *Store) choosePayloadContent(n *Node) *PayloadNode {
if n == nil {
return nil
}
fn := s.fullNodeByRoot[n.root]
if fn != nil {
return fn
}
return s.emptyNodeByRoot[n.root]
}
// nodeTreeDump appends to the given list all the nodes descending from this one
func (s *Store) nodeTreeDump(ctx context.Context, n *Node, nodes []*forkchoice2.Node) ([]*forkchoice2.Node, error) {
if ctx.Err() != nil {
return nil, ctx.Err()
}
var parentRoot [32]byte
if n.parent != nil {
parentRoot = n.parent.node.root
}
target := [32]byte{}
if n.target != nil {
target = n.target.root
}
optimistic := false
if n.parent != nil {
optimistic = n.parent.optimistic
}
en := s.emptyNodeByRoot[n.root]
timestamp := en.timestamp
fn := s.fullNodeByRoot[n.root]
if fn != nil {
optimistic = fn.optimistic
timestamp = fn.timestamp
}
thisNode := &forkchoice2.Node{
Slot: n.slot,
BlockRoot: n.root[:],
ParentRoot: parentRoot[:],
JustifiedEpoch: n.justifiedEpoch,
FinalizedEpoch: n.finalizedEpoch,
UnrealizedJustifiedEpoch: n.unrealizedJustifiedEpoch,
UnrealizedFinalizedEpoch: n.unrealizedFinalizedEpoch,
Balance: n.balance,
Weight: n.weight,
ExecutionOptimistic: optimistic,
ExecutionBlockHash: n.blockHash[:],
Timestamp: timestamp,
Target: target[:],
}
if optimistic {
thisNode.Validity = forkchoice2.Optimistic
} else {
thisNode.Validity = forkchoice2.Valid
}
nodes = append(nodes, thisNode)
var err error
children := s.allConsensusChildren(n)
for _, child := range children {
nodes, err = s.nodeTreeDump(ctx, child, nodes)
if err != nil {
return nil, err
}
}
return nodes, nil
}
func (f *ForkChoice) InsertPayload(ctx context.Context, pe interfaces.ROExecutionPayloadEnvelope) error {
s := f.store
root := pe.BeaconBlockRoot()
en := s.emptyNodeByRoot[root]
if en == nil {
return errors.Wrap(ErrNilNode, "cannot insert full node without an empty one")
}
if _, ok := s.fullNodeByRoot[root]; ok {
// We don't import two payloads for the same root
return nil
}
fn := &PayloadNode{
node: en.node,
optimistic: true,
timestamp: time.Now(),
full: true,
children: make([]*Node, 0),
}
s.fullNodeByRoot[root] = fn
return nil
}

View File

@@ -1,300 +0,0 @@
package doublylinkedtree
import (
"context"
"testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/assert"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/testing/util"
)
func prepareGloasForkchoiceState(
_ context.Context,
slot primitives.Slot,
blockRoot [32]byte,
parentRoot [32]byte,
blockHash [32]byte,
parentBlockHash [32]byte,
justifiedEpoch primitives.Epoch,
finalizedEpoch primitives.Epoch,
) (state.BeaconState, blocks.ROBlock, error) {
blockHeader := &ethpb.BeaconBlockHeader{
ParentRoot: parentRoot[:],
}
justifiedCheckpoint := &ethpb.Checkpoint{
Epoch: justifiedEpoch,
}
finalizedCheckpoint := &ethpb.Checkpoint{
Epoch: finalizedEpoch,
}
builderPendingPayments := make([]*ethpb.BuilderPendingPayment, 64)
for i := range builderPendingPayments {
builderPendingPayments[i] = &ethpb.BuilderPendingPayment{
Withdrawal: &ethpb.BuilderPendingWithdrawal{
FeeRecipient: make([]byte, 20),
},
}
}
base := &ethpb.BeaconStateGloas{
Slot: slot,
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
CurrentJustifiedCheckpoint: justifiedCheckpoint,
FinalizedCheckpoint: finalizedCheckpoint,
LatestBlockHeader: blockHeader,
LatestExecutionPayloadBid: &ethpb.ExecutionPayloadBid{
BlockHash: blockHash[:],
ParentBlockHash: parentBlockHash[:],
ParentBlockRoot: make([]byte, 32),
PrevRandao: make([]byte, 32),
FeeRecipient: make([]byte, 20),
BlobKzgCommitments: [][]byte{make([]byte, 48)},
},
Builders: make([]*ethpb.Builder, 0),
BuilderPendingPayments: builderPendingPayments,
ExecutionPayloadAvailability: make([]byte, 1024),
LatestBlockHash: make([]byte, 32),
PayloadExpectedWithdrawals: make([]*enginev1.Withdrawal, 0),
ProposerLookahead: make([]uint64, 64),
}
st, err := state_native.InitializeFromProtoUnsafeGloas(base)
if err != nil {
return nil, blocks.ROBlock{}, err
}
bid := util.HydrateSignedExecutionPayloadBid(&ethpb.SignedExecutionPayloadBid{
Message: &ethpb.ExecutionPayloadBid{
BlockHash: blockHash[:],
ParentBlockHash: parentBlockHash[:],
},
})
blk := util.HydrateSignedBeaconBlockGloas(&ethpb.SignedBeaconBlockGloas{
Block: &ethpb.BeaconBlockGloas{
Slot: slot,
ParentRoot: parentRoot[:],
Body: &ethpb.BeaconBlockBodyGloas{
SignedExecutionPayloadBid: bid,
},
},
})
signed, err := blocks.NewSignedBeaconBlock(blk)
if err != nil {
return nil, blocks.ROBlock{}, err
}
roblock, err := blocks.NewROBlockWithRoot(signed, blockRoot)
return st, roblock, err
}
func prepareGloasForkchoicePayload(
blockRoot [32]byte,
) (interfaces.ROExecutionPayloadEnvelope, error) {
env := &ethpb.ExecutionPayloadEnvelope{
BeaconBlockRoot: blockRoot[:],
Payload: &enginev1.ExecutionPayloadDeneb{},
}
return blocks.WrappedROExecutionPayloadEnvelope(env)
}
func TestInsertGloasBlock_EmptyNodeOnly(t *testing.T) {
f := setup(0, 0)
ctx := t.Context()
root := indexToHash(1)
blockHash := indexToHash(100)
st, roblock, err := prepareGloasForkchoiceState(ctx, 1, root, params.BeaconConfig().ZeroHash, blockHash, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, roblock))
// Empty node should exist.
en := f.store.emptyNodeByRoot[root]
require.NotNil(t, en)
// Full node should NOT exist.
_, hasFull := f.store.fullNodeByRoot[root]
assert.Equal(t, false, hasFull)
// Parent should be the genesis full node.
genesisRoot := params.BeaconConfig().ZeroHash
genesisFull := f.store.fullNodeByRoot[genesisRoot]
require.NotNil(t, genesisFull)
assert.Equal(t, genesisFull, en.node.parent)
}
func TestInsertPayload_CreatesFullNode(t *testing.T) {
f := setup(0, 0)
ctx := t.Context()
root := indexToHash(1)
blockHash := indexToHash(100)
st, roblock, err := prepareGloasForkchoiceState(ctx, 1, root, params.BeaconConfig().ZeroHash, blockHash, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, roblock))
require.Equal(t, 2, len(f.store.emptyNodeByRoot))
require.Equal(t, 1, len(f.store.fullNodeByRoot))
pe, err := prepareGloasForkchoicePayload(root)
require.NoError(t, err)
require.NoError(t, f.InsertPayload(ctx, pe))
require.Equal(t, 2, len(f.store.fullNodeByRoot))
fn := f.store.fullNodeByRoot[root]
require.NotNil(t, fn)
en := f.store.emptyNodeByRoot[root]
require.NotNil(t, en)
// Empty and full share the same *Node.
assert.Equal(t, en.node, fn.node)
assert.Equal(t, true, fn.optimistic)
assert.Equal(t, true, fn.full)
}
func TestInsertPayload_DuplicateIsNoop(t *testing.T) {
f := setup(0, 0)
ctx := t.Context()
root := indexToHash(1)
blockHash := indexToHash(100)
st, roblock, err := prepareGloasForkchoiceState(ctx, 1, root, params.BeaconConfig().ZeroHash, blockHash, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, roblock))
pe, err := prepareGloasForkchoicePayload(root)
require.NoError(t, err)
require.NoError(t, f.InsertPayload(ctx, pe))
require.Equal(t, 2, len(f.store.fullNodeByRoot))
fn := f.store.fullNodeByRoot[root]
require.NotNil(t, fn)
// Insert again — should be a no-op.
require.NoError(t, f.InsertPayload(ctx, pe))
assert.Equal(t, fn, f.store.fullNodeByRoot[root])
require.Equal(t, 2, len(f.store.fullNodeByRoot))
}
func TestInsertPayload_WithoutEmptyNode_Errors(t *testing.T) {
f := setup(0, 0)
ctx := t.Context()
root := indexToHash(99)
pe, err := prepareGloasForkchoicePayload(root)
require.NoError(t, err)
err = f.InsertPayload(ctx, pe)
require.ErrorContains(t, ErrNilNode.Error(), err)
}
func TestGloasBlock_ChildBuildsOnEmpty(t *testing.T) {
f := setup(0, 0)
ctx := t.Context()
// Insert Gloas block A (empty only).
rootA := indexToHash(1)
blockHashA := indexToHash(100)
st, roblock, err := prepareGloasForkchoiceState(ctx, 1, rootA, params.BeaconConfig().ZeroHash, blockHashA, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, roblock))
// Insert Gloas block B as child of (A, empty)
rootB := indexToHash(2)
blockHashB := indexToHash(200)
nonMatchingParentHash := indexToHash(999)
st, roblock, err = prepareGloasForkchoiceState(ctx, 2, rootB, rootA, blockHashB, nonMatchingParentHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, roblock))
emptyA := f.store.emptyNodeByRoot[rootA]
require.NotNil(t, emptyA)
nodeB := f.store.emptyNodeByRoot[rootB]
require.NotNil(t, nodeB)
require.Equal(t, emptyA, nodeB.node.parent)
}
func TestGloasBlock_ChildrenOfEmptyAndFull(t *testing.T) {
f := setup(0, 0)
ctx := t.Context()
// Insert Gloas block A (empty only).
rootA := indexToHash(1)
blockHashA := indexToHash(100)
st, roblock, err := prepareGloasForkchoiceState(ctx, 1, rootA, params.BeaconConfig().ZeroHash, blockHashA, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, roblock))
// Insert payload for A
pe, err := prepareGloasForkchoicePayload(rootA)
require.NoError(t, err)
require.NoError(t, f.InsertPayload(ctx, pe))
// Insert Gloas block B as child of (A, empty)
rootB := indexToHash(2)
blockHashB := indexToHash(200)
nonMatchingParentHash := indexToHash(999)
st, roblock, err = prepareGloasForkchoiceState(ctx, 2, rootB, rootA, blockHashB, nonMatchingParentHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, roblock))
// Insert Gloas block C as child of (A, full)
rootC := indexToHash(3)
blockHashC := indexToHash(201)
st, roblock, err = prepareGloasForkchoiceState(ctx, 3, rootC, rootA, blockHashC, blockHashA, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, roblock))
emptyA := f.store.emptyNodeByRoot[rootA]
require.NotNil(t, emptyA)
nodeB := f.store.emptyNodeByRoot[rootB]
require.NotNil(t, nodeB)
require.Equal(t, emptyA, nodeB.node.parent)
nodeC := f.store.emptyNodeByRoot[rootC]
require.NotNil(t, nodeC)
fullA := f.store.fullNodeByRoot[rootA]
require.NotNil(t, fullA)
require.Equal(t, fullA, nodeC.node.parent)
}
func TestGloasBlock_ChildBuildsOnFull(t *testing.T) {
f := setup(0, 0)
ctx := t.Context()
// Insert Gloas block A (empty only).
rootA := indexToHash(1)
blockHashA := indexToHash(100)
st, roblock, err := prepareGloasForkchoiceState(ctx, 1, rootA, params.BeaconConfig().ZeroHash, blockHashA, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, roblock))
// Insert payload for A → creates the full node.
pe, err := prepareGloasForkchoicePayload(rootA)
require.NoError(t, err)
require.NoError(t, f.InsertPayload(ctx, pe))
fullA := f.store.fullNodeByRoot[rootA]
require.NotNil(t, fullA)
// Child for (A, full)
rootB := indexToHash(2)
blockHashB := indexToHash(200)
st, roblock, err = prepareGloasForkchoiceState(ctx, 2, rootB, rootA, blockHashB, blockHashA, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, roblock))
nodeB := f.store.emptyNodeByRoot[rootB]
require.NotNil(t, nodeB)
assert.Equal(t, fullA, nodeB.node.parent)
}

View File

@@ -0,0 +1,26 @@
package doublylinkedtree
import (
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/time/slots"
)
// LastRoot returns the last canonical block root in the given epoch
func (f *ForkChoice) LastRoot(epoch primitives.Epoch) [32]byte {
head := f.store.headNode
headEpoch := slots.ToEpoch(head.slot)
epochEnd, err := slots.EpochEnd(epoch)
if err != nil {
return [32]byte{}
}
if headEpoch <= epoch {
return head.root
}
for head != nil && head.slot > epochEnd {
head = head.parent
}
if head == nil {
return [32]byte{}
}
return head.root
}

View File

@@ -0,0 +1,38 @@
package doublylinkedtree
import (
"testing"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/testing/require"
)
func TestLastRoot(t *testing.T) {
f := setup(0, 0)
ctx := t.Context()
st, root, err := prepareForkchoiceState(ctx, 1, [32]byte{'1'}, params.BeaconConfig().ZeroHash, [32]byte{'1'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, root))
st, root, err = prepareForkchoiceState(ctx, 2, [32]byte{'2'}, [32]byte{'1'}, [32]byte{'2'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, root))
st, root, err = prepareForkchoiceState(ctx, 3, [32]byte{'3'}, [32]byte{'1'}, [32]byte{'3'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, root))
st, root, err = prepareForkchoiceState(ctx, 32, [32]byte{'4'}, [32]byte{'3'}, [32]byte{'4'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, root))
st, root, err = prepareForkchoiceState(ctx, 33, [32]byte{'5'}, [32]byte{'2'}, [32]byte{'5'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, root))
st, root, err = prepareForkchoiceState(ctx, 34, [32]byte{'6'}, [32]byte{'5'}, [32]byte{'6'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, root))
headNode := f.store.nodeByRoot[[32]byte{'6'}]
f.store.headNode = headNode
require.Equal(t, [32]byte{'6'}, f.store.headNode.root)
require.Equal(t, [32]byte{'2'}, f.LastRoot(0))
require.Equal(t, [32]byte{'6'}, f.LastRoot(1))
require.Equal(t, [32]byte{'6'}, f.LastRoot(2))
}

View File

@@ -1,17 +1,95 @@
package doublylinkedtree
import (
"bytes"
"context"
"time"
"github.com/OffchainLabs/prysm/v7/config/params"
forkchoice2 "github.com/OffchainLabs/prysm/v7/consensus-types/forkchoice"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
)
// ProcessAttestationsThreshold is the amount of time after which we
// process attestations for the current slot
const ProcessAttestationsThreshold = 10 * time.Second
// applyWeightChanges recomputes the weight of the node passed as an argument and all of its descendants,
// using the current balance stored in each node.
func (n *Node) applyWeightChanges(ctx context.Context) error {
// Recursively calling the children to sum their weights.
childrenWeight := uint64(0)
for _, child := range n.children {
if ctx.Err() != nil {
return ctx.Err()
}
if err := child.applyWeightChanges(ctx); err != nil {
return err
}
childrenWeight += child.weight
}
if n.root == params.BeaconConfig().ZeroHash {
return nil
}
n.weight = n.balance + childrenWeight
return nil
}
// updateBestDescendant updates the best descendant of this node and its
// children.
func (n *Node) updateBestDescendant(ctx context.Context, justifiedEpoch, finalizedEpoch, currentEpoch primitives.Epoch) error {
if ctx.Err() != nil {
return ctx.Err()
}
if len(n.children) == 0 {
n.bestDescendant = nil
return nil
}
var bestChild *Node
bestWeight := uint64(0)
hasViableDescendant := false
for _, child := range n.children {
if child == nil {
return errors.Wrap(ErrNilNode, "could not update best descendant")
}
if err := child.updateBestDescendant(ctx, justifiedEpoch, finalizedEpoch, currentEpoch); err != nil {
return err
}
childLeadsToViableHead := child.leadsToViableHead(justifiedEpoch, currentEpoch)
if childLeadsToViableHead && !hasViableDescendant {
// The child leads to a viable head, but the current
// parent's best child doesn't.
bestWeight = child.weight
bestChild = child
hasViableDescendant = true
} else if childLeadsToViableHead {
// If both are viable, compare their weights.
if child.weight == bestWeight {
// Tie-breaker of equal weights by root.
if bytes.Compare(child.root[:], bestChild.root[:]) > 0 {
bestChild = child
}
} else if child.weight > bestWeight {
bestChild = child
bestWeight = child.weight
}
}
}
if hasViableDescendant {
if bestChild.bestDescendant == nil {
n.bestDescendant = bestChild
} else {
n.bestDescendant = bestChild.bestDescendant
}
} else {
n.bestDescendant = nil
}
return nil
}
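The selection logic above prefers the heavier viable child and, when weights tie, the child with the lexicographically larger root. A minimal sketch of that tie-break as a standalone helper (hypothetical name; bytes is already imported in this file):
// betterChild reports whether candidate should replace current as the best
// child, assuming both already lead to a viable head.
func betterChild(candidateWeight, currentWeight uint64, candidateRoot, currentRoot [32]byte) bool {
    if candidateWeight == currentWeight {
        return bytes.Compare(candidateRoot[:], currentRoot[:]) > 0
    }
    return candidateWeight > currentWeight
}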
// viableForHead returns true if the node is viable to head.
// Any node with different finalized or justified epoch than
// the ones in fork choice store should not be viable to head.
@@ -32,13 +110,30 @@ func (n *Node) leadsToViableHead(justifiedEpoch, currentEpoch primitives.Epoch)
return n.bestDescendant.viableForHead(justifiedEpoch, currentEpoch)
}
// setNodeAndParentValidated sets the current node and all the ancestors as validated (i.e. non-optimistic).
func (n *Node) setNodeAndParentValidated(ctx context.Context) error {
if ctx.Err() != nil {
return ctx.Err()
}
if !n.optimistic {
return nil
}
n.optimistic = false
if n.parent == nil {
return nil
}
return n.parent.setNodeAndParentValidated(ctx)
}
// arrivedEarly returns whether this node was inserted before the first
// threshold to orphan a block.
// Note that genesisTime has seconds granularity, therefore we use a strict
// inequality < here. For example a block that arrives 3.9999 seconds into the
// slot will have secs = 3 below.
func (n *PayloadNode) arrivedEarly(genesis time.Time) (bool, error) {
sss, err := slots.SinceSlotStart(n.node.slot, genesis, n.timestamp.Truncate(time.Second)) // Truncate such that 3.9999 seconds will have a value of 3.
func (n *Node) arrivedEarly(genesis time.Time) (bool, error) {
sss, err := slots.SinceSlotStart(n.slot, genesis, n.timestamp.Truncate(time.Second)) // Truncate such that 3.9999 seconds will have a value of 3.
votingWindow := params.BeaconConfig().SlotComponentDuration(params.BeaconConfig().AttestationDueBPS)
return sss < votingWindow, err
}
@@ -48,7 +143,52 @@ func (n *PayloadNode) arrivedEarly(genesis time.Time) (bool, error) {
// Note that genesisTime has seconds granularity, therefore we use an
// inequality >= here. For example a block that arrives 10.00001 seconds into the
// slot will have secs = 10 below.
func (n *PayloadNode) arrivedAfterOrphanCheck(genesis time.Time) (bool, error) {
secs, err := slots.SinceSlotStart(n.node.slot, genesis, n.timestamp.Truncate(time.Second)) // Truncate such that 10.00001 seconds will have a value of 10.
func (n *Node) arrivedAfterOrphanCheck(genesis time.Time) (bool, error) {
secs, err := slots.SinceSlotStart(n.slot, genesis, n.timestamp.Truncate(time.Second)) // Truncate such that 10.00001 seconds will have a value of 10.
return secs >= ProcessAttestationsThreshold, err
}
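As a worked example of the two timing checks above, assuming mainnet-style values of a 4s attestation deadline and the 10s ProcessAttestationsThreshold (the actual voting window comes from the runtime config): a block received 3.9999s into its slot is truncated to 3s, so arrivedEarly is true and arrivedAfterOrphanCheck is false; a block received 10.00001s into its slot is truncated to 10s, so arrivedAfterOrphanCheck is true and arrivedEarly is false.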
// nodeTreeDump appends to the given list all the nodes descending from this one
func (n *Node) nodeTreeDump(ctx context.Context, nodes []*forkchoice2.Node) ([]*forkchoice2.Node, error) {
if ctx.Err() != nil {
return nil, ctx.Err()
}
var parentRoot [32]byte
if n.parent != nil {
parentRoot = n.parent.root
}
target := [32]byte{}
if n.target != nil {
target = n.target.root
}
thisNode := &forkchoice2.Node{
Slot: n.slot,
BlockRoot: n.root[:],
ParentRoot: parentRoot[:],
JustifiedEpoch: n.justifiedEpoch,
FinalizedEpoch: n.finalizedEpoch,
UnrealizedJustifiedEpoch: n.unrealizedJustifiedEpoch,
UnrealizedFinalizedEpoch: n.unrealizedFinalizedEpoch,
Balance: n.balance,
Weight: n.weight,
ExecutionOptimistic: n.optimistic,
ExecutionBlockHash: n.payloadHash[:],
Timestamp: n.timestamp,
Target: target[:],
}
if n.optimistic {
thisNode.Validity = forkchoice2.Optimistic
} else {
thisNode.Validity = forkchoice2.Valid
}
nodes = append(nodes, thisNode)
var err error
for _, child := range n.children {
nodes, err = child.nodeTreeDump(ctx, nodes)
if err != nil {
return nil, err
}
}
return nodes, nil
}

View File

@@ -27,15 +27,15 @@ func TestNode_ApplyWeightChanges_PositiveChange(t *testing.T) {
// The updated balances of each node is 100
s := f.store
s.emptyNodeByRoot[indexToHash(1)].balance = 100
s.emptyNodeByRoot[indexToHash(2)].balance = 100
s.emptyNodeByRoot[indexToHash(3)].balance = 100
s.nodeByRoot[indexToHash(1)].balance = 100
s.nodeByRoot[indexToHash(2)].balance = 100
s.nodeByRoot[indexToHash(3)].balance = 100
assert.NoError(t, s.applyWeightChangesConsensusNode(ctx, s.treeRootNode))
assert.NoError(t, s.treeRootNode.applyWeightChanges(ctx))
assert.Equal(t, uint64(300), s.emptyNodeByRoot[indexToHash(1)].node.weight)
assert.Equal(t, uint64(200), s.emptyNodeByRoot[indexToHash(2)].node.weight)
assert.Equal(t, uint64(100), s.emptyNodeByRoot[indexToHash(3)].node.weight)
assert.Equal(t, uint64(300), s.nodeByRoot[indexToHash(1)].weight)
assert.Equal(t, uint64(200), s.nodeByRoot[indexToHash(2)].weight)
assert.Equal(t, uint64(100), s.nodeByRoot[indexToHash(3)].weight)
}
func TestNode_ApplyWeightChanges_NegativeChange(t *testing.T) {
@@ -53,19 +53,19 @@ func TestNode_ApplyWeightChanges_NegativeChange(t *testing.T) {
// The updated balances of each node is 100
s := f.store
s.emptyNodeByRoot[indexToHash(1)].weight = 400
s.emptyNodeByRoot[indexToHash(2)].weight = 400
s.emptyNodeByRoot[indexToHash(3)].weight = 400
s.nodeByRoot[indexToHash(1)].weight = 400
s.nodeByRoot[indexToHash(2)].weight = 400
s.nodeByRoot[indexToHash(3)].weight = 400
s.emptyNodeByRoot[indexToHash(1)].balance = 100
s.emptyNodeByRoot[indexToHash(2)].balance = 100
s.emptyNodeByRoot[indexToHash(3)].balance = 100
s.nodeByRoot[indexToHash(1)].balance = 100
s.nodeByRoot[indexToHash(2)].balance = 100
s.nodeByRoot[indexToHash(3)].balance = 100
assert.NoError(t, s.applyWeightChangesConsensusNode(ctx, s.treeRootNode))
assert.NoError(t, s.treeRootNode.applyWeightChanges(ctx))
assert.Equal(t, uint64(300), s.emptyNodeByRoot[indexToHash(1)].node.weight)
assert.Equal(t, uint64(200), s.emptyNodeByRoot[indexToHash(2)].node.weight)
assert.Equal(t, uint64(100), s.emptyNodeByRoot[indexToHash(3)].node.weight)
assert.Equal(t, uint64(300), s.nodeByRoot[indexToHash(1)].weight)
assert.Equal(t, uint64(200), s.nodeByRoot[indexToHash(2)].weight)
assert.Equal(t, uint64(100), s.nodeByRoot[indexToHash(3)].weight)
}
func TestNode_UpdateBestDescendant_NonViableChild(t *testing.T) {
@@ -78,7 +78,7 @@ func TestNode_UpdateBestDescendant_NonViableChild(t *testing.T) {
// Verify parent's best child and best descendant are `none`.
s := f.store
assert.Equal(t, 1, len(s.allConsensusChildren(s.treeRootNode)))
assert.Equal(t, 1, len(s.treeRootNode.children))
nilBestDescendant := s.treeRootNode.bestDescendant == nil
assert.Equal(t, true, nilBestDescendant)
}
@@ -92,9 +92,8 @@ func TestNode_UpdateBestDescendant_ViableChild(t *testing.T) {
require.NoError(t, f.InsertNode(ctx, state, blk))
s := f.store
children := s.allConsensusChildren(s.treeRootNode)
assert.Equal(t, 1, len(children))
assert.Equal(t, children[0], s.treeRootNode.bestDescendant)
assert.Equal(t, 1, len(s.treeRootNode.children))
assert.Equal(t, s.treeRootNode.children[0], s.treeRootNode.bestDescendant)
}
func TestNode_UpdateBestDescendant_HigherWeightChild(t *testing.T) {
@@ -109,34 +108,32 @@ func TestNode_UpdateBestDescendant_HigherWeightChild(t *testing.T) {
require.NoError(t, f.InsertNode(ctx, state, blk))
s := f.store
s.emptyNodeByRoot[indexToHash(1)].weight = 100
s.emptyNodeByRoot[indexToHash(2)].weight = 200
assert.NoError(t, s.updateBestDescendantConsensusNode(ctx, s.treeRootNode, 1, 1, 1))
s.nodeByRoot[indexToHash(1)].weight = 100
s.nodeByRoot[indexToHash(2)].weight = 200
assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, 1, 1, 1))
children := s.allConsensusChildren(s.treeRootNode)
assert.Equal(t, 2, len(children))
assert.Equal(t, children[1], s.treeRootNode.bestDescendant)
assert.Equal(t, 2, len(s.treeRootNode.children))
assert.Equal(t, s.treeRootNode.children[1], s.treeRootNode.bestDescendant)
}
func TestNode_UpdateBestDescendant_LowerWeightChild(t *testing.T) {
f := setup(1, 1)
ctx := t.Context()
// Input child is the best descendant
state, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, indexToHash(101), 1, 1)
state, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk))
state, blk, err = prepareForkchoiceState(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, indexToHash(102), 1, 1)
state, blk, err = prepareForkchoiceState(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk))
s := f.store
s.emptyNodeByRoot[indexToHash(1)].node.weight = 200
s.emptyNodeByRoot[indexToHash(2)].node.weight = 100
assert.NoError(t, s.updateBestDescendantConsensusNode(ctx, s.treeRootNode, 1, 1, 1))
s.nodeByRoot[indexToHash(1)].weight = 200
s.nodeByRoot[indexToHash(2)].weight = 100
assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, 1, 1, 1))
children := s.allConsensusChildren(s.treeRootNode)
assert.Equal(t, 2, len(children))
assert.Equal(t, children[0], s.treeRootNode.bestDescendant)
assert.Equal(t, 2, len(s.treeRootNode.children))
assert.Equal(t, s.treeRootNode.children[0], s.treeRootNode.bestDescendant)
}
func TestNode_ViableForHead(t *testing.T) {
@@ -179,44 +176,44 @@ func TestNode_LeadsToViableHead(t *testing.T) {
require.NoError(t, f.InsertNode(ctx, state, blk))
require.Equal(t, true, f.store.treeRootNode.leadsToViableHead(4, 5))
require.Equal(t, true, f.store.emptyNodeByRoot[indexToHash(5)].node.leadsToViableHead(4, 5))
require.Equal(t, false, f.store.emptyNodeByRoot[indexToHash(2)].node.leadsToViableHead(4, 5))
require.Equal(t, false, f.store.emptyNodeByRoot[indexToHash(4)].node.leadsToViableHead(4, 5))
require.Equal(t, true, f.store.nodeByRoot[indexToHash(5)].leadsToViableHead(4, 5))
require.Equal(t, false, f.store.nodeByRoot[indexToHash(2)].leadsToViableHead(4, 5))
require.Equal(t, false, f.store.nodeByRoot[indexToHash(4)].leadsToViableHead(4, 5))
}
func TestNode_SetFullyValidated(t *testing.T) {
f := setup(1, 1)
ctx := t.Context()
storeNodes := make([]*PayloadNode, 6)
storeNodes[0] = f.store.fullNodeByRoot[params.BeaconConfig().ZeroHash]
storeNodes := make([]*Node, 6)
storeNodes[0] = f.store.treeRootNode
// insert blocks in the fork pattern (optimistic status in parenthesis)
//
// 0 (false) -- 1 (false) -- 2 (false) -- 3 (true) -- 4 (true)
// \
// -- 5 (true)
//
state, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, indexToHash(101), 1, 1)
state, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk))
storeNodes[1] = f.store.fullNodeByRoot[blk.Root()]
storeNodes[1] = f.store.nodeByRoot[blk.Root()]
require.NoError(t, f.SetOptimisticToValid(ctx, params.BeaconConfig().ZeroHash))
state, blk, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk))
storeNodes[2] = f.store.nodeByRoot[blk.Root()]
require.NoError(t, f.SetOptimisticToValid(ctx, indexToHash(1)))
state, blk, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), indexToHash(102), 1, 1)
state, blk, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk))
storeNodes[2] = f.store.fullNodeByRoot[blk.Root()]
require.NoError(t, f.SetOptimisticToValid(ctx, indexToHash(2)))
state, blk, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), indexToHash(103), 1, 1)
storeNodes[3] = f.store.nodeByRoot[blk.Root()]
state, blk, err = prepareForkchoiceState(ctx, 4, indexToHash(4), indexToHash(3), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk))
storeNodes[3] = f.store.fullNodeByRoot[blk.Root()]
state, blk, err = prepareForkchoiceState(ctx, 4, indexToHash(4), indexToHash(3), indexToHash(104), 1, 1)
storeNodes[4] = f.store.nodeByRoot[blk.Root()]
state, blk, err = prepareForkchoiceState(ctx, 5, indexToHash(5), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk))
storeNodes[4] = f.store.fullNodeByRoot[blk.Root()]
state, blk, err = prepareForkchoiceState(ctx, 5, indexToHash(5), indexToHash(1), indexToHash(105), 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk))
storeNodes[5] = f.store.fullNodeByRoot[blk.Root()]
storeNodes[5] = f.store.nodeByRoot[blk.Root()]
opt, err := f.IsOptimistic(indexToHash(5))
require.NoError(t, err)
@@ -226,7 +223,7 @@ func TestNode_SetFullyValidated(t *testing.T) {
require.NoError(t, err)
require.Equal(t, true, opt)
require.NoError(t, f.store.setNodeAndParentValidated(ctx, f.store.fullNodeByRoot[indexToHash(4)]))
require.NoError(t, f.store.nodeByRoot[indexToHash(4)].setNodeAndParentValidated(ctx))
// block 5 should still be optimistic
opt, err = f.IsOptimistic(indexToHash(5))
@@ -243,20 +240,20 @@ func TestNode_SetFullyValidated(t *testing.T) {
require.Equal(t, false, opt)
respNodes := make([]*forkchoice.Node, 0)
respNodes, err = f.store.nodeTreeDump(ctx, f.store.treeRootNode, respNodes)
respNodes, err = f.store.treeRootNode.nodeTreeDump(ctx, respNodes)
require.NoError(t, err)
require.Equal(t, len(respNodes), f.NodeCount())
for i, respNode := range respNodes {
require.Equal(t, storeNodes[i].node.slot, respNode.Slot)
require.DeepEqual(t, storeNodes[i].node.root[:], respNode.BlockRoot)
require.Equal(t, storeNodes[i].node.balance, respNode.Balance)
require.Equal(t, storeNodes[i].node.weight, respNode.Weight)
require.Equal(t, storeNodes[i].slot, respNode.Slot)
require.DeepEqual(t, storeNodes[i].root[:], respNode.BlockRoot)
require.Equal(t, storeNodes[i].balance, respNode.Balance)
require.Equal(t, storeNodes[i].weight, respNode.Weight)
require.Equal(t, storeNodes[i].optimistic, respNode.ExecutionOptimistic)
require.Equal(t, storeNodes[i].node.justifiedEpoch, respNode.JustifiedEpoch)
require.Equal(t, storeNodes[i].node.unrealizedJustifiedEpoch, respNode.UnrealizedJustifiedEpoch)
require.Equal(t, storeNodes[i].node.finalizedEpoch, respNode.FinalizedEpoch)
require.Equal(t, storeNodes[i].node.unrealizedFinalizedEpoch, respNode.UnrealizedFinalizedEpoch)
require.Equal(t, storeNodes[i].justifiedEpoch, respNode.JustifiedEpoch)
require.Equal(t, storeNodes[i].unrealizedJustifiedEpoch, respNode.UnrealizedJustifiedEpoch)
require.Equal(t, storeNodes[i].finalizedEpoch, respNode.FinalizedEpoch)
require.Equal(t, storeNodes[i].unrealizedFinalizedEpoch, respNode.UnrealizedFinalizedEpoch)
require.Equal(t, storeNodes[i].timestamp, respNode.Timestamp)
}
}
@@ -275,10 +272,10 @@ func TestNode_TimeStampsChecks(t *testing.T) {
headRoot, err := f.Head(ctx)
require.NoError(t, err)
require.Equal(t, root, headRoot)
early, err := f.store.choosePayloadContent(f.store.headNode).arrivedEarly(f.store.genesisTime)
early, err := f.store.headNode.arrivedEarly(f.store.genesisTime)
require.NoError(t, err)
require.Equal(t, true, early)
late, err := f.store.choosePayloadContent(f.store.headNode).arrivedAfterOrphanCheck(f.store.genesisTime)
late, err := f.store.headNode.arrivedAfterOrphanCheck(f.store.genesisTime)
require.NoError(t, err)
require.Equal(t, false, late)
@@ -292,10 +289,10 @@ func TestNode_TimeStampsChecks(t *testing.T) {
headRoot, err = f.Head(ctx)
require.NoError(t, err)
require.Equal(t, root, headRoot)
early, err = f.store.choosePayloadContent(f.store.headNode).arrivedEarly(f.store.genesisTime)
early, err = f.store.headNode.arrivedEarly(f.store.genesisTime)
require.NoError(t, err)
require.Equal(t, false, early)
late, err = f.store.choosePayloadContent(f.store.headNode).arrivedAfterOrphanCheck(f.store.genesisTime)
late, err = f.store.headNode.arrivedAfterOrphanCheck(f.store.genesisTime)
require.NoError(t, err)
require.Equal(t, false, late)
@@ -308,10 +305,10 @@ func TestNode_TimeStampsChecks(t *testing.T) {
headRoot, err = f.Head(ctx)
require.NoError(t, err)
require.Equal(t, root, headRoot)
early, err = f.store.choosePayloadContent(f.store.headNode).arrivedEarly(f.store.genesisTime)
early, err = f.store.headNode.arrivedEarly(f.store.genesisTime)
require.NoError(t, err)
require.Equal(t, false, early)
late, err = f.store.choosePayloadContent(f.store.headNode).arrivedAfterOrphanCheck(f.store.genesisTime)
late, err = f.store.headNode.arrivedAfterOrphanCheck(f.store.genesisTime)
require.NoError(t, err)
require.Equal(t, true, late)
@@ -323,10 +320,10 @@ func TestNode_TimeStampsChecks(t *testing.T) {
headRoot, err = f.Head(ctx)
require.NoError(t, err)
require.Equal(t, root, headRoot)
early, err = f.store.choosePayloadContent(f.store.headNode).arrivedEarly(f.store.genesisTime)
early, err = f.store.headNode.arrivedEarly(f.store.genesisTime)
require.ErrorContains(t, "invalid timestamp", err)
require.Equal(t, true, early)
late, err = f.store.choosePayloadContent(f.store.headNode).arrivedAfterOrphanCheck(f.store.genesisTime)
late, err = f.store.headNode.arrivedAfterOrphanCheck(f.store.genesisTime)
require.ErrorContains(t, "invalid timestamp", err)
require.Equal(t, false, late)
}

View File

@@ -7,141 +7,93 @@ import (
"github.com/pkg/errors"
)
// setOptimisticToInvalid removes invalid nodes from forkchoice. It does NOT remove the empty node for the passed root.
func (s *Store) setOptimisticToInvalid(ctx context.Context, root, parentRoot, parentHash, lastValidHash [32]byte) ([][32]byte, error) {
func (s *Store) setOptimisticToInvalid(ctx context.Context, root, parentRoot, lastValidHash [32]byte) ([][32]byte, error) {
invalidRoots := make([][32]byte, 0)
n := s.fullNodeByRoot[root]
if n == nil {
// The offending node with its payload is not in forkchoice. Try with the parent
n = s.emptyNodeByRoot[parentRoot]
if n == nil {
return invalidRoots, errors.Wrap(ErrNilNode, "could not set node to invalid, could not find consensus parent")
node, ok := s.nodeByRoot[root]
if !ok {
node, ok = s.nodeByRoot[parentRoot]
if !ok || node == nil {
return invalidRoots, errors.Wrap(ErrNilNode, "could not set node to invalid")
}
if n.node.blockHash == lastValidHash {
// The parent node must have been full and with a valid payload
// return early if the parent is LVH
if node.payloadHash == lastValidHash {
return invalidRoots, nil
}
if n.node.blockHash == parentHash {
// The parent was full and invalid
n = s.fullNodeByRoot[parentRoot]
if n == nil {
return invalidRoots, errors.Wrap(ErrNilNode, "could not set node to invalid, could not find full parent")
}
} else {
// The parent is empty and we don't yet know if it's valid or not
for n = n.node.parent; n != nil; n = n.node.parent {
if ctx.Err() != nil {
return invalidRoots, ctx.Err()
}
if n.node.blockHash == lastValidHash {
// The node built on empty and the whole chain was valid
return invalidRoots, nil
}
if n.node.blockHash == parentHash {
// The parent was full and invalid
break
}
}
if n == nil {
return nil, errors.Wrap(ErrNilNode, "could not set node to invalid, could not find full parent in ancestry")
}
}
} else {
// check consistency with the parent information
if n.node.parent == nil {
return nil, ErrNilNode
if node == nil {
return invalidRoots, errors.Wrap(ErrNilNode, "could not set node to invalid")
}
if n.node.parent.node.root != parentRoot {
return nil, errInvalidParentRoot
if node.parent.root != parentRoot {
return invalidRoots, errInvalidParentRoot
}
}
// n points to a full node that has an invalid payload in forkchoice. We need to find the first node in the chain that is actually invalid.
startNode := n
fp := s.fullParent(n)
for ; fp != nil && fp.node.blockHash != lastValidHash; fp = s.fullParent(fp) {
firstInvalid := node
for ; firstInvalid.parent != nil && firstInvalid.parent.payloadHash != lastValidHash; firstInvalid = firstInvalid.parent {
if ctx.Err() != nil {
return invalidRoots, ctx.Err()
}
n = fp
}
// Deal with the case that the last valid payload is in a different fork
// This means we are dealing with an EE that does not follow the spec
if fp == nil {
if firstInvalid.parent == nil {
// return early if the invalid node was not imported
if startNode.node.root != root {
if node.root == parentRoot {
return invalidRoots, nil
}
// Remove just the imported invalid root
n = startNode
firstInvalid = node
}
return s.removeNode(ctx, n)
return s.removeNode(ctx, firstInvalid)
}
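For orientation, the core of setOptimisticToInvalid in both versions of this diff is an ancestry walk: starting from the offending node, climb parent pointers until the parent's payload hash equals lastValidHash, then remove the node where the walk stops together with its subtree. A standalone sketch of that walk, using a hypothetical toyNode type instead of the real *Node:
// Sketch (not part of the diff): simplified ancestor walk mirroring the
// firstInvalid loop in setOptimisticToInvalid. toyNode is hypothetical.
package main
import "fmt"
type toyNode struct {
	root        string
	payloadHash string
	parent      *toyNode
}
// firstInvalid returns the highest ancestor of n whose parent's payload hash
// is not lastValidHash, i.e. the first node of the chain that must be removed.
func firstInvalid(n *toyNode, lastValidHash string) *toyNode {
	for n.parent != nil && n.parent.payloadHash != lastValidHash {
		n = n.parent
	}
	return n
}
func main() {
	a := &toyNode{root: "a", payloadHash: "A"}
	b := &toyNode{root: "b", payloadHash: "B", parent: a}
	c := &toyNode{root: "c", payloadHash: "C", parent: b}
	// The engine reports "A" as the last valid hash, so b and c are invalid.
	fmt.Println(firstInvalid(c, "A").root) // prints "b"
}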
// removeNode removes the node with the given root and all of its children
// from the Fork Choice Store.
func (s *Store) removeNode(ctx context.Context, pn *PayloadNode) ([][32]byte, error) {
func (s *Store) removeNode(ctx context.Context, node *Node) ([][32]byte, error) {
invalidRoots := make([][32]byte, 0)
if pn == nil {
if node == nil {
return invalidRoots, errors.Wrap(ErrNilNode, "could not remove node")
}
if !pn.optimistic || pn.node.parent == nil {
if !node.optimistic || node.parent == nil {
return invalidRoots, errInvalidOptimisticStatus
}
children := pn.node.parent.children
children := node.parent.children
if len(children) == 1 {
pn.node.parent.children = []*Node{}
node.parent.children = []*Node{}
} else {
for i, n := range children {
if n == pn.node {
if n == node {
if i != len(children)-1 {
children[i] = children[len(children)-1]
}
pn.node.parent.children = children[:len(children)-1]
node.parent.children = children[:len(children)-1]
break
}
}
}
return s.removeNodeAndChildren(ctx, pn, invalidRoots)
return s.removeNodeAndChildren(ctx, node, invalidRoots)
}
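The child detachment above uses the usual swap-with-last idiom: the removed child's slot is overwritten with the final element and the slice is shrunk by one, trading ordering for an O(1) removal. A self-contained sketch of the same idiom (the generic helper name is illustrative only, not from the codebase):
// Sketch (not part of the diff): swap-with-last slice removal, the same
// idiom removeNode uses to detach a child from its parent's children slice.
package main
import "fmt"
func removeElement[T comparable](s []T, target T) []T {
	for i, v := range s {
		if v == target {
			s[i] = s[len(s)-1] // move the last element into the freed slot
			return s[:len(s)-1]
		}
	}
	return s // target not found; slice unchanged
}
func main() {
	fmt.Println(removeElement([]string{"a", "b", "c"}, "b")) // [a c]
}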
// removeNodeAndChildren removes `node` and all of its descendants from the Store
func (s *Store) removeNodeAndChildren(ctx context.Context, pn *PayloadNode, invalidRoots [][32]byte) ([][32]byte, error) {
func (s *Store) removeNodeAndChildren(ctx context.Context, node *Node, invalidRoots [][32]byte) ([][32]byte, error) {
var err error
// If we are removing an empty node, then remove the full node as well if it exists.
if !pn.full {
fn, ok := s.fullNodeByRoot[pn.node.root]
if ok {
invalidRoots, err = s.removeNodeAndChildren(ctx, fn, invalidRoots)
if err != nil {
return invalidRoots, err
}
}
}
// Now we remove the full node's children.
for _, child := range pn.children {
for _, child := range node.children {
if ctx.Err() != nil {
return invalidRoots, ctx.Err()
}
// We need to remove only the empty node here since the recursion will take care of the full one.
en := s.emptyNodeByRoot[child.root]
if invalidRoots, err = s.removeNodeAndChildren(ctx, en, invalidRoots); err != nil {
if invalidRoots, err = s.removeNodeAndChildren(ctx, child, invalidRoots); err != nil {
return invalidRoots, err
}
}
// Only append the root for the empty nodes.
if pn.full {
delete(s.fullNodeByRoot, pn.node.root)
} else {
invalidRoots = append(invalidRoots, pn.node.root)
if pn.node.root == s.proposerBoostRoot {
s.proposerBoostRoot = [32]byte{}
}
if pn.node.root == s.previousProposerBoostRoot {
s.previousProposerBoostRoot = params.BeaconConfig().ZeroHash
s.previousProposerBoostScore = 0
}
delete(s.emptyNodeByRoot, pn.node.root)
invalidRoots = append(invalidRoots, node.root)
if node.root == s.proposerBoostRoot {
s.proposerBoostRoot = [32]byte{}
}
if node.root == s.previousProposerBoostRoot {
s.previousProposerBoostRoot = params.BeaconConfig().ZeroHash
s.previousProposerBoostScore = 0
}
delete(s.nodeByRoot, node.root)
delete(s.nodeByPayload, node.payloadHash)
return invalidRoots, nil
}

View File

@@ -23,35 +23,93 @@ import (
// And every block in the Fork choice is optimistic.
func TestPruneInvalid(t *testing.T) {
tests := []struct {
name string
root [32]byte // the root of the new INVALID block
parentRoot [32]byte // the root of the parent block
parentHash [32]byte // the execution hash of the parent block
lastValidHash [32]byte // the last valid execution hash
payload [32]byte // the last valid hash
wantedNodeNumber int
wantedRoots [][32]byte
wantedErr error
}{
{ // Bogus LVH, root not in forkchoice
name: "bogus LVH not in forkchoice",
root: [32]byte{'x'}, parentRoot: [32]byte{'i'}, parentHash: [32]byte{'I'}, lastValidHash: [32]byte{'R'},
wantedNodeNumber: 13, wantedRoots: [][32]byte{},
},
{ // Bogus LVH
name: "bogus LVH",
root: [32]byte{'i'}, parentRoot: [32]byte{'h'}, parentHash: [32]byte{'H'}, lastValidHash: [32]byte{'R'},
wantedNodeNumber: 13, wantedRoots: [][32]byte{},
[32]byte{'x'},
[32]byte{'i'},
[32]byte{'R'},
13,
[][32]byte{},
nil,
},
{
name: "wanted j",
root: [32]byte{'j'}, parentRoot: [32]byte{'b'}, parentHash: [32]byte{'B'}, lastValidHash: [32]byte{'B'},
wantedNodeNumber: 13, wantedRoots: [][32]byte{},
// Bogus LVH
[32]byte{'i'},
[32]byte{'h'},
[32]byte{'R'},
12,
[][32]byte{{'i'}},
nil,
},
{
name: "wanted 5",
root: [32]byte{'c'}, parentRoot: [32]byte{'b'}, parentHash: [32]byte{'B'}, lastValidHash: [32]byte{'B'},
wantedNodeNumber: 5,
wantedRoots: [][32]byte{
[32]byte{'j'},
[32]byte{'b'},
[32]byte{'B'},
12,
[][32]byte{{'j'}},
nil,
},
{
[32]byte{'c'},
[32]byte{'b'},
[32]byte{'B'},
4,
[][32]byte{{'f'}, {'e'}, {'i'}, {'h'}, {'l'},
{'k'}, {'g'}, {'d'}, {'c'}},
nil,
},
{
[32]byte{'i'},
[32]byte{'h'},
[32]byte{'H'},
12,
[][32]byte{{'i'}},
nil,
},
{
[32]byte{'h'},
[32]byte{'g'},
[32]byte{'G'},
11,
[][32]byte{{'i'}, {'h'}},
nil,
},
{
[32]byte{'g'},
[32]byte{'d'},
[32]byte{'D'},
8,
[][32]byte{{'i'}, {'h'}, {'l'}, {'k'}, {'g'}},
nil,
},
{
[32]byte{'i'},
[32]byte{'h'},
[32]byte{'D'},
8,
[][32]byte{{'i'}, {'h'}, {'l'}, {'k'}, {'g'}},
nil,
},
{
[32]byte{'f'},
[32]byte{'e'},
[32]byte{'D'},
11,
[][32]byte{{'f'}, {'e'}},
nil,
},
{
[32]byte{'h'},
[32]byte{'g'},
[32]byte{'C'},
5,
[][32]byte{
{'f'},
{'e'},
{'i'},
@@ -61,118 +119,106 @@ func TestPruneInvalid(t *testing.T) {
{'g'},
{'d'},
},
nil,
},
{
name: "wanted i",
root: [32]byte{'i'}, parentRoot: [32]byte{'h'}, parentHash: [32]byte{'H'}, lastValidHash: [32]byte{'H'},
wantedNodeNumber: 13, wantedRoots: [][32]byte{},
[32]byte{'g'},
[32]byte{'d'},
[32]byte{'E'},
8,
[][32]byte{{'i'}, {'h'}, {'l'}, {'k'}, {'g'}},
nil,
},
{
name: "wanted i and h",
root: [32]byte{'h'}, parentRoot: [32]byte{'g'}, parentHash: [32]byte{'G'}, lastValidHash: [32]byte{'G'},
wantedNodeNumber: 12, wantedRoots: [][32]byte{{'i'}},
[32]byte{'z'},
[32]byte{'j'},
[32]byte{'B'},
12,
[][32]byte{{'j'}},
nil,
},
{
name: "wanted i--g",
root: [32]byte{'g'}, parentRoot: [32]byte{'d'}, parentHash: [32]byte{'D'}, lastValidHash: [32]byte{'D'},
wantedNodeNumber: 9, wantedRoots: [][32]byte{{'i'}, {'h'}, {'l'}, {'k'}},
[32]byte{'z'},
[32]byte{'j'},
[32]byte{'J'},
13,
[][32]byte{},
nil,
},
{
name: "wanted 9",
root: [32]byte{'i'}, parentRoot: [32]byte{'h'}, parentHash: [32]byte{'H'}, lastValidHash: [32]byte{'D'},
wantedNodeNumber: 9, wantedRoots: [][32]byte{{'i'}, {'h'}, {'l'}, {'k'}},
[32]byte{'j'},
[32]byte{'a'},
[32]byte{'B'},
0,
[][32]byte{},
errInvalidParentRoot,
},
{
name: "wanted f and e",
root: [32]byte{'f'}, parentRoot: [32]byte{'e'}, parentHash: [32]byte{'E'}, lastValidHash: [32]byte{'D'},
wantedNodeNumber: 12, wantedRoots: [][32]byte{{'f'}},
[32]byte{'z'},
[32]byte{'h'},
[32]byte{'D'},
8,
[][32]byte{{'i'}, {'h'}, {'l'}, {'k'}, {'g'}},
nil,
},
{
name: "wanted 6",
root: [32]byte{'h'}, parentRoot: [32]byte{'g'}, parentHash: [32]byte{'G'}, lastValidHash: [32]byte{'C'},
wantedNodeNumber: 6,
wantedRoots: [][32]byte{
{'f'}, {'e'}, {'i'}, {'h'}, {'l'}, {'k'}, {'g'},
},
},
{
name: "wanted 9 again",
root: [32]byte{'g'}, parentRoot: [32]byte{'d'}, parentHash: [32]byte{'D'}, lastValidHash: [32]byte{'E'},
wantedNodeNumber: 9, wantedRoots: [][32]byte{{'i'}, {'h'}, {'l'}, {'k'}},
},
{
name: "wanted 13",
root: [32]byte{'z'}, parentRoot: [32]byte{'j'}, parentHash: [32]byte{'J'}, lastValidHash: [32]byte{'B'},
wantedNodeNumber: 13, wantedRoots: [][32]byte{},
},
{
name: "wanted empty",
root: [32]byte{'z'}, parentRoot: [32]byte{'j'}, parentHash: [32]byte{'J'}, lastValidHash: [32]byte{'J'},
wantedNodeNumber: 13, wantedRoots: [][32]byte{},
},
{
name: "errInvalidParentRoot",
root: [32]byte{'j'}, parentRoot: [32]byte{'a'}, parentHash: [32]byte{'A'}, lastValidHash: [32]byte{'B'},
wantedErr: errInvalidParentRoot,
},
{
name: "root z",
root: [32]byte{'z'}, parentRoot: [32]byte{'h'}, parentHash: [32]byte{'H'}, lastValidHash: [32]byte{'D'},
wantedNodeNumber: 9, wantedRoots: [][32]byte{{'i'}, {'h'}, {'l'}, {'k'}},
[32]byte{'z'},
[32]byte{'h'},
[32]byte{'D'},
8,
[][32]byte{{'i'}, {'h'}, {'l'}, {'k'}, {'g'}},
nil,
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
ctx := t.Context()
f := setup(1, 1)
require.NoError(t, f.SetOptimisticToValid(ctx, [32]byte{}))
state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, [32]byte{'J'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, [32]byte{'E'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, [32]byte{'G'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, [32]byte{'F'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, [32]byte{'H'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, [32]byte{'K'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, [32]byte{'I'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, [32]byte{'L'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
ctx := t.Context()
f := setup(1, 1)
roots, err := f.store.setOptimisticToInvalid(t.Context(), tc.root, tc.parentRoot, tc.parentHash, tc.lastValidHash)
if tc.wantedErr == nil {
require.NoError(t, err)
require.Equal(t, len(tc.wantedRoots), len(roots))
require.DeepEqual(t, tc.wantedRoots, roots)
require.Equal(t, tc.wantedNodeNumber, f.NodeCount())
} else {
require.ErrorIs(t, tc.wantedErr, err)
}
})
state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, [32]byte{'J'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, [32]byte{'E'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, [32]byte{'G'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, [32]byte{'F'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, [32]byte{'H'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, [32]byte{'K'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, [32]byte{'I'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, [32]byte{'L'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
roots, err := f.store.setOptimisticToInvalid(t.Context(), tc.root, tc.parentRoot, tc.payload)
if tc.wantedErr == nil {
require.NoError(t, err)
require.DeepEqual(t, tc.wantedRoots, roots)
require.Equal(t, tc.wantedNodeNumber, f.NodeCount())
} else {
require.ErrorIs(t, tc.wantedErr, err)
}
}
}
@@ -194,40 +240,11 @@ func TestSetOptimisticToInvalid_ProposerBoost(t *testing.T) {
f.store.previousProposerBoostScore = 10
f.store.previousProposerBoostRoot = [32]byte{'b'}
_, err = f.SetOptimisticToInvalid(ctx, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'B'}, [32]byte{'A'})
_, err = f.SetOptimisticToInvalid(ctx, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'A'})
require.NoError(t, err)
// proposer boost is still applied to c
require.Equal(t, uint64(10), f.store.previousProposerBoostScore)
require.Equal(t, [32]byte{}, f.store.proposerBoostRoot)
require.Equal(t, [32]byte{'b'}, f.store.previousProposerBoostRoot)
}
func TestSetOptimisticToInvalid_ProposerBoost_Older(t *testing.T) {
ctx := t.Context()
f := setup(1, 1)
state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
f.store.proposerBoostRoot = [32]byte{'d'}
f.store.previousProposerBoostScore = 10
f.store.previousProposerBoostRoot = [32]byte{'c'}
_, err = f.SetOptimisticToInvalid(ctx, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'C'}, [32]byte{'A'})
require.NoError(t, err)
// proposer boost is still applied to c
require.Equal(t, uint64(0), f.store.previousProposerBoostScore)
require.Equal(t, [32]byte{}, f.store.proposerBoostRoot)
require.Equal(t, [32]byte{}, f.store.previousProposerBoostRoot)
require.DeepEqual(t, [32]byte{}, f.store.proposerBoostRoot)
require.DeepEqual(t, params.BeaconConfig().ZeroHash, f.store.previousProposerBoostRoot)
}
// This is a regression test (10565)
@@ -255,9 +272,10 @@ func TestSetOptimisticToInvalid_CorrectChildren(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
_, err = f.store.setOptimisticToInvalid(ctx, [32]byte{'d'}, [32]byte{'a'}, [32]byte{'A'}, [32]byte{'A'})
_, err = f.store.setOptimisticToInvalid(ctx, [32]byte{'d'}, [32]byte{'a'}, [32]byte{'A'})
require.NoError(t, err)
require.Equal(t, 2, len(f.store.fullNodeByRoot[[32]byte{'a'}].children))
require.Equal(t, 2, len(f.store.nodeByRoot[[32]byte{'a'}].children))
}
// Pow | Pos
@@ -304,13 +322,13 @@ func TestSetOptimisticToInvalid_ForkAtMerge(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, root))
roots, err := f.SetOptimisticToInvalid(ctx, [32]byte{'x'}, [32]byte{'d'}, [32]byte{'D'}, [32]byte{})
roots, err := f.SetOptimisticToInvalid(ctx, [32]byte{'x'}, [32]byte{'d'}, [32]byte{})
require.NoError(t, err)
require.Equal(t, 3, len(roots))
require.Equal(t, 4, len(roots))
sort.Slice(roots, func(i, j int) bool {
return bytesutil.BytesToUint64BigEndian(roots[i][:]) < bytesutil.BytesToUint64BigEndian(roots[j][:])
})
require.DeepEqual(t, roots, [][32]byte{{'c'}, {'d'}, {'e'}})
require.DeepEqual(t, roots, [][32]byte{{'b'}, {'c'}, {'d'}, {'e'}})
}
// Pow | Pos
@@ -357,13 +375,13 @@ func TestSetOptimisticToInvalid_ForkAtMerge_bis(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, root))
roots, err := f.SetOptimisticToInvalid(ctx, [32]byte{'x'}, [32]byte{'d'}, [32]byte{'D'}, [32]byte{})
roots, err := f.SetOptimisticToInvalid(ctx, [32]byte{'x'}, [32]byte{'d'}, [32]byte{})
require.NoError(t, err)
require.Equal(t, 3, len(roots))
require.Equal(t, 4, len(roots))
sort.Slice(roots, func(i, j int) bool {
return bytesutil.BytesToUint64BigEndian(roots[i][:]) < bytesutil.BytesToUint64BigEndian(roots[j][:])
})
require.DeepEqual(t, roots, [][32]byte{{'c'}, {'d'}, {'e'}})
require.DeepEqual(t, roots, [][32]byte{{'b'}, {'c'}, {'d'}, {'e'}})
}
func TestSetOptimisticToValid(t *testing.T) {

View File

@@ -11,7 +11,7 @@ func (f *ForkChoice) applyProposerBoostScore() error {
s := f.store
proposerScore := uint64(0)
if s.previousProposerBoostRoot != params.BeaconConfig().ZeroHash {
previousNode, ok := s.emptyNodeByRoot[s.previousProposerBoostRoot]
previousNode, ok := s.nodeByRoot[s.previousProposerBoostRoot]
if !ok || previousNode == nil {
log.WithError(errInvalidProposerBoostRoot).Errorf("invalid prev root %#x", s.previousProposerBoostRoot)
} else {
@@ -20,7 +20,7 @@ func (f *ForkChoice) applyProposerBoostScore() error {
}
if s.proposerBoostRoot != params.BeaconConfig().ZeroHash {
currentNode, ok := s.emptyNodeByRoot[s.proposerBoostRoot]
currentNode, ok := s.nodeByRoot[s.proposerBoostRoot]
if !ok || currentNode == nil {
log.WithError(errInvalidProposerBoostRoot).Errorf("invalid current root %#x", s.proposerBoostRoot)
} else {

View File

@@ -166,14 +166,14 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
// (1: 48) -> (2: 38) -> (3: 10)
// \--------------->(4: 18)
//
node1 := f.store.emptyNodeByRoot[indexToHash(1)]
require.Equal(t, node1.node.weight, uint64(48))
node2 := f.store.emptyNodeByRoot[indexToHash(2)]
require.Equal(t, node2.node.weight, uint64(38))
node3 := f.store.emptyNodeByRoot[indexToHash(3)]
require.Equal(t, node3.node.weight, uint64(10))
node4 := f.store.emptyNodeByRoot[indexToHash(4)]
require.Equal(t, node4.node.weight, uint64(18))
node1 := f.store.nodeByRoot[indexToHash(1)]
require.Equal(t, node1.weight, uint64(48))
node2 := f.store.nodeByRoot[indexToHash(2)]
require.Equal(t, node2.weight, uint64(38))
node3 := f.store.nodeByRoot[indexToHash(3)]
require.Equal(t, node3.weight, uint64(10))
node4 := f.store.nodeByRoot[indexToHash(4)]
require.Equal(t, node4.weight, uint64(18))
// Regression: process attestations for C, check that it
// becomes head; we need two attestations to have C.weight = 30 > 24 = D.weight

View File

@@ -34,23 +34,22 @@ const orphanLateBlockProposingEarly = 2
func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
override = false
// We only need to override FCU if our current consensusHead is from the current
// We only need to override FCU if our current head is from the current
// slot. This differs from the spec implementation in that we assume
// that we will call this function in the previous slot to proposing.
consensusHead := f.store.headNode
if consensusHead == nil {
head := f.store.headNode
if head == nil {
return
}
if consensusHead.slot != slots.CurrentSlot(f.store.genesisTime) {
if head.slot != slots.CurrentSlot(f.store.genesisTime) {
return
}
// Do not reorg on epoch boundaries
if (consensusHead.slot+1)%params.BeaconConfig().SlotsPerEpoch == 0 {
if (head.slot+1)%params.BeaconConfig().SlotsPerEpoch == 0 {
return
}
head := f.store.choosePayloadContent(consensusHead)
// Only reorg blocks that arrive late
early, err := head.arrivedEarly(f.store.genesisTime)
if err != nil {
@@ -62,15 +61,15 @@ func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
}
// Only reorg if we have been finalizing
finalizedEpoch := f.store.finalizedCheckpoint.Epoch
if slots.ToEpoch(consensusHead.slot+1) > finalizedEpoch+params.BeaconConfig().ReorgMaxEpochsSinceFinalization {
if slots.ToEpoch(head.slot+1) > finalizedEpoch+params.BeaconConfig().ReorgMaxEpochsSinceFinalization {
return
}
// Only orphan a single block
parent := consensusHead.parent
parent := head.parent
if parent == nil {
return
}
if consensusHead.slot > parent.node.slot+1 {
if head.slot > parent.slot+1 {
return
}
// Do not orphan a block that has higher justification than the parent
@@ -79,12 +78,12 @@ func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
// }
// Only orphan a block if the head LMD vote is weak
if consensusHead.weight*100 > f.store.committeeWeight*params.BeaconConfig().ReorgHeadWeightThreshold {
if head.weight*100 > f.store.committeeWeight*params.BeaconConfig().ReorgHeadWeightThreshold {
return
}
// Return early if we are checking before 10 seconds into the slot
sss, err := slots.SinceSlotStart(consensusHead.slot, f.store.genesisTime, time.Now())
sss, err := slots.SinceSlotStart(head.slot, f.store.genesisTime, time.Now())
if err != nil {
log.WithError(err).Error("could not check current slot")
return true
@@ -93,7 +92,7 @@ func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
return true
}
// Only orphan a block if the parent LMD vote is strong
if parent.node.weight*100 < f.store.committeeWeight*params.BeaconConfig().ReorgParentWeightThreshold {
if parent.weight*100 < f.store.committeeWeight*params.BeaconConfig().ReorgParentWeightThreshold {
return
}
return true
@@ -107,61 +106,60 @@ func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
// This function needs to be called only when proposing a block and all
// attestation processing has already happened.
func (f *ForkChoice) GetProposerHead() [32]byte {
consensusHead := f.store.headNode
if consensusHead == nil {
head := f.store.headNode
if head == nil {
return [32]byte{}
}
// Only reorg blocks from the previous slot.
currentSlot := slots.CurrentSlot(f.store.genesisTime)
if consensusHead.slot+1 != currentSlot {
return consensusHead.root
if head.slot+1 != currentSlot {
return head.root
}
// Do not reorg on epoch boundaries
if (consensusHead.slot+1)%params.BeaconConfig().SlotsPerEpoch == 0 {
return consensusHead.root
if (head.slot+1)%params.BeaconConfig().SlotsPerEpoch == 0 {
return head.root
}
// Only reorg blocks that arrive late
head := f.store.choosePayloadContent(consensusHead)
early, err := head.arrivedEarly(f.store.genesisTime)
if err != nil {
log.WithError(err).Error("could not check if block arrived early")
return consensusHead.root
return head.root
}
if early {
return consensusHead.root
return head.root
}
// Only reorg if we have been finalizing
finalizedEpoch := f.store.finalizedCheckpoint.Epoch
if slots.ToEpoch(consensusHead.slot+1) > finalizedEpoch+params.BeaconConfig().ReorgMaxEpochsSinceFinalization {
return consensusHead.root
if slots.ToEpoch(head.slot+1) > finalizedEpoch+params.BeaconConfig().ReorgMaxEpochsSinceFinalization {
return head.root
}
// Only orphan a single block
parent := consensusHead.parent
parent := head.parent
if parent == nil {
return consensusHead.root
return head.root
}
if consensusHead.slot > parent.node.slot+1 {
return consensusHead.root
if head.slot > parent.slot+1 {
return head.root
}
// Only orphan a block if the head LMD vote is weak
if consensusHead.weight*100 > f.store.committeeWeight*params.BeaconConfig().ReorgHeadWeightThreshold {
return consensusHead.root
if head.weight*100 > f.store.committeeWeight*params.BeaconConfig().ReorgHeadWeightThreshold {
return head.root
}
// Only orphan a block if the parent LMD vote is strong
if parent.node.weight*100 < f.store.committeeWeight*params.BeaconConfig().ReorgParentWeightThreshold {
return consensusHead.root
if parent.weight*100 < f.store.committeeWeight*params.BeaconConfig().ReorgParentWeightThreshold {
return head.root
}
// Only reorg if we are proposing early
sss, err := slots.SinceSlotStart(currentSlot, f.store.genesisTime, time.Now())
if err != nil {
log.WithError(err).Error("could not check if proposing early")
return consensusHead.root
return head.root
}
if sss >= orphanLateBlockProposingEarly*time.Second {
return consensusHead.root
return head.root
}
return parent.node.root
return parent.root
}
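Both ShouldOverrideFCU and GetProposerHead gate the late-block re-org on the same pair of weight checks: the head's LMD weight must be at most ReorgHeadWeightThreshold percent of the committee weight, and the parent's must be at least ReorgParentWeightThreshold percent. A standalone sketch of just those two checks; the threshold percentages below are assumed values for illustration, the real ones come from params.BeaconConfig():
// Sketch (not part of the diff): the weight checks gating the late-block
// re-org. The threshold percentages here are illustrative assumptions.
package main
import "fmt"
const (
	reorgHeadWeightThreshold   = 20  // percent; assumed value for illustration
	reorgParentWeightThreshold = 160 // percent; assumed value for illustration
)
// weakHeadStrongParent reports whether the head's vote is weak enough and the
// parent's strong enough for the head to be a re-org candidate.
func weakHeadStrongParent(headWeight, parentWeight, committeeWeight uint64) bool {
	weakHead := headWeight*100 <= committeeWeight*reorgHeadWeightThreshold
	strongParent := parentWeight*100 >= committeeWeight*reorgParentWeightThreshold
	return weakHead && strongParent
}
func main() {
	// Committee weight 1000: a head with weight 150 (15%) is weak and a parent
	// with weight 1700 (170%) is strong, so the head may be orphaned.
	fmt.Println(weakHeadStrongParent(150, 1700, 1000)) // true
}
The remaining conditions in both functions (late arrival, single-slot distance to the parent, no epoch boundary, recent finalization, and timing within the slot) are applied before these weight checks.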

View File

@@ -38,6 +38,7 @@ func TestForkChoice_ShouldOverrideFCU(t *testing.T) {
require.Equal(t, blk.Root(), headRoot)
t.Run("head is weak", func(t *testing.T) {
require.Equal(t, true, f.ShouldOverrideFCU())
})
t.Run("head is nil", func(t *testing.T) {
saved := f.store.headNode
@@ -59,11 +60,10 @@ func TestForkChoice_ShouldOverrideFCU(t *testing.T) {
f.store.headNode.slot = saved
})
t.Run("head is early", func(t *testing.T) {
fn := f.store.fullNodeByRoot[f.store.headNode.root]
saved := fn.timestamp
fn.timestamp = saved.Add(-2 * time.Second)
saved := f.store.headNode.timestamp
f.store.headNode.timestamp = saved.Add(-2 * time.Second)
require.Equal(t, false, f.ShouldOverrideFCU())
fn.timestamp = saved
f.store.headNode.timestamp = saved
})
t.Run("chain not finalizing", func(t *testing.T) {
saved := f.store.headNode.slot
@@ -74,10 +74,10 @@ func TestForkChoice_ShouldOverrideFCU(t *testing.T) {
driftGenesisTime(f, 2, orphanLateBlockFirstThreshold+time.Second)
})
t.Run("Not single block reorg", func(t *testing.T) {
saved := f.store.headNode.parent.node.slot
f.store.headNode.parent.node.slot = 0
saved := f.store.headNode.parent.slot
f.store.headNode.parent.slot = 0
require.Equal(t, false, f.ShouldOverrideFCU())
f.store.headNode.parent.node.slot = saved
f.store.headNode.parent.slot = saved
})
t.Run("parent is nil", func(t *testing.T) {
saved := f.store.headNode.parent
@@ -86,17 +86,17 @@ func TestForkChoice_ShouldOverrideFCU(t *testing.T) {
f.store.headNode.parent = saved
})
t.Run("parent is weak early call", func(t *testing.T) {
saved := f.store.headNode.parent.node.weight
f.store.headNode.parent.node.weight = 0
saved := f.store.headNode.parent.weight
f.store.headNode.parent.weight = 0
require.Equal(t, true, f.ShouldOverrideFCU())
f.store.headNode.parent.node.weight = saved
f.store.headNode.parent.weight = saved
})
t.Run("parent is weak late call", func(t *testing.T) {
saved := f.store.headNode.parent.node.weight
saved := f.store.headNode.parent.weight
driftGenesisTime(f, 2, 11*time.Second)
f.store.headNode.parent.node.weight = 0
f.store.headNode.parent.weight = 0
require.Equal(t, false, f.ShouldOverrideFCU())
f.store.headNode.parent.node.weight = saved
f.store.headNode.parent.weight = saved
driftGenesisTime(f, 2, orphanLateBlockFirstThreshold+time.Second)
})
t.Run("Head is strong", func(t *testing.T) {
@@ -135,8 +135,7 @@ func TestForkChoice_GetProposerHead(t *testing.T) {
require.NoError(t, err)
require.Equal(t, blk.Root(), headRoot)
orphanLateBlockFirstThreshold := params.BeaconConfig().SlotComponentDuration(params.BeaconConfig().AttestationDueBPS)
fn := f.store.fullNodeByRoot[f.store.headNode.root]
fn.timestamp = fn.timestamp.Add(-1 * (params.BeaconConfig().SlotDuration() - orphanLateBlockFirstThreshold))
f.store.headNode.timestamp = f.store.headNode.timestamp.Add(-1 * (params.BeaconConfig().SlotDuration() - orphanLateBlockFirstThreshold))
t.Run("head is weak", func(t *testing.T) {
require.Equal(t, parentRoot, f.GetProposerHead())
})
@@ -160,12 +159,11 @@ func TestForkChoice_GetProposerHead(t *testing.T) {
f.store.headNode.slot = saved
})
t.Run("head is early", func(t *testing.T) {
fn := f.store.fullNodeByRoot[f.store.headNode.root]
saved := fn.timestamp
saved := f.store.headNode.timestamp
headTimeStamp := f.store.genesisTime.Add(time.Duration(uint64(f.store.headNode.slot)*params.BeaconConfig().SecondsPerSlot+1) * time.Second)
fn.timestamp = headTimeStamp
f.store.headNode.timestamp = headTimeStamp
require.Equal(t, childRoot, f.GetProposerHead())
fn.timestamp = saved
f.store.headNode.timestamp = saved
})
t.Run("chain not finalizing", func(t *testing.T) {
saved := f.store.headNode.slot
@@ -176,10 +174,10 @@ func TestForkChoice_GetProposerHead(t *testing.T) {
driftGenesisTime(f, 3, 1*time.Second)
})
t.Run("Not single block reorg", func(t *testing.T) {
saved := f.store.headNode.parent.node.slot
f.store.headNode.parent.node.slot = 0
saved := f.store.headNode.parent.slot
f.store.headNode.parent.slot = 0
require.Equal(t, childRoot, f.GetProposerHead())
f.store.headNode.parent.node.slot = saved
f.store.headNode.parent.slot = saved
})
t.Run("parent is nil", func(t *testing.T) {
saved := f.store.headNode.parent

View File

@@ -13,7 +13,6 @@ import (
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// head starts from justified root and then follows the best descendant links
@@ -27,16 +26,13 @@ func (s *Store) head(ctx context.Context) ([32]byte, error) {
}
// JustifiedRoot has to be known
var jn *Node
ej := s.emptyNodeByRoot[s.justifiedCheckpoint.Root]
if ej != nil {
jn = ej.node
} else {
justifiedNode, ok := s.nodeByRoot[s.justifiedCheckpoint.Root]
if !ok || justifiedNode == nil {
// If the justifiedCheckpoint is from genesis, then the root is
// zeroHash. In this case it should be the root of forkchoice
// tree.
if s.justifiedCheckpoint.Epoch == params.BeaconConfig().GenesisEpoch {
jn = s.treeRootNode
justifiedNode = s.treeRootNode
} else {
return [32]byte{}, errors.WithMessage(errUnknownJustifiedRoot, fmt.Sprintf("%#x", s.justifiedCheckpoint.Root))
}
@@ -44,9 +40,9 @@ func (s *Store) head(ctx context.Context) ([32]byte, error) {
// If the justified node doesn't have a best descendant,
// the best node is itself.
bestDescendant := jn.bestDescendant
bestDescendant := justifiedNode.bestDescendant
if bestDescendant == nil {
bestDescendant = jn
bestDescendant = justifiedNode
}
currentEpoch := slots.EpochsSinceGenesis(s.genesisTime)
if !bestDescendant.viableForHead(s.justifiedCheckpoint.Epoch, currentEpoch) {
@@ -70,42 +66,29 @@ func (s *Store) head(ctx context.Context) ([32]byte, error) {
// It then updates the new node's parent with the best child and descendant node.
func (s *Store) insert(ctx context.Context,
roblock consensus_blocks.ROBlock,
justifiedEpoch, finalizedEpoch primitives.Epoch,
) (*PayloadNode, error) {
justifiedEpoch, finalizedEpoch primitives.Epoch) (*Node, error) {
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.insert")
defer span.End()
root := roblock.Root()
block := roblock.Block()
slot := block.Slot()
parentRoot := block.ParentRoot()
var payloadHash [32]byte
if block.Version() >= version.Bellatrix {
execution, err := block.Body().Execution()
if err != nil {
return nil, err
}
copy(payloadHash[:], execution.BlockHash())
}
// Return if the block has been inserted into Store before.
if n, ok := s.emptyNodeByRoot[root]; ok {
if n, ok := s.nodeByRoot[root]; ok {
return n, nil
}
block := roblock.Block()
slot := block.Slot()
var parent *PayloadNode
blockHash := &[32]byte{}
if block.Version() >= version.Gloas {
if err := s.resolveParentPayloadStatus(block, &parent, blockHash); err != nil {
return nil, err
}
} else {
if block.Version() >= version.Bellatrix {
execution, err := block.Body().Execution()
if err != nil {
return nil, err
}
copy(blockHash[:], execution.BlockHash())
}
parentRoot := block.ParentRoot()
en := s.emptyNodeByRoot[parentRoot]
parent = s.fullNodeByRoot[parentRoot]
if parent == nil && en != nil {
// pre-Gloas only full parents are allowed.
return nil, errInvalidParentRoot
}
}
parent := s.nodeByRoot[parentRoot]
n := &Node{
slot: slot,
root: root,
@@ -114,52 +97,32 @@ func (s *Store) insert(ctx context.Context,
unrealizedJustifiedEpoch: justifiedEpoch,
finalizedEpoch: finalizedEpoch,
unrealizedFinalizedEpoch: finalizedEpoch,
blockHash: *blockHash,
optimistic: true,
payloadHash: payloadHash,
timestamp: time.Now(),
}
// Set the node's target checkpoint
if slot%params.BeaconConfig().SlotsPerEpoch == 0 {
n.target = n
} else if parent != nil {
if slots.ToEpoch(slot) == slots.ToEpoch(parent.node.slot) {
n.target = parent.node.target
if slots.ToEpoch(slot) == slots.ToEpoch(parent.slot) {
n.target = parent.target
} else {
n.target = parent.node
n.target = parent
}
}
var ret *PayloadNode
optimistic := true
if parent != nil {
optimistic = n.parent.optimistic
}
// Make the empty node. Its optimistic status equals its parent's status.
pn := &PayloadNode{
node: n,
optimistic: optimistic,
timestamp: time.Now(),
children: make([]*Node, 0),
}
s.emptyNodeByRoot[root] = pn
ret = pn
if block.Version() < version.Gloas {
// Also make the full node; it is optimistic until the engine returns the execution payload validation.
fn := &PayloadNode{
node: n,
optimistic: true,
timestamp: time.Now(),
full: true,
}
ret = fn
s.fullNodeByRoot[root] = fn
}
s.nodeByPayload[payloadHash] = n
s.nodeByRoot[root] = n
if parent == nil {
if s.treeRootNode == nil {
s.treeRootNode = n
s.headNode = n
s.highestReceivedNode = n
} else {
delete(s.emptyNodeByRoot, root)
delete(s.fullNodeByRoot, root)
delete(s.nodeByRoot, root)
delete(s.nodeByPayload, payloadHash)
return nil, errInvalidParentRoot
}
} else {
@@ -167,7 +130,7 @@ func (s *Store) insert(ctx context.Context,
// Apply proposer boost
now := time.Now()
if now.Before(s.genesisTime) {
return ret, nil
return n, nil
}
currentSlot := slots.CurrentSlot(s.genesisTime)
sss, err := slots.SinceSlotStart(currentSlot, s.genesisTime, now)
@@ -183,16 +146,17 @@ func (s *Store) insert(ctx context.Context,
// Update best descendants
jEpoch := s.justifiedCheckpoint.Epoch
fEpoch := s.finalizedCheckpoint.Epoch
if err := s.updateBestDescendantConsensusNode(ctx, s.treeRootNode, jEpoch, fEpoch, slots.ToEpoch(currentSlot)); err != nil {
log.WithError(err).WithFields(logrus.Fields{
"slot": slot,
"root": root,
}).Error("Could not update best descendant")
if err := s.treeRootNode.updateBestDescendant(ctx, jEpoch, fEpoch, slots.ToEpoch(currentSlot)); err != nil {
_, remErr := s.removeNode(ctx, n)
if remErr != nil {
log.WithError(remErr).Error("could not remove node")
}
return nil, errors.Wrap(err, "could not update best descendants")
}
}
// Update metrics.
processedBlockCount.Inc()
nodeCount.Set(float64(len(s.emptyNodeByRoot)))
nodeCount.Set(float64(len(s.nodeByRoot)))
// Only update the received block slot if it's within an epoch of the current time.
if slot+params.BeaconConfig().SlotsPerEpoch > slots.CurrentSlot(s.genesisTime) {
@@ -203,10 +167,10 @@ func (s *Store) insert(ctx context.Context,
s.highestReceivedNode = n
}
return ret, nil
return n, nil
}
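The target assignment inside insert follows a three-way rule: an epoch-boundary block is its own target, a block in the same epoch as its parent inherits the parent's target, and the first block of a new epoch uses the parent itself as its target. A minimal sketch of that rule; slotsPerEpoch is an assumed constant standing in for params.BeaconConfig().SlotsPerEpoch:
// Sketch (not part of the diff): the target-checkpoint rule applied in insert.
package main
import "fmt"
const slotsPerEpoch = 32 // assumption for illustration (mainnet value)
type node struct {
	slot   uint64
	target *node
	parent *node
}
func setTarget(n *node) {
	switch {
	case n.slot%slotsPerEpoch == 0:
		n.target = n // epoch-boundary block is its own target
	case n.parent != nil && n.slot/slotsPerEpoch == n.parent.slot/slotsPerEpoch:
		n.target = n.parent.target // same epoch: inherit the parent's target
	case n.parent != nil:
		n.target = n.parent // first block of a new epoch: the parent is the target
	}
}
func main() {
	a := &node{slot: 32}
	setTarget(a)
	b := &node{slot: 33, parent: a}
	setTarget(b)
	c := &node{slot: 65, parent: b} // skips the slot-64 boundary block
	setTarget(c)
	fmt.Println(a.target == a, b.target == a, c.target == b) // true true true
}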
// pruneFinalizedNodeByRootMap prunes the `nodeByRoot` maps
// pruneFinalizedNodeByRootMap prunes the `nodeByRoot` map
// starting from `node` down to the finalized Node or to a leaf of the Fork
// choice store.
func (s *Store) pruneFinalizedNodeByRootMap(ctx context.Context, node, finalizedNode *Node) error {
@@ -219,51 +183,45 @@ func (s *Store) pruneFinalizedNodeByRootMap(ctx context.Context, node, finalized
}
return nil
}
for _, child := range s.allConsensusChildren(node) {
for _, child := range node.children {
if err := s.pruneFinalizedNodeByRootMap(ctx, child, finalizedNode); err != nil {
return err
}
}
en := s.emptyNodeByRoot[node.root]
en.children = nil
delete(s.emptyNodeByRoot, node.root)
fn := s.fullNodeByRoot[node.root]
if fn != nil {
fn.children = nil
delete(s.fullNodeByRoot, node.root)
}
node.children = nil
delete(s.nodeByRoot, node.root)
delete(s.nodeByPayload, node.payloadHash)
return nil
}
// prune prunes the fork choice store. It removes all nodes that compete with the finalized root.
// This function does not prune for invalid optimistically synced nodes, it deals only with pruning upon finalization
// TODO: GLOAS, to ensure that chains up to a full node are found, we may want to consider pruning only up to the latest full block that was finalized
func (s *Store) prune(ctx context.Context) error {
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.Prune")
defer span.End()
finalizedRoot := s.finalizedCheckpoint.Root
finalizedEpoch := s.finalizedCheckpoint.Epoch
fen, ok := s.emptyNodeByRoot[finalizedRoot]
if !ok || fen == nil {
finalizedNode, ok := s.nodeByRoot[finalizedRoot]
if !ok || finalizedNode == nil {
return errors.WithMessage(errUnknownFinalizedRoot, fmt.Sprintf("%#x", finalizedRoot))
}
fn := fen.node
// return early if we haven't changed the finalized checkpoint
if fn.parent == nil {
if finalizedNode.parent == nil {
return nil
}
// Save the new finalized dependent root because it will be pruned
s.finalizedDependentRoot = fn.parent.node.root
s.finalizedDependentRoot = finalizedNode.parent.root
// Prune nodeByRoot starting from root
if err := s.pruneFinalizedNodeByRootMap(ctx, s.treeRootNode, fn); err != nil {
if err := s.pruneFinalizedNodeByRootMap(ctx, s.treeRootNode, finalizedNode); err != nil {
return err
}
fn.parent = nil
s.treeRootNode = fn
finalizedNode.parent = nil
s.treeRootNode = finalizedNode
prunedCount.Inc()
// Prune all children of the finalized checkpoint block that are incompatible with it
@@ -271,13 +229,13 @@ func (s *Store) prune(ctx context.Context) error {
if err != nil {
return errors.Wrap(err, "could not compute epoch start")
}
if fn.slot == checkpointMaxSlot {
if finalizedNode.slot == checkpointMaxSlot {
return nil
}
for _, child := range fen.children {
for _, child := range finalizedNode.children {
if child != nil && child.slot <= checkpointMaxSlot {
if err := s.pruneFinalizedNodeByRootMap(ctx, child, fn); err != nil {
if err := s.pruneFinalizedNodeByRootMap(ctx, child, finalizedNode); err != nil {
return errors.Wrap(err, "could not prune incompatible finalized child")
}
}
@@ -291,10 +249,10 @@ func (s *Store) tips() ([][32]byte, []primitives.Slot) {
var roots [][32]byte
var slots []primitives.Slot
for root, n := range s.emptyNodeByRoot {
if len(s.allConsensusChildren(n.node)) == 0 {
for root, node := range s.nodeByRoot {
if len(node.children) == 0 {
roots = append(roots, root)
slots = append(slots, n.node.slot)
slots = append(slots, node.slot)
}
}
return roots, slots
@@ -315,6 +273,21 @@ func (f *ForkChoice) HighestReceivedBlockSlot() primitives.Slot {
return f.store.highestReceivedNode.slot
}
// HighestReceivedBlockDelay returns the number of slots by which the highest
// received block was late when it was received. For example, if a block was late
// by 12 slots, then this method is expected to return 12.
func (f *ForkChoice) HighestReceivedBlockDelay() primitives.Slot {
n := f.store.highestReceivedNode
if n == nil {
return 0
}
sss, err := slots.SinceSlotStart(n.slot, f.store.genesisTime, n.timestamp)
if err != nil {
return 0
}
return primitives.Slot(uint64(sss/time.Second) / params.BeaconConfig().SecondsPerSlot)
}
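Worked example, matching the TestStore_HighestReceivedBlockDelay case added further down in this diff: with genesis at Unix(0) and a node for slot 10 stamped at (10 + 12) * SecondsPerSlot seconds, SinceSlotStart returns 12 * SecondsPerSlot seconds, and dividing by SecondsPerSlot gives a delay of 12 slots (with mainnet's 12-second slots, that is 144 s / 12 s).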
// ReceivedBlocksLastEpoch returns the number of blocks received in the last epoch
func (f *ForkChoice) ReceivedBlocksLastEpoch() (uint64, error) {
count := uint64(0)

View File

@@ -1,6 +1,7 @@
package doublylinkedtree
import (
"context"
"testing"
"time"
@@ -40,18 +41,18 @@ func TestStore_NodeByRoot(t *testing.T) {
state, blkRoot, err = prepareForkchoiceState(t.Context(), 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
node0 := f.store.emptyNodeByRoot[params.BeaconConfig().ZeroHash]
node1 := f.store.emptyNodeByRoot[indexToHash(1)]
node2 := f.store.emptyNodeByRoot[indexToHash(2)]
node0 := f.store.treeRootNode
node1 := node0.children[0]
node2 := node1.children[0]
expectedRoots := map[[32]byte]*PayloadNode{
expectedRoots := map[[32]byte]*Node{
params.BeaconConfig().ZeroHash: node0,
indexToHash(1): node1,
indexToHash(2): node2,
}
require.Equal(t, 3, f.NodeCount())
for root, node := range f.store.emptyNodeByRoot {
for root, node := range f.store.nodeByRoot {
v, ok := expectedRoots[root]
require.Equal(t, ok, true)
require.Equal(t, v, node)
@@ -110,28 +111,38 @@ func TestStore_Head_BestDescendant(t *testing.T) {
require.Equal(t, h, indexToHash(4))
}
func TestStore_UpdateBestDescendant_ContextCancelled(t *testing.T) {
ctx, cancel := context.WithCancel(t.Context())
f := setup(0, 0)
state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
cancel()
state, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
err = f.InsertNode(ctx, state, blkRoot)
require.ErrorContains(t, "context canceled", err)
}
func TestStore_Insert(t *testing.T) {
// The new node does not have a parent.
treeRootNode := &Node{slot: 0, root: indexToHash(0)}
emptyRootPN := &PayloadNode{node: treeRootNode}
fullRootPN := &PayloadNode{node: treeRootNode, full: true, optimistic: true}
emptyNodeByRoot := map[[32]byte]*PayloadNode{indexToHash(0): emptyRootPN}
fullNodeByRoot := map[[32]byte]*PayloadNode{indexToHash(0): fullRootPN}
nodeByRoot := map[[32]byte]*Node{indexToHash(0): treeRootNode}
nodeByPayload := map[[32]byte]*Node{indexToHash(0): treeRootNode}
jc := &forkchoicetypes.Checkpoint{Epoch: 0}
fc := &forkchoicetypes.Checkpoint{Epoch: 0}
s := &Store{emptyNodeByRoot: emptyNodeByRoot, fullNodeByRoot: fullNodeByRoot, treeRootNode: treeRootNode, justifiedCheckpoint: jc, finalizedCheckpoint: fc, highestReceivedNode: &Node{}}
s := &Store{nodeByRoot: nodeByRoot, treeRootNode: treeRootNode, nodeByPayload: nodeByPayload, justifiedCheckpoint: jc, finalizedCheckpoint: fc, highestReceivedNode: &Node{}}
payloadHash := [32]byte{'a'}
ctx := t.Context()
_, blk, err := prepareForkchoiceState(ctx, 100, indexToHash(100), indexToHash(0), payloadHash, 1, 1)
require.NoError(t, err)
_, err = s.insert(ctx, blk, 1, 1)
require.NoError(t, err)
assert.Equal(t, 2, len(s.emptyNodeByRoot), "Did not insert block")
assert.Equal(t, (*PayloadNode)(nil), treeRootNode.parent, "Incorrect parent")
children := s.allConsensusChildren(treeRootNode)
assert.Equal(t, 1, len(children), "Incorrect children number")
assert.Equal(t, payloadHash, children[0].blockHash, "Incorrect payload hash")
child := children[0]
assert.Equal(t, 2, len(s.nodeByRoot), "Did not insert block")
assert.Equal(t, (*Node)(nil), treeRootNode.parent, "Incorrect parent")
assert.Equal(t, 1, len(treeRootNode.children), "Incorrect children number")
assert.Equal(t, payloadHash, treeRootNode.children[0].payloadHash, "Incorrect payload hash")
child := treeRootNode.children[0]
assert.Equal(t, primitives.Epoch(1), child.justifiedEpoch, "Incorrect justification")
assert.Equal(t, primitives.Epoch(1), child.finalizedEpoch, "Incorrect finalization")
assert.Equal(t, indexToHash(100), child.root, "Incorrect root")
@@ -156,7 +167,7 @@ func TestStore_Prune_MoreThanThreshold(t *testing.T) {
// Finalized root is at index 99 so everything before 99 should be pruned.
s.finalizedCheckpoint.Root = indexToHash(99)
require.NoError(t, s.prune(t.Context()))
assert.Equal(t, 1, len(s.emptyNodeByRoot), "Incorrect nodes count")
assert.Equal(t, 1, len(s.nodeByRoot), "Incorrect nodes count")
}
func TestStore_Prune_MoreThanOnce(t *testing.T) {
@@ -178,12 +189,12 @@ func TestStore_Prune_MoreThanOnce(t *testing.T) {
// Finalized root is at index 11 so everything before 11 should be pruned.
s.finalizedCheckpoint.Root = indexToHash(10)
require.NoError(t, s.prune(t.Context()))
assert.Equal(t, 90, len(s.emptyNodeByRoot), "Incorrect nodes count")
assert.Equal(t, 90, len(s.nodeByRoot), "Incorrect nodes count")
// One more time.
s.finalizedCheckpoint.Root = indexToHash(20)
require.NoError(t, s.prune(t.Context()))
assert.Equal(t, 80, len(s.emptyNodeByRoot), "Incorrect nodes count")
assert.Equal(t, 80, len(s.nodeByRoot), "Incorrect nodes count")
}
func TestStore_Prune_ReturnEarly(t *testing.T) {
@@ -226,7 +237,8 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
s := f.store
s.finalizedCheckpoint.Root = indexToHash(1)
require.NoError(t, s.prune(t.Context()))
require.Equal(t, len(s.emptyNodeByRoot), 1)
require.Equal(t, len(s.nodeByRoot), 1)
require.Equal(t, len(s.nodeByPayload), 1)
}
// This test starts with the following branching diagram
@@ -306,7 +318,9 @@ func TestStore_PruneMapsNodes(t *testing.T) {
s := f.store
s.finalizedCheckpoint.Root = indexToHash(1)
require.NoError(t, s.prune(t.Context()))
require.Equal(t, len(s.emptyNodeByRoot), 1)
require.Equal(t, len(s.nodeByRoot), 1)
require.Equal(t, len(s.nodeByPayload), 1)
}
func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
@@ -325,6 +339,7 @@ func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
require.NoError(t, err)
require.Equal(t, uint64(1), count)
require.Equal(t, primitives.Slot(1), f.HighestReceivedBlockSlot())
require.Equal(t, primitives.Slot(0), f.HighestReceivedBlockDelay())
// 64
// Received block last epoch is 1
@@ -337,6 +352,7 @@ func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
require.NoError(t, err)
require.Equal(t, uint64(1), count)
require.Equal(t, primitives.Slot(64), f.HighestReceivedBlockSlot())
require.Equal(t, primitives.Slot(0), f.HighestReceivedBlockDelay())
// 64 65
// Received block last epoch is 2
@@ -349,6 +365,7 @@ func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
require.NoError(t, err)
require.Equal(t, uint64(2), count)
require.Equal(t, primitives.Slot(65), f.HighestReceivedBlockSlot())
require.Equal(t, primitives.Slot(1), f.HighestReceivedBlockDelay())
// 64 65 66
// Received block last epoch is 3
@@ -700,3 +717,17 @@ func TestStore_CleanupInserting(t *testing.T) {
require.NotNil(t, f.InsertNode(ctx, st, blk))
require.Equal(t, false, f.HasNode(blk.Root()))
}
func TestStore_HighestReceivedBlockDelay(t *testing.T) {
f := ForkChoice{
store: &Store{
genesisTime: time.Unix(0, 0),
highestReceivedNode: &Node{
slot: 10,
timestamp: time.Unix(int64(((10 + 12) * params.BeaconConfig().SecondsPerSlot)), 0), // 12 slots late
},
},
}
require.Equal(t, primitives.Slot(12), f.HighestReceivedBlockDelay())
}

View File

@@ -21,26 +21,24 @@ type ForkChoice struct {
balancesByRoot forkchoice.BalancesByRooter // handler to obtain balances for the state with a given root
}
var _ forkchoice.ForkChoicer = (*ForkChoice)(nil)
// Store defines the fork choice store which includes block nodes and the last view of checkpoint information.
type Store struct {
justifiedCheckpoint *forkchoicetypes.Checkpoint // latest justified epoch in store.
unrealizedJustifiedCheckpoint *forkchoicetypes.Checkpoint // best unrealized justified checkpoint in store.
unrealizedFinalizedCheckpoint *forkchoicetypes.Checkpoint // best unrealized finalized checkpoint in store.
prevJustifiedCheckpoint *forkchoicetypes.Checkpoint // previous justified checkpoint in store.
finalizedCheckpoint *forkchoicetypes.Checkpoint // latest finalized epoch in store.
proposerBoostRoot [fieldparams.RootLength]byte // latest block root that was boosted after being received in a timely manner.
previousProposerBoostRoot [fieldparams.RootLength]byte // previous block root that was boosted after being received in a timely manner.
previousProposerBoostScore uint64 // previous proposer boosted root score.
finalizedDependentRoot [fieldparams.RootLength]byte // dependent root at finalized checkpoint.
committeeWeight uint64 // tracks the total active validator balance divided by the number of slots per Epoch.
treeRootNode *Node // the root node of the store tree.
headNode *Node // last head Node
emptyNodeByRoot map[[fieldparams.RootLength]byte]*PayloadNode // nodes indexed by roots.
fullNodeByRoot map[[fieldparams.RootLength]byte]*PayloadNode // full nodes (the payload was present) indexed by beacon block root.
slashedIndices map[primitives.ValidatorIndex]bool // the list of equivocating validator indices
originRoot [fieldparams.RootLength]byte // The genesis block root
justifiedCheckpoint *forkchoicetypes.Checkpoint // latest justified epoch in store.
unrealizedJustifiedCheckpoint *forkchoicetypes.Checkpoint // best unrealized justified checkpoint in store.
unrealizedFinalizedCheckpoint *forkchoicetypes.Checkpoint // best unrealized finalized checkpoint in store.
prevJustifiedCheckpoint *forkchoicetypes.Checkpoint // previous justified checkpoint in store.
finalizedCheckpoint *forkchoicetypes.Checkpoint // latest finalized epoch in store.
proposerBoostRoot [fieldparams.RootLength]byte // latest block root that was boosted after being received in a timely manner.
previousProposerBoostRoot [fieldparams.RootLength]byte // previous block root that was boosted after being received in a timely manner.
previousProposerBoostScore uint64 // previous proposer boosted root score.
finalizedDependentRoot [fieldparams.RootLength]byte // dependent root at finalized checkpoint.
committeeWeight uint64 // tracks the total active validator balance divided by the number of slots per Epoch.
treeRootNode *Node // the root node of the store tree.
headNode *Node // last head Node
nodeByRoot map[[fieldparams.RootLength]byte]*Node // nodes indexed by roots.
nodeByPayload map[[fieldparams.RootLength]byte]*Node // nodes indexed by payload Hash
slashedIndices map[primitives.ValidatorIndex]bool // the list of equivocating validator indices
originRoot [fieldparams.RootLength]byte // The genesis block root
genesisTime time.Time
highestReceivedNode *Node // The highest slot node.
receivedBlocksLastEpoch [fieldparams.SlotsPerEpoch]primitives.Slot // Using `highestReceivedSlot`. The slot of blocks received in the last epoch.
@@ -52,28 +50,19 @@ type Store struct {
type Node struct {
slot primitives.Slot // slot of the block converted to the node.
root [fieldparams.RootLength]byte // root of the block converted to the node.
blockHash [fieldparams.RootLength]byte // payloadHash of the block converted to the node.
parent *PayloadNode // parent index of this node.
payloadHash [fieldparams.RootLength]byte // payloadHash of the block converted to the node.
parent *Node // parent index of this node.
target *Node // target checkpoint for this node.
bestDescendant *Node // bestDescendant node of this node.
children []*Node // the list of direct children of this Node
justifiedEpoch primitives.Epoch // justifiedEpoch of this node.
unrealizedJustifiedEpoch primitives.Epoch // the epoch that would be justified if the block would be advanced to the next epoch.
finalizedEpoch primitives.Epoch // finalizedEpoch of this node.
unrealizedFinalizedEpoch primitives.Epoch // the epoch that would be finalized if the block would be advanced to the next epoch.
balance uint64 // the balance that voted for this node directly
weight uint64 // weight of this node: the total balance including children
}
// PayloadNode defines a full Forkchoice node after the Gloas fork, with the payload status either empty or full
type PayloadNode struct {
optimistic bool // whether the block has been fully validated or not
full bool // whether this node represents a payload present or not
weight uint64 // weight of this node: the total balance including children
balance uint64 // the balance that voted for this node directly
bestDescendant *Node // bestDescendant node of this payload node.
node *Node // the consensus part of this full forkchoice node
timestamp time.Time // The timestamp when the node was inserted.
children []*Node // the list of direct children of this Node
bestDescendant *Node // bestDescendant node of this node.
optimistic bool // whether the block has been fully validated or not
timestamp time.Time // The timestamp when the node was inserted.
}
// Vote defines an individual validator's vote.

View File
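For orientation, a minimal standalone sketch of the lookup shape the hunks above converge on: a single nodeByRoot map with direct parent pointers and no PayloadNode indirection. The types here are local stand-ins, not the Prysm structs.

package main

import "fmt"

type node struct {
    parent                   *node
    unrealizedJustifiedEpoch uint64
}

type store struct {
    nodeByRoot map[[32]byte]*node
}

// unrealizedJustifiedEpoch looks a node up by root and reads it directly,
// mirroring the single-map access pattern used throughout the diff.
func (s *store) unrealizedJustifiedEpoch(root [32]byte) (uint64, error) {
    n, ok := s.nodeByRoot[root]
    if !ok || n == nil {
        return 0, fmt.Errorf("unknown root %x", root)
    }
    return n.unrealizedJustifiedEpoch, nil
}

func main() {
    parent := &node{unrealizedJustifiedEpoch: 1}
    child := &node{parent: parent, unrealizedJustifiedEpoch: 2}
    s := &store{nodeByRoot: map[[32]byte]*node{
        {'p'}: parent,
        {'c'}: child,
    }}
    epoch, err := s.unrealizedJustifiedEpoch([32]byte{'c'})
    fmt.Println(epoch, err) // 2 <nil>
}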

@@ -15,34 +15,33 @@ import (
)
func (s *Store) setUnrealizedJustifiedEpoch(root [32]byte, epoch primitives.Epoch) error {
en, ok := s.emptyNodeByRoot[root]
if !ok || en == nil {
node, ok := s.nodeByRoot[root]
if !ok || node == nil {
return errors.Wrap(ErrNilNode, "could not set unrealized justified epoch")
}
if epoch < en.node.unrealizedJustifiedEpoch {
if epoch < node.unrealizedJustifiedEpoch {
return errInvalidUnrealizedJustifiedEpoch
}
en.node.unrealizedJustifiedEpoch = epoch
node.unrealizedJustifiedEpoch = epoch
return nil
}
func (s *Store) setUnrealizedFinalizedEpoch(root [32]byte, epoch primitives.Epoch) error {
en, ok := s.emptyNodeByRoot[root]
if !ok || en == nil {
node, ok := s.nodeByRoot[root]
if !ok || node == nil {
return errors.Wrap(ErrNilNode, "could not set unrealized finalized epoch")
}
if epoch < en.node.unrealizedFinalizedEpoch {
if epoch < node.unrealizedFinalizedEpoch {
return errInvalidUnrealizedFinalizedEpoch
}
en.node.unrealizedFinalizedEpoch = epoch
node.unrealizedFinalizedEpoch = epoch
return nil
}
// updateUnrealizedCheckpoints "realizes" the unrealized justified and finalized
// epochs stored within nodes. It should be called at the beginning of each epoch.
func (f *ForkChoice) updateUnrealizedCheckpoints(ctx context.Context) error {
for _, en := range f.store.emptyNodeByRoot {
node := en.node
for _, node := range f.store.nodeByRoot {
node.justifiedEpoch = node.unrealizedJustifiedEpoch
node.finalizedEpoch = node.unrealizedFinalizedEpoch
if node.justifiedEpoch > f.store.justifiedCheckpoint.Epoch {
@@ -63,17 +62,16 @@ func (s *Store) pullTips(state state.BeaconState, node *Node, jc, fc *ethpb.Chec
if node.parent == nil { // Nothing to do if the parent is nil.
return jc, fc
}
pn := node.parent.node
currentEpoch := slots.ToEpoch(slots.CurrentSlot(s.genesisTime))
stateSlot := state.Slot()
stateEpoch := slots.ToEpoch(stateSlot)
currJustified := pn.unrealizedJustifiedEpoch == currentEpoch
prevJustified := pn.unrealizedJustifiedEpoch+1 == currentEpoch
currJustified := node.parent.unrealizedJustifiedEpoch == currentEpoch
prevJustified := node.parent.unrealizedJustifiedEpoch+1 == currentEpoch
tooEarlyForCurr := slots.SinceEpochStarts(stateSlot)*3 < params.BeaconConfig().SlotsPerEpoch*2
// Exit early if it's justified or too early to be justified.
if currJustified || (stateEpoch == currentEpoch && prevJustified && tooEarlyForCurr) {
node.unrealizedJustifiedEpoch = pn.unrealizedJustifiedEpoch
node.unrealizedFinalizedEpoch = pn.unrealizedFinalizedEpoch
node.unrealizedJustifiedEpoch = node.parent.unrealizedJustifiedEpoch
node.unrealizedFinalizedEpoch = node.parent.unrealizedFinalizedEpoch
return jc, fc
}

View File
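A minimal sketch of the epoch-boundary "realization" step performed by updateUnrealizedCheckpoints: each node's unrealized epochs become its effective epochs, and the store-wide checkpoints are raised when a node exceeds them. The types are hypothetical stand-ins, and the real function also updates the store's unrealized checkpoint fields, which this sketch omits.

package main

import "fmt"

type node struct {
    justifiedEpoch           uint64
    finalizedEpoch           uint64
    unrealizedJustifiedEpoch uint64
    unrealizedFinalizedEpoch uint64
}

type store struct {
    nodes          []*node
    justifiedEpoch uint64
    finalizedEpoch uint64
}

// realize copies every node's unrealized epochs into its effective epochs and
// raises the store-wide checkpoints when a node exceeds them.
func (s *store) realize() {
    for _, n := range s.nodes {
        n.justifiedEpoch = n.unrealizedJustifiedEpoch
        n.finalizedEpoch = n.unrealizedFinalizedEpoch
        if n.justifiedEpoch > s.justifiedEpoch {
            s.justifiedEpoch = n.justifiedEpoch
        }
        if n.finalizedEpoch > s.finalizedEpoch {
            s.finalizedEpoch = n.finalizedEpoch
        }
    }
}

func main() {
    s := &store{nodes: []*node{{unrealizedJustifiedEpoch: 2, unrealizedFinalizedEpoch: 1}}}
    s.realize()
    fmt.Println(s.justifiedEpoch, s.finalizedEpoch) // 2 1
}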

@@ -22,12 +22,12 @@ func TestStore_SetUnrealizedEpochs(t *testing.T) {
state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
require.Equal(t, primitives.Epoch(1), f.store.emptyNodeByRoot[[32]byte{'b'}].node.unrealizedJustifiedEpoch)
require.Equal(t, primitives.Epoch(1), f.store.emptyNodeByRoot[[32]byte{'b'}].node.unrealizedFinalizedEpoch)
require.Equal(t, primitives.Epoch(1), f.store.nodeByRoot[[32]byte{'b'}].unrealizedJustifiedEpoch)
require.Equal(t, primitives.Epoch(1), f.store.nodeByRoot[[32]byte{'b'}].unrealizedFinalizedEpoch)
require.NoError(t, f.store.setUnrealizedJustifiedEpoch([32]byte{'b'}, 2))
require.NoError(t, f.store.setUnrealizedFinalizedEpoch([32]byte{'b'}, 2))
require.Equal(t, primitives.Epoch(2), f.store.emptyNodeByRoot[[32]byte{'b'}].node.unrealizedJustifiedEpoch)
require.Equal(t, primitives.Epoch(2), f.store.emptyNodeByRoot[[32]byte{'b'}].node.unrealizedFinalizedEpoch)
require.Equal(t, primitives.Epoch(2), f.store.nodeByRoot[[32]byte{'b'}].unrealizedJustifiedEpoch)
require.Equal(t, primitives.Epoch(2), f.store.nodeByRoot[[32]byte{'b'}].unrealizedFinalizedEpoch)
require.ErrorIs(t, errInvalidUnrealizedJustifiedEpoch, f.store.setUnrealizedJustifiedEpoch([32]byte{'b'}, 0))
require.ErrorIs(t, errInvalidUnrealizedFinalizedEpoch, f.store.setUnrealizedFinalizedEpoch([32]byte{'b'}, 0))
@@ -78,9 +78,9 @@ func TestStore_LongFork(t *testing.T) {
// Add an attestation to c, it is head
f.ProcessAttestation(ctx, []uint64{0}, [32]byte{'c'}, 1)
f.justifiedBalances = []uint64{100}
c := f.store.emptyNodeByRoot[[32]byte{'c'}]
require.Equal(t, primitives.Epoch(2), slots.ToEpoch(c.node.slot))
driftGenesisTime(f, c.node.slot, 0)
c := f.store.nodeByRoot[[32]byte{'c'}]
require.Equal(t, primitives.Epoch(2), slots.ToEpoch(c.slot))
driftGenesisTime(f, c.slot, 0)
headRoot, err := f.Head(ctx)
require.NoError(t, err)
require.Equal(t, [32]byte{'c'}, headRoot)
@@ -91,15 +91,15 @@ func TestStore_LongFork(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
require.NoError(t, f.UpdateJustifiedCheckpoint(ctx, &forkchoicetypes.Checkpoint{Epoch: 2, Root: ha}))
d := f.store.emptyNodeByRoot[[32]byte{'d'}]
require.Equal(t, primitives.Epoch(3), slots.ToEpoch(d.node.slot))
driftGenesisTime(f, d.node.slot, 0)
require.Equal(t, true, d.node.viableForHead(f.store.justifiedCheckpoint.Epoch, slots.ToEpoch(d.node.slot)))
d := f.store.nodeByRoot[[32]byte{'d'}]
require.Equal(t, primitives.Epoch(3), slots.ToEpoch(d.slot))
driftGenesisTime(f, d.slot, 0)
require.Equal(t, true, d.viableForHead(f.store.justifiedCheckpoint.Epoch, slots.ToEpoch(d.slot)))
headRoot, err = f.Head(ctx)
require.NoError(t, err)
require.Equal(t, [32]byte{'c'}, headRoot)
require.Equal(t, uint64(0), f.store.emptyNodeByRoot[[32]byte{'d'}].weight)
require.Equal(t, uint64(100), f.store.emptyNodeByRoot[[32]byte{'c'}].weight)
require.Equal(t, uint64(0), f.store.nodeByRoot[[32]byte{'d'}].weight)
require.Equal(t, uint64(100), f.store.nodeByRoot[[32]byte{'c'}].weight)
}
// Epoch 1 Epoch 2 Epoch 3
@@ -243,8 +243,8 @@ func TestStore_ForkNextEpoch(t *testing.T) {
require.NoError(t, err)
require.Equal(t, [32]byte{'d'}, headRoot)
require.Equal(t, primitives.Epoch(2), f.JustifiedCheckpoint().Epoch)
require.Equal(t, uint64(0), f.store.emptyNodeByRoot[[32]byte{'d'}].weight)
require.Equal(t, uint64(100), f.store.emptyNodeByRoot[[32]byte{'h'}].weight)
require.Equal(t, uint64(0), f.store.nodeByRoot[[32]byte{'d'}].weight)
require.Equal(t, uint64(100), f.store.nodeByRoot[[32]byte{'h'}].weight)
// Set current epoch to 3, and H's unrealized checkpoint. Check it's head
driftGenesisTime(f, 99, 0)
require.NoError(t, f.store.setUnrealizedJustifiedEpoch([32]byte{'h'}, 2))
@@ -252,8 +252,8 @@ func TestStore_ForkNextEpoch(t *testing.T) {
require.NoError(t, err)
require.Equal(t, [32]byte{'h'}, headRoot)
require.Equal(t, primitives.Epoch(2), f.JustifiedCheckpoint().Epoch)
require.Equal(t, uint64(0), f.store.emptyNodeByRoot[[32]byte{'d'}].weight)
require.Equal(t, uint64(100), f.store.emptyNodeByRoot[[32]byte{'h'}].weight)
require.Equal(t, uint64(0), f.store.nodeByRoot[[32]byte{'d'}].weight)
require.Equal(t, uint64(100), f.store.nodeByRoot[[32]byte{'h'}].weight)
}
func TestStore_PullTips_Heuristics(t *testing.T) {
@@ -263,14 +263,14 @@ func TestStore_PullTips_Heuristics(t *testing.T) {
st, root, err := prepareForkchoiceState(ctx, 65, [32]byte{'p'}, [32]byte{}, [32]byte{}, 1, 1)
require.NoError(tt, err)
require.NoError(tt, f.InsertNode(ctx, st, root))
f.store.emptyNodeByRoot[[32]byte{'p'}].node.unrealizedJustifiedEpoch = primitives.Epoch(2)
f.store.nodeByRoot[[32]byte{'p'}].unrealizedJustifiedEpoch = primitives.Epoch(2)
driftGenesisTime(f, 66, 0)
st, root, err = prepareForkchoiceState(ctx, 66, [32]byte{'h'}, [32]byte{'p'}, [32]byte{}, 1, 1)
require.NoError(tt, err)
require.NoError(tt, f.InsertNode(ctx, st, root))
require.Equal(tt, primitives.Epoch(2), f.store.emptyNodeByRoot[[32]byte{'h'}].node.unrealizedJustifiedEpoch)
require.Equal(tt, primitives.Epoch(1), f.store.emptyNodeByRoot[[32]byte{'h'}].node.unrealizedFinalizedEpoch)
require.Equal(tt, primitives.Epoch(2), f.store.nodeByRoot[[32]byte{'h'}].unrealizedJustifiedEpoch)
require.Equal(tt, primitives.Epoch(1), f.store.nodeByRoot[[32]byte{'h'}].unrealizedFinalizedEpoch)
})
t.Run("Previous Epoch is justified and too early for current", func(tt *testing.T) {
@@ -278,21 +278,21 @@ func TestStore_PullTips_Heuristics(t *testing.T) {
st, root, err := prepareForkchoiceState(ctx, 95, [32]byte{'p'}, [32]byte{}, [32]byte{}, 1, 1)
require.NoError(tt, err)
require.NoError(tt, f.InsertNode(ctx, st, root))
f.store.emptyNodeByRoot[[32]byte{'p'}].node.unrealizedJustifiedEpoch = primitives.Epoch(2)
f.store.nodeByRoot[[32]byte{'p'}].unrealizedJustifiedEpoch = primitives.Epoch(2)
driftGenesisTime(f, 96, 0)
st, root, err = prepareForkchoiceState(ctx, 96, [32]byte{'h'}, [32]byte{'p'}, [32]byte{}, 1, 1)
require.NoError(tt, err)
require.NoError(tt, f.InsertNode(ctx, st, root))
require.Equal(tt, primitives.Epoch(2), f.store.emptyNodeByRoot[[32]byte{'h'}].node.unrealizedJustifiedEpoch)
require.Equal(tt, primitives.Epoch(1), f.store.emptyNodeByRoot[[32]byte{'h'}].node.unrealizedFinalizedEpoch)
require.Equal(tt, primitives.Epoch(2), f.store.nodeByRoot[[32]byte{'h'}].unrealizedJustifiedEpoch)
require.Equal(tt, primitives.Epoch(1), f.store.nodeByRoot[[32]byte{'h'}].unrealizedFinalizedEpoch)
})
t.Run("Previous Epoch is justified and not too early for current", func(tt *testing.T) {
f := setup(1, 1)
st, root, err := prepareForkchoiceState(ctx, 95, [32]byte{'p'}, [32]byte{}, [32]byte{}, 1, 1)
require.NoError(tt, err)
require.NoError(tt, f.InsertNode(ctx, st, root))
f.store.emptyNodeByRoot[[32]byte{'p'}].node.unrealizedJustifiedEpoch = primitives.Epoch(2)
f.store.nodeByRoot[[32]byte{'p'}].unrealizedJustifiedEpoch = primitives.Epoch(2)
driftGenesisTime(f, 127, 0)
st, root, err = prepareForkchoiceState(ctx, 127, [32]byte{'h'}, [32]byte{'p'}, [32]byte{}, 1, 1)
@@ -302,14 +302,14 @@ func TestStore_PullTips_Heuristics(t *testing.T) {
// This test checks that the heuristics in pullTips did not apply and
// the test continues to compute a bogus unrealized
// justification
require.Equal(tt, primitives.Epoch(1), f.store.emptyNodeByRoot[[32]byte{'h'}].node.unrealizedJustifiedEpoch)
require.Equal(tt, primitives.Epoch(1), f.store.nodeByRoot[[32]byte{'h'}].unrealizedJustifiedEpoch)
})
t.Run("Block from previous Epoch", func(tt *testing.T) {
f := setup(1, 1)
st, root, err := prepareForkchoiceState(ctx, 94, [32]byte{'p'}, [32]byte{}, [32]byte{}, 1, 1)
require.NoError(tt, err)
require.NoError(tt, f.InsertNode(ctx, st, root))
f.store.emptyNodeByRoot[[32]byte{'p'}].node.unrealizedJustifiedEpoch = primitives.Epoch(2)
f.store.nodeByRoot[[32]byte{'p'}].unrealizedJustifiedEpoch = primitives.Epoch(2)
driftGenesisTime(f, 96, 0)
st, root, err = prepareForkchoiceState(ctx, 95, [32]byte{'h'}, [32]byte{'p'}, [32]byte{}, 1, 1)
@@ -319,7 +319,7 @@ func TestStore_PullTips_Heuristics(t *testing.T) {
// This test checks that the heuristics in pullTips did not apply and
// the test continues to compute a bogus unrealized
// justification
require.Equal(tt, primitives.Epoch(1), f.store.emptyNodeByRoot[[32]byte{'h'}].node.unrealizedJustifiedEpoch)
require.Equal(tt, primitives.Epoch(1), f.store.nodeByRoot[[32]byte{'h'}].unrealizedJustifiedEpoch)
})
t.Run("Previous Epoch is not justified", func(tt *testing.T) {
f := setup(1, 1)
@@ -335,6 +335,6 @@ func TestStore_PullTips_Heuristics(t *testing.T) {
// This test checks that the heuristics in pullTips did not apply and
// the test continues to compute a bogus unrealized
// justification
require.Equal(tt, primitives.Epoch(2), f.store.emptyNodeByRoot[[32]byte{'h'}].node.unrealizedJustifiedEpoch)
require.Equal(tt, primitives.Epoch(2), f.store.nodeByRoot[[32]byte{'h'}].unrealizedJustifiedEpoch)
})
}

View File

@@ -284,7 +284,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// 9 10
f.store.finalizedCheckpoint.Root = indexToHash(5)
require.NoError(t, f.store.prune(t.Context()))
assert.Equal(t, 5, len(f.store.emptyNodeByRoot), "Incorrect nodes length after prune")
assert.Equal(t, 5, len(f.store.nodeByRoot), "Incorrect nodes length after prune")
// we pruned artificially the justified root.
f.store.justifiedCheckpoint.Root = indexToHash(5)

View File

@@ -9,7 +9,6 @@ import (
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
consensus_blocks "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
forkchoice2 "github.com/OffchainLabs/prysm/v7/consensus-types/forkchoice"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
)
@@ -24,7 +23,6 @@ type ForkChoicer interface {
Unlock()
HeadRetriever // to compute head.
BlockProcessor // to track new block for fork choice.
PayloadProcessor // to track new payloads for fork choice.
AttestationProcessor // to track new attestation for fork choice.
Getter // to retrieve fork choice information.
Setter // to set fork choice information.
@@ -49,11 +47,6 @@ type BlockProcessor interface {
InsertChain(context.Context, []*forkchoicetypes.BlockAndCheckpoints) error
}
// PayloadProcessor processes a payload envelope
type PayloadProcessor interface {
InsertPayload(context.Context, interfaces.ROExecutionPayloadEnvelope) error
}
// AttestationProcessor processes the attestation that's used for accounting fork choice.
type AttestationProcessor interface {
ProcessAttestation(context.Context, []uint64, [32]byte, primitives.Epoch)
@@ -74,11 +67,13 @@ type FastGetter interface {
HasNode([32]byte) bool
HighestReceivedBlockSlot() primitives.Slot
HighestReceivedBlockRoot() [32]byte
HighestReceivedBlockDelay() primitives.Slot
IsCanonical(root [32]byte) bool
IsOptimistic(root [32]byte) (bool, error)
IsViableForCheckpoint(*forkchoicetypes.Checkpoint) (bool, error)
JustifiedCheckpoint() *forkchoicetypes.Checkpoint
JustifiedPayloadBlockHash() [32]byte
LastRoot(primitives.Epoch) [32]byte
NodeCount() int
PreviousJustifiedCheckpoint() *forkchoicetypes.Checkpoint
ProposerBoost() [fieldparams.RootLength]byte
@@ -96,7 +91,7 @@ type FastGetter interface {
// Setter allows to set forkchoice information
type Setter interface {
SetOptimisticToValid(context.Context, [fieldparams.RootLength]byte) error
SetOptimisticToInvalid(context.Context, [32]byte, [32]byte, [32]byte, [32]byte) ([][32]byte, error)
SetOptimisticToInvalid(context.Context, [fieldparams.RootLength]byte, [fieldparams.RootLength]byte, [fieldparams.RootLength]byte) ([][32]byte, error)
UpdateJustifiedCheckpoint(context.Context, *forkchoicetypes.Checkpoint) error
UpdateFinalizedCheckpoint(*forkchoicetypes.Checkpoint) error
SetGenesisTime(time.Time)

View File

@@ -121,6 +121,13 @@ func (ro *ROForkChoice) HighestReceivedBlockRoot() [32]byte {
return ro.getter.HighestReceivedBlockRoot()
}
// HighestReceivedBlockDelay delegates to the underlying forkchoice call, under a lock.
func (ro *ROForkChoice) HighestReceivedBlockDelay() primitives.Slot {
ro.l.RLock()
defer ro.l.RUnlock()
return ro.getter.HighestReceivedBlockDelay()
}
// ReceivedBlocksLastEpoch delegates to the underlying forkchoice call, under a lock.
func (ro *ROForkChoice) ReceivedBlocksLastEpoch() (uint64, error) {
ro.l.RLock()
@@ -156,6 +163,13 @@ func (ro *ROForkChoice) Slot(root [32]byte) (primitives.Slot, error) {
return ro.getter.Slot(root)
}
// LastRoot delegates to the underlying forkchoice call, under a lock.
func (ro *ROForkChoice) LastRoot(e primitives.Epoch) [32]byte {
ro.l.RLock()
defer ro.l.RUnlock()
return ro.getter.LastRoot(e)
}
// DependentRoot delegates to the underlying forkchoice call, under a lock.
func (ro *ROForkChoice) DependentRoot(epoch primitives.Epoch) ([32]byte, error) {
ro.l.RLock()

View File
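The two new methods follow the wrapper's existing delegate-under-read-lock shape. A self-contained sketch of that pattern, using a hypothetical getter interface rather than the real FastGetter:

package main

import (
    "fmt"
    "sync"
)

type fastGetter interface {
    HighestReceivedBlockDelay() uint64
}

type roWrapper struct {
    l      sync.RWMutex
    getter fastGetter
}

// HighestReceivedBlockDelay delegates to the underlying getter under a read lock,
// the same shape every accessor in ro.go uses.
func (ro *roWrapper) HighestReceivedBlockDelay() uint64 {
    ro.l.RLock()
    defer ro.l.RUnlock()
    return ro.getter.HighestReceivedBlockDelay()
}

type constGetter uint64

func (c constGetter) HighestReceivedBlockDelay() uint64 { return uint64(c) }

func main() {
    ro := &roWrapper{getter: constGetter(3)}
    fmt.Println(ro.HighestReceivedBlockDelay()) // 3
}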

@@ -30,6 +30,7 @@ const (
nodeCountCalled
highestReceivedBlockSlotCalled
highestReceivedBlockRootCalled
highestReceivedBlockDelayCalled
receivedBlocksLastEpochCalled
weightCalled
isOptimisticCalled
@@ -117,6 +118,11 @@ func TestROLocking(t *testing.T) {
call: highestReceivedBlockSlotCalled,
cb: func(g FastGetter) { g.HighestReceivedBlockSlot() },
},
{
name: "highestReceivedBlockDelayCalled",
call: highestReceivedBlockDelayCalled,
cb: func(g FastGetter) { g.HighestReceivedBlockDelay() },
},
{
name: "receivedBlocksLastEpochCalled",
call: receivedBlocksLastEpochCalled,
@@ -142,6 +148,11 @@ func TestROLocking(t *testing.T) {
call: slotCalled,
cb: func(g FastGetter) { _, err := g.Slot([32]byte{}); _discard(t, err) },
},
{
name: "lastRootCalled",
call: lastRootCalled,
cb: func(g FastGetter) { g.LastRoot(0) },
},
{
name: "targetRootForEpochCalled",
call: targetRootForEpochCalled,
@@ -254,6 +265,11 @@ func (ro *mockROForkchoice) HighestReceivedBlockRoot() [32]byte {
return [32]byte{}
}
func (ro *mockROForkchoice) HighestReceivedBlockDelay() primitives.Slot {
ro.calls = append(ro.calls, highestReceivedBlockDelayCalled)
return 0
}
func (ro *mockROForkchoice) ReceivedBlocksLastEpoch() (uint64, error) {
ro.calls = append(ro.calls, receivedBlocksLastEpochCalled)
return 0, nil
@@ -279,6 +295,11 @@ func (ro *mockROForkchoice) Slot(_ [32]byte) (primitives.Slot, error) {
return 0, nil
}
func (ro *mockROForkchoice) LastRoot(_ primitives.Epoch) [32]byte {
ro.calls = append(ro.calls, lastRootCalled)
return [32]byte{}
}
// DependentRoot implements FastGetter.
func (ro *mockROForkchoice) DependentRoot(_ primitives.Epoch) ([32]byte, error) {
ro.calls = append(ro.calls, dependentRootCalled)

View File

@@ -678,6 +678,7 @@ func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
DB: b.db,
StateGen: b.stateGen,
ClockWaiter: b.ClockWaiter,
PartialDataColumns: b.cliCtx.Bool(flags.PartialDataColumns.Name),
})
if err != nil {
return err

View File
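A hedged sketch of threading a boolean CLI flag into a config value, in the shape of the registerP2P hunk above. It uses urfave/cli v2 directly rather than Prysm's flag helpers, and the flag name "partial-data-columns" is assumed from this changeset rather than confirmed.

package main

import (
    "fmt"
    "log"
    "os"

    "github.com/urfave/cli/v2"
)

type p2pConfig struct {
    PartialDataColumns bool
}

func main() {
    app := &cli.App{
        Flags: []cli.Flag{
            // Assumed flag name for illustration only.
            &cli.BoolFlag{Name: "partial-data-columns", Usage: "enable partial data column gossip"},
        },
        Action: func(c *cli.Context) error {
            // Read the flag into the p2p config, as registerP2P does via cliCtx.Bool.
            cfg := &p2pConfig{PartialDataColumns: c.Bool("partial-data-columns")}
            fmt.Printf("partial columns enabled: %v\n", cfg.PartialDataColumns)
            return nil
        },
    }
    if err := app.Run(os.Args); err != nil {
        log.Fatal(err)
    }
}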

@@ -52,6 +52,7 @@ go_library(
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/kv:go_default_library",
"//beacon-chain/p2p/encoder:go_default_library",
"//beacon-chain/p2p/partialdatacolumnbroadcaster:go_default_library",
"//beacon-chain/p2p/peers:go_default_library",
"//beacon-chain/p2p/peers/peerdata:go_default_library",
"//beacon-chain/p2p/peers/scorers:go_default_library",

View File

@@ -343,7 +343,7 @@ func (s *Service) BroadcastLightClientFinalityUpdate(ctx context.Context, update
// there is at least one peer in each needed subnet. If not, it will attempt to find one before broadcasting.
// This function is non-blocking. It stops trying to broadcast a given sidecar when more than one slot has passed, or the context is
// cancelled (whichever comes first).
func (s *Service) BroadcastDataColumnSidecars(ctx context.Context, sidecars []blocks.VerifiedRODataColumn) error {
func (s *Service) BroadcastDataColumnSidecars(ctx context.Context, sidecars []blocks.VerifiedRODataColumn, partialColumns []blocks.PartialDataColumn) error {
// Increase the number of broadcast attempts.
dataColumnSidecarBroadcastAttempts.Add(float64(len(sidecars)))
@@ -353,16 +353,15 @@ func (s *Service) BroadcastDataColumnSidecars(ctx context.Context, sidecars []bl
return errors.Wrap(err, "current fork digest")
}
go s.broadcastDataColumnSidecars(ctx, forkDigest, sidecars)
go s.broadcastDataColumnSidecars(ctx, forkDigest, sidecars, partialColumns)
return nil
}
// broadcastDataColumnSidecars broadcasts multiple data column sidecars to the p2p network.
// For sidecars with available peers, it uses batch publishing.
// For sidecars without peers, it finds peers first and then publishes individually.
// Both paths run in parallel. It returns when all broadcasts are complete, or the context is cancelled.
func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [fieldparams.VersionLength]byte, sidecars []blocks.VerifiedRODataColumn) {
// broadcastDataColumnSidecars broadcasts multiple data column sidecars to the p2p network, after ensuring
// there is at least one peer in each needed subnet. If not, it will attempt to find one before broadcasting.
// It returns when all broadcasts are complete, or the context is cancelled (whichever comes first).
func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [fieldparams.VersionLength]byte, sidecars []blocks.VerifiedRODataColumn, partialColumns []blocks.PartialDataColumn) {
type rootAndIndex struct {
root [fieldparams.RootLength]byte
index uint64
@@ -372,8 +371,8 @@ func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [f
logLevel := logrus.GetLevel()
slotPerRoot := make(map[[fieldparams.RootLength]byte]primitives.Slot, 1)
topicFunc := func(sidecar blocks.VerifiedRODataColumn) (topic string, wrappedSubIdx uint64, subnet uint64) {
subnet = peerdas.ComputeSubnetForDataColumnSidecar(sidecar.Index)
topicFunc := func(dcIndex uint64) (topic string, wrappedSubIdx uint64, subnet uint64) {
subnet = peerdas.ComputeSubnetForDataColumnSidecar(dcIndex)
topic = dataColumnSubnetToTopic(subnet, forkDigest)
wrappedSubIdx = subnet + dataColumnSubnetVal
return
@@ -386,7 +385,7 @@ func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [f
for _, sidecar := range sidecars {
slotPerRoot[sidecar.BlockRoot()] = sidecar.Slot()
topic, wrappedSubIdx, _ := topicFunc(sidecar)
topic, wrappedSubIdx, _ := topicFunc(sidecar.Index)
// Check if we have a peer for this subnet (use RLock for read-only check).
mu := s.subnetLocker(wrappedSubIdx)
mu.RLock()
@@ -411,7 +410,7 @@ func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [f
ctx := trace.NewContext(s.ctx, span)
defer span.End()
topic, _, _ := topicFunc(sidecar)
topic, _, _ := topicFunc(sidecar.Index)
if err := s.batchObject(ctx, &messageBatch, sidecar, topic); err != nil {
tracing.AnnotateError(span, err)
@@ -419,6 +418,10 @@ func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [f
return
}
// Increase the number of successful broadcasts.
dataColumnSidecarBroadcasts.Inc()
// Record the timing for log purposes.
if logLevel >= logrus.DebugLevel {
root := sidecar.BlockRoot()
timings.Store(rootAndIndex{root: root, index: sidecar.Index}, time.Now())
@@ -433,7 +436,7 @@ func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [f
ctx := trace.NewContext(s.ctx, span)
defer span.End()
topic, wrappedSubIdx, subnet := topicFunc(sidecar)
topic, wrappedSubIdx, subnet := topicFunc(sidecar.Index)
// Find peers for this sidecar's subnet.
if err := s.findPeersIfNeeded(ctx, wrappedSubIdx, DataColumnSubnetTopicFormat, forkDigest, subnet); err != nil {
@@ -458,6 +461,32 @@ func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [f
})
}
if s.partialColumnBroadcaster != nil {
// Note: There is no batch publishing for partial columns.
for _, partialColumn := range partialColumns {
individualWg.Go(func() {
_, span := trace.StartSpan(ctx, "p2p.broadcastPartialDataColumn")
ctx := trace.NewContext(s.ctx, span)
defer span.End()
topic, wrappedSubIdx, subnet := topicFunc(partialColumn.Index)
// Find peers for this partial column's subnet.
if err := s.findPeersIfNeeded(ctx, wrappedSubIdx, DataColumnSubnetTopicFormat, forkDigest, subnet); err != nil {
tracing.AnnotateError(span, err)
log.WithError(err).Error("Cannot find peers if needed")
return
}
fullTopicStr := topic + s.Encoding().ProtocolSuffix()
if err := s.partialColumnBroadcaster.Publish(fullTopicStr, partialColumn); err != nil {
tracing.AnnotateError(span, err)
log.WithError(err).Error("Cannot partial broadcast data column sidecar")
}
})
}
}
// Wait for batch to be populated, then publish.
batchWg.Wait()
if len(sidecarsWithPeers) > 0 {

View File
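From the caller's side, the new signature simply adds a partial-column slice that may be nil when the feature is off, as the updated tests below do. A minimal sketch with a local stand-in interface, not the Prysm Broadcaster type:

package main

import (
    "context"
    "fmt"
)

type verifiedColumn struct{ Index uint64 }
type partialColumn struct{ Index uint64 }

type broadcaster interface {
    BroadcastDataColumnSidecars(ctx context.Context, sidecars []verifiedColumn, partials []partialColumn) error
}

type logBroadcaster struct{}

func (logBroadcaster) BroadcastDataColumnSidecars(_ context.Context, sidecars []verifiedColumn, partials []partialColumn) error {
    fmt.Printf("broadcasting %d full sidecars and %d partial columns\n", len(sidecars), len(partials))
    return nil
}

// broadcast passes partial columns only when the feature is enabled; callers
// without the feature pass nil, exactly as the updated tests do.
func broadcast(ctx context.Context, b broadcaster, full []verifiedColumn, partials []partialColumn, partialEnabled bool) error {
    if !partialEnabled {
        partials = nil
    }
    return b.BroadcastDataColumnSidecars(ctx, full, partials)
}

func main() {
    _ = broadcast(context.Background(), logBroadcaster{}, []verifiedColumn{{Index: 12}}, []partialColumn{{Index: 12}}, true)
}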

@@ -803,7 +803,7 @@ func TestService_BroadcastDataColumn(t *testing.T) {
}, 5*time.Second, 10*time.Millisecond, "libp2p mesh did not establish")
// Broadcast to peers and wait.
err = service.BroadcastDataColumnSidecars(ctx, []blocks.VerifiedRODataColumn{verifiedRoSidecar})
err = service.BroadcastDataColumnSidecars(ctx, []blocks.VerifiedRODataColumn{verifiedRoSidecar}, nil)
require.NoError(t, err)
// Receive the message.
@@ -969,7 +969,7 @@ func TestService_BroadcastDataColumnRoundRobin(t *testing.T) {
time.Sleep(100 * time.Millisecond)
// Broadcast all sidecars.
err = service.BroadcastDataColumnSidecars(ctx, verifiedRoSidecars)
err = service.BroadcastDataColumnSidecars(ctx, verifiedRoSidecars, nil)
require.NoError(t, err)
// Give some time for messages to be sent.
time.Sleep(100 * time.Millisecond)

View File

@@ -26,6 +26,7 @@ const (
// Config for the p2p service. These parameters are set from application level flags
// to initialize the p2p service.
type Config struct {
PartialDataColumns bool
NoDiscovery bool
EnableUPnP bool
StaticPeerID bool

View File

@@ -25,7 +25,6 @@ var gossipTopicMappings = map[string]func() proto.Message{
LightClientOptimisticUpdateTopicFormat: func() proto.Message { return &ethpb.LightClientOptimisticUpdateAltair{} },
LightClientFinalityUpdateTopicFormat: func() proto.Message { return &ethpb.LightClientFinalityUpdateAltair{} },
DataColumnSubnetTopicFormat: func() proto.Message { return &ethpb.DataColumnSidecar{} },
PayloadAttestationMessageTopicFormat: func() proto.Message { return &ethpb.PayloadAttestationMessage{} },
}
// GossipTopicMappings is a function to return the assigned data type
@@ -145,7 +144,4 @@ func init() {
// Specially handle Fulu objects.
GossipTypeMapping[reflect.TypeFor[*ethpb.SignedBeaconBlockFulu]()] = BlockSubnetTopicFormat
// Payload attestation messages.
GossipTypeMapping[reflect.TypeFor[*ethpb.PayloadAttestationMessage]()] = PayloadAttestationMessageTopicFormat
}

View File

@@ -4,6 +4,7 @@ import (
"context"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/encoder"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/partialdatacolumnbroadcaster"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
@@ -28,6 +29,7 @@ type (
Broadcaster
SetStreamHandler
PubSubProvider
PartialColumnBroadcasterProvider
PubSubTopicUser
SenderEncoder
PeerManager
@@ -52,7 +54,7 @@ type (
BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.BlobSidecar) error
BroadcastLightClientOptimisticUpdate(ctx context.Context, update interfaces.LightClientOptimisticUpdate) error
BroadcastLightClientFinalityUpdate(ctx context.Context, update interfaces.LightClientFinalityUpdate) error
BroadcastDataColumnSidecars(ctx context.Context, sidecars []blocks.VerifiedRODataColumn) error
BroadcastDataColumnSidecars(ctx context.Context, sidecars []blocks.VerifiedRODataColumn, partialColumns []blocks.PartialDataColumn) error
}
// SetStreamHandler configures p2p to handle streams of a certain topic ID.
@@ -92,6 +94,11 @@ type (
PubSub() *pubsub.PubSub
}
// PubSubProvider provides the p2p pubsub protocol.
PartialColumnBroadcasterProvider interface {
PartialColumnBroadcaster() *partialdatacolumnbroadcaster.PartialColumnBroadcaster
}
// PeerManager abstracts some peer management methods from libp2p.
PeerManager interface {
Disconnect(peer.ID) error

View File

@@ -157,6 +157,11 @@ var (
Help: "The number of publish messages received via rpc for a particular topic",
},
[]string{"topic"})
pubsubRPCPubRecvSize = promauto.NewCounterVec(prometheus.CounterOpts{
Name: "p2p_pubsub_rpc_recv_pub_size_total",
Help: "The total size of publish messages received via rpc for a particular topic",
},
[]string{"topic", "is_partial"})
pubsubRPCDrop = promauto.NewCounterVec(prometheus.CounterOpts{
Name: "p2p_pubsub_rpc_drop_total",
Help: "The number of messages dropped via rpc for a particular control message",
@@ -171,6 +176,11 @@ var (
Help: "The number of publish messages dropped via rpc for a particular topic",
},
[]string{"topic"})
pubsubRPCPubDropSize = promauto.NewCounterVec(prometheus.CounterOpts{
Name: "p2p_pubsub_rpc_drop_pub_size_total",
Help: "The total size of publish messages dropped via rpc for a particular topic",
},
[]string{"topic", "is_partial"})
pubsubRPCSent = promauto.NewCounterVec(prometheus.CounterOpts{
Name: "p2p_pubsub_rpc_sent_total",
Help: "The number of messages sent via rpc for a particular control message",
@@ -185,6 +195,16 @@ var (
Help: "The number of publish messages sent via rpc for a particular topic",
},
[]string{"topic"})
pubsubRPCPubSentSize = promauto.NewCounterVec(prometheus.CounterOpts{
Name: "gossipsub_pubsub_rpc_sent_pub_size_total",
Help: "The total size of publish messages sent via rpc for a particular topic",
},
[]string{"topic", "is_partial"})
pubsubMeshPeers = promauto.NewGaugeVec(prometheus.GaugeOpts{
Name: "gossipsub_mesh_peers",
Help: "The number of capable peers in mesh",
},
[]string{"topic", "supports_partial"})
)
func (s *Service) updateMetrics() {

View File
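A hedged sketch (stand-in metric name, not the Prysm variables) of how a per-topic size counter with an is_partial label is observed, the shape the new counters above take:

package main

import (
    "strconv"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promauto"
)

var pubSizeTotal = promauto.NewCounterVec(prometheus.CounterOpts{
    Name: "example_pubsub_rpc_recv_pub_size_total",
    Help: "The total size of publish messages received via rpc for a particular topic",
}, []string{"topic", "is_partial"})

// recordPublishSize adds the message size to the counter for its topic,
// split by whether the message arrived as a partial message.
func recordPublishSize(topic string, size int, isPartial bool) {
    pubSizeTotal.WithLabelValues(topic, strconv.FormatBool(isPartial)).Add(float64(size))
}

func main() {
    recordPublishSize("data_column_sidecar_12", 2048, true)
}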

@@ -0,0 +1,28 @@
load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"log.go",
"metrics.go",
"partial.go",
],
importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/partialdatacolumnbroadcaster",
visibility = ["//visibility:public"],
deps = [
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//internal/logrusadapter:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
"@com_github_libp2p_go_libp2p_pubsub//:go_default_library",
"@com_github_libp2p_go_libp2p_pubsub//partialmessages:go_default_library",
"@com_github_libp2p_go_libp2p_pubsub//partialmessages/bitmap:go_default_library",
"@com_github_libp2p_go_libp2p_pubsub//pb:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)

View File

@@ -0,0 +1,25 @@
load("@prysm//tools/go:def.bzl", "go_test")
go_test(
name = "go_default_test",
size = "medium",
srcs = ["two_node_test.go"],
deps = [
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/p2p/encoder:go_default_library",
"//beacon-chain/p2p/partialdatacolumnbroadcaster:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
"@com_github_libp2p_go_libp2p//x/simlibp2p:go_default_library",
"@com_github_libp2p_go_libp2p_pubsub//:go_default_library",
"@com_github_marcopolo_simnet//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)

View File

@@ -0,0 +1,239 @@
package integrationtest
import (
"context"
"crypto/rand"
"fmt"
"testing"
"testing/synctest"
"time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/encoder"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/partialdatacolumnbroadcaster"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/assert"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/testing/util"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/peer"
simlibp2p "github.com/libp2p/go-libp2p/x/simlibp2p"
"github.com/marcopolo/simnet"
"github.com/sirupsen/logrus"
)
// TestTwoNodePartialColumnExchange tests that two nodes can exchange partial columns
// and reconstruct the complete column. Node 1 has cells 0-2, Node 2 has cells 3-5.
// After exchange, both should have all cells.
func TestTwoNodePartialColumnExchange(t *testing.T) {
synctest.Test(t, func(t *testing.T) {
// Create a simulated libp2p network
latency := time.Millisecond * 10
network, meta, err := simlibp2p.SimpleLibp2pNetwork([]simlibp2p.NodeLinkSettingsAndCount{
{LinkSettings: simnet.NodeBiDiLinkSettings{
Downlink: simnet.LinkSettings{BitsPerSecond: 20 * simlibp2p.OneMbps, Latency: latency / 2},
Uplink: simnet.LinkSettings{BitsPerSecond: 20 * simlibp2p.OneMbps, Latency: latency / 2},
}, Count: 2},
}, simlibp2p.NetworkSettings{UseBlankHost: true})
require.NoError(t, err)
require.NoError(t, network.Start())
defer func() {
require.NoError(t, network.Close())
}()
defer func() {
for _, node := range meta.Nodes {
err := node.Close()
if err != nil {
panic(err)
}
}
}()
h1 := meta.Nodes[0]
h2 := meta.Nodes[1]
logger := logrus.New()
logger.SetLevel(logrus.DebugLevel)
broadcaster1 := partialdatacolumnbroadcaster.NewBroadcaster(logger)
broadcaster2 := partialdatacolumnbroadcaster.NewBroadcaster(logger)
opts1 := broadcaster1.AppendPubSubOpts([]pubsub.Option{
pubsub.WithMessageSigning(false),
pubsub.WithStrictSignatureVerification(false),
})
opts2 := broadcaster2.AppendPubSubOpts([]pubsub.Option{
pubsub.WithMessageSigning(false),
pubsub.WithStrictSignatureVerification(false),
})
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
ps1, err := pubsub.NewGossipSub(ctx, h1, opts1...)
require.NoError(t, err)
ps2, err := pubsub.NewGossipSub(ctx, h2, opts2...)
require.NoError(t, err)
go broadcaster1.Start()
go broadcaster2.Start()
defer func() {
broadcaster1.Stop()
broadcaster2.Stop()
}()
// Generate Test Data
var blockRoot [fieldparams.RootLength]byte
copy(blockRoot[:], []byte("test-block-root"))
numCells := 6
commitments := make([][]byte, numCells)
cells := make([][]byte, numCells)
proofs := make([][]byte, numCells)
for i := range numCells {
commitments[i] = make([]byte, 48)
cells[i] = make([]byte, 2048)
_, err := rand.Read(cells[i])
require.NoError(t, err)
proofs[i] = make([]byte, 48)
_ = fmt.Appendf(proofs[i][:0], "proof %d", i)
}
roDC, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{
{
BodyRoot: blockRoot[:],
KzgCommitments: commitments,
Column: cells,
KzgProofs: proofs,
},
})
pc1, err := blocks.NewPartialDataColumn(roDC[0].DataColumnSidecar.SignedBlockHeader, roDC[0].Index, roDC[0].KzgCommitments, roDC[0].KzgCommitmentsInclusionProof)
require.NoError(t, err)
pc2, err := blocks.NewPartialDataColumn(roDC[0].DataColumnSidecar.SignedBlockHeader, roDC[0].Index, roDC[0].KzgCommitments, roDC[0].KzgCommitmentsInclusionProof)
require.NoError(t, err)
// Split data
for i := range numCells {
if i%2 == 0 {
pc1.ExtendFromVerfifiedCell(uint64(i), roDC[0].Column[i], roDC[0].KzgProofs[i])
} else {
pc2.ExtendFromVerfifiedCell(uint64(i), roDC[0].Column[i], roDC[0].KzgProofs[i])
}
}
// Setup Topic and Subscriptions
digest := params.ForkDigest(0)
columnIndex := uint64(12)
subnet := peerdas.ComputeSubnetForDataColumnSidecar(columnIndex)
topicStr := fmt.Sprintf(p2p.DataColumnSubnetTopicFormat, digest, subnet) +
encoder.SszNetworkEncoder{}.ProtocolSuffix()
time.Sleep(100 * time.Millisecond)
topic1, err := ps1.Join(topicStr, pubsub.RequestPartialMessages())
require.NoError(t, err)
topic2, err := ps2.Join(topicStr, pubsub.RequestPartialMessages())
require.NoError(t, err)
// Header validator that verifies the inclusion proof
headerValidator := func(header *ethpb.PartialDataColumnHeader) (reject bool, err error) {
if header == nil {
return false, fmt.Errorf("nil header")
}
if header.SignedBlockHeader == nil || header.SignedBlockHeader.Header == nil {
return true, fmt.Errorf("nil signed block header")
}
if len(header.KzgCommitments) == 0 {
return true, fmt.Errorf("empty kzg commitments")
}
// Verify inclusion proof
if err := peerdas.VerifyPartialDataColumnHeaderInclusionProof(header); err != nil {
return true, fmt.Errorf("invalid inclusion proof: %w", err)
}
t.Log("Header validation passed")
return false, nil
}
cellValidator := func(_ []blocks.CellProofBundle) error {
return nil
}
node1Complete := make(chan blocks.VerifiedRODataColumn, 1)
node2Complete := make(chan blocks.VerifiedRODataColumn, 1)
handler1 := func(topic string, col blocks.VerifiedRODataColumn) {
t.Logf("Node 1: Completed! Column has %d cells", len(col.Column))
node1Complete <- col
}
handler2 := func(topic string, col blocks.VerifiedRODataColumn) {
t.Logf("Node 2: Completed! Column has %d cells", len(col.Column))
node2Complete <- col
}
// Connect hosts
err = h1.Connect(context.Background(), peer.AddrInfo{
ID: h2.ID(),
Addrs: h2.Addrs(),
})
require.NoError(t, err)
time.Sleep(300 * time.Millisecond)
// Subscribe to regular GossipSub (critical for partial message RPC exchange!)
sub1, err := topic1.Subscribe()
require.NoError(t, err)
defer sub1.Cancel()
sub2, err := topic2.Subscribe()
require.NoError(t, err)
defer sub2.Cancel()
err = broadcaster1.Subscribe(topic1, headerValidator, cellValidator, handler1)
require.NoError(t, err)
err = broadcaster2.Subscribe(topic2, headerValidator, cellValidator, handler2)
require.NoError(t, err)
// Wait for mesh to form
time.Sleep(2 * time.Second)
// Publish
t.Log("Publishing from Node 1")
err = broadcaster1.Publish(topicStr, pc1)
require.NoError(t, err)
time.Sleep(200 * time.Millisecond)
t.Log("Publishing from Node 2")
err = broadcaster2.Publish(topicStr, pc2)
require.NoError(t, err)
// Wait for Completion
timeout := time.After(10 * time.Second)
var col1, col2 blocks.VerifiedRODataColumn
receivedCount := 0
for receivedCount < 2 {
select {
case col1 = <-node1Complete:
t.Log("Node 1 completed reconstruction")
receivedCount++
case col2 = <-node2Complete:
t.Log("Node 2 completed reconstruction")
receivedCount++
case <-timeout:
t.Fatalf("Timeout: Only %d/2 nodes completed", receivedCount)
}
}
// Verify both columns have all cells
assert.Equal(t, numCells, len(col1.Column), "Node 1 should have all cells")
assert.Equal(t, numCells, len(col2.Column), "Node 2 should have all cells")
assert.DeepSSZEqual(t, cells, col1.Column, "Node 1 cell mismatch")
assert.DeepSSZEqual(t, cells, col2.Column, "Node 2 cell mismatch")
})
}

View File

@@ -0,0 +1,9 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package partialdatacolumnbroadcaster
import "github.com/sirupsen/logrus"
// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "beacon-chain/p2p/partialdatacolumnbroadcaster")

View File

@@ -0,0 +1,18 @@
package partialdatacolumnbroadcaster
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
var (
partialMessageUsefulCellsTotal = promauto.NewCounterVec(prometheus.CounterOpts{
Name: "beacon_partial_message_useful_cells_total",
Help: "Number of useful cells received via a partial message",
}, []string{"column_index"})
partialMessageCellsReceivedTotal = promauto.NewCounterVec(prometheus.CounterOpts{
Name: "beacon_partial_message_cells_received_total",
Help: "Number of total cells received via a partial message",
}, []string{"column_index"})
)

View File

@@ -0,0 +1,540 @@
package partialdatacolumnbroadcaster
import (
"bytes"
"log/slog"
"regexp"
"strconv"
"time"
"github.com/OffchainLabs/go-bitfield"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/internal/logrusadapter"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p-pubsub/partialmessages"
"github.com/libp2p/go-libp2p-pubsub/partialmessages/bitmap"
pubsub_pb "github.com/libp2p/go-libp2p-pubsub/pb"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// TODOs:
// different eager push strategies:
// - no eager push
// - full column eager push
// - With debouncing - some factor of RTT
// - eager push missing cells
const TTLInSlots = 3
const maxConcurrentValidators = 128
var dataColumnTopicRegex = regexp.MustCompile(`data_column_sidecar_(\d+)`)
func extractColumnIndexFromTopic(topic string) (uint64, error) {
matches := dataColumnTopicRegex.FindStringSubmatch(topic)
if len(matches) < 2 {
return 0, errors.New("could not extract column index from topic")
}
return strconv.ParseUint(matches[1], 10, 64)
}
// HeaderValidator validates a PartialDataColumnHeader.
// Returns (reject, err) where:
// - reject=true, err!=nil: REJECT - peer should be penalized
// - reject=false, err!=nil: IGNORE - don't penalize, just ignore
// - reject=false, err=nil: valid header
type HeaderValidator func(header *ethpb.PartialDataColumnHeader) (reject bool, err error)
type ColumnValidator func(cells []blocks.CellProofBundle) error
type PartialColumnBroadcaster struct {
logger *logrus.Logger
ps *pubsub.PubSub
stop chan struct{}
// map topic -> headerValidators
headerValidators map[string]HeaderValidator
// map topic -> Validator
validators map[string]ColumnValidator
// map topic -> handler
handlers map[string]SubHandler
// map topic -> *pubsub.Topic
topics map[string]*pubsub.Topic
concurrentValidatorSemaphore chan struct{}
// map topic -> map[groupID]PartialColumn
partialMsgStore map[string]map[string]*blocks.PartialDataColumn
groupTTL map[string]int8
// validHeaderCache caches validated headers by group ID (works across topics)
validHeaderCache map[string]*ethpb.PartialDataColumnHeader
incomingReq chan request
}
type requestKind uint8
const (
requestKindPublish requestKind = iota
requestKindSubscribe
requestKindUnsubscribe
requestKindHandleIncomingRPC
requestKindCellsValidated
)
type request struct {
kind requestKind
response chan error
sub subscribe
unsub unsubscribe
publish publish
incomingRPC rpcWithFrom
cellsValidated *cellsValidated
}
type publish struct {
topic string
c blocks.PartialDataColumn
}
type subscribe struct {
t *pubsub.Topic
headerValidator HeaderValidator
validator ColumnValidator
handler SubHandler
}
type unsubscribe struct {
topic string
}
type rpcWithFrom struct {
*pubsub_pb.PartialMessagesExtension
from peer.ID
}
type cellsValidated struct {
validationTook time.Duration
topic string
group []byte
cellIndices []uint64
cells []blocks.CellProofBundle
}
func NewBroadcaster(logger *logrus.Logger) *PartialColumnBroadcaster {
return &PartialColumnBroadcaster{
validators: make(map[string]ColumnValidator),
headerValidators: make(map[string]HeaderValidator),
handlers: make(map[string]SubHandler),
topics: make(map[string]*pubsub.Topic),
partialMsgStore: make(map[string]map[string]*blocks.PartialDataColumn),
groupTTL: make(map[string]int8),
validHeaderCache: make(map[string]*ethpb.PartialDataColumnHeader),
// GossipSub sends the messages to this channel. The buffer should be
// big enough to avoid dropping messages. We don't want to block the gossipsub event loop for this.
incomingReq: make(chan request, 128*16),
logger: logger,
concurrentValidatorSemaphore: make(chan struct{}, maxConcurrentValidators),
}
}
// AppendPubSubOpts adds the necessary pubsub options to enable partial messages.
func (p *PartialColumnBroadcaster) AppendPubSubOpts(opts []pubsub.Option) []pubsub.Option {
slogger := slog.New(logrusadapter.Handler{Logger: p.logger})
opts = append(opts,
pubsub.WithPartialMessagesExtension(&partialmessages.PartialMessagesExtension{
Logger: slogger,
MergePartsMetadata: func(topic string, left, right partialmessages.PartsMetadata) partialmessages.PartsMetadata {
if len(left) == 0 {
return right
}
merged, err := bitfield.Bitlist(left).Or(bitfield.Bitlist(right))
if err != nil {
p.logger.Warn("Failed to merge bitfields", "err", err, "left", left, "right", right)
return left
}
return partialmessages.PartsMetadata(merged)
},
ValidateRPC: func(from peer.ID, rpc *pubsub_pb.PartialMessagesExtension) error {
// TODO. Add some basic and fast sanity checks
return nil
},
OnIncomingRPC: func(from peer.ID, rpc *pubsub_pb.PartialMessagesExtension) error {
select {
case p.incomingReq <- request{
kind: requestKindHandleIncomingRPC,
incomingRPC: rpcWithFrom{rpc, from},
}:
default:
p.logger.Warn("Dropping incoming partial RPC", "rpc", rpc)
}
return nil
},
}),
func(ps *pubsub.PubSub) error {
p.ps = ps
return nil
},
)
return opts
}
// Start starts the event loop of the PartialColumnBroadcaster. Should be called
// within a goroutine (go p.Start())
func (p *PartialColumnBroadcaster) Start() {
p.stop = make(chan struct{})
p.loop()
}
func (p *PartialColumnBroadcaster) loop() {
cleanup := time.NewTicker(time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot))
defer cleanup.Stop()
for {
select {
case <-p.stop:
return
case <-cleanup.C:
for groupID, ttl := range p.groupTTL {
if ttl > 0 {
p.groupTTL[groupID] = ttl - 1
continue
}
delete(p.groupTTL, groupID)
delete(p.validHeaderCache, groupID)
for topic, msgStore := range p.partialMsgStore {
delete(msgStore, groupID)
if len(msgStore) == 0 {
delete(p.partialMsgStore, topic)
}
}
}
case req := <-p.incomingReq:
switch req.kind {
case requestKindPublish:
req.response <- p.publish(req.publish.topic, req.publish.c)
case requestKindSubscribe:
req.response <- p.subscribe(req.sub.t, req.sub.headerValidator, req.sub.validator, req.sub.handler)
case requestKindUnsubscribe:
req.response <- p.unsubscribe(req.unsub.topic)
case requestKindHandleIncomingRPC:
err := p.handleIncomingRPC(req.incomingRPC)
if err != nil {
p.logger.Error("Failed to handle incoming partial RPC", "err", err)
}
case requestKindCellsValidated:
err := p.handleCellsValidated(req.cellsValidated)
if err != nil {
p.logger.Error("Failed to handle cells validated", "err", err)
}
default:
p.logger.Error("Unknown request kind", "kind", req.kind)
}
}
}
}
func (p *PartialColumnBroadcaster) getDataColumn(topic string, group []byte) *blocks.PartialDataColumn {
topicStore, ok := p.partialMsgStore[topic]
if !ok {
return nil
}
msg, ok := topicStore[string(group)]
if !ok {
return nil
}
return msg
}
func (p *PartialColumnBroadcaster) handleIncomingRPC(rpcWithFrom rpcWithFrom) error {
if p.ps == nil {
return errors.New("pubsub not initialized")
}
hasMessage := len(rpcWithFrom.PartialMessage) > 0
var message ethpb.PartialDataColumnSidecar
if hasMessage {
err := message.UnmarshalSSZ(rpcWithFrom.PartialMessage)
if err != nil {
return errors.Wrap(err, "failed to unmarshal partial message data")
}
}
topicID := rpcWithFrom.GetTopicID()
groupID := rpcWithFrom.GroupID
ourDataColumn := p.getDataColumn(topicID, groupID)
var shouldRepublish bool
if ourDataColumn == nil && hasMessage {
var header *ethpb.PartialDataColumnHeader
// Check cache first for this group
if cachedHeader, ok := p.validHeaderCache[string(groupID)]; ok {
header = cachedHeader
} else {
// We haven't seen this group before. Check if we have a valid header.
if len(message.Header) == 0 {
p.logger.Debug("No partial column found and no header in message, ignoring")
return nil
}
header = message.Header[0]
headerValidator, ok := p.headerValidators[topicID]
if !ok || headerValidator == nil {
p.logger.Debug("No header validator registered for topic")
return nil
}
reject, err := headerValidator(header)
if err != nil {
p.logger.Debug("Header validation failed", "err", err, "reject", reject)
if reject {
// REJECT case: penalize the peer
_ = p.ps.PeerFeedback(topicID, rpcWithFrom.from, pubsub.PeerFeedbackInvalidMessage)
}
// Both REJECT and IGNORE: don't process further
return nil
}
// Cache the valid header
p.validHeaderCache[string(groupID)] = header
// TODO: We now have the information we need to call GetBlobsV3, we should do that to see what we have locally.
}
columnIndex, err := extractColumnIndexFromTopic(topicID)
if err != nil {
return err
}
newColumn, err := blocks.NewPartialDataColumn(
header.SignedBlockHeader,
columnIndex,
header.KzgCommitments,
header.KzgCommitmentsInclusionProof,
)
if err != nil {
p.logger.WithError(err).WithFields(logrus.Fields{
"topic": topicID,
"columnIndex": columnIndex,
"numCommitments": len(header.KzgCommitments),
}).Error("Failed to create partial data column from header")
return err
}
// Save to store
topicStore, ok := p.partialMsgStore[topicID]
if !ok {
topicStore = make(map[string]*blocks.PartialDataColumn)
p.partialMsgStore[topicID] = topicStore
}
topicStore[string(newColumn.GroupID())] = &newColumn
p.groupTTL[string(newColumn.GroupID())] = TTLInSlots
ourDataColumn = &newColumn
shouldRepublish = true
}
if ourDataColumn == nil {
// We don't have a partial column for this. Can happen if we got cells
// without a header.
return nil
}
logger := p.logger.WithFields(logrus.Fields{
"from": rpcWithFrom.from,
"topic": topicID,
"group": groupID,
})
validator, validatorOK := p.validators[topicID]
if len(rpcWithFrom.PartialMessage) > 0 && validatorOK {
// TODO: is there any penalty we want to consider for giving us data we didn't request?
// Note that we need to be careful around race conditions and eager data.
// Also note that protobufs by design allow extra data that we don't parse.
// Marco's thoughts. No, we don't need to do anything else here.
cellIndices, cellsToVerify, err := ourDataColumn.CellsToVerifyFromPartialMessage(&message)
if err != nil {
return err
}
// Track cells received via partial message
if len(cellIndices) > 0 {
columnIndexStr := strconv.FormatUint(ourDataColumn.Index, 10)
partialMessageCellsReceivedTotal.WithLabelValues(columnIndexStr).Add(float64(len(cellIndices)))
}
if len(cellsToVerify) > 0 {
p.concurrentValidatorSemaphore <- struct{}{}
go func() {
defer func() {
<-p.concurrentValidatorSemaphore
}()
start := time.Now()
err := validator(cellsToVerify)
if err != nil {
logger.Error("failed to validate cells", "err", err)
_ = p.ps.PeerFeedback(topicID, rpcWithFrom.from, pubsub.PeerFeedbackInvalidMessage)
return
}
_ = p.ps.PeerFeedback(topicID, rpcWithFrom.from, pubsub.PeerFeedbackUsefulMessage)
p.incomingReq <- request{
kind: requestKindCellsValidated,
cellsValidated: &cellsValidated{
validationTook: time.Since(start),
topic: topicID,
group: groupID,
cells: cellsToVerify,
cellIndices: cellIndices,
},
}
}()
}
}
peerHas := bitmap.Bitmap(rpcWithFrom.PartsMetadata)
iHave := bitmap.Bitmap(ourDataColumn.PartsMetadata())
if !shouldRepublish && len(peerHas) > 0 && !bytes.Equal(peerHas, iHave) {
// Either we have something they don't or vice versa
shouldRepublish = true
logger.Debug("republishing due to parts metadata difference")
}
if shouldRepublish {
err := p.ps.PublishPartialMessage(topicID, ourDataColumn, partialmessages.PublishOptions{})
if err != nil {
return err
}
}
return nil
}
func (p *PartialColumnBroadcaster) handleCellsValidated(cells *cellsValidated) error {
ourDataColumn := p.getDataColumn(cells.topic, cells.group)
if ourDataColumn == nil {
return errors.New("data column not found for verified cells")
}
extended := ourDataColumn.ExtendFromVerfifiedCells(cells.cellIndices, cells.cells)
p.logger.Debug("Extended partial message", "duration", cells.validationTook, "extended", extended)
columnIndexStr := strconv.FormatUint(ourDataColumn.Index, 10)
if extended {
// Track useful cells (cells that extended our data)
partialMessageUsefulCellsTotal.WithLabelValues(columnIndexStr).Add(float64(len(cells.cells)))
// TODO: we could use the heuristic here that if this data was
// useful to us, it's likely useful to our peers and we should
// republish eagerly
if col, ok := ourDataColumn.Complete(p.logger); ok {
p.logger.Info("Completed partial column", "topic", cells.topic, "group", cells.group)
handler, handlerOK := p.handlers[cells.topic]
if handlerOK {
go handler(cells.topic, col)
}
} else {
p.logger.Info("Extended partial column", "topic", cells.topic, "group", cells.group)
}
err := p.ps.PublishPartialMessage(cells.topic, ourDataColumn, partialmessages.PublishOptions{})
if err != nil {
return err
}
}
return nil
}
func (p *PartialColumnBroadcaster) Stop() {
if p.stop != nil {
close(p.stop)
}
}
// Publish publishes the partial column.
func (p *PartialColumnBroadcaster) Publish(topic string, c blocks.PartialDataColumn) error {
if p.ps == nil {
return errors.New("pubsub not initialized")
}
respCh := make(chan error)
p.incomingReq <- request{
kind: requestKindPublish,
response: respCh,
publish: publish{
topic: topic,
c: c,
},
}
return <-respCh
}
func (p *PartialColumnBroadcaster) publish(topic string, c blocks.PartialDataColumn) error {
topicStore, ok := p.partialMsgStore[topic]
if !ok {
topicStore = make(map[string]*blocks.PartialDataColumn)
p.partialMsgStore[topic] = topicStore
}
topicStore[string(c.GroupID())] = &c
p.groupTTL[string(c.GroupID())] = TTLInSlots
return p.ps.PublishPartialMessage(topic, &c, partialmessages.PublishOptions{})
}
type SubHandler func(topic string, col blocks.VerifiedRODataColumn)
func (p *PartialColumnBroadcaster) Subscribe(t *pubsub.Topic, headerValidator HeaderValidator, validator ColumnValidator, handler SubHandler) error {
respCh := make(chan error)
p.incomingReq <- request{
kind: requestKindSubscribe,
sub: subscribe{
t: t,
headerValidator: headerValidator,
validator: validator,
handler: handler,
},
response: respCh,
}
return <-respCh
}
func (p *PartialColumnBroadcaster) subscribe(t *pubsub.Topic, headerValidator HeaderValidator, validator ColumnValidator, handler SubHandler) error {
topic := t.String()
if _, ok := p.topics[topic]; ok {
return errors.New("already subscribed")
}
p.topics[topic] = t
p.headerValidators[topic] = headerValidator
p.validators[topic] = validator
p.handlers[topic] = handler
return nil
}
func (p *PartialColumnBroadcaster) Unsubscribe(topic string) error {
respCh := make(chan error)
p.incomingReq <- request{
kind: requestKindUnsubscribe,
unsub: unsubscribe{
topic: topic,
},
response: respCh,
}
return <-respCh
}
func (p *PartialColumnBroadcaster) unsubscribe(topic string) error {
t, ok := p.topics[topic]
if !ok {
return errors.New("topic not found")
}
delete(p.topics, topic)
delete(p.partialMsgStore, topic)
delete(p.headerValidators, topic)
delete(p.validators, topic)
delete(p.handlers, topic)
return t.Close()
}

View File
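Putting the new package together: a condensed, hedged wiring sketch in the spirit of the two-node integration test earlier in this changeset — construct the broadcaster, fold its options into GossipSub, start its event loop, then join and subscribe with header and cell validators. It assumes the forked go-libp2p-pubsub with the partial-messages extension used by this branch; the topic string is a placeholder and the validators are permissive stubs, whereas real code verifies the inclusion proof and cell KZG proofs.

package main

import (
    "context"
    "log"

    "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/partialdatacolumnbroadcaster"
    "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
    ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
    "github.com/libp2p/go-libp2p"
    pubsub "github.com/libp2p/go-libp2p-pubsub"
    "github.com/sirupsen/logrus"
)

func main() {
    ctx := context.Background()

    h, err := libp2p.New()
    if err != nil {
        log.Fatal(err)
    }

    // Construct the broadcaster and fold its options into GossipSub so the
    // partial-messages extension is wired up, mirroring the two-node test.
    bc := partialdatacolumnbroadcaster.NewBroadcaster(logrus.New())
    opts := bc.AppendPubSubOpts([]pubsub.Option{
        pubsub.WithMessageSigning(false),
        pubsub.WithStrictSignatureVerification(false),
    })
    ps, err := pubsub.NewGossipSub(ctx, h, opts...)
    if err != nil {
        log.Fatal(err)
    }

    go bc.Start()
    defer bc.Stop()

    // Placeholder topic string; real code derives it from the fork digest and
    // column subnet, as the integration test does.
    topic, err := ps.Join("/eth2/00000000/data_column_sidecar_12/ssz_snappy", pubsub.RequestPartialMessages())
    if err != nil {
        log.Fatal(err)
    }

    // A regular gossipsub subscription is still needed for the partial-message
    // RPC exchange, as noted in the integration test.
    sub, err := topic.Subscribe()
    if err != nil {
        log.Fatal(err)
    }
    defer sub.Cancel()

    // Permissive stubs for illustration only.
    headerValidator := func(hdr *ethpb.PartialDataColumnHeader) (reject bool, err error) { return false, nil }
    cellValidator := func(_ []blocks.CellProofBundle) error { return nil }
    handler := func(topic string, col blocks.VerifiedRODataColumn) {
        log.Printf("completed column on %s with %d cells", topic, len(col.Column))
    }

    if err := bc.Subscribe(topic, headerValidator, cellValidator, handler); err != nil {
        log.Fatal(err)
    }
    // A real service keeps running here; the sketch exits immediately.
}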

@@ -58,7 +58,7 @@ func TestPeerExplicitAdd(t *testing.T) {
resAddress, err := p.Address(id)
require.NoError(t, err)
assert.Equal(t, address, resAddress, "Unexpected address")
assert.Equal(t, address.Equal(resAddress), true, "Unexpected address")
resDirection, err := p.Direction(id)
require.NoError(t, err)
@@ -72,7 +72,7 @@ func TestPeerExplicitAdd(t *testing.T) {
resAddress2, err := p.Address(id)
require.NoError(t, err)
assert.Equal(t, address2, resAddress2, "Unexpected address")
assert.Equal(t, address2.Equal(resAddress2), true, "Unexpected address")
resDirection2, err := p.Direction(id)
require.NoError(t, err)

View File

@@ -170,7 +170,7 @@ func (s *Service) pubsubOptions() []pubsub.Option {
pubsub.WithPeerScore(peerScoringParams(s.cfg.IPColocationWhitelist)),
pubsub.WithPeerScoreInspect(s.peerInspector, time.Minute),
pubsub.WithGossipSubParams(pubsubGossipParam()),
pubsub.WithRawTracer(gossipTracer{host: s.host}),
pubsub.WithRawTracer(&gossipTracer{host: s.host, allowedTopics: filt}),
}
if len(s.cfg.StaticPeers) > 0 {
@@ -181,6 +181,9 @@ func (s *Service) pubsubOptions() []pubsub.Option {
}
psOpts = append(psOpts, pubsub.WithDirectPeers(directPeersAddrInfos))
}
if s.partialColumnBroadcaster != nil {
psOpts = s.partialColumnBroadcaster.AppendPubSubOpts(psOpts)
}
return psOpts
}

View File

@@ -1,6 +1,8 @@
package p2p
import (
"sync"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/peer"
@@ -8,7 +10,7 @@ import (
"github.com/prometheus/client_golang/prometheus"
)
var _ = pubsub.RawTracer(gossipTracer{})
var _ = pubsub.RawTracer(&gossipTracer{})
// Initializes the values for the pubsub rpc action.
type action int
@@ -23,85 +25,160 @@ const (
// and broadcasted through gossipsub.
type gossipTracer struct {
host host.Host
allowedTopics pubsub.SubscriptionFilter
mu sync.Mutex
// map topic -> Set(peerID). Peer is in set if it supports partial messages.
partialMessagePeers map[string]map[peer.ID]struct{}
// map topic -> Set(peerID). Peer is in set if in the mesh.
meshPeers map[string]map[peer.ID]struct{}
}
// AddPeer .
func (g gossipTracer) AddPeer(p peer.ID, proto protocol.ID) {
func (g *gossipTracer) AddPeer(p peer.ID, proto protocol.ID) {
// no-op
}
// RemovePeer .
func (g gossipTracer) RemovePeer(p peer.ID) {
// no-op
func (g *gossipTracer) RemovePeer(p peer.ID) {
g.mu.Lock()
defer g.mu.Unlock()
for _, peers := range g.partialMessagePeers {
delete(peers, p)
}
for topic, peers := range g.meshPeers {
if _, ok := peers[p]; ok {
delete(peers, p)
g.updateMeshPeersMetric(topic)
}
}
}
// Join .
func (g gossipTracer) Join(topic string) {
func (g *gossipTracer) Join(topic string) {
pubsubTopicsActive.WithLabelValues(topic).Set(1)
g.mu.Lock()
defer g.mu.Unlock()
if g.partialMessagePeers == nil {
g.partialMessagePeers = make(map[string]map[peer.ID]struct{})
}
if g.partialMessagePeers[topic] == nil {
g.partialMessagePeers[topic] = make(map[peer.ID]struct{})
}
if g.meshPeers == nil {
g.meshPeers = make(map[string]map[peer.ID]struct{})
}
if g.meshPeers[topic] == nil {
g.meshPeers[topic] = make(map[peer.ID]struct{})
}
}
// Leave .
func (g gossipTracer) Leave(topic string) {
func (g *gossipTracer) Leave(topic string) {
pubsubTopicsActive.WithLabelValues(topic).Set(0)
g.mu.Lock()
defer g.mu.Unlock()
delete(g.partialMessagePeers, topic)
delete(g.meshPeers, topic)
}
// Graft .
func (g gossipTracer) Graft(p peer.ID, topic string) {
func (g *gossipTracer) Graft(p peer.ID, topic string) {
pubsubTopicsGraft.WithLabelValues(topic).Inc()
g.mu.Lock()
defer g.mu.Unlock()
if m, ok := g.meshPeers[topic]; ok {
m[p] = struct{}{}
}
g.updateMeshPeersMetric(topic)
}
// Prune .
func (g gossipTracer) Prune(p peer.ID, topic string) {
func (g *gossipTracer) Prune(p peer.ID, topic string) {
pubsubTopicsPrune.WithLabelValues(topic).Inc()
g.mu.Lock()
defer g.mu.Unlock()
if m, ok := g.meshPeers[topic]; ok {
delete(m, p)
}
g.updateMeshPeersMetric(topic)
}
// ValidateMessage .
func (g gossipTracer) ValidateMessage(msg *pubsub.Message) {
func (g *gossipTracer) ValidateMessage(msg *pubsub.Message) {
pubsubMessageValidate.WithLabelValues(*msg.Topic).Inc()
}
// DeliverMessage .
func (g gossipTracer) DeliverMessage(msg *pubsub.Message) {
func (g *gossipTracer) DeliverMessage(msg *pubsub.Message) {
pubsubMessageDeliver.WithLabelValues(*msg.Topic).Inc()
}
// RejectMessage .
func (g gossipTracer) RejectMessage(msg *pubsub.Message, reason string) {
func (g *gossipTracer) RejectMessage(msg *pubsub.Message, reason string) {
pubsubMessageReject.WithLabelValues(*msg.Topic, reason).Inc()
}
// DuplicateMessage .
func (g gossipTracer) DuplicateMessage(msg *pubsub.Message) {
func (g *gossipTracer) DuplicateMessage(msg *pubsub.Message) {
pubsubMessageDuplicate.WithLabelValues(*msg.Topic).Inc()
}
// UndeliverableMessage .
func (g gossipTracer) UndeliverableMessage(msg *pubsub.Message) {
func (g *gossipTracer) UndeliverableMessage(msg *pubsub.Message) {
pubsubMessageUndeliverable.WithLabelValues(*msg.Topic).Inc()
}
// ThrottlePeer .
func (g gossipTracer) ThrottlePeer(p peer.ID) {
func (g *gossipTracer) ThrottlePeer(p peer.ID) {
agent := agentFromPid(p, g.host.Peerstore())
pubsubPeerThrottle.WithLabelValues(agent).Inc()
}
// RecvRPC .
func (g gossipTracer) RecvRPC(rpc *pubsub.RPC) {
g.setMetricFromRPC(recv, pubsubRPCSubRecv, pubsubRPCPubRecv, pubsubRPCRecv, rpc)
func (g *gossipTracer) RecvRPC(rpc *pubsub.RPC) {
from := rpc.From()
g.setMetricFromRPC(recv, pubsubRPCSubRecv, pubsubRPCPubRecv, pubsubRPCPubRecvSize, pubsubRPCRecv, rpc)
g.mu.Lock()
defer g.mu.Unlock()
for _, sub := range rpc.Subscriptions {
topic := sub.GetTopicid()
if !g.allowedTopics.CanSubscribe(topic) {
continue
}
if g.partialMessagePeers == nil {
g.partialMessagePeers = make(map[string]map[peer.ID]struct{})
}
m, ok := g.partialMessagePeers[topic]
if !ok {
m = make(map[peer.ID]struct{})
g.partialMessagePeers[topic] = m
}
if sub.GetSubscribe() && sub.GetRequestsPartial() {
m[from] = struct{}{}
} else {
delete(m, from)
if len(m) == 0 {
delete(g.partialMessagePeers, topic)
}
}
}
}
// SendRPC .
func (g gossipTracer) SendRPC(rpc *pubsub.RPC, p peer.ID) {
g.setMetricFromRPC(send, pubsubRPCSubSent, pubsubRPCPubSent, pubsubRPCSent, rpc)
func (g *gossipTracer) SendRPC(rpc *pubsub.RPC, p peer.ID) {
g.setMetricFromRPC(send, pubsubRPCSubSent, pubsubRPCPubSent, pubsubRPCPubSentSize, pubsubRPCSent, rpc)
}
// DropRPC .
func (g gossipTracer) DropRPC(rpc *pubsub.RPC, p peer.ID) {
g.setMetricFromRPC(drop, pubsubRPCSubDrop, pubsubRPCPubDrop, pubsubRPCDrop, rpc)
func (g *gossipTracer) DropRPC(rpc *pubsub.RPC, p peer.ID) {
g.setMetricFromRPC(drop, pubsubRPCSubDrop, pubsubRPCPubDrop, pubsubRPCPubDropSize, pubsubRPCDrop, rpc)
}
func (g gossipTracer) setMetricFromRPC(act action, subCtr prometheus.Counter, pubCtr, ctrlCtr *prometheus.CounterVec, rpc *pubsub.RPC) {
func (g *gossipTracer) setMetricFromRPC(act action, subCtr prometheus.Counter, pubCtr, pubSizeCtr, ctrlCtr *prometheus.CounterVec, rpc *pubsub.RPC) {
subCtr.Add(float64(len(rpc.Subscriptions)))
if rpc.Control != nil {
ctrlCtr.WithLabelValues("graft").Add(float64(len(rpc.Control.Graft)))
@@ -110,12 +187,41 @@ func (g gossipTracer) setMetricFromRPC(act action, subCtr prometheus.Counter, pu
ctrlCtr.WithLabelValues("iwant").Add(float64(len(rpc.Control.Iwant)))
ctrlCtr.WithLabelValues("idontwant").Add(float64(len(rpc.Control.Idontwant)))
}
// For incoming messages from pubsub, we do not record metrics for them as these values
// could be junk.
if act == recv {
return
}
for _, msg := range rpc.Publish {
// For incoming messages from pubsub, we do not record metrics for them as these values
// could be junk.
if act == recv {
continue
}
pubCtr.WithLabelValues(*msg.Topic).Inc()
pubCtr.WithLabelValues(msg.GetTopic()).Inc()
pubSizeCtr.WithLabelValues(msg.GetTopic(), "false").Add(float64(msg.Size()))
}
if rpc.Partial != nil {
pubCtr.WithLabelValues(rpc.Partial.GetTopicID()).Inc()
pubSizeCtr.WithLabelValues(rpc.Partial.GetTopicID(), "true").Add(float64(rpc.Partial.Size()))
}
}
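The extra pubSizeCtr argument threads a per-topic byte counter through the RPC metrics, labelled by whether the bytes came from a partial message. A sketch of such a counter using the standard Prometheus client (the metric name here is invented; the real ones live in the p2p metrics file):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

// Hypothetical counter labelled by topic and a "partial" flag.
var rpcPubSize = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "example_pubsub_rpc_publish_bytes_total",
		Help: "Bytes of published messages, by topic and partial flag.",
	},
	[]string{"topic", "partial"},
)

func main() {
	prometheus.MustRegister(rpcPubSize)
	// A full 512-byte message on a topic:
	rpcPubSize.WithLabelValues("data_column_sidecar_1", "false").Add(512)
	// A partial 128-byte message on the same topic:
	rpcPubSize.WithLabelValues("data_column_sidecar_1", "true").Add(128)
	fmt.Println("recorded")
}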
// updateMeshPeersMetric requires the caller to hold the state mutex
func (g *gossipTracer) updateMeshPeersMetric(topic string) {
meshPeers, ok := g.meshPeers[topic]
if !ok {
return
}
partialPeers, ok := g.partialMessagePeers[topic]
if !ok {
return
}
var supportsPartial, doesNotSupportPartial float64
for p := range meshPeers {
if _, ok := partialPeers[p]; ok {
supportsPartial++
} else {
doesNotSupportPartial++
}
}
pubsubMeshPeers.WithLabelValues(topic, "true").Set(supportsPartial)
pubsubMeshPeers.WithLabelValues(topic, "false").Set(doesNotSupportPartial)
}
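updateMeshPeersMetric partitions the mesh membership for a topic by partial-message support and exports both counts. The same computation in isolation (peer IDs reduced to plain strings, Prometheus wiring omitted):

package main

import "fmt"

// countPartialSupport mirrors updateMeshPeersMetric's split: mesh peers that
// also appear in the partial-message set vs. those that do not.
func countPartialSupport(mesh, partial map[string]struct{}) (supports, doesNot float64) {
	for p := range mesh {
		if _, ok := partial[p]; ok {
			supports++
		} else {
			doesNot++
		}
	}
	return supports, doesNot
}

func main() {
	mesh := map[string]struct{}{"peerA": {}, "peerB": {}, "peerC": {}}
	partial := map[string]struct{}{"peerB": {}, "peerD": {}}
	s, d := countPartialSupport(mesh, partial)
	fmt.Println(s, d) // 1 2
}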

View File

@@ -11,6 +11,7 @@ import (
"github.com/OffchainLabs/prysm/v7/async"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/encoder"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/partialdatacolumnbroadcaster"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers/scorers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/types"
@@ -77,6 +78,7 @@ type Service struct {
privKey *ecdsa.PrivateKey
metaData metadata.Metadata
pubsub *pubsub.PubSub
partialColumnBroadcaster *partialdatacolumnbroadcaster.PartialColumnBroadcaster
joinedTopics map[string]*pubsub.Topic
joinedTopicsLock sync.RWMutex
subnetsLock map[uint64]*sync.RWMutex
@@ -147,6 +149,10 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {
custodyInfoSet: make(chan struct{}),
}
if cfg.PartialDataColumns {
s.partialColumnBroadcaster = partialdatacolumnbroadcaster.NewBroadcaster(log.Logger)
}
ipAddr := prysmnetwork.IPAddr()
opts, err := s.buildOptions(ipAddr, s.privKey)
@@ -305,6 +311,10 @@ func (s *Service) Start() {
logExternalDNSAddr(s.host.ID(), p2pHostDNS, p2pTCPPort)
}
go s.forkWatcher()
if s.partialColumnBroadcaster != nil {
go s.partialColumnBroadcaster.Start()
}
}
// Stop the p2p service and terminate all peer connections.
@@ -314,6 +324,10 @@ func (s *Service) Stop() error {
if s.dv5Listener != nil {
s.dv5Listener.Close()
}
if s.partialColumnBroadcaster != nil {
s.partialColumnBroadcaster.Stop()
}
return nil
}
@@ -350,6 +364,10 @@ func (s *Service) PubSub() *pubsub.PubSub {
return s.pubsub
}
func (s *Service) PartialColumnBroadcaster() *partialdatacolumnbroadcaster.PartialColumnBroadcaster {
return s.partialColumnBroadcaster
}
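The service hunks above follow a straightforward optional-component pattern: the broadcaster is constructed only when the PartialDataColumns flag is set, and every lifecycle call is nil-guarded. A condensed sketch with hypothetical types:

package main

import "fmt"

// broadcaster is a stand-in for the optional component.
type broadcaster struct{ quit chan struct{} }

func newBroadcaster() *broadcaster { return &broadcaster{quit: make(chan struct{})} }
func (b *broadcaster) Start()      { <-b.quit } // block until stopped
func (b *broadcaster) Stop()       { close(b.quit) }

type service struct {
	// nil unless the feature flag enabled it at construction time.
	bc *broadcaster
}

func newService(enabled bool) *service {
	s := &service{}
	if enabled {
		s.bc = newBroadcaster()
	}
	return s
}

func (s *service) Start() {
	if s.bc != nil {
		go s.bc.Start()
	}
}

func (s *service) Stop() {
	if s.bc != nil {
		s.bc.Stop()
	}
	fmt.Println("stopped")
}

func main() {
	s := newService(true)
	s.Start()
	s.Stop()
}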
// Host returns the currently running libp2p
// host of the service.
func (s *Service) Host() host.Host {

View File

@@ -21,6 +21,7 @@ go_library(
deps = [
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/p2p/encoder:go_default_library",
"//beacon-chain/p2p/partialdatacolumnbroadcaster:go_default_library",
"//beacon-chain/p2p/peers:go_default_library",
"//beacon-chain/p2p/peers/scorers:go_default_library",
"//config/fieldparams:go_default_library",

View File

@@ -4,6 +4,7 @@ import (
"context"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/encoder"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/partialdatacolumnbroadcaster"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
@@ -108,6 +109,10 @@ func (*FakeP2P) PubSub() *pubsub.PubSub {
return nil
}
func (*FakeP2P) PartialColumnBroadcaster() *partialdatacolumnbroadcaster.PartialColumnBroadcaster {
return nil
}
// MetadataSeq -- fake.
func (*FakeP2P) MetadataSeq() uint64 {
return 0
@@ -169,7 +174,7 @@ func (*FakeP2P) BroadcastLightClientFinalityUpdate(_ context.Context, _ interfac
}
// BroadcastDataColumnSidecar -- fake.
func (*FakeP2P) BroadcastDataColumnSidecars(_ context.Context, _ []blocks.VerifiedRODataColumn) error {
func (*FakeP2P) BroadcastDataColumnSidecars(_ context.Context, _ []blocks.VerifiedRODataColumn, _ []blocks.PartialDataColumn) error {
return nil
}

View File

@@ -63,7 +63,7 @@ func (m *MockBroadcaster) BroadcastLightClientFinalityUpdate(_ context.Context,
}
// BroadcastDataColumnSidecar broadcasts a data column for mock.
func (m *MockBroadcaster) BroadcastDataColumnSidecars(context.Context, []blocks.VerifiedRODataColumn) error {
func (m *MockBroadcaster) BroadcastDataColumnSidecars(context.Context, []blocks.VerifiedRODataColumn, []blocks.PartialDataColumn) error {
m.BroadcastCalled.Store(true)
return nil
}
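BroadcastDataColumnSidecars gaining a []blocks.PartialDataColumn parameter forces every implementation of the broadcaster interface, including the fakes and mocks in these test packages, to change in lockstep. A trimmed-down illustration with hypothetical types:

package main

import "fmt"

type column struct{}
type partialColumn struct{}

// broadcaster is a cut-down stand-in for the interface whose method gained a
// second slice parameter; every implementation must adopt the new signature
// to keep satisfying it.
type broadcaster interface {
	BroadcastDataColumnSidecars(full []column, partial []partialColumn) error
}

type mockBroadcaster struct{ called bool }

func (m *mockBroadcaster) BroadcastDataColumnSidecars([]column, []partialColumn) error {
	m.called = true
	return nil
}

func main() {
	var b broadcaster = &mockBroadcaster{}
	fmt.Println(b.BroadcastDataColumnSidecars(nil, nil)) // <nil>
}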

View File

@@ -13,6 +13,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/encoder"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/partialdatacolumnbroadcaster"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers/scorers"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
@@ -138,9 +139,6 @@ func connect(a, b host.Host) error {
func (p *TestP2P) ReceiveRPC(topic string, msg proto.Message) {
h, err := libp2p.New(libp2p.ResourceManager(&network.NullResourceManager{}))
require.NoError(p.t, err)
p.t.Cleanup(func() {
require.NoError(p.t, h.Close())
})
if err := connect(h, p.BHost); err != nil {
p.t.Fatalf("Failed to connect two peers for RPC: %v", err)
}
@@ -172,9 +170,6 @@ func (p *TestP2P) ReceiveRPC(topic string, msg proto.Message) {
func (p *TestP2P) ReceivePubSub(topic string, msg proto.Message) {
h, err := libp2p.New(libp2p.ResourceManager(&network.NullResourceManager{}))
require.NoError(p.t, err)
p.t.Cleanup(func() {
require.NoError(p.t, h.Close())
})
ps, err := pubsub.NewFloodSub(context.Background(), h,
pubsub.WithMessageSigning(false),
pubsub.WithStrictSignatureVerification(false),
@@ -248,7 +243,7 @@ func (p *TestP2P) BroadcastLightClientFinalityUpdate(_ context.Context, _ interf
}
// BroadcastDataColumnSidecar broadcasts a data column for mock.
func (p *TestP2P) BroadcastDataColumnSidecars(context.Context, []blocks.VerifiedRODataColumn) error {
func (p *TestP2P) BroadcastDataColumnSidecars(context.Context, []blocks.VerifiedRODataColumn, []blocks.PartialDataColumn) error {
p.BroadcastCalled.Store(true)
return nil
}
@@ -314,6 +309,10 @@ func (p *TestP2P) PubSub() *pubsub.PubSub {
return p.pubsub
}
func (p *TestP2P) PartialColumnBroadcaster() *partialdatacolumnbroadcaster.PartialColumnBroadcaster {
return nil
}
// Disconnect from a peer.
func (p *TestP2P) Disconnect(pid peer.ID) error {
return p.BHost.Network().ClosePeer(pid)

View File

@@ -46,8 +46,6 @@ const (
GossipLightClientOptimisticUpdateMessage = "light_client_optimistic_update"
// GossipDataColumnSidecarMessage is the name for the data column sidecar message type.
GossipDataColumnSidecarMessage = "data_column_sidecar"
// GossipPayloadAttestationMessage is the name for the payload attestation message type.
GossipPayloadAttestationMessage = "payload_attestation_message"
// Topic Formats
//
@@ -77,8 +75,6 @@ const (
LightClientOptimisticUpdateTopicFormat = GossipProtocolAndDigest + GossipLightClientOptimisticUpdateMessage
// DataColumnSubnetTopicFormat is the topic format for the data column subnet.
DataColumnSubnetTopicFormat = GossipProtocolAndDigest + GossipDataColumnSidecarMessage + "_%d"
// PayloadAttestationMessageTopicFormat is the topic format for payload attestation messages.
PayloadAttestationMessageTopicFormat = GossipProtocolAndDigest + GossipPayloadAttestationMessage
)
// topic is a struct representing a single gossipsub topic.
@@ -145,7 +141,7 @@ func (s *Service) allTopics() []topic {
cfg := params.BeaconConfig()
// bellatrix: no special topics; electra: blobs topics handled all together
genesis, altair, capella := cfg.GenesisEpoch, cfg.AltairForkEpoch, cfg.CapellaForkEpoch
deneb, fulu, gloas, future := cfg.DenebForkEpoch, cfg.FuluForkEpoch, cfg.GloasForkEpoch, cfg.FarFutureEpoch
deneb, fulu, future := cfg.DenebForkEpoch, cfg.FuluForkEpoch, cfg.FarFutureEpoch
// Templates are starter topics - they have a placeholder digest and the subnet is set to the maximum value
// for the subnet (see how this is used in allSubnetsBelow). These are not directly returned by the method,
// they are copied and modified for each digest where they apply based on the start and end epochs.
@@ -162,7 +158,6 @@ func (s *Service) allTopics() []topic {
newTopic(altair, future, empty, GossipLightClientOptimisticUpdateMessage),
newTopic(altair, future, empty, GossipLightClientFinalityUpdateMessage),
newTopic(capella, future, empty, GossipBlsToExecutionChangeMessage),
newTopic(gloas, future, empty, GossipPayloadAttestationMessage),
}
last := params.GetNetworkScheduleEntry(genesis)
schedule := []params.NetworkScheduleEntry{last}
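For orientation, the topic constants above are format strings that receive a fork digest (and, for subnet topics, a subnet index) before a peer can join them. An illustrative expansion, assuming GossipProtocolAndDigest resolves to the usual "/eth2/%x/" prefix:

package main

import "fmt"

// Illustrative only: a local copy of what DataColumnSubnetTopicFormat expands
// to under the assumed "/eth2/%x/" prefix.
const dataColumnSubnetTopicFormat = "/eth2/%x/data_column_sidecar_%d"

func main() {
	digest := [4]byte{0x01, 0x02, 0x03, 0x04}
	subnet := uint64(7)
	fmt.Printf(dataColumnSubnetTopicFormat+"\n", digest, subnet)
	// /eth2/01020304/data_column_sidecar_7
}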

Some files were not shown because too many files have changed in this diff.