Mirror of https://github.com/OffchainLabs/prysm.git, synced 2026-01-10 22:07:59 -05:00.

Comparing branches eip-7251...peerDAS-sp (62 commits).
Commits in this comparison (62):

82ff8bb8e4
5f8aad4057
b25c9186a2
2aede8baad
30f507acad
f61517ba0f
1d3399fdba
82af2d6270
8f783b7d37
f5c46cf059
e0f2fc89d5
78abcc3635
1f63cec825
a5717132e7
87f838bb89
df524a8afc
b48870942f
1f1dac934e
299209f2ae
4620559500
d8bd4baa1a
9678b508d0
85fa2a2b4d
f0b5d15ccc
08db57ca2a
4e219d8f90
020fb06380
d022651ca0
890cb23040
5128cd4888
95071278dc
435ee12bd5
ae59700f22
f6294f134b
8cdb386ddf
3413d05b34
070a765d24
8ac1647436
dfe31c9242
b7866be3a9
8413660d5f
e037491756
ea2624b5ab
1b40f941cf
57830435d7
44d850de51
b08e691127
968e82b02d
de04ce8329
5efecff631
3ab759e163
836d369c6c
568273453b
7a4ecb6060
82f0ea5b11
6fddd13cb2
43c7659d18
2d15e53dab
2f2152e039
2542189efc
8e6d39a44b
c35889d4c6
.bazelrc (1 line changed)

@@ -22,6 +22,7 @@ coverage --define=coverage_enabled=1
 build --workspace_status_command=./hack/workspace_status.sh
 
 build --define blst_disabled=false
+build --compilation_mode=opt
 run --define blst_disabled=false
 
 build:blst_disabled --define blst_disabled=true
WORKSPACE (10 lines changed)

@@ -227,7 +227,7 @@ filegroup(
     url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
 )
 
-consensus_spec_version = "v1.5.0-alpha.2"
+consensus_spec_version = "v1.5.0-alpha.3"
 
 bls_test_version = "v0.1.1"
 
@@ -243,7 +243,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    integrity = "sha256-NNXBa7SZ2sFb68HPNahgu1p0yDBpjuKJuLfRCl7vvoQ=",
+    integrity = "sha256-+byv+GUOQytex5GgtjBGVoNDseJZbiBdAjEtlgCbjEo=",
     url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
 )
 
@@ -259,7 +259,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    integrity = "sha256-7BnlBvGWU92iAB100cMaAXVQhRrqpMQbavgrI+/paCw=",
+    integrity = "sha256-JJUy/jT1h3kGQkinTuzL7gMOA1+qgmPgJXVrYuH63Cg",
     url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
 )
 
@@ -275,7 +275,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    integrity = "sha256-VCHhcNt+fynf/sHK11qbRBAy608u9T1qAafvAGfxQhA=",
+    integrity = "sha256-T2VM4Qd0SwgGnTjWxjOX297DqEsovO9Ueij1UEJy48Y=",
     url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
 )
 
@@ -290,7 +290,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    integrity = "sha256-a2aCNFyFkYLtf6QSwGOHdx7xXHjA2NNT8x8ZuxB0aes=",
+    integrity = "sha256-OP9BCBcQ7i+93bwj7ktY8pZ5uWsGjgTe4XTp7BDhX+I=",
     strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
     url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
 )
@@ -48,8 +48,13 @@ func (n *NodeHealthTracker) CheckHealth(ctx context.Context) bool {
 	if isStatusChanged {
 		// Update the health status
 		n.isHealthy = &newStatus
-		// Send the new status to the health channel
-		n.healthChan <- newStatus
+		// Send the new status to the health channel, potentially overwriting the existing value
+		select {
+		case <-n.healthChan:
+			n.healthChan <- newStatus
+		default:
+			n.healthChan <- newStatus
+		}
 	}
 	return newStatus
 }
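The select above implements a drain-then-send on a one-slot buffered channel: an unread stale status is discarded so the fresh send cannot block. A minimal standalone sketch of the same idiom (illustrative only, not Prysm code; it assumes a single writer at a time, which the tracker's own locking is presumed to provide):

```go
package main

import "fmt"

// publishLatest keeps only the most recent value in a capacity-1 channel.
// With a single writer, the drained slot is guaranteed to be free for the send.
func publishLatest(ch chan bool, v bool) {
	select {
	case <-ch: // drop a stale, unread value
	default: // channel was already empty
	}
	ch <- v
}

func main() {
	ch := make(chan bool, 1)
	publishLatest(ch, true)
	publishLatest(ch, false) // overwrites without any reader present
	fmt.Println(<-ch)        // prints: false
}
```

This is also why the test change below can drop its consumer goroutine: the tracker no longer blocks when nobody reads the updates channel.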
@@ -87,12 +87,6 @@ func TestNodeHealth_Concurrency(t *testing.T) {
 	// Number of goroutines to spawn for both reading and writing
 	numGoroutines := 6
 
-	go func() {
-		for range n.HealthUpdates() {
-			// Consume values to avoid blocking on channel send.
-		}
-	}()
-
 	wg.Add(numGoroutines * 2) // for readers and writers
 
 	// Concurrently update health status
@@ -3,6 +3,7 @@ package testing
 import (
 	"context"
 	"reflect"
+	"sync"
 
 	"github.com/prysmaticlabs/prysm/v5/api/client/beacon/iface"
 	"go.uber.org/mock/gomock"
@@ -16,6 +17,7 @@ var (
 type MockHealthClient struct {
 	ctrl     *gomock.Controller
 	recorder *MockHealthClientMockRecorder
+	sync.Mutex
 }
 
 // MockHealthClientMockRecorder is the mock recorder for MockHealthClient.
@@ -25,6 +27,8 @@ type MockHealthClientMockRecorder struct {
 
 // IsHealthy mocks base method.
 func (m *MockHealthClient) IsHealthy(arg0 context.Context) bool {
+	m.Lock()
+	defer m.Unlock()
 	m.ctrl.T.Helper()
 	ret := m.ctrl.Call(m, "IsHealthy", arg0)
 	ret0, ok := ret[0].(bool)
@@ -41,6 +45,8 @@ func (m *MockHealthClient) EXPECT() *MockHealthClientMockRecorder {
 
 // IsHealthy indicates an expected call of IsHealthy.
 func (mr *MockHealthClientMockRecorder) IsHealthy(arg0 any) *gomock.Call {
+	mr.mock.Lock()
+	defer mr.mock.Unlock()
 	mr.mock.ctrl.T.Helper()
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsHealthy", reflect.TypeOf((*MockHealthClient)(nil).IsHealthy), arg0)
 }
@@ -14,7 +14,7 @@ go_library(
         "//validator:__subpackages__",
     ],
     deps = [
-        "//api/server:go_default_library",
+        "//api/server/middleware:go_default_library",
         "//runtime:go_default_library",
         "@com_github_gorilla_mux//:go_default_library",
         "@com_github_grpc_ecosystem_grpc_gateway_v2//runtime:go_default_library",
@@ -11,7 +11,7 @@ import (
 	"github.com/gorilla/mux"
 	gwruntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
 	"github.com/pkg/errors"
-	"github.com/prysmaticlabs/prysm/v5/api/server"
+	"github.com/prysmaticlabs/prysm/v5/api/server/middleware"
 	"github.com/prysmaticlabs/prysm/v5/runtime"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/connectivity"
@@ -104,7 +104,7 @@ func (g *Gateway) Start() {
 		}
 	}
 
-	corsMux := server.CorsHandler(g.cfg.allowedOrigins).Middleware(g.cfg.router)
+	corsMux := middleware.CorsHandler(g.cfg.allowedOrigins).Middleware(g.cfg.router)
 
 	if g.cfg.muxHandler != nil {
 		g.cfg.router.PathPrefix("/").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
api/server/BUILD.bazel

@@ -2,29 +2,14 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
 
 go_library(
     name = "go_default_library",
-    srcs = [
-        "error.go",
-        "middleware.go",
-        "util.go",
-    ],
+    srcs = ["error.go"],
     importpath = "github.com/prysmaticlabs/prysm/v5/api/server",
     visibility = ["//visibility:public"],
-    deps = [
-        "@com_github_gorilla_mux//:go_default_library",
-        "@com_github_rs_cors//:go_default_library",
-    ],
 )
 
 go_test(
     name = "go_default_test",
-    srcs = [
-        "error_test.go",
-        "middleware_test.go",
-        "util_test.go",
-    ],
+    srcs = ["error_test.go"],
     embed = [":go_default_library"],
-    deps = [
-        "//testing/assert:go_default_library",
-        "//testing/require:go_default_library",
-    ],
+    deps = ["//testing/assert:go_default_library"],
 )
api/server/middleware.go was deleted (@@ -1,32 +0,0 @@). Its two handlers, NormalizeQueryValuesHandler and CorsHandler, moved verbatim into the new api/server/middleware/middleware.go below.
api/server/middleware/BUILD.bazel (new file, 29 lines)

@@ -0,0 +1,29 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = [
        "middleware.go",
        "util.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/v5/api/server/middleware",
    visibility = ["//visibility:public"],
    deps = [
        "@com_github_gorilla_mux//:go_default_library",
        "@com_github_rs_cors//:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "middleware_test.go",
        "util_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        "//api:go_default_library",
        "//testing/assert:go_default_library",
        "//testing/require:go_default_library",
    ],
)
api/server/middleware/middleware.go (new file, 112 lines)

@@ -0,0 +1,112 @@
package middleware

import (
	"fmt"
	"net/http"
	"strings"

	"github.com/gorilla/mux"
	"github.com/rs/cors"
)

// NormalizeQueryValuesHandler normalizes an input query of "key=value1,value2,value3" to "key=value1&key=value2&key=value3"
func NormalizeQueryValuesHandler(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		query := r.URL.Query()
		NormalizeQueryValues(query)
		r.URL.RawQuery = query.Encode()

		next.ServeHTTP(w, r)
	})
}

// CorsHandler sets the cors settings on api endpoints
func CorsHandler(allowOrigins []string) mux.MiddlewareFunc {
	c := cors.New(cors.Options{
		AllowedOrigins:   allowOrigins,
		AllowedMethods:   []string{http.MethodPost, http.MethodGet, http.MethodDelete, http.MethodOptions},
		AllowCredentials: true,
		MaxAge:           600,
		AllowedHeaders:   []string{"*"},
	})

	return c.Handler
}

// ContentTypeHandler checks the request for an accepted media type, otherwise returning a http.StatusUnsupportedMediaType error
func ContentTypeHandler(acceptedMediaTypes []string) mux.MiddlewareFunc {
	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			// skip the GET request
			if r.Method == http.MethodGet {
				next.ServeHTTP(w, r)
				return
			}
			contentType := r.Header.Get("Content-Type")
			if contentType == "" {
				http.Error(w, "Content-Type header is missing", http.StatusUnsupportedMediaType)
				return
			}

			accepted := false
			for _, acceptedType := range acceptedMediaTypes {
				if strings.Contains(strings.TrimSpace(contentType), strings.TrimSpace(acceptedType)) {
					accepted = true
					break
				}
			}

			if !accepted {
				http.Error(w, fmt.Sprintf("Unsupported media type: %s", contentType), http.StatusUnsupportedMediaType)
				return
			}

			next.ServeHTTP(w, r)
		})
	}
}

// AcceptHeaderHandler checks if the client's response preference is handled
func AcceptHeaderHandler(serverAcceptedTypes []string) mux.MiddlewareFunc {
	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			acceptHeader := r.Header.Get("Accept")
			// the header is optional; skip the check if it is not provided
			if acceptHeader == "" {
				next.ServeHTTP(w, r)
				return
			}

			accepted := false
			acceptTypes := strings.Split(acceptHeader, ",")
			// follows rules defined in https://datatracker.ietf.org/doc/html/rfc2616#section-14.1
			for _, acceptType := range acceptTypes {
				acceptType = strings.TrimSpace(acceptType)
				if acceptType == "*/*" {
					accepted = true
					break
				}
				for _, serverAcceptedType := range serverAcceptedTypes {
					if strings.HasPrefix(acceptType, serverAcceptedType) {
						accepted = true
						break
					}
					if acceptType != "/*" && strings.HasSuffix(acceptType, "/*") && strings.HasPrefix(serverAcceptedType, acceptType[:len(acceptType)-2]) {
						accepted = true
						break
					}
				}
				if accepted {
					break
				}
			}

			if !accepted {
				http.Error(w, fmt.Sprintf("Not Acceptable: %s", acceptHeader), http.StatusNotAcceptable)
				return
			}

			next.ServeHTTP(w, r)
		})
	}
}
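A sketch of how these middlewares might be chained onto a gorilla/mux router (assumed usage for illustration; the route and port are made up):

```go
package main

import (
	"net/http"

	"github.com/gorilla/mux"
	"github.com/prysmaticlabs/prysm/v5/api/server/middleware"
)

func main() {
	r := mux.NewRouter()
	r.HandleFunc("/eth/v1/example", func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusOK)
	})
	// mux runs Use(...) middlewares in registration order around every route.
	r.Use(
		middleware.NormalizeQueryValuesHandler,
		middleware.ContentTypeHandler([]string{"application/json"}),
		middleware.AcceptHeaderHandler([]string{"application/json"}),
		middleware.CorsHandler([]string{"*"}),
	)
	_ = http.ListenAndServe(":8080", r)
}
```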
api/server/middleware/middleware_test.go (new file, 204 lines)

@@ -0,0 +1,204 @@
package middleware

import (
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/prysmaticlabs/prysm/v5/api"
	"github.com/prysmaticlabs/prysm/v5/testing/require"
)

func TestNormalizeQueryValuesHandler(t *testing.T) {
	nextHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		_, err := w.Write([]byte("next handler"))
		require.NoError(t, err)
	})

	handler := NormalizeQueryValuesHandler(nextHandler)

	tests := []struct {
		name          string
		inputQuery    string
		expectedQuery string
	}{
		{
			name:          "3 values",
			inputQuery:    "key=value1,value2,value3",
			expectedQuery: "key=value1&key=value2&key=value3", // replace with expected normalized value
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			req, err := http.NewRequest("GET", "/test?"+test.inputQuery, nil)
			if err != nil {
				t.Fatal(err)
			}

			rr := httptest.NewRecorder()
			handler.ServeHTTP(rr, req)

			if rr.Code != http.StatusOK {
				t.Errorf("handler returned wrong status code: got %v want %v", rr.Code, http.StatusOK)
			}

			if req.URL.RawQuery != test.expectedQuery {
				t.Errorf("query not normalized: got %v want %v", req.URL.RawQuery, test.expectedQuery)
			}

			if rr.Body.String() != "next handler" {
				t.Errorf("next handler was not executed")
			}
		})
	}
}

func TestContentTypeHandler(t *testing.T) {
	acceptedMediaTypes := []string{api.JsonMediaType, api.OctetStreamMediaType}

	nextHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		_, err := w.Write([]byte("next handler"))
		require.NoError(t, err)
	})

	handler := ContentTypeHandler(acceptedMediaTypes)(nextHandler)

	tests := []struct {
		name               string
		contentType        string
		expectedStatusCode int
		isGet              bool
	}{
		{
			name:               "Accepted Content-Type - application/json",
			contentType:        api.JsonMediaType,
			expectedStatusCode: http.StatusOK,
		},
		{
			name:               "Accepted Content-Type - ssz format",
			contentType:        api.OctetStreamMediaType,
			expectedStatusCode: http.StatusOK,
		},
		{
			name:               "Unsupported Content-Type - text/plain",
			contentType:        "text/plain",
			expectedStatusCode: http.StatusUnsupportedMediaType,
		},
		{
			name:               "Missing Content-Type",
			contentType:        "",
			expectedStatusCode: http.StatusUnsupportedMediaType,
		},
		{
			name:               "GET request skips content type check",
			contentType:        "",
			expectedStatusCode: http.StatusOK,
			isGet:              true,
		},
		{
			name:               "Content type contains charset is ok",
			contentType:        "application/json; charset=utf-8",
			expectedStatusCode: http.StatusOK,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			httpMethod := http.MethodPost
			if tt.isGet {
				httpMethod = http.MethodGet
			}
			req := httptest.NewRequest(httpMethod, "/", nil)
			if tt.contentType != "" {
				req.Header.Set("Content-Type", tt.contentType)
			}
			rr := httptest.NewRecorder()

			handler.ServeHTTP(rr, req)

			if status := rr.Code; status != tt.expectedStatusCode {
				t.Errorf("handler returned wrong status code: got %v want %v", status, tt.expectedStatusCode)
			}
		})
	}
}

func TestAcceptHeaderHandler(t *testing.T) {
	acceptedTypes := []string{"application/json", "application/octet-stream"}

	nextHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		_, err := w.Write([]byte("next handler"))
		require.NoError(t, err)
	})

	handler := AcceptHeaderHandler(acceptedTypes)(nextHandler)

	tests := []struct {
		name               string
		acceptHeader       string
		expectedStatusCode int
	}{
		{
			name:               "Accepted Accept-Type - application/json",
			acceptHeader:       "application/json",
			expectedStatusCode: http.StatusOK,
		},
		{
			name:               "Accepted Accept-Type - application/octet-stream",
			acceptHeader:       "application/octet-stream",
			expectedStatusCode: http.StatusOK,
		},
		{
			name:               "Accepted Accept-Type with parameters",
			acceptHeader:       "application/json;q=0.9, application/octet-stream;q=0.8",
			expectedStatusCode: http.StatusOK,
		},
		{
			name:               "Unsupported Accept-Type - text/plain",
			acceptHeader:       "text/plain",
			expectedStatusCode: http.StatusNotAcceptable,
		},
		{
			name:               "Missing Accept header",
			acceptHeader:       "",
			expectedStatusCode: http.StatusOK,
		},
		{
			name:               "*/* is accepted",
			acceptHeader:       "*/*",
			expectedStatusCode: http.StatusOK,
		},
		{
			name:               "application/* is accepted",
			acceptHeader:       "application/*",
			expectedStatusCode: http.StatusOK,
		},
		{
			name:               "/* is unsupported",
			acceptHeader:       "/*",
			expectedStatusCode: http.StatusNotAcceptable,
		},
		{
			name:               "application/ is unsupported",
			acceptHeader:       "application/",
			expectedStatusCode: http.StatusNotAcceptable,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			req := httptest.NewRequest("GET", "/", nil)
			if tt.acceptHeader != "" {
				req.Header.Set("Accept", tt.acceptHeader)
			}
			rr := httptest.NewRecorder()

			handler.ServeHTTP(rr, req)

			if status := rr.Code; status != tt.expectedStatusCode {
				t.Errorf("handler returned wrong status code: got %v want %v", status, tt.expectedStatusCode)
			}
		})
	}
}
api/server/middleware/util.go and util_test.go (moved from api/server; package renamed)

@@ -1,4 +1,4 @@
-package server
+package middleware
 
 import (
 	"net/url"
@@ -1,4 +1,4 @@
-package server
+package middleware
 
 import (
 	"testing"
api/server/middleware_test.go was deleted (@@ -1,54 +0,0 @@). Its TestNormalizeQueryValuesHandler moved unchanged, apart from the package rename, into api/server/middleware/middleware_test.go above.
beacon-chain/blockchain/BUILD.bazel

@@ -26,6 +26,7 @@ go_library(
         "receive_attestation.go",
         "receive_blob.go",
         "receive_block.go",
+        "receive_data_column.go",
         "service.go",
         "tracked_proposer.go",
         "weak_subjectivity_checks.go",
@@ -48,6 +49,7 @@ go_library(
         "//beacon-chain/core/feed:go_default_library",
         "//beacon-chain/core/feed/state:go_default_library",
         "//beacon-chain/core/helpers:go_default_library",
+        "//beacon-chain/core/peerdas:go_default_library",
         "//beacon-chain/core/signing:go_default_library",
         "//beacon-chain/core/time:go_default_library",
         "//beacon-chain/core/transition:go_default_library",
@@ -67,6 +69,7 @@ go_library(
         "//beacon-chain/startup:go_default_library",
         "//beacon-chain/state:go_default_library",
         "//beacon-chain/state/stategen:go_default_library",
+        "//cmd/beacon-chain/flags:go_default_library",
         "//config/features:go_default_library",
         "//config/fieldparams:go_default_library",
         "//config/params:go_default_library",
@@ -157,6 +160,7 @@ go_test(
         "//beacon-chain/operations/slashings:go_default_library",
         "//beacon-chain/operations/voluntaryexits:go_default_library",
+        "//beacon-chain/p2p:go_default_library",
         "//beacon-chain/p2p/testing:go_default_library",
         "//beacon-chain/startup:go_default_library",
         "//beacon-chain/state:go_default_library",
         "//beacon-chain/state/state-native:go_default_library",
@@ -33,6 +33,7 @@
 )
 
 var errMaxBlobsExceeded = errors.New("Expected commitments in block exceeds MAX_BLOBS_PER_BLOCK")
+var errMaxDataColumnsExceeded = errors.New("Expected data columns for node exceeds NUMBER_OF_COLUMNS")
 
 // An invalid block is the block that fails state transition based on the core protocol rules.
 // The beacon node shall not be accepting nor building blocks that branch off from an invalid block.
@@ -12,6 +12,8 @@ go_library(
     deps = [
         "//consensus-types/blocks:go_default_library",
         "@com_github_crate_crypto_go_kzg_4844//:go_default_library",
+        "@com_github_ethereum_c_kzg_4844//bindings/go:go_default_library",
+        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
         "@com_github_pkg_errors//:go_default_library",
     ],
 )
@@ -5,6 +5,8 @@ import (
 	"encoding/json"
 
 	GoKZG "github.com/crate-crypto/go-kzg-4844"
+	CKZG "github.com/ethereum/c-kzg-4844/bindings/go"
+	"github.com/ethereum/go-ethereum/common/hexutil"
 	"github.com/pkg/errors"
 )
@@ -12,17 +14,38 @@ var (
 	//go:embed trusted_setup.json
 	embeddedTrustedSetup []byte // 1.2Mb
 	kzgContext           *GoKZG.Context
+	kzgLoaded            bool
 )
 
 func Start() error {
-	parsedSetup := GoKZG.JSONTrustedSetup{}
-	err := json.Unmarshal(embeddedTrustedSetup, &parsedSetup)
+	parsedSetup := &GoKZG.JSONTrustedSetup{}
+	err := json.Unmarshal(embeddedTrustedSetup, parsedSetup)
 	if err != nil {
 		return errors.Wrap(err, "could not parse trusted setup JSON")
 	}
-	kzgContext, err = GoKZG.NewContext4096(&parsedSetup)
+	kzgContext, err = GoKZG.NewContext4096(parsedSetup)
 	if err != nil {
 		return errors.Wrap(err, "could not initialize go-kzg context")
 	}
+	g1Lagrange := &parsedSetup.SetupG1Lagrange
+
+	// Length of a G1 point, converted from hex to binary.
+	g1s := make([]byte, len(g1Lagrange)*(len(g1Lagrange[0])-2)/2)
+	for i, g1 := range g1Lagrange {
+		copy(g1s[i*(len(g1)-2)/2:], hexutil.MustDecode(g1))
+	}
+	// Length of a G2 point, converted from hex to binary.
+	g2s := make([]byte, len(parsedSetup.SetupG2)*(len(parsedSetup.SetupG2[0])-2)/2)
+	for i, g2 := range parsedSetup.SetupG2 {
+		copy(g2s[i*(len(g2)-2)/2:], hexutil.MustDecode(g2))
+	}
+	if !kzgLoaded {
+		// Free the current trusted setup before running this method. CKZG
+		// panics if the same setup is run multiple times.
+		if err = CKZG.LoadTrustedSetup(g1s, g2s); err != nil {
+			panic(err)
+		}
+	}
+	kzgLoaded = true
 	return nil
 }
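The packing arithmetic above relies on each setup point being a 0x-prefixed hex string, so its binary size is (len(s)-2)/2 bytes (48 for G1, 96 for G2). A tiny standalone check of that invariant (illustrative only, not part of the diff):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

func main() {
	g1 := "0x" + "ab0102030405" // stand-in; real G1 points carry 96 hex chars (48 bytes)
	decoded := hexutil.MustDecode(g1)
	fmt.Println((len(g1)-2)/2 == len(decoded)) // true: the two lengths always agree
}
```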
@@ -118,9 +118,9 @@ func WithBLSToExecPool(p blstoexec.PoolManager) Option {
 }
 
 // WithP2PBroadcaster to broadcast messages after appropriate processing.
-func WithP2PBroadcaster(p p2p.Broadcaster) Option {
+func WithP2PBroadcaster(p p2p.Acceser) Option {
 	return func(s *Service) error {
-		s.cfg.P2p = p
+		s.cfg.P2P = p
 		return nil
 	}
 }
@@ -80,11 +80,11 @@ func (s *Service) OnAttestation(ctx context.Context, a ethpb.Att, disparity time
 	}
 
 	// Use the target state to verify attesting indices are valid.
-	committee, err := helpers.BeaconCommitteeFromState(ctx, baseState, a.GetData().Slot, a.GetData().CommitteeIndex)
+	committees, err := helpers.AttestationCommittees(ctx, baseState, a)
 	if err != nil {
 		return err
 	}
-	indexedAtt, err := attestation.ConvertToIndexed(ctx, a, committee)
+	indexedAtt, err := attestation.ConvertToIndexed(ctx, a, committees...)
 	if err != nil {
 		return err
 	}
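Background on the API shape: before Electra an attestation referenced exactly one committee, so the helper returned a single slice; with EIP-7549-style aggregates one attestation can span several committees, so the new helper returns a slice of committees that is spread variadically into ConvertToIndexed. A toy illustration of that calling convention (hypothetical names, not Prysm's):

```go
package main

import "fmt"

// indicesFrom flattens validator indices across any number of committees,
// mirroring the variadic `committees...` spread used above.
func indicesFrom(committees ...[]uint64) []uint64 {
	var out []uint64
	for _, c := range committees {
		out = append(out, c...)
	}
	return out
}

func main() {
	one := indicesFrom([]uint64{10, 11})                    // pre-Electra: single committee
	many := indicesFrom([]uint64{10, 11}, []uint64{20, 21}) // post-Electra: several
	fmt.Println(one, many)
}
```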
@@ -7,9 +7,11 @@ import (
 	"time"
 
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
+	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
 	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
 	"github.com/prysmaticlabs/prysm/v5/config/params"
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
+	"github.com/prysmaticlabs/prysm/v5/crypto/bls"
 	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
 	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
 	"github.com/prysmaticlabs/prysm/v5/testing/assert"
@@ -125,25 +127,36 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
 }
 
 func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
-	service, tr := minimalTestService(t)
-	ctx := tr.ctx
-
-	genesisState, pks := util.DeterministicGenesisState(t, 64)
-	service.SetGenesisTime(time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0))
-	require.NoError(t, service.saveGenesisData(ctx, genesisState))
-	att, err := util.GenerateAttestations(genesisState, pks, 1, 0, false)
-	require.NoError(t, err)
-	tRoot := bytesutil.ToBytes32(att[0].Data.Target.Root)
-	copied := genesisState.Copy()
-	copied, err = transition.ProcessSlots(ctx, copied, 1)
-	require.NoError(t, err)
-	require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
-	ojc := &ethpb.Checkpoint{Epoch: 0, Root: tRoot[:]}
-	ofc := &ethpb.Checkpoint{Epoch: 0, Root: tRoot[:]}
-	state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
-	require.NoError(t, err)
-	require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
-	require.NoError(t, service.OnAttestation(ctx, att[0], 0))
+	eval := func(ctx context.Context, service *Service, genesisState state.BeaconState, pks []bls.SecretKey) {
+		service.SetGenesisTime(time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0))
+		require.NoError(t, service.saveGenesisData(ctx, genesisState))
+		att, err := util.GenerateAttestations(genesisState, pks, 1, 0, false)
+		require.NoError(t, err)
+		tRoot := bytesutil.ToBytes32(att[0].GetData().Target.Root)
+		copied := genesisState.Copy()
+		copied, err = transition.ProcessSlots(ctx, copied, 1)
+		require.NoError(t, err)
+		require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
+		ojc := &ethpb.Checkpoint{Epoch: 0, Root: tRoot[:]}
+		ofc := &ethpb.Checkpoint{Epoch: 0, Root: tRoot[:]}
+		state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
+		require.NoError(t, err)
+		require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
+		require.NoError(t, service.OnAttestation(ctx, att[0], 0))
+	}
+
+	t.Run("pre-Electra", func(t *testing.T) {
+		service, tr := minimalTestService(t)
+		ctx := tr.ctx
+		genesisState, pks := util.DeterministicGenesisState(t, 64)
+		eval(ctx, service, genesisState, pks)
+	})
+	t.Run("post-Electra", func(t *testing.T) {
+		service, tr := minimalTestService(t)
+		ctx := tr.ctx
+		genesisState, pks := util.DeterministicGenesisStateElectra(t, 64)
+		eval(ctx, service, genesisState, pks)
+	})
 }
 
 func TestService_GetRecentPreState(t *testing.T) {
@@ -6,6 +6,8 @@ import (
 	"time"
 
 	"github.com/pkg/errors"
+	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
+	"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
 	"github.com/sirupsen/logrus"
 	"go.opencensus.io/trace"
@@ -366,11 +368,11 @@ func (s *Service) handleEpochBoundary(ctx context.Context, slot primitives.Slot,
 func (s *Service) handleBlockAttestations(ctx context.Context, blk interfaces.ReadOnlyBeaconBlock, st state.BeaconState) error {
 	// Feed in block's attestations to fork choice store.
 	for _, a := range blk.Body().Attestations() {
-		committee, err := helpers.BeaconCommitteeFromState(ctx, st, a.GetData().Slot, a.GetData().CommitteeIndex)
+		committees, err := helpers.AttestationCommittees(ctx, st, a)
 		if err != nil {
 			return err
 		}
-		indices, err := attestation.AttestingIndices(a, committee)
+		indices, err := attestation.AttestingIndices(a, committees...)
 		if err != nil {
 			return err
 		}
@@ -500,7 +502,7 @@ func missingIndices(bs *filesystem.BlobStorage, root [32]byte, expected [][]byte
 	}
 	indices, err := bs.Indices(root)
 	if err != nil {
-		return nil, err
+		return nil, errors.Wrap(err, "indices")
 	}
 	missing := make(map[uint64]struct{}, len(expected))
 	for i := range expected {
@@ -514,12 +516,35 @@
 	return missing, nil
 }
 
+func missingDataColumns(bs *filesystem.BlobStorage, root [32]byte, expected map[uint64]bool) (map[uint64]bool, error) {
+	if len(expected) == 0 {
+		return nil, nil
+	}
+	if len(expected) > int(params.BeaconConfig().NumberOfColumns) {
+		return nil, errMaxDataColumnsExceeded
+	}
+	indices, err := bs.ColumnIndices(root)
+	if err != nil {
+		return nil, err
+	}
+	missing := make(map[uint64]bool, len(expected))
+	for col := range expected {
+		if !indices[col] {
+			missing[col] = true
+		}
+	}
+	return missing, nil
+}
+
 // isDataAvailable blocks until all BlobSidecars committed to in the block are available,
 // or an error or context cancellation occurs. A nil result means that the data availability check is successful.
 // The function will first check the database to see if all sidecars have been persisted. If any
 // sidecars are missing, it will then read from the blobNotifier channel for the given root until the channel is
 // closed, the context hits cancellation/timeout, or notifications have been received for all the missing sidecars.
 func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed interfaces.ReadOnlySignedBeaconBlock) error {
+	if features.Get().EnablePeerDAS {
+		return s.isDataAvailableDataColumns(ctx, root, signed)
+	}
 	if signed.Version() < version.Deneb {
 		return nil
 	}
@@ -549,7 +574,7 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
 	// get a map of BlobSidecar indices that are not currently available.
 	missing, err := missingIndices(s.blobStorage, root, kzgCommitments)
 	if err != nil {
-		return err
+		return errors.Wrap(err, "missing indices")
 	}
 	// If there are no missing indices, all BlobSidecars are available.
 	if len(missing) == 0 {
@@ -568,8 +593,13 @@
 		if len(missing) == 0 {
 			return
 		}
-		log.WithFields(daCheckLogFields(root, signed.Block().Slot(), expected, len(missing))).
-			Error("Still waiting for DA check at slot end.")
+
+		log.WithFields(logrus.Fields{
+			"slot":          signed.Block().Slot(),
+			"root":          fmt.Sprintf("%#x", root),
+			"blobsExpected": expected,
+			"blobsWaiting":  len(missing),
+		}).Error("Still waiting for blobs DA check at slot end.")
 	})
 	defer nst.Stop()
 }
@@ -591,12 +621,104 @@
 	}
 }
 
-func daCheckLogFields(root [32]byte, slot primitives.Slot, expected, missing int) logrus.Fields {
-	return logrus.Fields{
-		"slot":          slot,
-		"root":          fmt.Sprintf("%#x", root),
-		"blobsExpected": expected,
-		"blobsWaiting":  missing,
-	}
-}
+func (s *Service) isDataAvailableDataColumns(ctx context.Context, root [32]byte, signed interfaces.ReadOnlySignedBeaconBlock) error {
+	if signed.Version() < version.Deneb {
+		return nil
+	}
+
+	block := signed.Block()
+	if block == nil {
+		return errors.New("invalid nil beacon block")
+	}
+	// We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
+	if !params.WithinDAPeriod(slots.ToEpoch(block.Slot()), slots.ToEpoch(s.CurrentSlot())) {
+		return nil
+	}
+	body := block.Body()
+	if body == nil {
+		return errors.New("invalid nil beacon block body")
+	}
+	kzgCommitments, err := body.BlobKzgCommitments()
+	if err != nil {
+		return errors.Wrap(err, "could not get KZG commitments")
+	}
+	// If the block has no commitments there is nothing to wait for.
+	if len(kzgCommitments) == 0 {
+		return nil
+	}
+	custodiedSubnetCount := params.BeaconConfig().CustodyRequirement
+	if flags.Get().SubscribeToAllSubnets {
+		custodiedSubnetCount = params.BeaconConfig().DataColumnSidecarSubnetCount
+	}
+
+	colMap, err := peerdas.CustodyColumns(s.cfg.P2P.NodeID(), custodiedSubnetCount)
+	if err != nil {
+		return err
+	}
+	// Expected is the number of custodied data columns a node is expected to have.
+	expected := len(colMap)
+	if expected == 0 {
+		return nil
+	}
+
+	// Subscribe to newly stored data columns in the database.
+	rootIndexChan := make(chan filesystem.RootIndexPair)
+	subscription := s.blobStorage.DataColumnFeed.Subscribe(rootIndexChan)
+	defer subscription.Unsubscribe()
+
+	// Get a map of data column indices that are not currently available.
+	missing, err := missingDataColumns(s.blobStorage, root, colMap)
+	if err != nil {
+		return err
+	}
+
+	// If there are no missing indices, all data column sidecars are available.
+	// This is the happy path.
+	if len(missing) == 0 {
+		return nil
+	}
+
+	// Log for DA checks that cross over into the next slot; helpful for debugging.
+	nextSlot := slots.BeginsAt(signed.Block().Slot()+1, s.genesisTime)
+	// Avoid logging if DA check is called after next slot start.
+	if nextSlot.After(time.Now()) {
+		nst := time.AfterFunc(time.Until(nextSlot), func() {
+			if len(missing) == 0 {
+				return
+			}
+
+			log.WithFields(logrus.Fields{
+				"slot":            signed.Block().Slot(),
+				"root":            fmt.Sprintf("%#x", root),
+				"columnsExpected": expected,
+				"columnsWaiting":  len(missing),
+			}).Error("Still waiting for data columns DA check at slot end.")
+		})
+		defer nst.Stop()
+	}
+	for {
+		select {
+		case rootIndex := <-rootIndexChan:
+			if rootIndex.Root != root {
+				// This is not the root we are looking for.
+				continue
+			}
+
+			// Remove the index from the missing map.
+			delete(missing, rootIndex.Index)
+
+			// Exit if there are no missing data columns left.
+			if len(missing) == 0 {
+				return nil
+			}
+		case <-ctx.Done():
+			missingIndexes := make([]uint64, 0, len(missing))
+			for val := range missing {
+				copiedVal := val
+				missingIndexes = append(missingIndexes, copiedVal)
+			}
+			return errors.Wrapf(ctx.Err(), "context deadline waiting for data column sidecars slot: %d, BlockRoot: %#x, missing %v", block.Slot(), root, missingIndexes)
+		}
+	}
+}
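The data-column DA wait above follows a subscribe-then-check-then-drain pattern: subscribe to the storage feed first, then compute what is missing, then consume events. Subscribing before the check closes the window where a column lands between the check and the wait. The core loop, reduced to plain channels (illustrative only, not Prysm's types):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// waitForIndices blocks until every index in missing has arrived on events,
// or the context expires.
func waitForIndices(ctx context.Context, missing map[uint64]bool, events <-chan uint64) error {
	for len(missing) > 0 {
		select {
		case idx := <-events:
			delete(missing, idx) // deleting an absent key is a no-op
		case <-ctx.Done():
			return fmt.Errorf("%d indices still missing: %w", len(missing), ctx.Err())
		}
	}
	return nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	events := make(chan uint64, 2)
	events <- 3
	events <- 7
	fmt.Println(waitForIndices(ctx, map[uint64]bool{3: true, 7: true}, events)) // <nil>
}
```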
@@ -1963,68 +1963,130 @@ func TestNoViableHead_Reboot(t *testing.T) {
 }
 
The former unwrapped body of TestOnBlock_HandleBlockAttestations (identical to the new pre-Electra subtest, minus the wrapper) is deleted; the rewritten test reads:

 func TestOnBlock_HandleBlockAttestations(t *testing.T) {
 	t.Run("pre-Electra", func(t *testing.T) {
 		service, tr := minimalTestService(t)
 		ctx := tr.ctx
 
 		st, keys := util.DeterministicGenesisState(t, 64)
 		stateRoot, err := st.HashTreeRoot(ctx)
 		require.NoError(t, err, "Could not hash genesis state")
 
 		require.NoError(t, service.saveGenesisData(ctx, st))
 
 		genesis := blocks.NewGenesisBlock(stateRoot[:])
 		wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
 		require.NoError(t, err)
 		require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")
 		parentRoot, err := genesis.Block.HashTreeRoot()
 		require.NoError(t, err, "Could not get signing root")
 		require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, parentRoot), "Could not save genesis state")
 		require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
 
 		st, err = service.HeadState(ctx)
 		require.NoError(t, err)
 		b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
 		require.NoError(t, err)
 		wsb, err = consensusblocks.NewSignedBeaconBlock(b)
 		require.NoError(t, err)
 		root, err := b.Block.HashTreeRoot()
 		require.NoError(t, err)
 		preState, err := service.getBlockPreState(ctx, wsb.Block())
 		require.NoError(t, err)
 		postState, err := service.validateStateTransition(ctx, preState, wsb)
 		require.NoError(t, err)
 		require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
 		require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
 
 		st, err = service.HeadState(ctx)
 		require.NoError(t, err)
 		b, err = util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
 		require.NoError(t, err)
 		wsb, err = consensusblocks.NewSignedBeaconBlock(b)
 		require.NoError(t, err)
 
 		// prepare another block that is not inserted
 		st3, err := transition.ExecuteStateTransition(ctx, st, wsb)
 		require.NoError(t, err)
 		b3, err := util.GenerateFullBlock(st3, keys, util.DefaultBlockGenConfig(), 3)
 		require.NoError(t, err)
 		wsb3, err := consensusblocks.NewSignedBeaconBlock(b3)
 		require.NoError(t, err)
 
 		require.Equal(t, 1, len(wsb.Block().Body().Attestations()))
 		a := wsb.Block().Body().Attestations()[0]
 		r := bytesutil.ToBytes32(a.GetData().BeaconBlockRoot)
 		require.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(r))
 
 		require.Equal(t, 1, len(wsb.Block().Body().Attestations()))
 		a3 := wsb3.Block().Body().Attestations()[0]
 		r3 := bytesutil.ToBytes32(a3.GetData().BeaconBlockRoot)
 		require.Equal(t, false, service.cfg.ForkChoiceStore.HasNode(r3))
 
 		require.NoError(t, service.handleBlockAttestations(ctx, wsb.Block(), st)) // fine to use the same committee as st
 		require.Equal(t, 0, service.cfg.AttPool.ForkchoiceAttestationCount())
 		require.NoError(t, service.handleBlockAttestations(ctx, wsb3.Block(), st3)) // fine to use the same committee as st
 		require.Equal(t, 1, len(service.cfg.AttPool.BlockAttestations()))
 	})
 	t.Run("post-Electra", func(t *testing.T) {
 		service, tr := minimalTestService(t)
 		ctx := tr.ctx
 
 		st, keys := util.DeterministicGenesisStateElectra(t, 64)
 		require.NoError(t, service.saveGenesisData(ctx, st))
 
 		genesis, err := blocks.NewGenesisBlockForState(ctx, st)
 		require.NoError(t, err)
 		require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, genesis), "Could not save genesis block")
 		parentRoot, err := genesis.Block().HashTreeRoot()
 		require.NoError(t, err, "Could not get signing root")
 		require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, parentRoot), "Could not save genesis state")
 		require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
 
 		st, err = service.HeadState(ctx)
 		require.NoError(t, err)
 		b, err := util.GenerateFullBlockElectra(st, keys, util.DefaultBlockGenConfig(), 1)
 		require.NoError(t, err)
 		wsb, err := consensusblocks.NewSignedBeaconBlock(b)
 		require.NoError(t, err)
 		root, err := b.Block.HashTreeRoot()
 		require.NoError(t, err)
 		preState, err := service.getBlockPreState(ctx, wsb.Block())
 		require.NoError(t, err)
 		postState, err := service.validateStateTransition(ctx, preState, wsb)
 		require.NoError(t, err)
 		require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
 		require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
 
 		st, err = service.HeadState(ctx)
 		require.NoError(t, err)
 		b, err = util.GenerateFullBlockElectra(st, keys, util.DefaultBlockGenConfig(), 2)
 		require.NoError(t, err)
 		wsb, err = consensusblocks.NewSignedBeaconBlock(b)
 		require.NoError(t, err)
 
 		// prepare another block that is not inserted
 		st3, err := transition.ExecuteStateTransition(ctx, st, wsb)
 		require.NoError(t, err)
 		b3, err := util.GenerateFullBlockElectra(st3, keys, util.DefaultBlockGenConfig(), 3)
 		require.NoError(t, err)
 		wsb3, err := consensusblocks.NewSignedBeaconBlock(b3)
 		require.NoError(t, err)
 
 		require.Equal(t, 1, len(wsb.Block().Body().Attestations()))
 		a := wsb.Block().Body().Attestations()[0]
 		r := bytesutil.ToBytes32(a.GetData().BeaconBlockRoot)
 		require.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(r))
 
 		require.Equal(t, 1, len(wsb.Block().Body().Attestations()))
 		a3 := wsb3.Block().Body().Attestations()[0]
 		r3 := bytesutil.ToBytes32(a3.GetData().BeaconBlockRoot)
 		require.Equal(t, false, service.cfg.ForkChoiceStore.HasNode(r3))
 
 		require.NoError(t, service.handleBlockAttestations(ctx, wsb.Block(), st)) // fine to use the same committee as st
 		require.Equal(t, 0, service.cfg.AttPool.ForkchoiceAttestationCount())
 		require.NoError(t, service.handleBlockAttestations(ctx, wsb3.Block(), st3)) // fine to use the same committee as st
 		require.Equal(t, 1, len(service.cfg.AttPool.BlockAttestations()))
 	})
 }
 
 func TestFillMissingBlockPayloadId_DiffSlotExitEarly(t *testing.T) {
@@ -73,7 +73,7 @@ func TestProcessAttestations_Ok(t *testing.T) {
 	require.NoError(t, service.saveGenesisData(ctx, genesisState))
 	atts, err := util.GenerateAttestations(genesisState, pks, 1, 0, false)
 	require.NoError(t, err)
-	tRoot := bytesutil.ToBytes32(atts[0].Data.Target.Root)
+	tRoot := bytesutil.ToBytes32(atts[0].GetData().Target.Root)
 	copied := genesisState.Copy()
 	copied, err = transition.ProcessSlots(ctx, copied, 1)
 	require.NoError(t, err)
@@ -131,8 +131,8 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
 	}
 	require.NoError(t, service.cfg.AttPool.SaveForkchoiceAttestations(attsToSave))
 	// Verify the target is in forkchoice
-	require.Equal(t, true, fcs.HasNode(bytesutil.ToBytes32(atts[0].Data.BeaconBlockRoot)))
-	require.Equal(t, tRoot, bytesutil.ToBytes32(atts[0].Data.BeaconBlockRoot))
+	require.Equal(t, true, fcs.HasNode(bytesutil.ToBytes32(atts[0].GetData().BeaconBlockRoot)))
+	require.Equal(t, tRoot, bytesutil.ToBytes32(atts[0].GetData().BeaconBlockRoot))
 	require.Equal(t, true, fcs.HasNode(service.originBlockRoot))
 
 	// Insert a new block to forkchoice
@@ -50,6 +50,12 @@ type BlobReceiver interface {
 	ReceiveBlob(context.Context, blocks.VerifiedROBlob) error
 }
 
+// DataColumnReceiver interface defines the methods of chain service for receiving new
+// data columns
+type DataColumnReceiver interface {
+	ReceiveDataColumn(context.Context, blocks.VerifiedRODataColumn) error
+}
+
 // SlashingReceiver interface defines the methods of chain service for receiving validated slashing over the wire.
 type SlashingReceiver interface {
 	ReceiveAttesterSlashing(ctx context.Context, slashing ethpb.AttSlashing)
@@ -479,12 +485,12 @@ func (s *Service) sendBlockAttestationsToSlasher(signed interfaces.ReadOnlySigne
 	// is done in the background to avoid adding more load to this critical code path.
 	ctx := context.TODO()
 	for _, att := range signed.Block().Body().Attestations() {
-		committee, err := helpers.BeaconCommitteeFromState(ctx, preState, att.GetData().Slot, att.GetData().CommitteeIndex)
+		committees, err := helpers.AttestationCommittees(ctx, preState, att)
 		if err != nil {
-			log.WithError(err).Error("Could not get attestation committee")
+			log.WithError(err).Error("Could not get attestation committees")
 			return
 		}
-		indexedAtt, err := attestation.ConvertToIndexed(ctx, att, committee)
+		indexedAtt, err := attestation.ConvertToIndexed(ctx, att, committees...)
 		if err != nil {
 			log.WithError(err).Error("Could not convert to indexed attestation")
 			return
beacon-chain/blockchain/receive_data_column.go (new file, 16 lines)

@@ -0,0 +1,16 @@
package blockchain

import (
	"context"

	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
)

func (s *Service) ReceiveDataColumn(ctx context.Context, ds blocks.VerifiedRODataColumn) error {
	if err := s.blobStorage.SaveDataColumn(ds); err != nil {
		return errors.Wrap(err, "save data column")
	}

	return nil
}
@@ -82,7 +82,7 @@ type config struct {
 	ExitPool      voluntaryexits.PoolManager
 	SlashingPool  slashings.PoolManager
 	BLSToExecPool blstoexec.PoolManager
-	P2p           p2p.Broadcaster
+	P2P           p2p.Acceser
 	MaxRoutines   int
 	StateNotifier statefeed.Notifier
 	ForkChoiceStore f.ForkChoicer
@@ -107,15 +107,17 @@ var ErrMissingClockSetter = errors.New("blockchain Service initialized without a
 type blobNotifierMap struct {
 	sync.RWMutex
 	notifiers map[[32]byte]chan uint64
-	seenIndex map[[32]byte][fieldparams.MaxBlobsPerBlock]bool
+	seenIndex map[[32]byte][fieldparams.NumberOfColumns]bool
 }
 
 // notifyIndex notifies a blob by its index for a given root.
 // It uses internal maps to keep track of seen indices and notifier channels.
 func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64) {
-	if idx >= fieldparams.MaxBlobsPerBlock {
-		return
-	}
+	// TODO: Separate Data Columns from blobs
+	/*
+		if idx >= fieldparams.MaxBlobsPerBlock {
+			return
+		}*/
 
 	bn.Lock()
 	seen := bn.seenIndex[root]
@@ -129,7 +131,7 @@
 	// Retrieve or create the notifier channel for the given root.
 	c, ok := bn.notifiers[root]
 	if !ok {
-		c = make(chan uint64, fieldparams.MaxBlobsPerBlock)
+		c = make(chan uint64, fieldparams.NumberOfColumns)
 		bn.notifiers[root] = c
 	}
 
@@ -143,7 +145,7 @@ func (bn *blobNotifierMap) forRoot(root [32]byte) chan uint64 {
 	defer bn.Unlock()
 	c, ok := bn.notifiers[root]
 	if !ok {
-		c = make(chan uint64, fieldparams.MaxBlobsPerBlock)
+		c = make(chan uint64, fieldparams.NumberOfColumns)
 		bn.notifiers[root] = c
 	}
 	return c
@@ -169,7 +171,7 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
 	ctx, cancel := context.WithCancel(ctx)
 	bn := &blobNotifierMap{
 		notifiers: make(map[[32]byte]chan uint64),
-		seenIndex: make(map[[32]byte][fieldparams.MaxBlobsPerBlock]bool),
+		seenIndex: make(map[[32]byte][fieldparams.NumberOfColumns]bool),
 	}
 	srv := &Service{
 		ctx: ctx,
@@ -95,7 +95,7 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
 		WithAttestationPool(attestations.NewPool()),
 		WithSlashingPool(slashings.NewPool()),
 		WithExitPool(voluntaryexits.NewPool()),
-		WithP2PBroadcaster(&mockBroadcaster{}),
+		WithP2PBroadcaster(&mockAccesser{}),
 		WithStateNotifier(&mockBeaconNode{}),
 		WithForkChoiceStore(fc),
 		WithAttestationService(attService),
@@ -518,7 +518,7 @@ func (s *MockClockSetter) SetClock(g *startup.Clock) error {
 func TestNotifyIndex(t *testing.T) {
 	// Initialize a blobNotifierMap
 	bn := &blobNotifierMap{
-		seenIndex: make(map[[32]byte][fieldparams.MaxBlobsPerBlock]bool),
+		seenIndex: make(map[[32]byte][fieldparams.NumberOfColumns]bool),
 		notifiers: make(map[[32]byte]chan uint64),
 	}
@@ -13,11 +13,13 @@ import (
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
 	testDB "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/testing"
+	mockExecution "github.com/prysmaticlabs/prysm/v5/beacon-chain/execution/testing"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice"
 	doublylinkedtree "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/doubly-linked-tree"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/blstoexec"
+	"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
 	p2pTesting "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/testing"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stategen"
 	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
@@ -44,6 +46,11 @@ type mockBroadcaster struct {
 	broadcastCalled bool
 }
 
+type mockAccesser struct {
+	mockBroadcaster
+	p2pTesting.MockPeerManager
+}
+
 func (mb *mockBroadcaster) Broadcast(_ context.Context, _ proto.Message) error {
 	mb.broadcastCalled = true
 	return nil
@@ -64,6 +71,11 @@ func (mb *mockBroadcaster) BroadcastBlob(_ context.Context, _ uint64, _ *ethpb.B
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mb *mockBroadcaster) BroadcastDataColumn(_ context.Context, _ uint64, _ *ethpb.DataColumnSidecar) error {
|
||||
mb.broadcastCalled = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mb *mockBroadcaster) BroadcastBLSChanges(_ context.Context, _ []*ethpb.SignedBLSToExecutionChange) {
|
||||
}
|
||||
|
||||
@@ -120,6 +132,7 @@ func minimalTestService(t *testing.T, opts ...Option) (*Service, *testServiceReq
|
||||
WithTrackedValidatorsCache(cache.NewTrackedValidatorsCache()),
|
||||
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)),
|
||||
WithSyncChecker(mock.MockChecker{}),
|
||||
WithExecutionEngineCaller(&mockExecution.EngineClient{}),
|
||||
}
|
||||
// append the variadic opts so they override the defaults by being processed afterwards
|
||||
opts = append(defOpts, opts...)
|
||||
|
||||
@@ -628,6 +628,11 @@ func (c *ChainService) ReceiveBlob(_ context.Context, b blocks.VerifiedROBlob) e
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReceiveDataColumn implements the same method in chain service
|
||||
func (c *ChainService) ReceiveDataColumn(_ context.Context, _ blocks.VerifiedRODataColumn) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// TargetRootForEpoch mocks the same method in the chain service
|
||||
func (c *ChainService) TargetRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]byte, error) {
|
||||
return c.TargetRoot, nil
|
||||
|
||||
1
beacon-chain/cache/BUILD.bazel
vendored
1
beacon-chain/cache/BUILD.bazel
vendored
@@ -8,6 +8,7 @@ go_library(
|
||||
"attestation_data.go",
|
||||
"balance_cache_key.go",
|
||||
"checkpoint_state.go",
|
||||
"column_subnet_ids.go",
|
||||
"committee.go",
|
||||
"committee_disabled.go", # keep
|
||||
"committees.go",
|
||||
|
||||
65
beacon-chain/cache/column_subnet_ids.go
vendored
Normal file
65
beacon-chain/cache/column_subnet_ids.go
vendored
Normal file
@@ -0,0 +1,65 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/patrickmn/go-cache"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
)
|
||||
|
||||
type columnSubnetIDs struct {
|
||||
colSubCache *cache.Cache
|
||||
colSubLock sync.RWMutex
|
||||
}
|
||||
|
||||
// ColumnSubnetIDs for column subnet participants
|
||||
var ColumnSubnetIDs = newColumnSubnetIDs()
|
||||
|
||||
const columnKey = "columns"
|
||||
|
||||
func newColumnSubnetIDs() *columnSubnetIDs {
|
||||
epochDuration := time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
|
||||
// Set the default duration of a column subnet subscription as the column expiry period.
|
||||
subLength := epochDuration * time.Duration(params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest)
|
||||
persistentCache := cache.New(subLength*time.Second, epochDuration*time.Second)
|
||||
return &columnSubnetIDs{colSubCache: persistentCache}
|
||||
}
|
||||
|
||||
// GetColumnSubnets retrieves the data column subnets.
|
||||
func (s *columnSubnetIDs) GetColumnSubnets() ([]uint64, bool, time.Time) {
|
||||
s.colSubLock.RLock()
|
||||
defer s.colSubLock.RUnlock()
|
||||
|
||||
id, duration, ok := s.colSubCache.GetWithExpiration(columnKey)
|
||||
if !ok {
|
||||
return nil, false, time.Time{}
|
||||
}
|
||||
// Retrieve indices from the cache.
|
||||
idxs, ok := id.([]uint64)
|
||||
if !ok {
|
||||
return nil, false, time.Time{}
|
||||
}
|
||||
|
||||
return idxs, ok, duration
|
||||
}
|
||||
|
||||
// AddColumnSubnets adds the relevant data column subnets.
|
||||
func (s *columnSubnetIDs) AddColumnSubnets(colIdx []uint64) {
|
||||
s.colSubLock.Lock()
|
||||
defer s.colSubLock.Unlock()
|
||||
|
||||
s.colSubCache.Set(columnKey, colIdx, 0)
|
||||
}
|
||||
|
||||
// EmptyAllCaches empties out all the related caches and flushes any stored
|
||||
// entries on them. This should only ever be used for testing, in normal
|
||||
// production, handling of the relevant subnets for each role is done
|
||||
// separately.
|
||||
func (s *columnSubnetIDs) EmptyAllCaches() {
|
||||
// Clear the cache.
|
||||
s.colSubLock.Lock()
|
||||
defer s.colSubLock.Unlock()
|
||||
|
||||
s.colSubCache.Flush()
|
||||
}
|
||||
@@ -66,11 +66,11 @@ func ProcessAttestationNoVerifySignature(
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, beaconState, att.GetData().Slot, att.GetData().CommitteeIndex)
|
||||
committees, err := helpers.AttestationCommittees(ctx, beaconState, att)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
indices, err := attestation.AttestingIndices(att, committee)
|
||||
indices, err := attestation.AttestingIndices(att, committees...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -195,47 +195,95 @@ func TestProcessAttestations_InvalidAggregationBitsLength(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestProcessAttestations_OK(t *testing.T) {
|
||||
beaconState, privKeys := util.DeterministicGenesisStateAltair(t, 100)
|
||||
t.Run("pre-Electra", func(t *testing.T) {
|
||||
beaconState, privKeys := util.DeterministicGenesisStateAltair(t, 100)
|
||||
|
||||
aggBits := bitfield.NewBitlist(3)
|
||||
aggBits.SetBitAt(0, true)
|
||||
var mockRoot [32]byte
|
||||
copy(mockRoot[:], "hello-world")
|
||||
att := util.HydrateAttestation(ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Root: mockRoot[:]},
|
||||
Target: ðpb.Checkpoint{Root: mockRoot[:]},
|
||||
},
|
||||
AggregationBits: aggBits,
|
||||
aggBits := bitfield.NewBitlist(3)
|
||||
aggBits.SetBitAt(0, true)
|
||||
var mockRoot [32]byte
|
||||
copy(mockRoot[:], "hello-world")
|
||||
att := util.HydrateAttestation(ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Root: mockRoot[:]},
|
||||
Target: ðpb.Checkpoint{Root: mockRoot[:]},
|
||||
},
|
||||
AggregationBits: aggBits,
|
||||
})
|
||||
|
||||
cfc := beaconState.CurrentJustifiedCheckpoint()
|
||||
cfc.Root = mockRoot[:]
|
||||
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cfc))
|
||||
|
||||
committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, att.Data.Slot, 0)
|
||||
require.NoError(t, err)
|
||||
attestingIndices, err := attestation.AttestingIndices(att, committee)
|
||||
require.NoError(t, err)
|
||||
sigs := make([]bls.Signature, len(attestingIndices))
|
||||
for i, indice := range attestingIndices {
|
||||
sb, err := signing.ComputeDomainAndSign(beaconState, 0, att.Data, params.BeaconConfig().DomainBeaconAttester, privKeys[indice])
|
||||
require.NoError(t, err)
|
||||
sig, err := bls.SignatureFromBytes(sb)
|
||||
require.NoError(t, err)
|
||||
sigs[i] = sig
|
||||
}
|
||||
att.Signature = bls.AggregateSignatures(sigs).Marshal()
|
||||
|
||||
block := util.NewBeaconBlockAltair()
|
||||
block.Block.Body.Attestations = []*ethpb.Attestation{att}
|
||||
|
||||
err = beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().MinAttestationInclusionDelay)
|
||||
require.NoError(t, err)
|
||||
wsb, err := blocks.NewSignedBeaconBlock(block)
|
||||
require.NoError(t, err)
|
||||
_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
})
|
||||
t.Run("post-Electra", func(t *testing.T) {
|
||||
beaconState, privKeys := util.DeterministicGenesisStateElectra(t, 100)
|
||||
|
||||
cfc := beaconState.CurrentJustifiedCheckpoint()
|
||||
cfc.Root = mockRoot[:]
|
||||
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cfc))
|
||||
aggBits := bitfield.NewBitlist(3)
|
||||
aggBits.SetBitAt(0, true)
|
||||
committeeBits := primitives.NewAttestationCommitteeBits()
|
||||
committeeBits.SetBitAt(0, true)
|
||||
var mockRoot [32]byte
|
||||
copy(mockRoot[:], "hello-world")
|
||||
att := util.HydrateAttestationElectra(ðpb.AttestationElectra{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Root: mockRoot[:]},
|
||||
Target: ðpb.Checkpoint{Root: mockRoot[:]},
|
||||
},
|
||||
AggregationBits: aggBits,
|
||||
CommitteeBits: committeeBits,
|
||||
})
|
||||
|
||||
committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, att.Data.Slot, att.Data.CommitteeIndex)
|
||||
require.NoError(t, err)
|
||||
attestingIndices, err := attestation.AttestingIndices(att, committee)
|
||||
require.NoError(t, err)
|
||||
sigs := make([]bls.Signature, len(attestingIndices))
|
||||
for i, indice := range attestingIndices {
|
||||
sb, err := signing.ComputeDomainAndSign(beaconState, 0, att.Data, params.BeaconConfig().DomainBeaconAttester, privKeys[indice])
|
||||
cfc := beaconState.CurrentJustifiedCheckpoint()
|
||||
cfc.Root = mockRoot[:]
|
||||
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cfc))
|
||||
|
||||
committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, att.Data.Slot, 0)
|
||||
require.NoError(t, err)
|
||||
sig, err := bls.SignatureFromBytes(sb)
|
||||
attestingIndices, err := attestation.AttestingIndices(att, committee)
|
||||
require.NoError(t, err)
|
||||
sigs[i] = sig
|
||||
}
|
||||
att.Signature = bls.AggregateSignatures(sigs).Marshal()
|
||||
sigs := make([]bls.Signature, len(attestingIndices))
|
||||
for i, indice := range attestingIndices {
|
||||
sb, err := signing.ComputeDomainAndSign(beaconState, 0, att.Data, params.BeaconConfig().DomainBeaconAttester, privKeys[indice])
|
||||
require.NoError(t, err)
|
||||
sig, err := bls.SignatureFromBytes(sb)
|
||||
require.NoError(t, err)
|
||||
sigs[i] = sig
|
||||
}
|
||||
att.Signature = bls.AggregateSignatures(sigs).Marshal()
|
||||
|
||||
block := util.NewBeaconBlockAltair()
|
||||
block.Block.Body.Attestations = []*ethpb.Attestation{att}
|
||||
block := util.NewBeaconBlockElectra()
|
||||
block.Block.Body.Attestations = []*ethpb.AttestationElectra{att}
|
||||
|
||||
err = beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().MinAttestationInclusionDelay)
|
||||
require.NoError(t, err)
|
||||
wsb, err := blocks.NewSignedBeaconBlock(block)
|
||||
require.NoError(t, err)
|
||||
_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
err = beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().MinAttestationInclusionDelay)
|
||||
require.NoError(t, err)
|
||||
wsb, err := blocks.NewSignedBeaconBlock(block)
|
||||
require.NoError(t, err)
|
||||
_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestProcessAttestationNoVerify_SourceTargetHead(t *testing.T) {
|
||||
|
||||
@@ -28,87 +28,87 @@ import (
|
||||
// process_historical_roots_update(state)
|
||||
// process_participation_flag_updates(state) # [New in Altair]
|
||||
// process_sync_committee_updates(state) # [New in Altair]
|
||||
func ProcessEpoch(ctx context.Context, state state.BeaconState) (state.BeaconState, error) {
|
||||
func ProcessEpoch(ctx context.Context, state state.BeaconState) error {
|
||||
ctx, span := trace.StartSpan(ctx, "altair.ProcessEpoch")
|
||||
defer span.End()
|
||||
|
||||
if state == nil || state.IsNil() {
|
||||
return nil, errors.New("nil state")
|
||||
return errors.New("nil state")
|
||||
}
|
||||
vp, bp, err := InitializePrecomputeValidators(ctx, state)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
|
||||
// New in Altair.
|
||||
vp, bp, err = ProcessEpochParticipation(ctx, state, bp, vp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
|
||||
state, err = precompute.ProcessJustificationAndFinalizationPreCompute(state, bp)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process justification")
|
||||
return errors.Wrap(err, "could not process justification")
|
||||
}
|
||||
|
||||
// New in Altair.
|
||||
state, vp, err = ProcessInactivityScores(ctx, state, vp)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process inactivity updates")
|
||||
return errors.Wrap(err, "could not process inactivity updates")
|
||||
}
|
||||
|
||||
// New in Altair.
|
||||
state, err = ProcessRewardsAndPenaltiesPrecompute(state, bp, vp)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process rewards and penalties")
|
||||
return errors.Wrap(err, "could not process rewards and penalties")
|
||||
}
|
||||
|
||||
state, err = e.ProcessRegistryUpdates(ctx, state)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process registry updates")
|
||||
return errors.Wrap(err, "could not process registry updates")
|
||||
}
|
||||
|
||||
// Modified in Altair and Bellatrix.
|
||||
proportionalSlashingMultiplier, err := state.ProportionalSlashingMultiplier()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
state, err = e.ProcessSlashings(state, proportionalSlashingMultiplier)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
state, err = e.ProcessEth1DataReset(state)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
state, err = e.ProcessEffectiveBalanceUpdates(state)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
state, err = e.ProcessSlashingsReset(state)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
state, err = e.ProcessRandaoMixesReset(state)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
state, err = e.ProcessHistoricalDataUpdate(state)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
|
||||
// New in Altair.
|
||||
state, err = ProcessParticipationFlagUpdates(state)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
|
||||
// New in Altair.
|
||||
state, err = ProcessSyncCommitteeUpdates(ctx, state)
|
||||
_, err = ProcessSyncCommitteeUpdates(ctx, state)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
|
||||
return state, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -13,9 +13,9 @@ import (
|
||||
func TestProcessEpoch_CanProcess(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee)
|
||||
require.NoError(t, st.SetSlot(10*params.BeaconConfig().SlotsPerEpoch))
|
||||
newState, err := altair.ProcessEpoch(context.Background(), st)
|
||||
err := altair.ProcessEpoch(context.Background(), st)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(0), newState.Slashings()[2], "Unexpected slashed balance")
|
||||
require.Equal(t, uint64(0), st.Slashings()[2], "Unexpected slashed balance")
|
||||
|
||||
b := st.Balances()
|
||||
require.Equal(t, params.BeaconConfig().MaxValidatorsPerCommittee, uint64(len(b)))
|
||||
@@ -45,9 +45,9 @@ func TestProcessEpoch_CanProcess(t *testing.T) {
|
||||
func TestProcessEpoch_CanProcessBellatrix(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateBellatrix(t, params.BeaconConfig().MaxValidatorsPerCommittee)
|
||||
require.NoError(t, st.SetSlot(10*params.BeaconConfig().SlotsPerEpoch))
|
||||
newState, err := altair.ProcessEpoch(context.Background(), st)
|
||||
err := altair.ProcessEpoch(context.Background(), st)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(0), newState.Slashings()[2], "Unexpected slashed balance")
|
||||
require.Equal(t, uint64(0), st.Slashings()[2], "Unexpected slashed balance")
|
||||
|
||||
b := st.Balances()
|
||||
require.Equal(t, params.BeaconConfig().MaxValidatorsPerCommittee, uint64(len(b)))
|
||||
|
||||
@@ -154,7 +154,7 @@ func TranslateParticipation(ctx context.Context, state state.BeaconState, atts [
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, state, att.Data.Slot, att.Data.CommitteeIndex)
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, state, att.GetData().Slot, att.GetData().CommitteeIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -200,23 +200,6 @@ func ProcessAttestationNoVerifySignature(
|
||||
return beaconState, nil
|
||||
}
|
||||
|
||||
// VerifyAttestationSignature converts and attestation into an indexed attestation and verifies
|
||||
// the signature in that attestation.
|
||||
func VerifyAttestationSignature(ctx context.Context, beaconState state.ReadOnlyBeaconState, att ethpb.Att) error {
|
||||
if err := helpers.ValidateNilAttestation(att); err != nil {
|
||||
return err
|
||||
}
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, beaconState, att.GetData().Slot, att.GetData().CommitteeIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
indexedAtt, err := attestation.ConvertToIndexed(ctx, att, committee)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return VerifyIndexedAttestation(ctx, beaconState, indexedAtt)
|
||||
}
|
||||
|
||||
// VerifyIndexedAttestation determines the validity of an indexed attestation.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
|
||||
@@ -578,53 +578,109 @@ func TestRetrieveAttestationSignatureSet_VerifiesMultipleAttestations(t *testing
|
||||
}
|
||||
}
|
||||
|
||||
st, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetSlot(5))
|
||||
require.NoError(t, st.SetValidators(validators))
|
||||
t.Run("pre-Electra", func(t *testing.T) {
|
||||
st, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetSlot(5))
|
||||
require.NoError(t, st.SetValidators(validators))
|
||||
|
||||
comm1, err := helpers.BeaconCommitteeFromState(context.Background(), st, 1 /*slot*/, 0 /*committeeIndex*/)
|
||||
require.NoError(t, err)
|
||||
att1 := util.HydrateAttestation(ðpb.Attestation{
|
||||
AggregationBits: bitfield.NewBitlist(uint64(len(comm1))),
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
},
|
||||
comm1, err := helpers.BeaconCommitteeFromState(context.Background(), st, 1 /*slot*/, 0 /*committeeIndex*/)
|
||||
require.NoError(t, err)
|
||||
att1 := util.HydrateAttestation(ðpb.Attestation{
|
||||
AggregationBits: bitfield.NewBitlist(uint64(len(comm1))),
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
},
|
||||
})
|
||||
domain, err := signing.Domain(st.Fork(), st.Fork().Epoch, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorsRoot())
|
||||
require.NoError(t, err)
|
||||
root, err := signing.ComputeSigningRoot(att1.Data, domain)
|
||||
require.NoError(t, err)
|
||||
var sigs []bls.Signature
|
||||
for i, u := range comm1 {
|
||||
att1.AggregationBits.SetBitAt(uint64(i), true)
|
||||
sigs = append(sigs, keys[u].Sign(root[:]))
|
||||
}
|
||||
att1.Signature = bls.AggregateSignatures(sigs).Marshal()
|
||||
|
||||
comm2, err := helpers.BeaconCommitteeFromState(context.Background(), st, 1 /*slot*/, 1 /*committeeIndex*/)
|
||||
require.NoError(t, err)
|
||||
att2 := util.HydrateAttestation(ðpb.Attestation{
|
||||
AggregationBits: bitfield.NewBitlist(uint64(len(comm2))),
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 1,
|
||||
},
|
||||
})
|
||||
root, err = signing.ComputeSigningRoot(att2.Data, domain)
|
||||
require.NoError(t, err)
|
||||
sigs = nil
|
||||
for i, u := range comm2 {
|
||||
att2.AggregationBits.SetBitAt(uint64(i), true)
|
||||
sigs = append(sigs, keys[u].Sign(root[:]))
|
||||
}
|
||||
att2.Signature = bls.AggregateSignatures(sigs).Marshal()
|
||||
|
||||
set, err := blocks.AttestationSignatureBatch(ctx, st, []ethpb.Att{att1, att2})
|
||||
require.NoError(t, err)
|
||||
verified, err := set.Verify()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, verified, "Multiple signatures were unable to be verified.")
|
||||
})
|
||||
domain, err := signing.Domain(st.Fork(), st.Fork().Epoch, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorsRoot())
|
||||
require.NoError(t, err)
|
||||
root, err := signing.ComputeSigningRoot(att1.Data, domain)
|
||||
require.NoError(t, err)
|
||||
var sigs []bls.Signature
|
||||
for i, u := range comm1 {
|
||||
att1.AggregationBits.SetBitAt(uint64(i), true)
|
||||
sigs = append(sigs, keys[u].Sign(root[:]))
|
||||
}
|
||||
att1.Signature = bls.AggregateSignatures(sigs).Marshal()
|
||||
t.Run("post-Electra", func(t *testing.T) {
|
||||
st, err := util.NewBeaconStateElectra()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetSlot(5))
|
||||
require.NoError(t, st.SetValidators(validators))
|
||||
|
||||
comm2, err := helpers.BeaconCommitteeFromState(context.Background(), st, 1 /*slot*/, 1 /*committeeIndex*/)
|
||||
require.NoError(t, err)
|
||||
att2 := util.HydrateAttestation(ðpb.Attestation{
|
||||
AggregationBits: bitfield.NewBitlist(uint64(len(comm2))),
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 1,
|
||||
},
|
||||
comm1, err := helpers.BeaconCommitteeFromState(context.Background(), st, 1 /*slot*/, 0 /*committeeIndex*/)
|
||||
require.NoError(t, err)
|
||||
commBits1 := primitives.NewAttestationCommitteeBits()
|
||||
commBits1.SetBitAt(0, true)
|
||||
att1 := util.HydrateAttestationElectra(ðpb.AttestationElectra{
|
||||
AggregationBits: bitfield.NewBitlist(uint64(len(comm1))),
|
||||
CommitteeBits: commBits1,
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
},
|
||||
})
|
||||
domain, err := signing.Domain(st.Fork(), st.Fork().Epoch, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorsRoot())
|
||||
require.NoError(t, err)
|
||||
root, err := signing.ComputeSigningRoot(att1.Data, domain)
|
||||
require.NoError(t, err)
|
||||
var sigs []bls.Signature
|
||||
for i, u := range comm1 {
|
||||
att1.AggregationBits.SetBitAt(uint64(i), true)
|
||||
sigs = append(sigs, keys[u].Sign(root[:]))
|
||||
}
|
||||
att1.Signature = bls.AggregateSignatures(sigs).Marshal()
|
||||
|
||||
comm2, err := helpers.BeaconCommitteeFromState(context.Background(), st, 1 /*slot*/, 1 /*committeeIndex*/)
|
||||
require.NoError(t, err)
|
||||
commBits2 := primitives.NewAttestationCommitteeBits()
|
||||
commBits2.SetBitAt(1, true)
|
||||
att2 := util.HydrateAttestationElectra(ðpb.AttestationElectra{
|
||||
AggregationBits: bitfield.NewBitlist(uint64(len(comm2))),
|
||||
CommitteeBits: commBits2,
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
},
|
||||
})
|
||||
root, err = signing.ComputeSigningRoot(att2.Data, domain)
|
||||
require.NoError(t, err)
|
||||
sigs = nil
|
||||
for i, u := range comm2 {
|
||||
att2.AggregationBits.SetBitAt(uint64(i), true)
|
||||
sigs = append(sigs, keys[u].Sign(root[:]))
|
||||
}
|
||||
att2.Signature = bls.AggregateSignatures(sigs).Marshal()
|
||||
|
||||
set, err := blocks.AttestationSignatureBatch(ctx, st, []ethpb.Att{att1, att2})
|
||||
require.NoError(t, err)
|
||||
verified, err := set.Verify()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, verified, "Multiple signatures were unable to be verified.")
|
||||
})
|
||||
root, err = signing.ComputeSigningRoot(att2.Data, domain)
|
||||
require.NoError(t, err)
|
||||
sigs = nil
|
||||
for i, u := range comm2 {
|
||||
att2.AggregationBits.SetBitAt(uint64(i), true)
|
||||
sigs = append(sigs, keys[u].Sign(root[:]))
|
||||
}
|
||||
att2.Signature = bls.AggregateSignatures(sigs).Marshal()
|
||||
|
||||
set, err := blocks.AttestationSignatureBatch(ctx, st, []ethpb.Att{att1, att2})
|
||||
require.NoError(t, err)
|
||||
verified, err := set.Verify()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, verified, "Multiple signatures were unable to be verified.")
|
||||
}
|
||||
|
||||
func TestRetrieveAttestationSignatureSet_AcrossFork(t *testing.T) {
|
||||
|
||||
@@ -297,21 +297,6 @@ func TestFuzzVerifyIndexedAttestationn_10000(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestFuzzVerifyAttestation_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðpb.BeaconState{}
|
||||
attestation := ðpb.Attestation{}
|
||||
ctx := context.Background()
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(attestation)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
|
||||
require.NoError(t, err)
|
||||
err = VerifyAttestationSignature(ctx, s, attestation)
|
||||
_ = err
|
||||
}
|
||||
}
|
||||
|
||||
func TestFuzzProcessDeposits_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðpb.BeaconState{}
|
||||
|
||||
@@ -163,7 +163,42 @@ func NewGenesisBlockForState(ctx context.Context, st state.BeaconState) (interfa
|
||||
SyncCommitteeBits: make([]byte, fieldparams.SyncCommitteeLength/8),
|
||||
SyncCommitteeSignature: make([]byte, fieldparams.BLSSignatureLength),
|
||||
},
|
||||
ExecutionPayload: &enginev1.ExecutionPayloadDeneb{ // Deneb difference.
|
||||
ExecutionPayload: &enginev1.ExecutionPayloadDeneb{
|
||||
ParentHash: make([]byte, 32),
|
||||
FeeRecipient: make([]byte, 20),
|
||||
StateRoot: make([]byte, 32),
|
||||
ReceiptsRoot: make([]byte, 32),
|
||||
LogsBloom: make([]byte, 256),
|
||||
PrevRandao: make([]byte, 32),
|
||||
ExtraData: make([]byte, 0),
|
||||
BaseFeePerGas: make([]byte, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
Transactions: make([][]byte, 0),
|
||||
Withdrawals: make([]*enginev1.Withdrawal, 0),
|
||||
},
|
||||
BlsToExecutionChanges: make([]*ethpb.SignedBLSToExecutionChange, 0),
|
||||
BlobKzgCommitments: make([][]byte, 0),
|
||||
},
|
||||
},
|
||||
Signature: params.BeaconConfig().EmptySignature[:],
|
||||
})
|
||||
case *ethpb.BeaconStateElectra:
|
||||
return blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlockElectra{
|
||||
Block: ðpb.BeaconBlockElectra{
|
||||
ParentRoot: params.BeaconConfig().ZeroHash[:],
|
||||
StateRoot: root[:],
|
||||
Body: ðpb.BeaconBlockBodyElectra{
|
||||
RandaoReveal: make([]byte, 96),
|
||||
Eth1Data: ðpb.Eth1Data{
|
||||
DepositRoot: make([]byte, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
},
|
||||
Graffiti: make([]byte, 32),
|
||||
SyncAggregate: ðpb.SyncAggregate{
|
||||
SyncCommitteeBits: make([]byte, fieldparams.SyncCommitteeLength/8),
|
||||
SyncCommitteeSignature: make([]byte, fieldparams.BLSSignatureLength),
|
||||
},
|
||||
ExecutionPayload: &enginev1.ExecutionPayloadElectra{
|
||||
ParentHash: make([]byte, 32),
|
||||
FeeRecipient: make([]byte, 20),
|
||||
StateRoot: make([]byte, 32),
|
||||
|
||||
@@ -96,6 +96,24 @@ func VerifyBlockHeaderSignature(beaconState state.BeaconState, header *ethpb.Sig
|
||||
return signing.VerifyBlockHeaderSigningRoot(header.Header, proposerPubKey, header.Signature, domain)
|
||||
}
|
||||
|
||||
func VerifyBlockHeaderSignatureUsingCurrentFork(beaconState state.BeaconState, header *ethpb.SignedBeaconBlockHeader) error {
|
||||
currentEpoch := slots.ToEpoch(header.Header.Slot)
|
||||
fork, err := forks.Fork(currentEpoch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
domain, err := signing.Domain(fork, currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorsRoot())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
proposer, err := beaconState.ValidatorAtIndex(header.Header.ProposerIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
proposerPubKey := proposer.PublicKey
|
||||
return signing.VerifyBlockHeaderSigningRoot(header.Header, proposerPubKey, header.Signature, domain)
|
||||
}
|
||||
|
||||
// VerifyBlockSignatureUsingCurrentFork verifies the proposer signature of a beacon block. This differs
|
||||
// from the above method by not using fork data from the state and instead retrieving it
|
||||
// via the respective epoch.
|
||||
@@ -192,11 +210,11 @@ func createAttestationSignatureBatch(
|
||||
descs := make([]string, len(atts))
|
||||
for i, a := range atts {
|
||||
sigs[i] = a.GetSignature()
|
||||
c, err := helpers.BeaconCommitteeFromState(ctx, beaconState, a.GetData().Slot, a.GetData().CommitteeIndex)
|
||||
committees, err := helpers.AttestationCommittees(ctx, beaconState, a)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ia, err := attestation.ConvertToIndexed(ctx, a, c)
|
||||
ia, err := attestation.ConvertToIndexed(ctx, a, committees...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"attestation.go",
|
||||
"churn.go",
|
||||
"consolidations.go",
|
||||
"deposits.go",
|
||||
@@ -22,6 +23,7 @@ go_library(
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/core/validators:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
@@ -32,7 +34,9 @@ go_library(
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -43,8 +47,10 @@ go_test(
|
||||
"churn_test.go",
|
||||
"consolidations_test.go",
|
||||
"deposits_test.go",
|
||||
"effective_balance_updates_test.go",
|
||||
"upgrade_test.go",
|
||||
"validator_test.go",
|
||||
"withdrawals_test.go",
|
||||
],
|
||||
deps = [
|
||||
":go_default_library",
|
||||
@@ -62,8 +68,12 @@ go_test(
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/interop:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
7
beacon-chain/core/electra/attestation.go
Normal file
7
beacon-chain/core/electra/attestation.go
Normal file
@@ -0,0 +1,7 @@
|
||||
package electra
|
||||
|
||||
import "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/altair"
|
||||
|
||||
var (
|
||||
ProcessAttestationsNoVerifySignature = altair.ProcessAttestationsNoVerifySignature
|
||||
)
|
||||
@@ -77,11 +77,11 @@ func ProcessPendingBalanceDeposits(ctx context.Context, st state.BeaconState, ac
|
||||
}
|
||||
}
|
||||
|
||||
// ProcessDepositReceipts is a function as part of electra to process execution layer deposits
|
||||
func ProcessDepositReceipts(ctx context.Context, beaconState state.BeaconState, receipts []*enginev1.DepositReceipt) (state.BeaconState, error) {
|
||||
_, span := trace.StartSpan(ctx, "electra.ProcessDepositReceipts")
|
||||
// ProcessDepositRequests is a function as part of electra to process execution layer deposits
|
||||
func ProcessDepositRequests(ctx context.Context, beaconState state.BeaconState, requests []*enginev1.DepositRequest) (state.BeaconState, error) {
|
||||
_, span := trace.StartSpan(ctx, "electra.ProcessDepositRequests")
|
||||
defer span.End()
|
||||
// TODO: replace with 6110 logic
|
||||
// return b.ProcessDepositReceipts(beaconState, receipts)
|
||||
// return b.ProcessDepositRequests(beaconState, requests)
|
||||
return beaconState, nil
|
||||
}
|
||||
|
||||
@@ -1,6 +1,13 @@
|
||||
package electra
|
||||
|
||||
import "github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
// ProcessEffectiveBalanceUpdates processes effective balance updates during epoch processing.
|
||||
//
|
||||
@@ -24,6 +31,35 @@ import "github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
// ):
|
||||
// validator.effective_balance = min(balance - balance % EFFECTIVE_BALANCE_INCREMENT, EFFECTIVE_BALANCE_LIMIT)
|
||||
func ProcessEffectiveBalanceUpdates(state state.BeaconState) error {
|
||||
// TODO: replace with real implementation
|
||||
return nil
|
||||
effBalanceInc := params.BeaconConfig().EffectiveBalanceIncrement
|
||||
hysteresisInc := effBalanceInc / params.BeaconConfig().HysteresisQuotient
|
||||
downwardThreshold := hysteresisInc * params.BeaconConfig().HysteresisDownwardMultiplier
|
||||
upwardThreshold := hysteresisInc * params.BeaconConfig().HysteresisUpwardMultiplier
|
||||
|
||||
bals := state.Balances()
|
||||
|
||||
// Update effective balances with hysteresis.
|
||||
validatorFunc := func(idx int, val *ethpb.Validator) (bool, *ethpb.Validator, error) {
|
||||
if val == nil {
|
||||
return false, nil, fmt.Errorf("validator %d is nil in state", idx)
|
||||
}
|
||||
if idx >= len(bals) {
|
||||
return false, nil, fmt.Errorf("validator index exceeds validator length in state %d >= %d", idx, len(state.Balances()))
|
||||
}
|
||||
balance := bals[idx]
|
||||
|
||||
effectiveBalanceLimit := params.BeaconConfig().MinActivationBalance
|
||||
if helpers.HasCompoundingWithdrawalCredential(val) {
|
||||
effectiveBalanceLimit = params.BeaconConfig().MaxEffectiveBalanceElectra
|
||||
}
|
||||
|
||||
if balance+downwardThreshold < val.EffectiveBalance || val.EffectiveBalance+upwardThreshold < balance {
|
||||
effectiveBal := min(balance-balance%effBalanceInc, effectiveBalanceLimit)
|
||||
val.EffectiveBalance = effectiveBal
|
||||
return false, val, nil
|
||||
}
|
||||
return false, val, nil
|
||||
}
|
||||
|
||||
return state.ApplyToEveryValidator(validatorFunc)
|
||||
}
|
||||
|
||||
144
beacon-chain/core/electra/effective_balance_updates_test.go
Normal file
144
beacon-chain/core/electra/effective_balance_updates_test.go
Normal file
@@ -0,0 +1,144 @@
|
||||
package electra_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
)
|
||||
|
||||
func TestProcessEffectiveBalnceUpdates(t *testing.T) {
|
||||
effBalanceInc := params.BeaconConfig().EffectiveBalanceIncrement
|
||||
hysteresisInc := effBalanceInc / params.BeaconConfig().HysteresisQuotient
|
||||
downwardThreshold := hysteresisInc * params.BeaconConfig().HysteresisDownwardMultiplier
|
||||
upwardThreshold := hysteresisInc * params.BeaconConfig().HysteresisUpwardMultiplier
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
state state.BeaconState
|
||||
wantErr bool
|
||||
check func(*testing.T, state.BeaconState)
|
||||
}{
|
||||
{
|
||||
name: "validator with compounding withdrawal credentials updates effective balance",
|
||||
state: func() state.BeaconState {
|
||||
pb := ð.BeaconStateElectra{
|
||||
Validators: []*eth.Validator{
|
||||
{
|
||||
EffectiveBalance: params.BeaconConfig().MinActivationBalance,
|
||||
WithdrawalCredentials: []byte{params.BeaconConfig().CompoundingWithdrawalPrefixByte, 0x11},
|
||||
},
|
||||
},
|
||||
Balances: []uint64{
|
||||
params.BeaconConfig().MaxEffectiveBalanceElectra * 2,
|
||||
},
|
||||
}
|
||||
st, err := state_native.InitializeFromProtoElectra(pb)
|
||||
require.NoError(t, err)
|
||||
return st
|
||||
}(),
|
||||
check: func(t *testing.T, bs state.BeaconState) {
|
||||
val, err := bs.ValidatorAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MaxEffectiveBalanceElectra, val.EffectiveBalance)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "validator without compounding withdrawal credentials updates effective balance",
|
||||
state: func() state.BeaconState {
|
||||
pb := ð.BeaconStateElectra{
|
||||
Validators: []*eth.Validator{
|
||||
{
|
||||
EffectiveBalance: params.BeaconConfig().MinActivationBalance / 2,
|
||||
WithdrawalCredentials: nil,
|
||||
},
|
||||
},
|
||||
Balances: []uint64{
|
||||
params.BeaconConfig().MaxEffectiveBalanceElectra,
|
||||
},
|
||||
}
|
||||
st, err := state_native.InitializeFromProtoElectra(pb)
|
||||
require.NoError(t, err)
|
||||
return st
|
||||
}(),
|
||||
check: func(t *testing.T, bs state.BeaconState) {
|
||||
val, err := bs.ValidatorAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance, val.EffectiveBalance)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "validator effective balance moves only when outside of threshold",
|
||||
state: func() state.BeaconState {
|
||||
pb := ð.BeaconStateElectra{
|
||||
Validators: []*eth.Validator{
|
||||
{
|
||||
EffectiveBalance: params.BeaconConfig().MinActivationBalance,
|
||||
WithdrawalCredentials: []byte{params.BeaconConfig().CompoundingWithdrawalPrefixByte, 0x11},
|
||||
},
|
||||
{
|
||||
EffectiveBalance: params.BeaconConfig().MinActivationBalance,
|
||||
WithdrawalCredentials: []byte{params.BeaconConfig().CompoundingWithdrawalPrefixByte, 0x11},
|
||||
},
|
||||
{
|
||||
EffectiveBalance: params.BeaconConfig().MinActivationBalance,
|
||||
WithdrawalCredentials: []byte{params.BeaconConfig().CompoundingWithdrawalPrefixByte, 0x11},
|
||||
},
|
||||
{
|
||||
EffectiveBalance: params.BeaconConfig().MinActivationBalance,
|
||||
WithdrawalCredentials: []byte{params.BeaconConfig().CompoundingWithdrawalPrefixByte, 0x11},
|
||||
},
|
||||
},
|
||||
Balances: []uint64{
|
||||
params.BeaconConfig().MinActivationBalance - downwardThreshold - 1, // beyond downward threshold
|
||||
params.BeaconConfig().MinActivationBalance - downwardThreshold + 1, // within downward threshold
|
||||
params.BeaconConfig().MinActivationBalance + upwardThreshold + 1, // beyond upward threshold
|
||||
params.BeaconConfig().MinActivationBalance + upwardThreshold - 1, // within upward threshold
|
||||
},
|
||||
}
|
||||
st, err := state_native.InitializeFromProtoElectra(pb)
|
||||
require.NoError(t, err)
|
||||
return st
|
||||
}(),
|
||||
check: func(t *testing.T, bs state.BeaconState) {
|
||||
// validator 0 has a balance diff exceeding the threshold so a diff should be applied to
|
||||
// effective balance and it moves by effective balance increment.
|
||||
val, err := bs.ValidatorAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance-params.BeaconConfig().EffectiveBalanceIncrement, val.EffectiveBalance)
|
||||
|
||||
// validator 1 has a balance diff within the threshold so the effective balance should not
|
||||
// have changed.
|
||||
val, err = bs.ValidatorAtIndex(1)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance, val.EffectiveBalance)
|
||||
|
||||
// Validator 2 has a balance diff exceeding the threshold so a diff should be applied to the
|
||||
// effective balance and it moves by effective balance increment.
|
||||
val, err = bs.ValidatorAtIndex(2)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance+params.BeaconConfig().EffectiveBalanceIncrement, val.EffectiveBalance)
|
||||
|
||||
// Validator 3 has a balance diff within the threshold so the effective balance should not
|
||||
// have changed.
|
||||
val, err = bs.ValidatorAtIndex(3)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance, val.EffectiveBalance)
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
err := electra.ProcessEffectiveBalanceUpdates(tt.state)
|
||||
require.Equal(t, tt.wantErr, err != nil, "unexpected error returned wanted error=nil (%s), got error=%s", tt.wantErr, err)
|
||||
if tt.check != nil {
|
||||
tt.check(t, tt.state)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -46,80 +46,84 @@ var (
|
||||
// process_effective_balance_updates(state)
|
||||
// process_slashings_reset(state)
|
||||
// process_randao_mixes_reset(state)
|
||||
func ProcessEpoch(ctx context.Context, state state.BeaconState) (state.BeaconState, error) {
|
||||
func ProcessEpoch(ctx context.Context, state state.BeaconState) error {
|
||||
_, span := trace.StartSpan(ctx, "electra.ProcessEpoch")
|
||||
defer span.End()
|
||||
|
||||
if state == nil || state.IsNil() {
|
||||
return nil, errors.New("nil state")
|
||||
return errors.New("nil state")
|
||||
}
|
||||
vp, bp, err := InitializePrecomputeValidators(ctx, state)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
vp, bp, err = ProcessEpochParticipation(ctx, state, bp, vp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
state, err = precompute.ProcessJustificationAndFinalizationPreCompute(state, bp)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process justification")
|
||||
return errors.Wrap(err, "could not process justification")
|
||||
}
|
||||
state, vp, err = ProcessInactivityScores(ctx, state, vp)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process inactivity updates")
|
||||
return errors.Wrap(err, "could not process inactivity updates")
|
||||
}
|
||||
state, err = ProcessRewardsAndPenaltiesPrecompute(state, bp, vp)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process rewards and penalties")
|
||||
return errors.Wrap(err, "could not process rewards and penalties")
|
||||
}
|
||||
|
||||
state, err = ProcessRegistryUpdates(ctx, state)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process registry updates")
|
||||
return errors.Wrap(err, "could not process registry updates")
|
||||
}
|
||||
|
||||
proportionalSlashingMultiplier, err := state.ProportionalSlashingMultiplier()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
state, err = ProcessSlashings(state, proportionalSlashingMultiplier)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
state, err = ProcessEth1DataReset(state)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
|
||||
if err = ProcessPendingBalanceDeposits(ctx, state, primitives.Gwei(bp.ActiveCurrentEpoch)); err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
if err := ProcessPendingConsolidations(ctx, state); err != nil {
|
||||
return nil, err
|
||||
if err = ProcessPendingConsolidations(ctx, state); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := ProcessEffectiveBalanceUpdates(state); err != nil {
|
||||
return nil, err
|
||||
if err = ProcessEffectiveBalanceUpdates(state); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
state, err = ProcessSlashingsReset(state)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
state, err = ProcessRandaoMixesReset(state)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
state, err = ProcessHistoricalDataUpdate(state)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
|
||||
state, err = ProcessParticipationFlagUpdates(state)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
|
||||
state, err = ProcessSyncCommitteeUpdates(ctx, state)
|
||||
_, err = ProcessSyncCommitteeUpdates(ctx, state)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
|
||||
return state, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -38,7 +38,7 @@ import (
|
||||
// withdrawals_root=pre.latest_execution_payload_header.withdrawals_root,
|
||||
// blob_gas_used=pre.latest_execution_payload_header.blob_gas_used,
|
||||
// excess_blob_gas=pre.latest_execution_payload_header.excess_blob_gas,
|
||||
// deposit_receipts_root=Root(), # [New in Electra:EIP6110]
|
||||
// deposit_requests_root=Root(), # [New in Electra:EIP6110]
|
||||
// withdrawal_requests_root=Root(), # [New in Electra:EIP7002],
|
||||
// )
|
||||
//
|
||||
@@ -94,7 +94,7 @@ import (
|
||||
// # Deep history valid from Capella onwards
|
||||
// historical_summaries=pre.historical_summaries,
|
||||
// # [New in Electra:EIP6110]
|
||||
// deposit_receipts_start_index=UNSET_DEPOSIT_RECEIPTS_START_INDEX,
|
||||
// deposit_requests_start_index=UNSET_DEPOSIT_REQUESTS_START_INDEX,
|
||||
// # [New in Electra:EIP7251]
|
||||
// deposit_balance_to_consume=0,
|
||||
// exit_balance_to_consume=0,
|
||||
@@ -261,14 +261,14 @@ func UpgradeToElectra(beaconState state.BeaconState) (state.BeaconState, error)
|
||||
WithdrawalsRoot: wdRoot,
|
||||
ExcessBlobGas: excessBlobGas,
|
||||
BlobGasUsed: blobGasUsed,
|
||||
DepositReceiptsRoot: bytesutil.Bytes32(0), // [New in Electra:EIP6110]
|
||||
DepositRequestsRoot: bytesutil.Bytes32(0), // [New in Electra:EIP6110]
|
||||
WithdrawalRequestsRoot: bytesutil.Bytes32(0), // [New in Electra:EIP7002]
|
||||
},
|
||||
NextWithdrawalIndex: wi,
|
||||
NextWithdrawalValidatorIndex: vi,
|
||||
HistoricalSummaries: summaries,
|
||||
|
||||
DepositReceiptsStartIndex: params.BeaconConfig().UnsetDepositReceiptsStartIndex,
|
||||
DepositRequestsStartIndex: params.BeaconConfig().UnsetDepositRequestsStartIndex,
|
||||
DepositBalanceToConsume: 0,
|
||||
ExitBalanceToConsume: helpers.ActivationExitChurnLimit(primitives.Gwei(tab)),
|
||||
EarliestExitEpoch: earliestExitEpoch,
|
||||
|
||||
@@ -128,7 +128,7 @@ func TestUpgradeToElectra(t *testing.T) {
|
||||
BlockHash: prevHeader.BlockHash(),
|
||||
TransactionsRoot: txRoot,
|
||||
WithdrawalsRoot: wdRoot,
|
||||
DepositReceiptsRoot: bytesutil.Bytes32(0),
|
||||
DepositRequestsRoot: bytesutil.Bytes32(0),
|
||||
WithdrawalRequestsRoot: bytesutil.Bytes32(0),
|
||||
}
|
||||
require.DeepEqual(t, wanted, protoHeader)
|
||||
@@ -145,9 +145,9 @@ func TestUpgradeToElectra(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(summaries))
|
||||
|
||||
startIndex, err := mSt.DepositReceiptsStartIndex()
|
||||
startIndex, err := mSt.DepositRequestsStartIndex()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().UnsetDepositReceiptsStartIndex, startIndex)
|
||||
require.Equal(t, params.BeaconConfig().UnsetDepositRequestsStartIndex, startIndex)
|
||||
|
||||
balance, err := mSt.DepositBalanceToConsume()
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -1,21 +1,33 @@
|
||||
package electra
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/validators"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// ProcessExecutionLayerWithdrawRequests processes the validator withdrawals from the provided execution payload
|
||||
// ProcessWithdrawalRequests processes the validator withdrawals from the provided execution payload
|
||||
// into the beacon state triggered by the execution layer.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
//
|
||||
// def process_execution_layer_withdrawal_request(
|
||||
// def process_withdrawal_request(
|
||||
//
|
||||
// state: BeaconState,
|
||||
// execution_layer_withdrawal_request: ExecutionLayerWithdrawalRequest
|
||||
// withdrawal_request: WithdrawalRequest
|
||||
//
|
||||
// ) -> None:
|
||||
// amount = execution_layer_withdrawal_request.amount
|
||||
@@ -74,7 +86,109 @@ import (
|
||||
// amount=to_withdraw,
|
||||
// withdrawable_epoch=withdrawable_epoch,
|
||||
// ))
|
||||
func ProcessExecutionLayerWithdrawRequests(ctx context.Context, st state.BeaconState, wrs []*enginev1.ExecutionLayerWithdrawalRequest) (state.BeaconState, error) {
|
||||
//TODO: replace with real implementation
|
||||
func ProcessWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []*enginev1.WithdrawalRequest) (state.BeaconState, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "electra.ProcessWithdrawalRequests")
|
||||
defer span.End()
|
||||
currentEpoch := slots.ToEpoch(st.Slot())
|
||||
for _, wr := range wrs {
|
||||
if wr == nil {
|
||||
return nil, errors.New("nil execution layer withdrawal request")
|
||||
}
|
||||
amount := wr.Amount
|
||||
isFullExitRequest := amount == params.BeaconConfig().FullExitRequestAmount
|
||||
// If partial withdrawal queue is full, only full exits are processed
|
||||
if n, err := st.NumPendingPartialWithdrawals(); err != nil {
|
||||
return nil, err
|
||||
} else if n == params.BeaconConfig().PendingPartialWithdrawalsLimit && !isFullExitRequest {
|
||||
// if the PendingPartialWithdrawalsLimit is met, the user would have paid for a partial withdrawal that's not included
|
||||
log.Debugln("Skipping execution layer withdrawal request, PendingPartialWithdrawalsLimit reached")
|
||||
continue
|
||||
}
|
||||
|
||||
vIdx, exists := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(wr.ValidatorPubkey))
|
||||
if !exists {
|
||||
log.Debugf("Skipping execution layer withdrawal request, validator index for %s not found\n", hexutil.Encode(wr.ValidatorPubkey))
|
||||
continue
|
||||
}
|
||||
validator, err := st.ValidatorAtIndex(vIdx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Verify withdrawal credentials
|
||||
hasCorrectCredential := helpers.HasExecutionWithdrawalCredentials(validator)
|
||||
isCorrectSourceAddress := bytes.Equal(validator.WithdrawalCredentials[12:], wr.SourceAddress)
|
||||
if !hasCorrectCredential || !isCorrectSourceAddress {
|
||||
log.Debugln("Skipping execution layer withdrawal request, wrong withdrawal credentials")
|
||||
continue
|
||||
}
|
||||
|
||||
// Verify the validator is active.
|
||||
if !helpers.IsActiveValidator(validator, currentEpoch) {
|
||||
log.Debugln("Skipping execution layer withdrawal request, validator not active")
|
||||
continue
|
||||
}
|
||||
// Verify the validator has not yet submitted an exit.
|
||||
if validator.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
|
||||
log.Debugln("Skipping execution layer withdrawal request, validator has submitted an exit already")
|
||||
continue
|
||||
}
|
||||
// Verify the validator has been active long enough.
|
||||
if currentEpoch < validator.ActivationEpoch.AddEpoch(params.BeaconConfig().ShardCommitteePeriod) {
|
||||
log.Debugln("Skipping execution layer withdrawal request, validator has not been active long enough")
|
||||
continue
|
||||
}
|
||||
|
||||
pendingBalanceToWithdraw, err := st.PendingBalanceToWithdraw(vIdx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if isFullExitRequest {
|
||||
// Only exit validator if it has no pending withdrawals in the queue
|
||||
if pendingBalanceToWithdraw == 0 {
|
||||
maxExitEpoch, churn := validators.MaxExitEpochAndChurn(st)
|
||||
var err error
|
||||
st, _, err = validators.InitiateValidatorExit(ctx, st, vIdx, maxExitEpoch, churn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
hasSufficientEffectiveBalance := validator.EffectiveBalance >= params.BeaconConfig().MinActivationBalance
|
||||
vBal, err := st.BalanceAtIndex(vIdx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hasExcessBalance := vBal > params.BeaconConfig().MinActivationBalance+pendingBalanceToWithdraw
|
||||
|
||||
// Only allow partial withdrawals with compounding withdrawal credentials
|
||||
if helpers.HasCompoundingWithdrawalCredential(validator) && hasSufficientEffectiveBalance && hasExcessBalance {
|
||||
// Spec definition:
|
||||
// to_withdraw = min(
|
||||
// state.balances[index] - MIN_ACTIVATION_BALANCE - pending_balance_to_withdraw,
|
||||
// amount
|
||||
// )
|
||||
|
||||
// note: you can safely subtract these values because haxExcessBalance is checked
|
||||
toWithdraw := min(vBal-params.BeaconConfig().MinActivationBalance-pendingBalanceToWithdraw, amount)
|
||||
exitQueueEpoch, err := st.ExitEpochAndUpdateChurn(primitives.Gwei(toWithdraw))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// safe add the uint64 to avoid overflow
|
||||
withdrawableEpoch, err := exitQueueEpoch.SafeAddEpoch(params.BeaconConfig().MinValidatorWithdrawabilityDelay)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to add withdrawability delay to exit queue epoch")
|
||||
}
|
||||
if err := st.AppendPendingPartialWithdrawal(ðpb.PendingPartialWithdrawal{
|
||||
Index: vIdx,
|
||||
Amount: toWithdraw,
|
||||
WithdrawableEpoch: withdrawableEpoch,
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
return st, nil
|
||||
}
|
||||
|
||||
295
beacon-chain/core/electra/withdrawals_test.go
Normal file
295
beacon-chain/core/electra/withdrawals_test.go
Normal file
@@ -0,0 +1,295 @@
|
||||
package electra_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
func TestProcessWithdrawRequests(t *testing.T) {
|
||||
logHook := test.NewGlobal()
|
||||
source, err := hexutil.Decode("0xb20a608c624Ca5003905aA834De7156C68b2E1d0")
|
||||
require.NoError(t, err)
|
||||
st, _ := util.DeterministicGenesisStateElectra(t, 1)
|
||||
currentSlot := primitives.Slot(uint64(params.BeaconConfig().SlotsPerEpoch)*uint64(params.BeaconConfig().ShardCommitteePeriod) + 1)
|
||||
require.NoError(t, st.SetSlot(currentSlot))
|
||||
val, err := st.ValidatorAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
type args struct {
|
||||
st state.BeaconState
|
||||
wrs []*enginev1.WithdrawalRequest
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
wantFn func(t *testing.T, got state.BeaconState)
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "happy path exit and withdrawal only",
|
||||
args: args{
|
||||
st: func() state.BeaconState {
|
||||
preSt := st.Copy()
|
||||
require.NoError(t, preSt.AppendPendingPartialWithdrawal(ð.PendingPartialWithdrawal{
|
||||
Index: 0,
|
||||
Amount: params.BeaconConfig().FullExitRequestAmount,
|
||||
WithdrawableEpoch: 0,
|
||||
}))
|
||||
v, err := preSt.ValidatorAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
prefix := make([]byte, 12)
|
||||
prefix[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
|
||||
v.WithdrawalCredentials = append(prefix, source...)
|
||||
require.NoError(t, preSt.SetValidators([]*eth.Validator{v}))
|
||||
return preSt
|
||||
}(),
|
||||
wrs: []*enginev1.WithdrawalRequest{
|
||||
{
|
||||
SourceAddress: source,
|
||||
ValidatorPubkey: bytesutil.SafeCopyBytes(val.PublicKey),
|
||||
Amount: params.BeaconConfig().FullExitRequestAmount,
|
||||
},
|
||||
},
|
||||
},
|
||||
wantFn: func(t *testing.T, got state.BeaconState) {
|
||||
wantPostSt := st.Copy()
|
||||
v, err := wantPostSt.ValidatorAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
prefix := make([]byte, 12)
|
||||
prefix[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
|
||||
v.WithdrawalCredentials = append(prefix, source...)
|
||||
v.ExitEpoch = 261
|
||||
v.WithdrawableEpoch = 517
|
||||
require.NoError(t, wantPostSt.SetValidators([]*eth.Validator{v}))
|
||||
require.NoError(t, wantPostSt.AppendPendingPartialWithdrawal(ð.PendingPartialWithdrawal{
|
||||
Index: 0,
|
||||
Amount: params.BeaconConfig().FullExitRequestAmount,
|
||||
WithdrawableEpoch: 0,
|
||||
}))
|
||||
_, err = wantPostSt.ExitEpochAndUpdateChurn(primitives.Gwei(v.EffectiveBalance))
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, wantPostSt.Validators(), got.Validators())
|
||||
webc, err := wantPostSt.ExitBalanceToConsume()
|
||||
require.NoError(t, err)
|
||||
gebc, err := got.ExitBalanceToConsume()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, webc, gebc)
|
||||
weee, err := wantPostSt.EarliestExitEpoch()
|
||||
require.NoError(t, err)
|
||||
geee, err := got.EarliestExitEpoch()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, weee, geee)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "happy path has compounding",
|
||||
args: args{
|
||||
st: func() state.BeaconState {
|
||||
preSt := st.Copy()
|
||||
require.NoError(t, preSt.AppendPendingPartialWithdrawal(ð.PendingPartialWithdrawal{
|
||||
Index: 0,
|
||||
Amount: params.BeaconConfig().FullExitRequestAmount,
|
||||
WithdrawableEpoch: 0,
|
||||
}))
|
||||
v, err := preSt.ValidatorAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
prefix := make([]byte, 12)
|
||||
prefix[0] = params.BeaconConfig().CompoundingWithdrawalPrefixByte
|
||||
v.WithdrawalCredentials = append(prefix, source...)
|
||||
require.NoError(t, preSt.SetValidators([]*eth.Validator{v}))
|
||||
bal, err := preSt.BalanceAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
bal += 200
|
||||
require.NoError(t, preSt.SetBalances([]uint64{bal}))
|
||||
require.NoError(t, preSt.AppendPendingPartialWithdrawal(ð.PendingPartialWithdrawal{
|
||||
Index: 0,
|
||||
Amount: 100,
|
||||
WithdrawableEpoch: 0,
|
||||
}))
|
||||
return preSt
|
||||
}(),
|
||||
wrs: []*enginev1.WithdrawalRequest{
|
||||
{
|
||||
SourceAddress: source,
|
||||
ValidatorPubkey: bytesutil.SafeCopyBytes(val.PublicKey),
|
||||
Amount: 100,
|
||||
},
|
||||
},
|
||||
},
|
||||
wantFn: func(t *testing.T, got state.BeaconState) {
|
||||
wantPostSt := st.Copy()
|
||||
v, err := wantPostSt.ValidatorAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
prefix := make([]byte, 12)
|
||||
prefix[0] = params.BeaconConfig().CompoundingWithdrawalPrefixByte
|
||||
v.WithdrawalCredentials = append(prefix, source...)
|
||||
require.NoError(t, wantPostSt.SetValidators([]*eth.Validator{v}))
|
||||
bal, err := wantPostSt.BalanceAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
bal += 200
|
||||
require.NoError(t, wantPostSt.SetBalances([]uint64{bal}))
|
||||
require.NoError(t, wantPostSt.AppendPendingPartialWithdrawal(ð.PendingPartialWithdrawal{
|
||||
Index: 0,
|
||||
Amount: 0,
|
||||
WithdrawableEpoch: 0,
|
||||
}))
|
||||
require.NoError(t, wantPostSt.AppendPendingPartialWithdrawal(ð.PendingPartialWithdrawal{
|
||||
Index: 0,
|
||||
Amount: 100,
|
||||
WithdrawableEpoch: 0,
|
||||
}))
|
||||
require.NoError(t, wantPostSt.AppendPendingPartialWithdrawal(ð.PendingPartialWithdrawal{
|
||||
Index: 0,
|
||||
Amount: 100,
|
||||
WithdrawableEpoch: 517,
|
||||
}))
|
||||
wnppw, err := wantPostSt.NumPendingPartialWithdrawals()
|
||||
require.NoError(t, err)
|
||||
gnppw, err := got.NumPendingPartialWithdrawals()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, wnppw, gnppw)
|
||||
wece, err := wantPostSt.EarliestConsolidationEpoch()
|
||||
require.NoError(t, err)
|
||||
gece, err := got.EarliestConsolidationEpoch()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, wece, gece)
|
||||
_, err = wantPostSt.ExitEpochAndUpdateChurn(primitives.Gwei(100))
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, wantPostSt.Validators(), got.Validators())
|
||||
webc, err := wantPostSt.ExitBalanceToConsume()
|
||||
require.NoError(t, err)
|
||||
gebc, err := got.ExitBalanceToConsume()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, webc, gebc)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "validator already submitted exit",
|
||||
args: args{
|
||||
st: func() state.BeaconState {
|
||||
preSt := st.Copy()
|
||||
v, err := preSt.ValidatorAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
prefix := make([]byte, 12)
|
||||
prefix[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
|
||||
v.WithdrawalCredentials = append(prefix, source...)
|
||||
v.ExitEpoch = 1000
|
||||
require.NoError(t, preSt.SetValidators([]*eth.Validator{v}))
|
||||
return preSt
|
||||
}(),
|
||||
wrs: []*enginev1.WithdrawalRequest{
|
||||
{
|
||||
SourceAddress: source,
|
||||
ValidatorPubkey: bytesutil.SafeCopyBytes(val.PublicKey),
|
||||
Amount: params.BeaconConfig().FullExitRequestAmount,
|
||||
},
|
||||
},
|
||||
},
|
||||
wantFn: func(t *testing.T, got state.BeaconState) {
|
||||
wantPostSt := st.Copy()
|
||||
v, err := wantPostSt.ValidatorAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
prefix := make([]byte, 12)
|
||||
prefix[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
|
||||
v.WithdrawalCredentials = append(prefix, source...)
|
||||
v.ExitEpoch = 1000
|
||||
require.NoError(t, wantPostSt.SetValidators([]*eth.Validator{v}))
|
||||
eee, err := got.EarliestExitEpoch()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, eee, primitives.Epoch(0))
|
||||
require.DeepEqual(t, wantPostSt.Validators(), got.Validators())
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "validator too new",
|
||||
args: args{
|
||||
st: func() state.BeaconState {
|
||||
preSt := st.Copy()
|
||||
require.NoError(t, preSt.SetSlot(0))
|
||||
v, err := preSt.ValidatorAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
prefix := make([]byte, 12)
|
||||
prefix[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
|
||||
v.WithdrawalCredentials = append(prefix, source...)
|
||||
require.NoError(t, preSt.SetValidators([]*eth.Validator{v}))
|
||||
return preSt
|
||||
}(),
|
||||
wrs: []*enginev1.WithdrawalRequest{
|
||||
{
|
||||
SourceAddress: source,
|
||||
ValidatorPubkey: bytesutil.SafeCopyBytes(val.PublicKey),
|
||||
Amount: params.BeaconConfig().FullExitRequestAmount,
|
||||
},
|
||||
},
|
||||
},
|
||||
wantFn: func(t *testing.T, got state.BeaconState) {
|
||||
wantPostSt := st.Copy()
|
||||
require.NoError(t, wantPostSt.SetSlot(0))
|
||||
v, err := wantPostSt.ValidatorAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
prefix := make([]byte, 12)
|
||||
prefix[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
|
||||
v.WithdrawalCredentials = append(prefix, source...)
|
||||
require.NoError(t, wantPostSt.SetValidators([]*eth.Validator{v}))
|
||||
eee, err := got.EarliestExitEpoch()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, eee, primitives.Epoch(0))
|
||||
require.DeepEqual(t, wantPostSt.Validators(), got.Validators())
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "PendingPartialWithdrawalsLimit reached with partial withdrawal results in a skip",
|
||||
args: args{
|
||||
st: func() state.BeaconState {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.PendingPartialWithdrawalsLimit = 1
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
logrus.SetLevel(logrus.DebugLevel)
|
||||
preSt := st.Copy()
|
||||
require.NoError(t, preSt.AppendPendingPartialWithdrawal(ð.PendingPartialWithdrawal{
|
||||
Index: 0,
|
||||
Amount: params.BeaconConfig().FullExitRequestAmount,
|
||||
WithdrawableEpoch: 0,
|
||||
}))
|
||||
return preSt
|
||||
}(),
|
||||
wrs: []*enginev1.WithdrawalRequest{
|
||||
{
|
||||
SourceAddress: source,
|
||||
ValidatorPubkey: bytesutil.SafeCopyBytes(val.PublicKey),
|
||||
Amount: 100,
|
||||
},
|
||||
},
|
||||
},
|
||||
wantFn: func(t *testing.T, got state.BeaconState) {
|
||||
assert.LogsContain(t, logHook, "Skipping execution layer withdrawal request, PendingPartialWithdrawalsLimit reached")
|
||||
params.SetupTestConfigCleanup(t)
|
||||
logrus.SetLevel(logrus.InfoLevel) // reset
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
|
||||
got, err := electra.ProcessWithdrawalRequests(context.Background(), tt.args.st, tt.args.wrs)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("ProcessWithdrawalRequests() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
tt.wantFn(t, got)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -470,7 +470,7 @@ func UnslashedAttestingIndices(ctx context.Context, state state.ReadOnlyBeaconSt
|
||||
seen := make(map[uint64]bool)
|
||||
|
||||
for _, att := range atts {
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, state, att.Data.Slot, att.Data.CommitteeIndex)
|
||||
committee, err := helpers.BeaconCommitteeFromState(ctx, state, att.GetData().Slot, att.GetData().CommitteeIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -32,6 +32,9 @@ const (
|
||||
|
||||
// AttesterSlashingReceived is sent after an attester slashing is received from gossip or rpc
|
||||
AttesterSlashingReceived = 8
|
||||
|
||||
// DataColumnSidecarReceived is sent after a data column sidecar is received from gossip or rpc.
|
||||
DataColumnSidecarReceived = 9
|
||||
)
|
||||
|
||||
// UnAggregatedAttReceivedData is the data sent with UnaggregatedAttReceived events.
|
||||
@@ -77,3 +80,7 @@ type ProposerSlashingReceivedData struct {
|
||||
type AttesterSlashingReceivedData struct {
|
||||
AttesterSlashing ethpb.AttSlashing
|
||||
}
|
||||
|
||||
type DataColumnSidecarReceivedData struct {
|
||||
DataColumn *blocks.VerifiedRODataColumn
|
||||
}
|
||||
|
||||
@@ -34,6 +34,7 @@ go_library(
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//math:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//time:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
|
||||
@@ -21,6 +21,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/math"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
)
|
||||
|
||||
@@ -58,6 +59,29 @@ func SlotCommitteeCount(activeValidatorCount uint64) uint64 {
|
||||
return committeesPerSlot
|
||||
}
|
||||
|
||||
// AttestationCommittees returns beacon state committees that reflect attestation's committee indices.
|
||||
func AttestationCommittees(ctx context.Context, st state.ReadOnlyBeaconState, att ethpb.Att) ([][]primitives.ValidatorIndex, error) {
|
||||
var committees [][]primitives.ValidatorIndex
|
||||
if att.Version() >= version.Electra {
|
||||
committeeIndices := att.CommitteeBitsVal().BitIndices()
|
||||
committees = make([][]primitives.ValidatorIndex, len(committeeIndices))
|
||||
for i, ci := range committeeIndices {
|
||||
committee, err := BeaconCommitteeFromState(ctx, st, att.GetData().Slot, primitives.CommitteeIndex(ci))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
committees[i] = committee
|
||||
}
|
||||
} else {
|
||||
committee, err := BeaconCommitteeFromState(ctx, st, att.GetData().Slot, att.GetData().CommitteeIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
committees = [][]primitives.ValidatorIndex{committee}
|
||||
}
|
||||
return committees, nil
|
||||
}
|
||||
|
||||
// BeaconCommitteeFromState returns the crosslink committee of a given slot and committee index. This
|
||||
// is a spec implementation where state is used as an argument. In case of state retrieval
|
||||
// becomes expensive, consider using BeaconCommittee below.
|
||||
@@ -143,105 +167,137 @@ func BeaconCommittee(
|
||||
return ComputeCommittee(validatorIndices, seed, indexOffset, count)
|
||||
}
|
||||
|
||||
// CommitteeAssignmentContainer represents a committee list, committee index, and to be attested slot for a given epoch.
|
||||
type CommitteeAssignmentContainer struct {
|
||||
// CommitteeAssignment represents committee list, committee index, and to be attested slot for a given epoch.
|
||||
type CommitteeAssignment struct {
|
||||
Committee []primitives.ValidatorIndex
|
||||
AttesterSlot primitives.Slot
|
||||
CommitteeIndex primitives.CommitteeIndex
|
||||
}
|
||||
|
||||
// CommitteeAssignments is a map of validator indices pointing to the appropriate committee
|
||||
// assignment for the given epoch.
|
||||
//
|
||||
// 1. Determine the proposer validator index for each slot.
|
||||
// 2. Compute all committees.
|
||||
// 3. Determine the attesting slot for each committee.
|
||||
// 4. Construct a map of validator indices pointing to the respective committees.
|
||||
func CommitteeAssignments(
|
||||
ctx context.Context,
|
||||
state state.BeaconState,
|
||||
epoch primitives.Epoch,
|
||||
) (map[primitives.ValidatorIndex]*CommitteeAssignmentContainer, map[primitives.ValidatorIndex][]primitives.Slot, error) {
|
||||
// verifyAssignmentEpoch verifies if the given epoch is valid for assignment based on the provided state.
|
||||
// It checks if the epoch is not greater than the next epoch, and if the start slot of the epoch is greater
|
||||
// than or equal to the minimum valid start slot calculated based on the state's current slot and historical roots.
|
||||
func verifyAssignmentEpoch(epoch primitives.Epoch, state state.BeaconState) error {
|
||||
nextEpoch := time.NextEpoch(state)
|
||||
if epoch > nextEpoch {
|
||||
return nil, nil, fmt.Errorf(
|
||||
"epoch %d can't be greater than next epoch %d",
|
||||
epoch,
|
||||
nextEpoch,
|
||||
)
|
||||
return fmt.Errorf("epoch %d can't be greater than next epoch %d", epoch, nextEpoch)
|
||||
}
|
||||
|
||||
// We determine the slots in which proposers are supposed to act.
|
||||
// Some validators may need to propose multiple times per epoch, so
|
||||
// we use a map of proposer idx -> []slot to keep track of this possibility.
|
||||
startSlot, err := slots.EpochStart(epoch)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return err
|
||||
}
|
||||
minValidStartSlot := primitives.Slot(0)
|
||||
if state.Slot() >= params.BeaconConfig().SlotsPerHistoricalRoot {
|
||||
minValidStartSlot = state.Slot() - params.BeaconConfig().SlotsPerHistoricalRoot
|
||||
if stateSlot := state.Slot(); stateSlot >= params.BeaconConfig().SlotsPerHistoricalRoot {
|
||||
minValidStartSlot = stateSlot - params.BeaconConfig().SlotsPerHistoricalRoot
|
||||
}
|
||||
if startSlot < minValidStartSlot {
|
||||
return nil, nil, fmt.Errorf("start slot %d is smaller than the minimum valid start slot %d", startSlot, minValidStartSlot)
|
||||
return fmt.Errorf("start slot %d is smaller than the minimum valid start slot %d", startSlot, minValidStartSlot)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ProposerAssignments calculates proposer assignments for each validator during the specified epoch.
|
||||
// It verifies the validity of the epoch, then iterates through each slot in the epoch to determine the
|
||||
// proposer for that slot and assigns them accordingly.
|
||||
func ProposerAssignments(ctx context.Context, state state.BeaconState, epoch primitives.Epoch) (map[primitives.ValidatorIndex][]primitives.Slot, error) {
|
||||
// Verify if the epoch is valid for assignment based on the provided state.
|
||||
if err := verifyAssignmentEpoch(epoch, state); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
startSlot, err := slots.EpochStart(epoch)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
proposerIndexToSlots := make(map[primitives.ValidatorIndex][]primitives.Slot, params.BeaconConfig().SlotsPerEpoch)
|
||||
proposerAssignments := make(map[primitives.ValidatorIndex][]primitives.Slot)
|
||||
|
||||
originalStateSlot := state.Slot()
|
||||
|
||||
for slot := startSlot; slot < startSlot+params.BeaconConfig().SlotsPerEpoch; slot++ {
|
||||
// Skip proposer assignment for genesis slot.
|
||||
if slot == 0 {
|
||||
continue
|
||||
}
|
||||
// Set the state's current slot.
|
||||
if err := state.SetSlot(slot); err != nil {
|
||||
return nil, nil, err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Determine the proposer index for the current slot.
|
||||
i, err := BeaconProposerIndex(ctx, state)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "could not check proposer at slot %d", state.Slot())
|
||||
return nil, errors.Wrapf(err, "could not check proposer at slot %d", state.Slot())
|
||||
}
|
||||
proposerIndexToSlots[i] = append(proposerIndexToSlots[i], slot)
|
||||
|
||||
// Append the slot to the proposer's assignments.
|
||||
if _, ok := proposerAssignments[i]; !ok {
|
||||
proposerAssignments[i] = make([]primitives.Slot, 0)
|
||||
}
|
||||
proposerAssignments[i] = append(proposerAssignments[i], slot)
|
||||
}
|
||||
|
||||
// If previous proposer indices computation is outside if current proposal epoch range,
|
||||
// we need to reset state slot back to start slot so that we can compute the correct committees.
|
||||
currentProposalEpoch := epoch < nextEpoch
|
||||
if !currentProposalEpoch {
|
||||
if err := state.SetSlot(state.Slot() - params.BeaconConfig().SlotsPerEpoch); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
// Reset state back to its original slot.
|
||||
if err := state.SetSlot(originalStateSlot); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
activeValidatorIndices, err := ActiveValidatorIndices(ctx, state, epoch)
|
||||
return proposerAssignments, nil
|
||||
}
|
||||
|
||||
// CommitteeAssignments calculates committee assignments for each validator during the specified epoch.
|
||||
// It retrieves active validator indices, determines the number of committees per slot, and computes
|
||||
// assignments for each validator based on their presence in the provided validators slice.
|
||||
func CommitteeAssignments(ctx context.Context, state state.BeaconState, epoch primitives.Epoch, validators []primitives.ValidatorIndex) (map[primitives.ValidatorIndex]*CommitteeAssignment, error) {
|
||||
// Verify if the epoch is valid for assignment based on the provided state.
|
||||
if err := verifyAssignmentEpoch(epoch, state); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Retrieve active validator count for the specified epoch.
|
||||
activeValidatorCount, err := ActiveValidatorCount(ctx, state, epoch)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return nil, err
|
||||
}
|
||||
// Each slot in an epoch has a different set of committees. This value is derived from the
|
||||
// active validator set, which does not change.
|
||||
numCommitteesPerSlot := SlotCommitteeCount(uint64(len(activeValidatorIndices)))
|
||||
validatorIndexToCommittee := make(map[primitives.ValidatorIndex]*CommitteeAssignmentContainer, len(activeValidatorIndices))
|
||||
|
||||
// Compute all committees for all slots.
|
||||
for i := primitives.Slot(0); i < params.BeaconConfig().SlotsPerEpoch; i++ {
|
||||
// Compute committees.
|
||||
// Determine the number of committees per slot based on the number of active validator indices.
|
||||
numCommitteesPerSlot := SlotCommitteeCount(activeValidatorCount)
|
||||
|
||||
startSlot, err := slots.EpochStart(epoch)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
assignments := make(map[primitives.ValidatorIndex]*CommitteeAssignment)
|
||||
vals := make(map[primitives.ValidatorIndex]struct{})
|
||||
for _, v := range validators {
|
||||
vals[v] = struct{}{}
|
||||
}
|
||||
|
||||
// Compute committee assignments for each slot in the epoch.
|
||||
for slot := startSlot; slot < startSlot+params.BeaconConfig().SlotsPerEpoch; slot++ {
|
||||
// Compute committees for the current slot.
|
||||
for j := uint64(0); j < numCommitteesPerSlot; j++ {
|
||||
slot := startSlot + i
|
||||
committee, err := BeaconCommitteeFromState(ctx, state, slot, primitives.CommitteeIndex(j) /*committee index*/)
|
||||
committee, err := BeaconCommitteeFromState(ctx, state, slot, primitives.CommitteeIndex(j))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cac := &CommitteeAssignmentContainer{
|
||||
Committee: committee,
|
||||
CommitteeIndex: primitives.CommitteeIndex(j),
|
||||
AttesterSlot: slot,
|
||||
}
|
||||
for _, vIndex := range committee {
|
||||
validatorIndexToCommittee[vIndex] = cac
|
||||
if _, ok := vals[vIndex]; !ok { // Skip if the validator is not in the provided validators slice.
|
||||
continue
|
||||
}
|
||||
if _, ok := assignments[vIndex]; !ok {
|
||||
assignments[vIndex] = &CommitteeAssignment{}
|
||||
}
|
||||
assignments[vIndex].Committee = committee
|
||||
assignments[vIndex].AttesterSlot = slot
|
||||
assignments[vIndex].CommitteeIndex = primitives.CommitteeIndex(j)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return validatorIndexToCommittee, proposerIndexToSlots, nil
|
||||
return assignments, nil
|
||||
}
|
||||
|
||||
// VerifyBitfieldLength verifies that a bitfield length matches the given committee size.
|
||||
|
||||
@@ -104,7 +104,10 @@ func TestCommitteeAssignments_CannotRetrieveFutureEpoch(t *testing.T) {
|
||||
Slot: 0, // Epoch 0.
|
||||
})
|
||||
require.NoError(t, err)
|
||||
_, _, err = helpers.CommitteeAssignments(context.Background(), state, epoch+1)
|
||||
_, err = helpers.CommitteeAssignments(context.Background(), state, epoch+1, nil)
|
||||
assert.ErrorContains(t, "can't be greater than next epoch", err)
|
||||
|
||||
_, err = helpers.ProposerAssignments(context.Background(), state, epoch+1)
|
||||
assert.ErrorContains(t, "can't be greater than next epoch", err)
|
||||
}
|
||||
|
||||
@@ -128,10 +131,10 @@ func TestCommitteeAssignments_NoProposerForSlot0(t *testing.T) {
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
_, proposerIndexToSlots, err := helpers.CommitteeAssignments(context.Background(), state, 0)
|
||||
require.NoError(t, err, "Failed to determine CommitteeAssignments")
|
||||
for _, ss := range proposerIndexToSlots {
|
||||
for _, s := range ss {
|
||||
assignments, err := helpers.ProposerAssignments(context.Background(), state, 0)
|
||||
require.NoError(t, err, "Failed to determine Assignments")
|
||||
for _, slots := range assignments {
|
||||
for _, s := range slots {
|
||||
assert.NotEqual(t, uint64(0), s, "No proposer should be assigned to slot 0")
|
||||
}
|
||||
}
|
||||
@@ -140,6 +143,7 @@ func TestCommitteeAssignments_NoProposerForSlot0(t *testing.T) {
|
||||
func TestCommitteeAssignments_CanRetrieve(t *testing.T) {
|
||||
// Initialize test with 256 validators, each slot and each index gets 4 validators.
|
||||
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
|
||||
validatorIndices := make([]primitives.ValidatorIndex, len(validators))
|
||||
for i := 0; i < len(validators); i++ {
|
||||
// First 2 epochs only half validators are activated.
|
||||
var activationEpoch primitives.Epoch
|
||||
@@ -150,6 +154,7 @@ func TestCommitteeAssignments_CanRetrieve(t *testing.T) {
|
||||
ActivationEpoch: activationEpoch,
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
validatorIndices[i] = primitives.ValidatorIndex(i)
|
||||
}
|
||||
|
||||
state, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{
|
||||
@@ -201,14 +206,16 @@ func TestCommitteeAssignments_CanRetrieve(t *testing.T) {
|
||||
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
|
||||
validatorIndexToCommittee, proposerIndexToSlots, err := helpers.CommitteeAssignments(context.Background(), state, slots.ToEpoch(tt.slot))
|
||||
require.NoError(t, err, "Failed to determine CommitteeAssignments")
|
||||
cac := validatorIndexToCommittee[tt.index]
|
||||
assignments, err := helpers.CommitteeAssignments(context.Background(), state, slots.ToEpoch(tt.slot), validatorIndices)
|
||||
require.NoError(t, err, "Failed to determine Assignments")
|
||||
cac := assignments[tt.index]
|
||||
assert.Equal(t, tt.committeeIndex, cac.CommitteeIndex, "Unexpected committeeIndex for validator index %d", tt.index)
|
||||
assert.Equal(t, tt.slot, cac.AttesterSlot, "Unexpected slot for validator index %d", tt.index)
|
||||
if len(proposerIndexToSlots[tt.index]) > 0 && proposerIndexToSlots[tt.index][0] != tt.proposerSlot {
|
||||
proposerAssignments, err := helpers.ProposerAssignments(context.Background(), state, slots.ToEpoch(tt.slot))
|
||||
require.NoError(t, err)
|
||||
if len(proposerAssignments[tt.index]) > 0 && proposerAssignments[tt.index][0] != tt.proposerSlot {
|
||||
t.Errorf("wanted proposer slot %d, got proposer slot %d for validator index %d",
|
||||
tt.proposerSlot, proposerIndexToSlots[tt.index][0], tt.index)
|
||||
tt.proposerSlot, proposerAssignments[tt.index][0], tt.index)
|
||||
}
|
||||
assert.DeepEqual(t, tt.committee, cac.Committee, "Unexpected committee for validator index %d", tt.index)
|
||||
})
|
||||
@@ -238,13 +245,13 @@ func TestCommitteeAssignments_CannotRetrieveFuture(t *testing.T) {
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
_, proposerIndxs, err := helpers.CommitteeAssignments(context.Background(), state, time.CurrentEpoch(state))
|
||||
assignments, err := helpers.ProposerAssignments(context.Background(), state, time.CurrentEpoch(state))
|
||||
require.NoError(t, err)
|
||||
require.NotEqual(t, 0, len(proposerIndxs), "wanted non-zero proposer index set")
|
||||
require.NotEqual(t, 0, len(assignments), "wanted non-zero proposer index set")
|
||||
|
||||
_, proposerIndxs, err = helpers.CommitteeAssignments(context.Background(), state, time.CurrentEpoch(state)+1)
|
||||
assignments, err = helpers.ProposerAssignments(context.Background(), state, time.CurrentEpoch(state)+1)
|
||||
require.NoError(t, err)
|
||||
require.NotEqual(t, 0, len(proposerIndxs), "wanted non-zero proposer index set")
|
||||
require.NotEqual(t, 0, len(assignments), "wanted non-zero proposer index set")
|
||||
}
|
||||
|
||||
func TestCommitteeAssignments_CannotRetrieveOlderThanSlotsPerHistoricalRoot(t *testing.T) {
|
||||
@@ -264,7 +271,7 @@ func TestCommitteeAssignments_CannotRetrieveOlderThanSlotsPerHistoricalRoot(t *t
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
_, _, err = helpers.CommitteeAssignments(context.Background(), state, 0)
|
||||
_, err = helpers.CommitteeAssignments(context.Background(), state, 0, nil)
|
||||
require.ErrorContains(t, "start slot 0 is smaller than the minimum valid start slot 1", err)
|
||||
}
|
||||
|
||||
@@ -286,12 +293,12 @@ func TestCommitteeAssignments_EverySlotHasMin1Proposer(t *testing.T) {
|
||||
})
|
||||
require.NoError(t, err)
|
||||
epoch := primitives.Epoch(1)
|
||||
_, proposerIndexToSlots, err := helpers.CommitteeAssignments(context.Background(), state, epoch)
|
||||
require.NoError(t, err, "Failed to determine CommitteeAssignments")
|
||||
assignments, err := helpers.ProposerAssignments(context.Background(), state, epoch)
|
||||
require.NoError(t, err, "Failed to determine Assignments")
|
||||
|
||||
slotsWithProposers := make(map[primitives.Slot]bool)
|
||||
for _, proposerSlots := range proposerIndexToSlots {
|
||||
for _, slot := range proposerSlots {
|
||||
for _, slots := range assignments {
|
||||
for _, slot := range slots {
|
||||
slotsWithProposers[slot] = true
|
||||
}
|
||||
}
|
||||
@@ -708,3 +715,37 @@ func TestCommitteeIndices(t *testing.T) {
|
||||
indices := helpers.CommitteeIndices(bitfield)
|
||||
assert.DeepEqual(t, []primitives.CommitteeIndex{0, 1, 3}, indices)
|
||||
}
|
||||
|
||||
func TestAttestationCommittees(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().TargetCommitteeSize))
|
||||
for i := 0; i < len(validators); i++ {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
}
|
||||
|
||||
state, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{
|
||||
Validators: validators,
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Run("pre-Electra", func(t *testing.T) {
|
||||
att := ðpb.Attestation{Data: ðpb.AttestationData{CommitteeIndex: 0}}
|
||||
committees, err := helpers.AttestationCommittees(context.Background(), state, att)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(committees))
|
||||
assert.Equal(t, params.BeaconConfig().TargetCommitteeSize, uint64(len(committees[0])))
|
||||
})
|
||||
t.Run("post-Electra", func(t *testing.T) {
|
||||
bits := primitives.NewAttestationCommitteeBits()
|
||||
bits.SetBitAt(0, true)
|
||||
bits.SetBitAt(1, true)
|
||||
att := ðpb.AttestationElectra{CommitteeBits: bits, Data: ðpb.AttestationData{}}
|
||||
committees, err := helpers.AttestationCommittees(context.Background(), state, att)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, len(committees))
|
||||
assert.Equal(t, params.BeaconConfig().TargetCommitteeSize, uint64(len(committees[0])))
|
||||
assert.Equal(t, params.BeaconConfig().TargetCommitteeSize, uint64(len(committees[1])))
|
||||
})
|
||||
}
|
||||
|
||||
@@ -500,11 +500,11 @@ func LastActivatedValidatorIndex(ctx context.Context, st state.ReadOnlyBeaconSta
|
||||
|
||||
// hasETH1WithdrawalCredential returns whether the validator has an ETH1
|
||||
// Withdrawal prefix. It assumes that the caller has a lock on the state
|
||||
func HasETH1WithdrawalCredential(val *ethpb.Validator) bool {
|
||||
func HasETH1WithdrawalCredential(val interfaces.WithWithdrawalCredentials) bool {
|
||||
if val == nil {
|
||||
return false
|
||||
}
|
||||
return isETH1WithdrawalCredential(val.WithdrawalCredentials)
|
||||
return isETH1WithdrawalCredential(val.GetWithdrawalCredentials())
|
||||
}
|
||||
|
||||
func isETH1WithdrawalCredential(creds []byte) bool {
|
||||
|
||||
36
beacon-chain/core/peerdas/BUILD.bazel
Normal file
36
beacon-chain/core/peerdas/BUILD.bazel
Normal file
@@ -0,0 +1,36 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["helpers.go"],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//crypto/hash:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"@com_github_ethereum_c_kzg_4844//bindings/go:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
"@com_github_holiman_uint256//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["helpers_test.go"],
|
||||
deps = [
|
||||
":go_default_library",
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"@com_github_consensys_gnark_crypto//ecc/bls12-381/fr:go_default_library",
|
||||
"@com_github_crate_crypto_go_kzg_4844//:go_default_library",
|
||||
"@com_github_ethereum_c_kzg_4844//bindings/go:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
)
|
||||
302
beacon-chain/core/peerdas/helpers.go
Normal file
302
beacon-chain/core/peerdas/helpers.go
Normal file
@@ -0,0 +1,302 @@
|
||||
package peerdas
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"math"
|
||||
|
||||
cKzg4844 "github.com/ethereum/c-kzg-4844/bindings/go"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/holiman/uint256"
|
||||
errors "github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
// Bytes per cell
|
||||
const bytesPerCell = cKzg4844.FieldElementsPerCell * cKzg4844.BytesPerFieldElement
|
||||
|
||||
var (
|
||||
// Custom errors
|
||||
errCustodySubnetCountTooLarge = errors.New("custody subnet count larger than data column sidecar subnet count")
|
||||
errIndexTooLarge = errors.New("column index is larger than the specified number of columns")
|
||||
errMismatchLength = errors.New("mismatch in the length of the commitments and proofs")
|
||||
|
||||
// maxUint256 is the maximum value of a uint256.
|
||||
maxUint256 = &uint256.Int{math.MaxUint64, math.MaxUint64, math.MaxUint64, math.MaxUint64}
|
||||
)
|
||||
|
||||
// CustodyColumnSubnets computes the subnets the node should participate in for custody.
|
||||
func CustodyColumnSubnets(nodeId enode.ID, custodySubnetCount uint64) (map[uint64]bool, error) {
|
||||
dataColumnSidecarSubnetCount := params.BeaconConfig().DataColumnSidecarSubnetCount
|
||||
|
||||
// Check if the custody subnet count is larger than the data column sidecar subnet count.
|
||||
if custodySubnetCount > dataColumnSidecarSubnetCount {
|
||||
return nil, errCustodySubnetCountTooLarge
|
||||
}
|
||||
|
||||
// First, compute the subnet IDs that the node should participate in.
|
||||
subnetIds := make(map[uint64]bool, custodySubnetCount)
|
||||
|
||||
one := uint256.NewInt(1)
|
||||
|
||||
for currentId := new(uint256.Int).SetBytes(nodeId.Bytes()); uint64(len(subnetIds)) < custodySubnetCount; currentId.Add(currentId, one) {
|
||||
// Convert to big endian bytes.
|
||||
currentIdBytesBigEndian := currentId.Bytes32()
|
||||
|
||||
// Convert to little endian.
|
||||
currentIdBytesLittleEndian := bytesutil.ReverseByteOrder(currentIdBytesBigEndian[:])
|
||||
|
||||
// Hash the result.
|
||||
hashedCurrentId := hash.Hash(currentIdBytesLittleEndian)
|
||||
|
||||
// Get the subnet ID.
|
||||
subnetId := binary.LittleEndian.Uint64(hashedCurrentId[:8]) % dataColumnSidecarSubnetCount
|
||||
|
||||
// Add the subnet to the map.
|
||||
subnetIds[subnetId] = true
|
||||
|
||||
// Overflow prevention.
|
||||
if currentId.Cmp(maxUint256) == 0 {
|
||||
currentId = uint256.NewInt(0)
|
||||
}
|
||||
}
|
||||
|
||||
return subnetIds, nil
|
||||
}
|
||||
|
||||
// CustodyColumns computes the columns the node should custody.
|
||||
// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/das-core.md#helper-functions
|
||||
func CustodyColumns(nodeId enode.ID, custodySubnetCount uint64) (map[uint64]bool, error) {
|
||||
dataColumnSidecarSubnetCount := params.BeaconConfig().DataColumnSidecarSubnetCount
|
||||
|
||||
// Compute the custodied subnets.
|
||||
subnetIds, err := CustodyColumnSubnets(nodeId, custodySubnetCount)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "custody subnets")
|
||||
}
|
||||
|
||||
columnsPerSubnet := cKzg4844.CellsPerExtBlob / dataColumnSidecarSubnetCount
|
||||
|
||||
// Knowing the subnet ID and the number of columns per subnet, select all the columns the node should custody.
|
||||
// Columns belonging to the same subnet are contiguous.
|
||||
columnIndices := make(map[uint64]bool, custodySubnetCount*columnsPerSubnet)
|
||||
for i := uint64(0); i < columnsPerSubnet; i++ {
|
||||
for subnetId := range subnetIds {
|
||||
columnIndex := dataColumnSidecarSubnetCount*i + subnetId
|
||||
columnIndices[columnIndex] = true
|
||||
}
|
||||
}
|
||||
|
||||
return columnIndices, nil
|
||||
}
|
||||
|
||||
// DataColumnSidecars computes the data column sidecars from the signed block and blobs.
|
||||
// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/das-core.md#recover_matrix
|
||||
func DataColumnSidecars(signedBlock interfaces.ReadOnlySignedBeaconBlock, blobs []cKzg4844.Blob) ([]*ethpb.DataColumnSidecar, error) {
|
||||
blobsCount := len(blobs)
|
||||
if blobsCount == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Get the signed block header.
|
||||
signedBlockHeader, err := signedBlock.Header()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "signed block header")
|
||||
}
|
||||
|
||||
// Get the block body.
|
||||
block := signedBlock.Block()
|
||||
blockBody := block.Body()
|
||||
|
||||
// Get the blob KZG commitments.
|
||||
blobKzgCommitments, err := blockBody.BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "blob KZG commitments")
|
||||
}
|
||||
|
||||
// Compute the KZG commitments inclusion proof.
|
||||
kzgCommitmentsInclusionProof, err := blocks.MerkleProofKZGCommitments(blockBody)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "merkle proof ZKG commitments")
|
||||
}
|
||||
|
||||
// Compute cells and proofs.
|
||||
cells := make([][cKzg4844.CellsPerExtBlob]cKzg4844.Cell, 0, blobsCount)
|
||||
proofs := make([][cKzg4844.CellsPerExtBlob]cKzg4844.KZGProof, 0, blobsCount)
|
||||
|
||||
for i := range blobs {
|
||||
blob := &blobs[i]
|
||||
blobCells, blobProofs, err := cKzg4844.ComputeCellsAndKZGProofs(blob)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "compute cells and KZG proofs")
|
||||
}
|
||||
|
||||
cells = append(cells, blobCells)
|
||||
proofs = append(proofs, blobProofs)
|
||||
}
|
||||
|
||||
// Get the column sidecars.
|
||||
sidecars := make([]*ethpb.DataColumnSidecar, 0, cKzg4844.CellsPerExtBlob)
|
||||
for columnIndex := uint64(0); columnIndex < cKzg4844.CellsPerExtBlob; columnIndex++ {
|
||||
column := make([]cKzg4844.Cell, 0, blobsCount)
|
||||
kzgProofOfColumn := make([]cKzg4844.KZGProof, 0, blobsCount)
|
||||
|
||||
for rowIndex := 0; rowIndex < blobsCount; rowIndex++ {
|
||||
cell := cells[rowIndex][columnIndex]
|
||||
column = append(column, cell)
|
||||
|
||||
kzgProof := proofs[rowIndex][columnIndex]
|
||||
kzgProofOfColumn = append(kzgProofOfColumn, kzgProof)
|
||||
}
|
||||
|
||||
columnBytes := make([][]byte, 0, blobsCount)
|
||||
for i := range column {
|
||||
cell := column[i]
|
||||
|
||||
cellBytes := make([]byte, 0, bytesPerCell)
|
||||
for _, fieldElement := range cell {
|
||||
copiedElem := fieldElement
|
||||
cellBytes = append(cellBytes, copiedElem[:]...)
|
||||
}
|
||||
|
||||
columnBytes = append(columnBytes, cellBytes)
|
||||
}
|
||||
|
||||
kzgProofOfColumnBytes := make([][]byte, 0, blobsCount)
|
||||
for _, kzgProof := range kzgProofOfColumn {
|
||||
copiedProof := kzgProof
|
||||
kzgProofOfColumnBytes = append(kzgProofOfColumnBytes, copiedProof[:])
|
||||
}
|
||||
|
||||
sidecar := ðpb.DataColumnSidecar{
|
||||
ColumnIndex: columnIndex,
|
||||
DataColumn: columnBytes,
|
||||
KzgCommitments: blobKzgCommitments,
|
||||
KzgProof: kzgProofOfColumnBytes,
|
||||
SignedBlockHeader: signedBlockHeader,
|
||||
KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
|
||||
}
|
||||
|
||||
sidecars = append(sidecars, sidecar)
|
||||
}
|
||||
|
||||
return sidecars, nil
|
||||
}
|
||||
|
||||
// DataColumnSidecarsForReconstruct is a TEMPORARY function until there is an official specification for it.
|
||||
// It is scheduled for deletion.
|
||||
func DataColumnSidecarsForReconstruct(
|
||||
blobKzgCommitments [][]byte,
|
||||
signedBlockHeader *ethpb.SignedBeaconBlockHeader,
|
||||
kzgCommitmentsInclusionProof [][]byte,
|
||||
blobs []cKzg4844.Blob,
|
||||
) ([]*ethpb.DataColumnSidecar, error) {
|
||||
blobsCount := len(blobs)
|
||||
if blobsCount == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Compute cells and proofs.
|
||||
cells := make([][cKzg4844.CellsPerExtBlob]cKzg4844.Cell, 0, blobsCount)
|
||||
proofs := make([][cKzg4844.CellsPerExtBlob]cKzg4844.KZGProof, 0, blobsCount)
|
||||
|
||||
for i := range blobs {
|
||||
blob := &blobs[i]
|
||||
blobCells, blobProofs, err := cKzg4844.ComputeCellsAndKZGProofs(blob)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "compute cells and KZG proofs")
|
||||
}
|
||||
|
||||
cells = append(cells, blobCells)
|
||||
proofs = append(proofs, blobProofs)
|
||||
}
|
||||
|
||||
// Get the column sidecars.
|
||||
sidecars := make([]*ethpb.DataColumnSidecar, 0, cKzg4844.CellsPerExtBlob)
|
||||
for columnIndex := uint64(0); columnIndex < cKzg4844.CellsPerExtBlob; columnIndex++ {
|
||||
column := make([]cKzg4844.Cell, 0, blobsCount)
|
||||
kzgProofOfColumn := make([]cKzg4844.KZGProof, 0, blobsCount)
|
||||
|
||||
for rowIndex := 0; rowIndex < blobsCount; rowIndex++ {
|
||||
cell := cells[rowIndex][columnIndex]
|
||||
column = append(column, cell)
|
||||
|
||||
kzgProof := proofs[rowIndex][columnIndex]
|
||||
kzgProofOfColumn = append(kzgProofOfColumn, kzgProof)
|
||||
}
|
||||
|
||||
columnBytes := make([][]byte, 0, blobsCount)
|
||||
for i := range column {
|
||||
cell := column[i]
|
||||
|
||||
cellBytes := make([]byte, 0, bytesPerCell)
|
||||
for _, fieldElement := range cell {
|
||||
copiedElem := fieldElement
|
||||
cellBytes = append(cellBytes, copiedElem[:]...)
|
||||
}
|
||||
|
||||
columnBytes = append(columnBytes, cellBytes)
|
||||
}
|
||||
|
||||
kzgProofOfColumnBytes := make([][]byte, 0, blobsCount)
|
||||
for _, kzgProof := range kzgProofOfColumn {
|
||||
copiedProof := kzgProof
|
||||
kzgProofOfColumnBytes = append(kzgProofOfColumnBytes, copiedProof[:])
|
||||
}
|
||||
|
||||
sidecar := ðpb.DataColumnSidecar{
|
||||
ColumnIndex: columnIndex,
|
||||
DataColumn: columnBytes,
|
||||
KzgCommitments: blobKzgCommitments,
|
||||
KzgProof: kzgProofOfColumnBytes,
|
||||
SignedBlockHeader: signedBlockHeader,
|
||||
KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
|
||||
}
|
||||
|
||||
sidecars = append(sidecars, sidecar)
|
||||
}
|
||||
|
||||
return sidecars, nil
|
||||
}
|
||||
|
||||
// VerifyDataColumnSidecarKZGProofs verifies the provided KZG Proofs for the particular
|
||||
// data column.
|
||||
func VerifyDataColumnSidecarKZGProofs(sc *ethpb.DataColumnSidecar) (bool, error) {
|
||||
if sc.ColumnIndex >= params.BeaconConfig().NumberOfColumns {
|
||||
return false, errIndexTooLarge
|
||||
}
|
||||
if len(sc.DataColumn) != len(sc.KzgCommitments) || len(sc.KzgCommitments) != len(sc.KzgProof) {
|
||||
return false, errMismatchLength
|
||||
}
|
||||
blobsCount := len(sc.DataColumn)
|
||||
|
||||
rowIdx := make([]uint64, 0, blobsCount)
|
||||
colIdx := make([]uint64, 0, blobsCount)
|
||||
for i := 0; i < len(sc.DataColumn); i++ {
|
||||
copiedI := uint64(i)
|
||||
rowIdx = append(rowIdx, copiedI)
|
||||
colI := sc.ColumnIndex
|
||||
colIdx = append(colIdx, colI)
|
||||
}
|
||||
ckzgComms := make([]cKzg4844.Bytes48, 0, len(sc.KzgCommitments))
|
||||
for _, com := range sc.KzgCommitments {
|
||||
ckzgComms = append(ckzgComms, cKzg4844.Bytes48(com))
|
||||
}
|
||||
var cells []cKzg4844.Cell
|
||||
for _, ce := range sc.DataColumn {
|
||||
var newCell []cKzg4844.Bytes32
|
||||
for i := 0; i < len(ce); i += 32 {
|
||||
newCell = append(newCell, cKzg4844.Bytes32(ce[i:i+32]))
|
||||
}
|
||||
cells = append(cells, cKzg4844.Cell(newCell))
|
||||
}
|
||||
var proofs []cKzg4844.Bytes48
|
||||
for _, p := range sc.KzgProof {
|
||||
proofs = append(proofs, cKzg4844.Bytes48(p))
|
||||
}
|
||||
return cKzg4844.VerifyCellKZGProofBatch(ckzgComms, rowIdx, colIdx, cells, proofs)
|
||||
}
|
||||
91
beacon-chain/core/peerdas/helpers_test.go
Normal file
91
beacon-chain/core/peerdas/helpers_test.go
Normal file
@@ -0,0 +1,91 @@
|
||||
package peerdas_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha256"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/consensys/gnark-crypto/ecc/bls12-381/fr"
|
||||
GoKZG "github.com/crate-crypto/go-kzg-4844"
|
||||
ckzg4844 "github.com/ethereum/c-kzg-4844/bindings/go"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func deterministicRandomness(seed int64) [32]byte {
|
||||
// Converts an int64 to a byte slice
|
||||
buf := new(bytes.Buffer)
|
||||
err := binary.Write(buf, binary.BigEndian, seed)
|
||||
if err != nil {
|
||||
logrus.WithError(err).Error("Failed to write int64 to bytes buffer")
|
||||
return [32]byte{}
|
||||
}
|
||||
bytes := buf.Bytes()
|
||||
|
||||
return sha256.Sum256(bytes)
|
||||
}
|
||||
|
||||
// Returns a serialized random field element in big-endian
|
||||
func GetRandFieldElement(seed int64) [32]byte {
|
||||
bytes := deterministicRandomness(seed)
|
||||
var r fr.Element
|
||||
r.SetBytes(bytes[:])
|
||||
|
||||
return GoKZG.SerializeScalar(r)
|
||||
}
|
||||
|
||||
// Returns a random blob using the passed seed as entropy
|
||||
func GetRandBlob(seed int64) ckzg4844.Blob {
|
||||
var blob ckzg4844.Blob
|
||||
bytesPerBlob := GoKZG.ScalarsPerBlob * GoKZG.SerializedScalarSize
|
||||
for i := 0; i < bytesPerBlob; i += GoKZG.SerializedScalarSize {
|
||||
fieldElementBytes := GetRandFieldElement(seed + int64(i))
|
||||
copy(blob[i:i+GoKZG.SerializedScalarSize], fieldElementBytes[:])
|
||||
}
|
||||
return blob
|
||||
}
|
||||
|
||||
func GenerateCommitmentAndProof(blob ckzg4844.Blob) (ckzg4844.KZGCommitment, ckzg4844.KZGProof, error) {
|
||||
commitment, err := ckzg4844.BlobToKZGCommitment(&blob)
|
||||
if err != nil {
|
||||
return ckzg4844.KZGCommitment{}, ckzg4844.KZGProof{}, err
|
||||
}
|
||||
proof, err := ckzg4844.ComputeBlobKZGProof(&blob, ckzg4844.Bytes48(commitment))
|
||||
if err != nil {
|
||||
return ckzg4844.KZGCommitment{}, ckzg4844.KZGProof{}, err
|
||||
}
|
||||
return commitment, proof, err
|
||||
}
|
||||
|
||||
func TestVerifyDataColumnSidecarKZGProofs(t *testing.T) {
|
||||
dbBlock := util.NewBeaconBlockDeneb()
|
||||
require.NoError(t, kzg.Start())
|
||||
|
||||
comms := [][]byte{}
|
||||
blobs := []ckzg4844.Blob{}
|
||||
for i := int64(0); i < 6; i++ {
|
||||
blob := GetRandBlob(i)
|
||||
commitment, _, err := GenerateCommitmentAndProof(blob)
|
||||
require.NoError(t, err)
|
||||
comms = append(comms, commitment[:])
|
||||
blobs = append(blobs, blob)
|
||||
}
|
||||
|
||||
dbBlock.Block.Body.BlobKzgCommitments = comms
|
||||
sBlock, err := blocks.NewSignedBeaconBlock(dbBlock)
|
||||
require.NoError(t, err)
|
||||
sCars, err := peerdas.DataColumnSidecars(sBlock, blobs)
|
||||
require.NoError(t, err)
|
||||
|
||||
for i, sidecar := range sCars {
|
||||
verified, err := peerdas.VerifyDataColumnSidecarKZGProofs(sidecar)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, verified, fmt.Sprintf("sidecar %d failed", i))
|
||||
}
|
||||
}
|
||||
@@ -226,7 +226,7 @@ func TestProcessEpoch_BadBalanceAltair(t *testing.T) {
|
||||
epochParticipation[0] = participation
|
||||
assert.NoError(t, s.SetCurrentParticipationBits(epochParticipation))
|
||||
assert.NoError(t, s.SetPreviousParticipationBits(epochParticipation))
|
||||
_, err = altair.ProcessEpoch(context.Background(), s)
|
||||
err = altair.ProcessEpoch(context.Background(), s)
|
||||
assert.ErrorContains(t, "addition overflows", err)
|
||||
}
|
||||
|
||||
|
||||
@@ -216,7 +216,7 @@ func TestProcessEpoch_BadBalanceBellatrix(t *testing.T) {
|
||||
epochParticipation[0] = participation
|
||||
assert.NoError(t, s.SetCurrentParticipationBits(epochParticipation))
|
||||
assert.NoError(t, s.SetPreviousParticipationBits(epochParticipation))
|
||||
_, err = altair.ProcessEpoch(context.Background(), s)
|
||||
err = altair.ProcessEpoch(context.Background(), s)
|
||||
assert.ErrorContains(t, "addition overflows", err)
|
||||
}
|
||||
|
||||
|
||||
@@ -257,14 +257,12 @@ func ProcessSlots(ctx context.Context, state state.BeaconState, slot primitives.
|
||||
return nil, errors.Wrap(err, "could not process epoch with optimizations")
|
||||
}
|
||||
} else if state.Version() <= version.Deneb {
|
||||
state, err = altair.ProcessEpoch(ctx, state)
|
||||
if err != nil {
|
||||
if err = altair.ProcessEpoch(ctx, state); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return nil, errors.Wrap(err, fmt.Sprintf("could not process %s epoch", version.String(state.Version())))
|
||||
}
|
||||
} else {
|
||||
state, err = electra.ProcessEpoch(ctx, state)
|
||||
if err != nil {
|
||||
if err = electra.ProcessEpoch(ctx, state); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return nil, errors.Wrap(err, fmt.Sprintf("could not process %s epoch", version.String(state.Version())))
|
||||
}
|
||||
|
||||
@@ -227,7 +227,7 @@ func ProcessBlockNoVerifyAnySig(
|
||||
// def process_operations(state: BeaconState, body: BeaconBlockBody) -> None:
|
||||
// # [Modified in Electra:EIP6110]
|
||||
// # Disable former deposit mechanism once all prior deposits are processed
|
||||
// eth1_deposit_index_limit = min(state.eth1_data.deposit_count, state.deposit_receipts_start_index)
|
||||
// eth1_deposit_index_limit = min(state.eth1_data.deposit_count, state.deposit_requests_start_index)
|
||||
// if state.eth1_deposit_index < eth1_deposit_index_limit:
|
||||
// assert len(body.deposits) == min(MAX_DEPOSITS, eth1_deposit_index_limit - state.eth1_deposit_index)
|
||||
// else:
|
||||
@@ -245,7 +245,7 @@ func ProcessBlockNoVerifyAnySig(
|
||||
// for_ops(body.bls_to_execution_changes, process_bls_to_execution_change)
|
||||
// # [New in Electra:EIP7002:EIP7251]
|
||||
// for_ops(body.execution_payload.withdrawal_requests, process_execution_layer_withdrawal_request)
|
||||
// for_ops(body.execution_payload.deposit_receipts, process_deposit_receipt) # [New in Electra:EIP6110]
|
||||
// for_ops(body.execution_payload.deposit_requests, process_deposit_requests) # [New in Electra:EIP6110]
|
||||
// for_ops(body.consolidations, process_consolidation) # [New in Electra:EIP7251]
|
||||
func ProcessOperationsNoVerifyAttsSigs(
|
||||
ctx context.Context,
|
||||
@@ -401,7 +401,7 @@ func VerifyBlobCommitmentCount(blk interfaces.ReadOnlyBeaconBlock) error {
|
||||
// def process_operations(state: BeaconState, body: BeaconBlockBody) -> None:
|
||||
// # [Modified in Electra:EIP6110]
|
||||
// # Disable former deposit mechanism once all prior deposits are processed
|
||||
// eth1_deposit_index_limit = min(state.eth1_data.deposit_count, state.deposit_receipts_start_index)
|
||||
// eth1_deposit_index_limit = min(state.eth1_data.deposit_count, state.deposit_requests_start_index)
|
||||
// if state.eth1_deposit_index < eth1_deposit_index_limit:
|
||||
// assert len(body.deposits) == min(MAX_DEPOSITS, eth1_deposit_index_limit - state.eth1_deposit_index)
|
||||
// else:
|
||||
@@ -419,7 +419,7 @@ func VerifyBlobCommitmentCount(blk interfaces.ReadOnlyBeaconBlock) error {
|
||||
// for_ops(body.bls_to_execution_changes, process_bls_to_execution_change)
|
||||
// # [New in Electra:EIP7002:EIP7251]
|
||||
// for_ops(body.execution_payload.withdrawal_requests, process_execution_layer_withdrawal_request)
|
||||
// for_ops(body.execution_payload.deposit_receipts, process_deposit_receipt) # [New in Electra:EIP6110]
|
||||
// for_ops(body.execution_payload.deposit_requests, process_deposit_requests) # [New in Electra:EIP6110]
|
||||
// for_ops(body.consolidations, process_consolidation) # [New in Electra:EIP7251]
|
||||
func electraOperations(
|
||||
ctx context.Context,
|
||||
@@ -445,11 +445,12 @@ func electraOperations(
|
||||
if !ok {
|
||||
return nil, errors.New("could not cast execution data to electra execution data")
|
||||
}
|
||||
st, err = electra.ProcessExecutionLayerWithdrawRequests(ctx, st, exe.WithdrawalRequests())
|
||||
st, err = electra.ProcessWithdrawalRequests(ctx, st, exe.WithdrawalRequests())
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process execution layer withdrawal requests")
|
||||
}
|
||||
st, err = electra.ProcessDepositReceipts(ctx, st, exe.DepositReceipts())
|
||||
|
||||
st, err = electra.ProcessDepositRequests(ctx, st, exe.DepositRequests()) // TODO: EIP-6110 deposit changes.
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process deposit receipts")
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@ go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"availability.go",
|
||||
"availability_columns.go",
|
||||
"cache.go",
|
||||
"iface.go",
|
||||
"mock.go",
|
||||
@@ -20,6 +21,7 @@ go_library(
|
||||
"//runtime/logging:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
|
||||
151
beacon-chain/das/availability_columns.go
Normal file
151
beacon-chain/das/availability_columns.go
Normal file
@@ -0,0 +1,151 @@
|
||||
package das
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
errors "github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// LazilyPersistentStoreColumn is an implementation of AvailabilityStore to be used when batch syncing data columns.
|
||||
// This implementation will hold any blobs passed to Persist until the IsDataAvailable is called for their
|
||||
// block, at which time they will undergo full verification and be saved to the disk.
|
||||
type LazilyPersistentStoreColumn struct {
|
||||
store *filesystem.BlobStorage
|
||||
cache *cache
|
||||
verifier ColumnBatchVerifier
|
||||
nodeID enode.ID
|
||||
}
|
||||
|
||||
type ColumnBatchVerifier interface {
|
||||
VerifiedRODataColumns(ctx context.Context, blk blocks.ROBlock, sc []blocks.RODataColumn) ([]blocks.VerifiedRODataColumn, error)
|
||||
}
|
||||
|
||||
func NewLazilyPersistentStoreColumn(store *filesystem.BlobStorage, verifier ColumnBatchVerifier, id enode.ID) *LazilyPersistentStoreColumn {
|
||||
return &LazilyPersistentStoreColumn{
|
||||
store: store,
|
||||
cache: newCache(),
|
||||
verifier: verifier,
|
||||
nodeID: id,
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: Very Ugly, change interface to allow for columns and blobs
|
||||
func (s *LazilyPersistentStoreColumn) Persist(current primitives.Slot, sc ...blocks.ROBlob) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// PersistColumns adds columns to the working column cache. columns stored in this cache will be persisted
|
||||
// for at least as long as the node is running. Once IsDataAvailable succeeds, all blobs referenced
|
||||
// by the given block are guaranteed to be persisted for the remainder of the retention period.
|
||||
func (s *LazilyPersistentStoreColumn) PersistColumns(current primitives.Slot, sc ...blocks.RODataColumn) error {
|
||||
if len(sc) == 0 {
|
||||
return nil
|
||||
}
|
||||
if len(sc) > 1 {
|
||||
first := sc[0].BlockRoot()
|
||||
for i := 1; i < len(sc); i++ {
|
||||
if first != sc[i].BlockRoot() {
|
||||
return errMixedRoots
|
||||
}
|
||||
}
|
||||
}
|
||||
if !params.WithinDAPeriod(slots.ToEpoch(sc[0].Slot()), slots.ToEpoch(current)) {
|
||||
return nil
|
||||
}
|
||||
key := keyFromColumn(sc[0])
|
||||
entry := s.cache.ensure(key)
|
||||
for i := range sc {
|
||||
if err := entry.stashColumns(&sc[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsDataAvailable returns nil if all the commitments in the given block are persisted to the db and have been verified.
|
||||
// BlobSidecars already in the db are assumed to have been previously verified against the block.
|
||||
func (s *LazilyPersistentStoreColumn) IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error {
|
||||
blockCommitments, err := fullCommitmentsToCheck(b, current)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could check data availability for block %#x", b.Root())
|
||||
}
|
||||
// Return early for blocks that are pre-deneb or which do not have any commitments.
|
||||
if blockCommitments.count() == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
key := keyFromBlock(b)
|
||||
entry := s.cache.ensure(key)
|
||||
defer s.cache.delete(key)
|
||||
root := b.Root()
|
||||
sumz, err := s.store.WaitForSummarizer(ctx)
|
||||
if err != nil {
|
||||
log.WithField("root", fmt.Sprintf("%#x", b.Root())).
|
||||
WithError(err).
|
||||
Debug("Failed to receive BlobStorageSummarizer within IsDataAvailable")
|
||||
} else {
|
||||
entry.setDiskSummary(sumz.Summary(root))
|
||||
}
|
||||
|
||||
// Verify we have all the expected sidecars, and fail fast if any are missing or inconsistent.
|
||||
// We don't try to salvage problematic batches because this indicates a misbehaving peer and we'd rather
|
||||
// ignore their response and decrease their peer score.
|
||||
sidecars, err := entry.filterColumns(root, blockCommitments)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "incomplete BlobSidecar batch")
|
||||
}
|
||||
// Do thorough verifications of each BlobSidecar for the block.
|
||||
// Same as above, we don't save BlobSidecars if there are any problems with the batch.
|
||||
vscs, err := s.verifier.VerifiedRODataColumns(ctx, b, sidecars)
|
||||
if err != nil {
|
||||
var me verification.VerificationMultiError
|
||||
ok := errors.As(err, &me)
|
||||
if ok {
|
||||
fails := me.Failures()
|
||||
lf := make(log.Fields, len(fails))
|
||||
for i := range fails {
|
||||
lf[fmt.Sprintf("fail_%d", i)] = fails[i].Error()
|
||||
}
|
||||
log.WithFields(lf).
|
||||
Debug("invalid ColumnSidecars received")
|
||||
}
|
||||
return errors.Wrapf(err, "invalid ColumnSidecars received for block %#x", root)
|
||||
}
|
||||
// Ensure that each column sidecar is written to disk.
|
||||
for i := range vscs {
|
||||
if err := s.store.SaveDataColumn(vscs[i]); err != nil {
|
||||
return errors.Wrapf(err, "failed to save ColumnSidecar index %d for block %#x", vscs[i].ColumnIndex, root)
|
||||
}
|
||||
}
|
||||
// All ColumnSidecars are persisted - da check succeeds.
|
||||
return nil
|
||||
}
|
||||
|
||||
func fullCommitmentsToCheck(b blocks.ROBlock, current primitives.Slot) (safeCommitmentsArray, error) {
|
||||
var ar safeCommitmentsArray
|
||||
if b.Version() < version.Deneb {
|
||||
return ar, nil
|
||||
}
|
||||
// We are only required to check within MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS
|
||||
if !params.WithinDAPeriod(slots.ToEpoch(b.Block().Slot()), slots.ToEpoch(current)) {
|
||||
return ar, nil
|
||||
}
|
||||
kc, err := b.Block().Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return ar, err
|
||||
}
|
||||
for i := range ar {
|
||||
copy(ar[i], kc)
|
||||
}
|
||||
return ar, nil
|
||||
}
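For orientation, here is a minimal sketch of how a batch-sync caller might drive this store; `blobStorage`, `verifier`, `nodeID`, `blk`, and `columns` are assumptions standing in for state the syncer already holds, not part of this change:

	// Hypothetical caller-side flow for the column availability store.
	store := das.NewLazilyPersistentStoreColumn(blobStorage, verifier, nodeID)

	// Stash the columns fetched for the block; nothing is verified or written yet.
	if err := store.PersistColumns(currentSlot, columns...); err != nil {
		return err
	}

	// Full verification and the writes to disk happen inside IsDataAvailable.
	if err := store.IsDataAvailable(ctx, currentSlot, blk); err != nil {
		return err // the whole batch is rejected; nothing was saved
	}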
@@ -2,6 +2,7 @@ package das

import (
	"bytes"
	"reflect"

	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
@@ -38,6 +39,10 @@ func keyFromSidecar(sc blocks.ROBlob) cacheKey {
	return cacheKey{slot: sc.Slot(), root: sc.BlockRoot()}
}

func keyFromColumn(sc blocks.RODataColumn) cacheKey {
	return cacheKey{slot: sc.Slot(), root: sc.BlockRoot()}
}

// keyFromBlock is a convenience method for constructing a cacheKey from a ROBlock value.
func keyFromBlock(b blocks.ROBlock) cacheKey {
	return cacheKey{slot: b.Block().Slot(), root: b.Root()}
@@ -61,6 +66,7 @@ func (c *cache) delete(key cacheKey) {
// cacheEntry holds a fixed-length cache of BlobSidecars.
type cacheEntry struct {
	scs         [fieldparams.MaxBlobsPerBlock]*blocks.ROBlob
	colScs      [fieldparams.NumberOfColumns]*blocks.RODataColumn
	diskSummary filesystem.BlobStorageSummary
}

@@ -82,6 +88,17 @@ func (e *cacheEntry) stash(sc *blocks.ROBlob) error {
	return nil
}

func (e *cacheEntry) stashColumns(sc *blocks.RODataColumn) error {
	if sc.ColumnIndex >= fieldparams.NumberOfColumns {
		return errors.Wrapf(errIndexOutOfBounds, "index=%d", sc.ColumnIndex)
	}
	if e.colScs[sc.ColumnIndex] != nil {
		return errors.Wrapf(ErrDuplicateSidecar, "root=%#x, index=%d, commitment=%#x", sc.BlockRoot(), sc.ColumnIndex, sc.KzgCommitments)
	}
	e.colScs[sc.ColumnIndex] = sc
	return nil
}

// filter evicts sidecars that are not committed to by the block and returns custom
// errors if the cache is missing any of the commitments, or if the commitments in
// the cache do not match those found in the block. If err is nil, then all expected
@@ -117,6 +134,35 @@ func (e *cacheEntry) filter(root [32]byte, kc safeCommitmentArray) ([]blocks.ROB
	return scs, nil
}

func (e *cacheEntry) filterColumns(root [32]byte, kc safeCommitmentsArray) ([]blocks.RODataColumn, error) {
	if e.diskSummary.AllAvailable(kc.count()) {
		return nil, nil
	}
	scs := make([]blocks.RODataColumn, 0, kc.count())
	for i := uint64(0); i < fieldparams.NumberOfColumns; i++ {
		// We already have this column; we don't need to write it or validate it.
		if e.diskSummary.HasIndex(i) {
			continue
		}
		if kc[i] == nil {
			if e.colScs[i] != nil {
				return nil, errors.Wrapf(errCommitmentMismatch, "root=%#x, index=%#x, commitment=%#x, no block commitment", root, i, e.colScs[i].KzgCommitments)
			}
			continue
		}

		if e.colScs[i] == nil {
			return nil, errors.Wrapf(errMissingSidecar, "root=%#x, index=%#x", root, i)
		}
		if !reflect.DeepEqual(kc[i], e.colScs[i].KzgCommitments) {
			return nil, errors.Wrapf(errCommitmentMismatch, "root=%#x, index=%#x, commitment=%#x, block commitment=%#x", root, i, e.colScs[i].KzgCommitments, kc[i])
		}
		scs = append(scs, *e.colScs[i])
	}

	return scs, nil
}

// safeCommitmentArray is a fixed size array of commitment byte slices. This is helpful for avoiding
// gratuitous bounds checks.
type safeCommitmentArray [fieldparams.MaxBlobsPerBlock][]byte
@@ -129,3 +175,14 @@ func (s safeCommitmentArray) count() int {
	}
	return fieldparams.MaxBlobsPerBlock
}

type safeCommitmentsArray [fieldparams.NumberOfColumns][][]byte

func (s safeCommitmentsArray) count() int {
	for i := range s {
		if s[i] == nil {
			return i
		}
	}
	return fieldparams.NumberOfColumns
}
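Note that count() relies on commitments being densely packed from index 0; a small illustration of the semantics (values are hypothetical):

	var a safeCommitmentsArray
	a[0] = [][]byte{{0x01}}
	a[1] = [][]byte{{0x02}}
	// a[2] is nil, so counting stops at the first gap.
	n := a.count() // n == 2; an empty array yields 0, a full one yields fieldparams.NumberOfColumns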
@@ -13,6 +13,7 @@ go_library(
    importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem",
    visibility = ["//visibility:public"],
    deps = [
        "//async/event:go_default_library",
        "//beacon-chain/verification:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",

@@ -12,6 +12,7 @@ import (

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/async/event"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
@@ -39,8 +40,15 @@ const (
	directoryPermissions = 0700
)

// BlobStorageOption is a functional option for configuring a BlobStorage.
type BlobStorageOption func(*BlobStorage) error
type (
	// BlobStorageOption is a functional option for configuring a BlobStorage.
	BlobStorageOption func(*BlobStorage) error

	RootIndexPair struct {
		Root  [fieldparams.RootLength]byte
		Index uint64
	}
)

// WithBasePath is a required option that sets the base path of blob storage.
func WithBasePath(base string) BlobStorageOption {
@@ -70,7 +78,10 @@ func WithSaveFsync(fsync bool) BlobStorageOption {
// attempt to hold a file lock to guarantee exclusive control of the blob storage directory, so this should only be
// initialized once per beacon node.
func NewBlobStorage(opts ...BlobStorageOption) (*BlobStorage, error) {
	b := &BlobStorage{}
	b := &BlobStorage{
		DataColumnFeed: new(event.Feed),
	}

	for _, o := range opts {
		if err := o(b); err != nil {
			return nil, errors.Wrap(err, "failed to create blob storage")
@@ -99,6 +110,7 @@ type BlobStorage struct {
	fsync          bool
	fs             afero.Fs
	pruner         *blobPruner
	DataColumnFeed *event.Feed
}

// WarmCache runs the prune routine with an expiration of slot of 0, so nothing will be pruned, but the pruner's cache
@@ -221,6 +233,110 @@ func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
	return nil
}

// SaveDataColumn saves a data column to our local filesystem.
func (bs *BlobStorage) SaveDataColumn(column blocks.VerifiedRODataColumn) error {
	startTime := time.Now()
	fname := namerForDataColumn(column)
	sszPath := fname.path()
	exists, err := afero.Exists(bs.fs, sszPath)
	if err != nil {
		return err
	}

	if exists {
		log.Trace("Ignoring a duplicate data column sidecar save attempt")
		return nil
	}

	if bs.pruner != nil {
		hRoot, err := column.SignedBlockHeader.Header.HashTreeRoot()
		if err != nil {
			return err
		}
		if err := bs.pruner.notify(hRoot, column.SignedBlockHeader.Header.Slot, column.ColumnIndex); err != nil {
			return errors.Wrapf(err, "problem maintaining pruning cache/metrics for sidecar with root=%#x", hRoot)
		}
	}

	// Serialize the ethpb.DataColumnSidecar to binary data using SSZ.
	sidecarData, err := column.MarshalSSZ()
	if err != nil {
		return errors.Wrap(err, "failed to serialize sidecar data")
	} else if len(sidecarData) == 0 {
		return errSidecarEmptySSZData
	}

	if err := bs.fs.MkdirAll(fname.dir(), directoryPermissions); err != nil {
		return err
	}
	partPath := fname.partPath(fmt.Sprintf("%p", sidecarData))

	partialMoved := false
	// Ensure the partial file is deleted.
	defer func() {
		if partialMoved {
			return
		}
		// The removal is expected to fail when the save succeeded, since the partial file was renamed away.
		err = bs.fs.Remove(partPath)
		if err == nil {
			log.WithFields(logrus.Fields{
				"partPath": partPath,
			}).Debugf("Removed partial file")
		}
	}()

	// Create a partial file and write the serialized data to it.
	partialFile, err := bs.fs.Create(partPath)
	if err != nil {
		return errors.Wrap(err, "failed to create partial file")
	}

	n, err := partialFile.Write(sidecarData)
	if err != nil {
		closeErr := partialFile.Close()
		if closeErr != nil {
			return closeErr
		}
		return errors.Wrap(err, "failed to write to partial file")
	}
	if bs.fsync {
		if err := partialFile.Sync(); err != nil {
			return err
		}
	}

	if err := partialFile.Close(); err != nil {
		return err
	}

	if n != len(sidecarData) {
		return fmt.Errorf("failed to write the full bytes of sidecarData, wrote only %d of %d bytes", n, len(sidecarData))
	}

	if n == 0 {
		return errEmptyBlobWritten
	}

	// Atomically rename the partial file to its final name.
	err = bs.fs.Rename(partPath, sszPath)
	if err != nil {
		return errors.Wrap(err, "failed to rename partial file to final name")
	}
	partialMoved = true

	// Notify the data column notifier that a new data column has been saved.
	bs.DataColumnFeed.Send(RootIndexPair{
		Root:  column.BlockRoot(),
		Index: column.ColumnIndex,
	})

	// TODO: Use new metrics for data columns.
	blobsWrittenCounter.Inc()
	blobSaveLatency.Observe(float64(time.Since(startTime).Milliseconds()))
	return nil
}
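The partial-file-then-rename sequence above is what gives the save crash atomicity: a reader can never observe a half-written .ssz file, because the rename is the only step that makes the data visible under its final name, and the deferred Remove cleans up the temporary file if the write fails partway. The %p of the byte slice merely makes the partial file name unique per in-flight save.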
// Get retrieves a single BlobSidecar by its root and index.
// Since BlobStorage only writes blobs that have undergone full verification, the return
// value is always a VerifiedROBlob.
@@ -246,6 +362,20 @@ func (bs *BlobStorage) Get(root [32]byte, idx uint64) (blocks.VerifiedROBlob, er
	return verification.BlobSidecarNoop(ro)
}

// GetColumn retrieves a single DataColumnSidecar by its root and index.
func (bs *BlobStorage) GetColumn(root [32]byte, idx uint64) (*ethpb.DataColumnSidecar, error) {
	expected := blobNamer{root: root, index: idx}
	encoded, err := afero.ReadFile(bs.fs, expected.path())
	if err != nil {
		return nil, err
	}
	s := &ethpb.DataColumnSidecar{}
	if err := s.UnmarshalSSZ(encoded); err != nil {
		return nil, err
	}
	return s, nil
}
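A save/read roundtrip through these two methods might look like the following test-style sketch; `vsc` is assumed to be a blocks.VerifiedRODataColumn already produced by the verifier:

	if err := bs.SaveDataColumn(vsc); err != nil {
		return err
	}
	// Reads address a sidecar by block root and column index.
	sc, err := bs.GetColumn(vsc.BlockRoot(), vsc.ColumnIndex)
	if err != nil {
		return err
	}
	_ = sc // *ethpb.DataColumnSidecar decoded from the on-disk SSZ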
// Remove removes all blobs for a given root.
func (bs *BlobStorage) Remove(root [32]byte) error {
	rootDir := blobNamer{root: root}.dir()
@@ -289,6 +419,61 @@ func (bs *BlobStorage) Indices(root [32]byte) ([fieldparams.MaxBlobsPerBlock]boo
	return mask, nil
}

// ColumnIndices retrieves the stored column indexes from our filesystem.
func (bs *BlobStorage) ColumnIndices(root [32]byte) (map[uint64]bool, error) {
	custody := make(map[uint64]bool, fieldparams.NumberOfColumns)

	// Get all the files in the directory.
	rootDir := blobNamer{root: root}.dir()
	entries, err := afero.ReadDir(bs.fs, rootDir)
	if err != nil {
		// If the directory does not exist, we do not custody any columns.
		if os.IsNotExist(err) {
			return nil, nil
		}

		return nil, errors.Wrap(err, "read directory")
	}

	// Iterate over all the entries in the directory.
	for _, entry := range entries {
		// If the entry is a directory, skip it.
		if entry.IsDir() {
			continue
		}

		// If the entry does not have the correct extension, skip it.
		name := entry.Name()
		if !strings.HasSuffix(name, sszExt) {
			continue
		}

		// The file should be in the `<index>.<extension>` format.
		// Skip the file if it does not match the format.
		parts := strings.Split(name, ".")
		if len(parts) != 2 {
			continue
		}

		// Get the column index from the file name.
		columnIndexStr := parts[0]
		columnIndex, err := strconv.ParseUint(columnIndexStr, 10, 64)
		if err != nil {
			return nil, errors.Wrapf(err, "unexpected directory entry breaks listing, %s", parts[0])
		}

		// If the column index is out of bounds, return an error.
		if columnIndex >= fieldparams.NumberOfColumns {
			return nil, errors.Wrapf(errIndexOutOfBounds, "invalid index %d", columnIndex)
		}

		// Mark the column index as in custody.
		custody[columnIndex] = true
	}

	return custody, nil
}
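ColumnIndices reuses the `<root>/<index>.<ext>` layout that blob storage already writes; the per-entry parse it performs amounts to the following (the file name here is hypothetical):

	name := "17.ssz" // an entry inside the directory for one block root
	parts := strings.Split(name, ".") // ["17", "ssz"]
	idx, err := strconv.ParseUint(parts[0], 10, 64)
	if err == nil && idx < fieldparams.NumberOfColumns {
		custody[idx] = true // column 17 is held locally
	}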
// Clear deletes all files on the filesystem.
func (bs *BlobStorage) Clear() error {
	dirs, err := listDir(bs.fs, ".")
@@ -321,6 +506,10 @@ func namerForSidecar(sc blocks.VerifiedROBlob) blobNamer {
	return blobNamer{root: sc.BlockRoot(), index: sc.Index}
}

func namerForDataColumn(col blocks.VerifiedRODataColumn) blobNamer {
	return blobNamer{root: col.BlockRoot(), index: col.ColumnIndex}
}

func (p blobNamer) dir() string {
	return rootString(p.root)
}

@@ -9,7 +9,7 @@ import (
)

// blobIndexMask is a bitmask representing the set of blob indices that are currently set.
type blobIndexMask [fieldparams.MaxBlobsPerBlock]bool
type blobIndexMask [fieldparams.NumberOfColumns]bool

// BlobStorageSummary represents cached information about the BlobSidecars on disk for each root the cache knows about.
type BlobStorageSummary struct {
@@ -68,9 +68,12 @@ func (s *blobStorageCache) Summary(root [32]byte) BlobStorageSummary {
}

func (s *blobStorageCache) ensure(key [32]byte, slot primitives.Slot, idx uint64) error {
	if idx >= fieldparams.MaxBlobsPerBlock {
		return errIndexOutOfBounds
	}
	// TODO: Separate blob index checks from data column index checks.
	/*
		if idx >= fieldparams.MaxBlobsPerBlock {
			return errIndexOutOfBounds
		}
	*/
	s.mu.Lock()
	defer s.mu.Unlock()
	v := s.cache[key]

@@ -9,6 +9,7 @@ import (
)

func TestSlotByRoot_Summary(t *testing.T) {
	t.Skip("Use new test for data columns")
	var noneSet, allSet, firstSet, lastSet, oneSet blobIndexMask
	firstSet[0] = true
	lastSet[len(lastSet)-1] = true

@@ -92,14 +92,16 @@ func windowMin(latest, offset primitives.Slot) primitives.Slot {

func (p *blobPruner) warmCache() error {
	p.Lock()
	defer p.Unlock()
	defer func() {
		if !p.warmed {
			p.warmed = true
			close(p.cacheReady)
		}
		p.Unlock()
	}()
	if err := p.prune(0); err != nil {
		return err
	}
	if !p.warmed {
		p.warmed = true
		close(p.cacheReady)
	}
	return nil
}
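Moving the unlock and the readiness signal into a single defer guarantees cacheReady is closed exactly once whether prune succeeds or fails, so waiters in waitForCache unblock either way; the old code only signalled on the success path, which is the regression the new TestCacheWarmFail test below pins down.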
@@ -2,16 +2,19 @@ package filesystem

import (
	"bytes"
	"context"
	"fmt"
	"math"
	"os"
	"path"
	"sort"
	"testing"
	"time"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
	"github.com/prysmaticlabs/prysm/v5/testing/require"
	"github.com/prysmaticlabs/prysm/v5/testing/util"
	"github.com/spf13/afero"
@@ -34,6 +37,34 @@ func TestTryPruneDir_CachedNotExpired(t *testing.T) {
	require.Equal(t, 0, pruned)
}

func TestCacheWarmFail(t *testing.T) {
	fs := afero.NewMemMapFs()
	n := blobNamer{root: bytesutil.ToBytes32([]byte("derp")), index: 0}
	bp := n.path()
	mkdir := path.Dir(bp)
	require.NoError(t, fs.MkdirAll(mkdir, directoryPermissions))

	// Create an empty blob index in the fs by touching the file at a seemingly valid path.
	fi, err := fs.Create(bp)
	require.NoError(t, err)
	require.NoError(t, fi.Close())

	// Cache warm should fail due to the unexpected EOF.
	pr, err := newBlobPruner(fs, 0)
	require.NoError(t, err)
	require.ErrorIs(t, pr.warmCache(), errPruningFailures)

	// The cache warm has finished, so calling waitForCache with a super short deadline
	// should not block or hit the context deadline.
	ctx := context.Background()
	ctx, cancel := context.WithDeadline(ctx, time.Now().Add(1*time.Millisecond))
	defer cancel()
	c, err := pr.waitForCache(ctx)
	// We will get an error and a nil value for the cache if we hit the deadline.
	require.NoError(t, err)
	require.NotNil(t, c)
}

func TestTryPruneDir_CachedExpired(t *testing.T) {
	t.Run("empty directory", func(t *testing.T) {
		fs := afero.NewMemMapFs()

@@ -23,10 +23,10 @@ import (
	"go.opencensus.io/trace"
)

// used to represent errors for inconsistent slot ranges.
// Used to represent errors for inconsistent slot ranges.
var errInvalidSlotRange = errors.New("invalid end slot and start slot provided")

// Block retrieval by root.
// Block retrieval by root. Return nil if block is not found.
func (s *Store) Block(ctx context.Context, blockRoot [32]byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.Block")
	defer span.End()

@@ -149,7 +149,7 @@ func TestState_CanSaveRetrieve(t *testing.T) {
	BlockHash:              make([]byte, 32),
	TransactionsRoot:       make([]byte, 32),
	WithdrawalsRoot:        make([]byte, 32),
	DepositReceiptsRoot:    make([]byte, 32),
	DepositRequestsRoot:    make([]byte, 32),
	WithdrawalRequestsRoot: make([]byte, 32),
})
require.NoError(t, err)

@@ -631,7 +631,7 @@ func fullPayloadFromPayloadBody(
	Withdrawals:        body.Withdrawals,
	ExcessBlobGas:      ebg,
	BlobGasUsed:        bgu,
	DepositReceipts:    dr,
	DepositRequests:    dr,
	WithdrawalRequests: wr,
}) // We can't get the block value and don't care about the block value for this instance
default:

@@ -780,8 +780,8 @@ func buildEmptyExecutionPayload(v int) (proto.Message, error) {
	BlockHash:          make([]byte, fieldparams.RootLength),
	Transactions:       make([][]byte, 0),
	Withdrawals:        make([]*pb.Withdrawal, 0),
	WithdrawalRequests: make([]*pb.ExecutionLayerWithdrawalRequest, 0),
	DepositReceipts:    make([]*pb.DepositReceipt, 0),
	WithdrawalRequests: make([]*pb.WithdrawalRequest, 0),
	DepositRequests:    make([]*pb.DepositRequest, 0),
}, nil
default:
	return nil, errors.Wrapf(ErrUnsupportedVersion, "version=%s", version.String(v))

@@ -1559,7 +1559,7 @@ func fixturesStruct() *payloadFixtures {
	Withdrawals:        []*pb.Withdrawal{},
	BlobGasUsed:        2,
	ExcessBlobGas:      3,
	DepositReceipts:    dr,
	DepositRequests:    dr,
	WithdrawalRequests: wr,
}
hexUint := hexutil.Uint64(1)

@@ -66,7 +66,7 @@ func payloadToBody(t *testing.T, ed interfaces.ExecutionData) *pb.ExecutionPaylo
}
eed, isElectra := ed.(interfaces.ExecutionDataElectra)
if isElectra {
	body.DepositRequests = pb.ProtoDepositRequestsToJson(eed.DepositReceipts())
	body.DepositRequests = pb.ProtoDepositRequestsToJson(eed.DepositRequests())
	body.WithdrawalRequests = pb.ProtoWithdrawalRequestsToJson(eed.WithdrawalRequests())
}
return body

@@ -16,7 +16,7 @@ go_library(
    ],
    deps = [
        "//api/gateway:go_default_library",
        "//api/server:go_default_library",
        "//api/server/middleware:go_default_library",
        "//async/event:go_default_library",
        "//beacon-chain/blockchain:go_default_library",
        "//beacon-chain/builder:go_default_library",

@@ -20,7 +20,7 @@ import (
	"github.com/gorilla/mux"
	"github.com/pkg/errors"
	apigateway "github.com/prysmaticlabs/prysm/v5/api/gateway"
	"github.com/prysmaticlabs/prysm/v5/api/server"
	"github.com/prysmaticlabs/prysm/v5/api/server/middleware"
	"github.com/prysmaticlabs/prysm/v5/async/event"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/builder"
@@ -120,7 +120,6 @@ type BeaconNode struct {
	initialSyncComplete chan struct{}
	BlobStorage         *filesystem.BlobStorage
	BlobStorageOptions  []filesystem.BlobStorageOption
	blobRetentionEpochs primitives.Epoch
	verifyInitWaiter    *verification.InitializerWaiter
	syncChecker         *initialsync.SyncChecker
}
@@ -409,8 +408,8 @@ func newRouter(cliCtx *cli.Context) *mux.Router {
	allowedOrigins = strings.Split(flags.GPRCGatewayCorsDomain.Value, ",")
}
r := mux.NewRouter()
r.Use(server.NormalizeQueryValuesHandler)
r.Use(server.CorsHandler(allowedOrigins))
r.Use(middleware.NormalizeQueryValuesHandler)
r.Use(middleware.CorsHandler(allowedOrigins))
return r
}

@@ -965,9 +964,8 @@ func (b *BeaconNode) registerRPCService(router *mux.Router) error {
	cert := b.cliCtx.String(flags.CertFlag.Name)
	key := b.cliCtx.String(flags.KeyFlag.Name)
	mockEth1DataVotes := b.cliCtx.Bool(flags.InteropMockEth1DataVotesFlag.Name)

	maxMsgSize := b.cliCtx.Int(cmd.GrpcMaxCallRecvMsgSizeFlag.Name)
	enableDebugRPCEndpoints := b.cliCtx.Bool(flags.EnableDebugRPCEndpoints.Name)
	enableDebugRPCEndpoints := !b.cliCtx.Bool(flags.DisableDebugRPCEndpoints.Name)

	p2pService := b.fetchP2P()
	rpcService := rpc.NewService(b.ctx, &rpc.Config{
@@ -992,6 +990,7 @@ func (b *BeaconNode) registerRPCService(router *mux.Router) error {
	FinalizationFetcher: chainService,
	BlockReceiver:       chainService,
	BlobReceiver:        chainService,
	DataColumnReceiver:  chainService,
	AttestationReceiver: chainService,
	GenesisTimeFetcher:  chainService,
	GenesisFetcher:      chainService,
@@ -1056,11 +1055,10 @@ func (b *BeaconNode) registerGRPCGateway(router *mux.Router) error {
	gatewayPort := b.cliCtx.Int(flags.GRPCGatewayPort.Name)
	rpcHost := b.cliCtx.String(flags.RPCHost.Name)
	rpcPort := b.cliCtx.Int(flags.RPCPort.Name)

	enableDebugRPCEndpoints := !b.cliCtx.Bool(flags.DisableDebugRPCEndpoints.Name)
	selfAddress := net.JoinHostPort(rpcHost, strconv.Itoa(rpcPort))
	gatewayAddress := net.JoinHostPort(gatewayHost, strconv.Itoa(gatewayPort))
	allowedOrigins := strings.Split(b.cliCtx.String(flags.GPRCGatewayCorsDomain.Name), ",")
	enableDebugRPCEndpoints := b.cliCtx.Bool(flags.EnableDebugRPCEndpoints.Name)
	selfCert := b.cliCtx.String(flags.CertFlag.Name)
	maxCallSize := b.cliCtx.Uint64(cmd.GrpcMaxCallRecvMsgSizeFlag.Name)
	httpModules := b.cliCtx.String(flags.HTTPModules.Name)

@@ -5,7 +5,6 @@ import (
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/builder"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/execution"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)

// Option for beacon node configuration.
@@ -51,11 +50,3 @@ func WithBlobStorageOptions(opt ...filesystem.BlobStorageOption) Option {
		return nil
	}
}

// WithBlobRetentionEpochs sets the blobRetentionEpochs value, used in kv store initialization.
func WithBlobRetentionEpochs(e primitives.Epoch) Option {
	return func(bn *BeaconNode) error {
		bn.blobRetentionEpochs = e
		return nil
	}
}
@@ -17,9 +17,9 @@ import (
func validAttesterSlashingForValIdx(t *testing.T, beaconState state.BeaconState, privs []bls.SecretKey, valIdx ...uint64) *ethpb.AttesterSlashing {
	var slashings []*ethpb.AttesterSlashing
	for _, idx := range valIdx {
		slashing, err := util.GenerateAttesterSlashingForValidator(beaconState, privs[idx], primitives.ValidatorIndex(idx))
		generatedSlashing, err := util.GenerateAttesterSlashingForValidator(beaconState, privs[idx], primitives.ValidatorIndex(idx))
		require.NoError(t, err)
		slashings = append(slashings, slashing)
		slashings = append(slashings, generatedSlashing.(*ethpb.AttesterSlashing))
	}
	var allSig1 []bls.Signature
	var allSig2 []bls.Signature
@@ -78,12 +78,14 @@ func TestPool_InsertAttesterSlashing(t *testing.T) {
	pendingSlashings := make([]*PendingAttesterSlashing, 20)
	slashings := make([]*ethpb.AttesterSlashing, 20)
	for i := 0; i < len(pendingSlashings); i++ {
		sl, err := util.GenerateAttesterSlashingForValidator(beaconState, privKeys[i], primitives.ValidatorIndex(i))
		generatedSl, err := util.GenerateAttesterSlashingForValidator(beaconState, privKeys[i], primitives.ValidatorIndex(i))
		require.NoError(t, err)
		pendingSlashings[i] = &PendingAttesterSlashing{
			attesterSlashing: sl,
			attesterSlashing: generatedSl,
			validatorToSlash: primitives.ValidatorIndex(i),
		}
		sl, ok := generatedSl.(*ethpb.AttesterSlashing)
		require.Equal(t, true, ok, "Attester slashing has the wrong type (expected %T, got %T)", &ethpb.AttesterSlashing{}, generatedSl)
		slashings[i] = sl
	}
	require.NoError(t, beaconState.SetSlot(params.BeaconConfig().SlotsPerEpoch))
@@ -303,12 +305,16 @@ func TestPool_InsertAttesterSlashing_SigFailsVerify_ClearPool(t *testing.T) {
	pendingSlashings := make([]*PendingAttesterSlashing, 2)
	slashings := make([]*ethpb.AttesterSlashing, 2)
	for i := 0; i < 2; i++ {
		sl, err := util.GenerateAttesterSlashingForValidator(beaconState, privKeys[i], primitives.ValidatorIndex(i))
		generatedSl, err := util.GenerateAttesterSlashingForValidator(beaconState, privKeys[i], primitives.ValidatorIndex(i))
		require.NoError(t, err)
		pendingSlashings[i] = &PendingAttesterSlashing{
			attesterSlashing: sl,
			attesterSlashing: generatedSl,
			validatorToSlash: primitives.ValidatorIndex(i),
		}
		sl, ok := generatedSl.(*ethpb.AttesterSlashing)
		if !ok {
			require.Equal(t, true, ok, "Attester slashing has the wrong type (expected %T, got %T)", &ethpb.AttesterSlashing{}, generatedSl)
		}
		slashings[i] = sl
	}
	// We mess up the signature of the second slashing.

@@ -7,6 +7,7 @@ go_library(
    "broadcaster.go",
    "config.go",
    "connection_gater.go",
    "custody.go",
    "dial_relay_node.go",
    "discovery.go",
    "doc.go",
@@ -46,6 +47,7 @@ go_library(
    "//beacon-chain/core/altair:go_default_library",
    "//beacon-chain/core/feed/state:go_default_library",
    "//beacon-chain/core/helpers:go_default_library",
    "//beacon-chain/core/peerdas:go_default_library",
    "//beacon-chain/core/time:go_default_library",
    "//beacon-chain/db:go_default_library",
    "//beacon-chain/p2p/encoder:go_default_library",
@@ -56,6 +58,7 @@ go_library(
    "//beacon-chain/startup:go_default_library",
    "//cmd/beacon-chain/flags:go_default_library",
    "//config/features:go_default_library",
    "//config/fieldparams:go_default_library",
    "//config/params:go_default_library",
    "//consensus-types/primitives:go_default_library",
    "//consensus-types/wrapper:go_default_library",
@@ -74,6 +77,8 @@ go_library(
    "//runtime/version:go_default_library",
    "//time:go_default_library",
    "//time/slots:go_default_library",
    "@com_github_btcsuite_btcd_btcec_v2//:go_default_library",
    "@com_github_ethereum_go_ethereum//crypto:go_default_library",
    "@com_github_ethereum_go_ethereum//p2p/discover:go_default_library",
    "@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
    "@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
@@ -136,9 +141,11 @@ go_test(
    flaky = True,
    tags = ["requires-network"],
    deps = [
        "//beacon-chain/blockchain/kzg:go_default_library",
        "//beacon-chain/blockchain/testing:go_default_library",
        "//beacon-chain/cache:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/peerdas:go_default_library",
        "//beacon-chain/core/signing:go_default_library",
        "//beacon-chain/db/testing:go_default_library",
        "//beacon-chain/p2p/encoder:go_default_library",
@@ -149,8 +156,10 @@ go_test(
        "//beacon-chain/p2p/types:go_default_library",
        "//beacon-chain/startup:go_default_library",
        "//cmd/beacon-chain/flags:go_default_library",
        "//config/features:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//consensus-types/wrapper:go_default_library",
        "//container/leaky-bucket:go_default_library",
@@ -168,6 +177,7 @@ go_test(
        "//testing/util:go_default_library",
        "//time:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_ethereum_c_kzg_4844//bindings/go:go_default_library",
        "@com_github_ethereum_go_ethereum//crypto:go_default_library",
        "@com_github_ethereum_go_ethereum//p2p/discover:go_default_library",
        "@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",

@@ -9,16 +9,18 @@ import (

	"github.com/pkg/errors"
	ssz "github.com/prysmaticlabs/fastssz"
	"github.com/sirupsen/logrus"
	"go.opencensus.io/trace"
	"google.golang.org/protobuf/proto"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/altair"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/crypto/hash"
	"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v5/time/slots"
	"github.com/sirupsen/logrus"
	"go.opencensus.io/trace"
	"google.golang.org/protobuf/proto"
)

// ErrMessageNotMapped occurs on a Broadcast attempt when a message has not been defined in the
@@ -96,7 +98,12 @@ func (s *Service) BroadcastSyncCommitteeMessage(ctx context.Context, subnet uint
	return nil
}

func (s *Service) internalBroadcastAttestation(ctx context.Context, subnet uint64, att ethpb.Att, forkDigest [4]byte) {
func (s *Service) internalBroadcastAttestation(
	ctx context.Context,
	subnet uint64,
	att ethpb.Att,
	forkDigest [fieldparams.VersionLength]byte,
) {
	_, span := trace.StartSpan(ctx, "p2p.internalBroadcastAttestation")
	defer span.End()
	ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.
@@ -142,7 +149,7 @@ func (s *Service) internalBroadcastAttestation(ctx context.Context, subnet uint6
	log.WithFields(logrus.Fields{
		"attestationSlot": att.GetData().Slot,
		"currentSlot":     currSlot,
	}).WithError(err).Warning("Attestation is too old to broadcast, discarding it")
	}).WithError(err).Debug("Attestation is too old to broadcast, discarding it")
	return
}

@@ -152,7 +159,7 @@ func (s *Service) internalBroadcastAttestation(ctx context.Context, subnet uint6
	}
}

func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage, forkDigest [4]byte) {
func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage, forkDigest [fieldparams.VersionLength]byte) {
	_, span := trace.StartSpan(ctx, "p2p.broadcastSyncCommittee")
	defer span.End()
	ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.
@@ -228,7 +235,12 @@ func (s *Service) BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.
	return nil
}

func (s *Service) internalBroadcastBlob(ctx context.Context, subnet uint64, blobSidecar *ethpb.BlobSidecar, forkDigest [4]byte) {
func (s *Service) internalBroadcastBlob(
	ctx context.Context,
	subnet uint64,
	blobSidecar *ethpb.BlobSidecar,
	forkDigest [fieldparams.VersionLength]byte,
) {
	_, span := trace.StartSpan(ctx, "p2p.internalBroadcastBlob")
	defer span.End()
	ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.
@@ -243,7 +255,7 @@ func (s *Service) internalBroadcastBlob(ctx context.Context, subnet uint64, blob
	s.subnetLocker(wrappedSubIdx).RUnlock()

	if !hasPeer {
		blobSidecarCommitteeBroadcastAttempts.Inc()
		blobSidecarBroadcastAttempts.Inc()
		if err := func() error {
			s.subnetLocker(wrappedSubIdx).Lock()
			defer s.subnetLocker(wrappedSubIdx).Unlock()
@@ -252,7 +264,7 @@ func (s *Service) internalBroadcastBlob(ctx context.Context, subnet uint64, blob
			return err
		}
		if ok {
			blobSidecarCommitteeBroadcasts.Inc()
			blobSidecarBroadcasts.Inc()
			return nil
		}
		return errors.New("failed to find peers for subnet")
@@ -268,6 +280,99 @@ func (s *Service) internalBroadcastBlob(ctx context.Context, subnet uint64, blob
	}
}

// BroadcastDataColumn broadcasts a data column sidecar to the p2p network. The message is broadcast
// on the current fork digest and on the given column subnet.
// TODO: Add tests
func (s *Service) BroadcastDataColumn(ctx context.Context, columnSubnet uint64, dataColumnSidecar *ethpb.DataColumnSidecar) error {
	// Add tracing to the function.
	ctx, span := trace.StartSpan(ctx, "p2p.BroadcastDataColumn")
	defer span.End()

	// Ensure the data column sidecar is not nil.
	if dataColumnSidecar == nil {
		return errors.Errorf("attempted to broadcast nil data column sidecar at subnet %d", columnSubnet)
	}

	// Retrieve the current fork digest.
	forkDigest, err := s.currentForkDigest()
	if err != nil {
		err := errors.Wrap(err, "current fork digest")
		tracing.AnnotateError(span, err)
		return err
	}

	// Non-blocking broadcast, with attempts to discover a column subnet peer if none are available.
	go s.internalBroadcastDataColumn(ctx, columnSubnet, dataColumnSidecar, forkDigest)

	return nil
}

func (s *Service) internalBroadcastDataColumn(
	ctx context.Context,
	columnSubnet uint64,
	dataColumnSidecar *ethpb.DataColumnSidecar,
	forkDigest [fieldparams.VersionLength]byte,
) {
	// Add tracing to the function.
	_, span := trace.StartSpan(ctx, "p2p.internalBroadcastDataColumn")
	defer span.End()

	// Increase the number of broadcast attempts.
	dataColumnSidecarBroadcastAttempts.Inc()

	// Clear parent context / deadline.
	ctx = trace.NewContext(context.Background(), span)

	// Define a one-slot length context timeout.
	oneSlot := time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
	ctx, cancel := context.WithTimeout(ctx, oneSlot)
	defer cancel()

	// Build the topic corresponding to this column subnet and this fork digest.
	topic := dataColumnSubnetToTopic(columnSubnet, forkDigest)

	// Compute the wrapped subnet index.
	wrappedSubIdx := columnSubnet + dataColumnSubnetVal

	// Check if we have peers with this subnet.
	hasPeer := func() bool {
		s.subnetLocker(wrappedSubIdx).RLock()
		defer s.subnetLocker(wrappedSubIdx).RUnlock()

		return s.hasPeerWithSubnet(topic)
	}()

	// If no peers are found, attempt to find peers with this subnet.
	if !hasPeer {
		if err := func() error {
			s.subnetLocker(wrappedSubIdx).Lock()
			defer s.subnetLocker(wrappedSubIdx).Unlock()

			ok, err := s.FindPeersWithSubnet(ctx, topic, columnSubnet, 1 /*threshold*/)
			if err != nil {
				return errors.Wrap(err, "find peers for subnet")
			}

			if ok {
				return nil
			}
			return errors.New("failed to find peers for subnet")
		}(); err != nil {
			log.WithError(err).Error("Failed to find peers")
			tracing.AnnotateError(span, err)
		}
	}

	// Broadcast the data column sidecar to the network.
	if err := s.broadcastObject(ctx, dataColumnSidecar, topic); err != nil {
		log.WithError(err).Error("Failed to broadcast data column sidecar")
		tracing.AnnotateError(span, err)
	}

	// Increase the number of successful broadcasts.
	dataColumnSidecarBroadcasts.Inc()
}

// method to broadcast messages to other peers in our gossip mesh.
func (s *Service) broadcastObject(ctx context.Context, obj ssz.Marshaler, topic string) error {
	ctx, span := trace.StartSpan(ctx, "p2p.broadcastObject")
@@ -297,14 +402,18 @@ func (s *Service) broadcastObject(ctx context.Context, obj ssz.Marshaler, topic
	return nil
}

func attestationToTopic(subnet uint64, forkDigest [4]byte) string {
func attestationToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
	return fmt.Sprintf(AttestationSubnetTopicFormat, forkDigest, subnet)
}

func syncCommitteeToTopic(subnet uint64, forkDigest [4]byte) string {
func syncCommitteeToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
	return fmt.Sprintf(SyncCommitteeSubnetTopicFormat, forkDigest, subnet)
}

func blobSubnetToTopic(subnet uint64, forkDigest [4]byte) string {
func blobSubnetToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
	return fmt.Sprintf(BlobSubnetTopicFormat, forkDigest, subnet)
}

func dataColumnSubnetToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
	return fmt.Sprintf(DataColumnSubnetTopicFormat, forkDigest, subnet)
}
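These four helpers differ only in the format string they feed to Sprintf. Assuming the data column format follows the same "/eth2/<digest>/<name>_<subnet>" shape as the other subnet topics (the exact constant is defined elsewhere in this package), a digest of 0x01020304 and subnet 5 would produce something like:

	digest := [fieldparams.VersionLength]byte{0x01, 0x02, 0x03, 0x04}
	topic := dataColumnSubnetToTopic(5, digest)
	// e.g. "/eth2/01020304/data_column_sidecar_5" if the format is
	// "/eth2/%x/data_column_sidecar_%d" (assumed here for illustration).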
@@ -9,15 +9,21 @@ import (
	"testing"
	"time"

	cKzg4844 "github.com/ethereum/c-kzg-4844/bindings/go"
	"github.com/ethereum/go-ethereum/p2p/discover"
	pubsub "github.com/libp2p/go-libp2p-pubsub"
	"github.com/libp2p/go-libp2p/core/host"
	"github.com/prysmaticlabs/go-bitfield"
	"google.golang.org/protobuf/proto"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/scorers"
	p2ptest "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/testing"
	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/wrapper"
	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
@@ -25,7 +31,6 @@ import (
	"github.com/prysmaticlabs/prysm/v5/testing/assert"
	"github.com/prysmaticlabs/prysm/v5/testing/require"
	"github.com/prysmaticlabs/prysm/v5/testing/util"
	"google.golang.org/protobuf/proto"
)

func TestService_Broadcast(t *testing.T) {
@@ -520,3 +525,70 @@ func TestService_BroadcastBlob(t *testing.T) {
	require.NoError(t, p.BroadcastBlob(ctx, subnet, blobSidecar))
	require.Equal(t, false, util.WaitTimeout(&wg, 1*time.Second), "Failed to receive pubsub within 1s")
}

func TestService_BroadcastDataColumn(t *testing.T) {
	require.NoError(t, kzg.Start())
	p1 := p2ptest.NewTestP2P(t)
	p2 := p2ptest.NewTestP2P(t)
	p1.Connect(p2)
	require.NotEqual(t, 0, len(p1.BHost.Network().Peers()), "No peers")

	p := &Service{
		host:                  p1.BHost,
		pubsub:                p1.PubSub(),
		joinedTopics:          map[string]*pubsub.Topic{},
		cfg:                   &Config{},
		genesisTime:           time.Now(),
		genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
		subnetsLock:           make(map[uint64]*sync.RWMutex),
		subnetsLockLock:       sync.Mutex{},
		peers: peers.NewStatus(context.Background(), &peers.StatusConfig{
			ScorerParams: &scorers.Config{},
		}),
	}

	b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockElectra())
	require.NoError(t, err)
	blobs := make([]cKzg4844.Blob, fieldparams.MaxBlobsPerBlock)
	sidecars, err := peerdas.DataColumnSidecars(b, blobs)
	require.NoError(t, err)

	sidecar := sidecars[0]
	subnet := uint64(0)
	topic := DataColumnSubnetTopicFormat
	GossipTypeMapping[reflect.TypeOf(sidecar)] = topic
	digest, err := p.currentForkDigest()
	require.NoError(t, err)
	topic = fmt.Sprintf(topic, digest, subnet)

	// External peer subscribes to the topic.
	topic += p.Encoding().ProtocolSuffix()
	sub, err := p2.SubscribeToTopic(topic)
	require.NoError(t, err)

	time.Sleep(50 * time.Millisecond) // libp2p fails without this delay...

	// Async listen for the pubsub, must be before the broadcast.
	var wg sync.WaitGroup
	wg.Add(1)
	go func(tt *testing.T) {
		defer wg.Done()
		ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
		defer cancel()

		msg, err := sub.Next(ctx)
		require.NoError(t, err)

		result := &ethpb.DataColumnSidecar{}
		require.NoError(t, p.Encoding().DecodeGossip(msg.Data, result))
		require.DeepEqual(t, result, sidecar)
	}(t)

	// Attempt to broadcast nil object should fail.
	ctx := context.Background()
	require.ErrorContains(t, "attempted to broadcast nil", p.BroadcastDataColumn(ctx, subnet, nil))

	// Broadcast to peers and wait.
	require.NoError(t, p.BroadcastDataColumn(ctx, subnet, sidecar))
	require.Equal(t, false, util.WaitTimeout(&wg, 1*time.Second), "Failed to receive pubsub within 1s")
}

beacon-chain/p2p/custody.go (new file, 81 lines)
@@ -0,0 +1,81 @@
package p2p

import (
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
	"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
	"github.com/prysmaticlabs/prysm/v5/config/params"
)

func (s *Service) GetValidCustodyPeers(peers []peer.ID) ([]peer.ID, error) {
	custodiedSubnetCount := params.BeaconConfig().CustodyRequirement
	if flags.Get().SubscribeToAllSubnets {
		custodiedSubnetCount = params.BeaconConfig().DataColumnSidecarSubnetCount
	}
	custodiedColumns, err := peerdas.CustodyColumns(s.NodeID(), custodiedSubnetCount)
	if err != nil {
		return nil, err
	}
	var validPeers []peer.ID
	for _, pid := range peers {
		remoteCount := s.CustodyCountFromRemotePeer(pid)

		nodeId, err := ConvertPeerIDToNodeID(pid)
		if err != nil {
			return nil, errors.Wrap(err, "convert peer ID to node ID")
		}
		remoteCustodiedColumns, err := peerdas.CustodyColumns(nodeId, remoteCount)
		if err != nil {
			return nil, errors.Wrap(err, "custody columns")
		}
		invalidPeer := false
		for c := range custodiedColumns {
			if !remoteCustodiedColumns[c] {
				invalidPeer = true
				break
			}
		}
		if invalidPeer {
			continue
		}
		copiedId := pid
		// Add the valid peer to the list.
		validPeers = append(validPeers, copiedId)
	}
	return validPeers, nil
}

func (s *Service) CustodyCountFromRemotePeer(pid peer.ID) uint64 {
	// By default, we assume the peer custodies the minimum number of subnets.
	peerCustodyCount := params.BeaconConfig().CustodyRequirement

	// Retrieve the ENR of the peer.
	peerRecord, err := s.peers.ENR(pid)
	if err != nil {
		log.WithError(err).WithField("peerID", pid).Error("Failed to retrieve ENR for peer")
		return peerCustodyCount
	}

	if peerRecord == nil {
		// This is the case for inbound peers, so we don't log an error here.
		log.WithField("peerID", pid).Debug("No ENR found for peer")
		return peerCustodyCount
	}

	// Load the `custody_subnet_count` entry.
	var csc CustodySubnetCount
	if err := peerRecord.Load(&csc); err != nil {
		log.WithField("peerID", pid).Error("Cannot load the custody_subnet_count from peer")
		return peerCustodyCount
	}

	log.WithFields(logrus.Fields{
		"peerID":       pid,
		"custodyCount": csc,
	}).Debug("Custody count read from peer's ENR")

	return uint64(csc)
}
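For the advertising side of the same key, createLocalNode (further down in this diff) writes the typed entry into the local record; both directions go through the ENRKey defined on CustodySubnetCount, roughly:

	// Writing: advertise how many subnets this node custodies.
	localNode.Set(CustodySubnetCount(params.BeaconConfig().CustodyRequirement))

	// Reading: decode a remote record through the same typed key.
	var csc CustodySubnetCount
	if err := record.Load(&csc); err != nil {
		// Fall back to the spec minimum, as CustodyCountFromRemotePeer does.
	}

Here `localNode` and `record` stand in for the *enode.LocalNode and *enr.Record values the service already holds.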
@@ -4,6 +4,7 @@ import (
	"bytes"
	"crypto/ecdsa"
	"net"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/p2p/discover"
@@ -14,7 +15,9 @@ import (
	ma "github.com/multiformats/go-multiaddr"
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/go-bitfield"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
	"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
	"github.com/prysmaticlabs/prysm/v5/config/features"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	ecdsaprysm "github.com/prysmaticlabs/prysm/v5/crypto/ecdsa"
@@ -40,15 +43,26 @@ const (
	udp6
)

type quicProtocol uint16
const (
	quickProtocolEnrKey      = "quic"
	custodySubnetCountEnrKey = "csc"
)

type (
	quicProtocol       uint16
	CustodySubnetCount uint64
)

// quicProtocol is the "quic" key, which holds the QUIC port of the node.
func (quicProtocol) ENRKey() string { return "quic" }
func (quicProtocol) ENRKey() string { return quickProtocolEnrKey }

// RefreshENR uses an epoch to refresh the enr entry for our node
// with the tracked committee ids for the epoch, allowing our node
// to be dynamically discoverable by others given our tracked committee ids.
func (s *Service) RefreshENR() {
// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/p2p-interface.md#the-discovery-domain-discv5
func (CustodySubnetCount) ENRKey() string { return custodySubnetCountEnrKey }

// RefreshPersistentSubnets checks that we are tracking our local persistent subnets for a variety of gossip topics.
// This routine checks for our attestation, sync committee and data column subnets and updates them if they have
// been rotated.
func (s *Service) RefreshPersistentSubnets() {
	// Return early if discv5 isn't running.
	if s.dv5Listener == nil || !s.isInitialized() {
		return
@@ -58,6 +72,10 @@ func (s *Service) RefreshENR() {
		log.WithError(err).Error("Could not initialize persistent subnets")
		return
	}
	if err := initializePersistentColumnSubnets(s.dv5Listener.LocalNode().ID()); err != nil {
		log.WithError(err).Error("Could not initialize persistent column subnets")
		return
	}

	bitV := bitfield.NewBitvector64()
	committees := cache.SubnetIDs.GetAllSubnets()
@@ -106,7 +124,7 @@ func (s *Service) RefreshENR() {

// listenForNewNodes watches for new nodes in the network and adds them to the peerstore.
func (s *Service) listenForNewNodes() {
	iterator := enode.Filter(s.dv5Listener.RandomNodes(), s.filterPeer)
	iterator := filterNodes(s.ctx, s.dv5Listener.RandomNodes(), s.filterPeer)
	defer iterator.Close()

	for {
@@ -122,29 +140,41 @@ func (s *Service) listenForNewNodes() {
			time.Sleep(pollingPeriod)
			continue
		}

		if exists := iterator.Next(); !exists {
			break
		}

		node := iterator.Node()
		peerInfo, _, err := convertToAddrInfo(node)
		if err != nil {
			log.WithError(err).Error("Could not convert to peer info")
		wantedCount := s.wantedPeerDials()
		if wantedCount == 0 {
			log.Trace("Not looking for peers, at peer limit")
			time.Sleep(pollingPeriod)
			continue
		}

		if peerInfo == nil {
			continue
		// Restrict dials if a limit is applied.
		if flags.MaxDialIsActive() {
			wantedCount = min(wantedCount, flags.Get().MaxConcurrentDials)
		}

		// Make sure that peer is not dialed too often, for each connection attempt there's a backoff period.
		s.Peers().RandomizeBackOff(peerInfo.ID)
		go func(info *peer.AddrInfo) {
			if err := s.connectWithPeer(s.ctx, *info); err != nil {
				log.WithError(err).Tracef("Could not connect with peer %s", info.String())
		wantedNodes := enode.ReadNodes(iterator, wantedCount)
		wg := new(sync.WaitGroup)
		for i := 0; i < len(wantedNodes); i++ {
			node := wantedNodes[i]
			peerInfo, _, err := convertToAddrInfo(node)
			if err != nil {
				log.WithError(err).Error("Could not convert to peer info")
				continue
			}
		}(peerInfo)

			if peerInfo == nil {
				continue
			}

			// Make sure that peer is not dialed too often, for each connection attempt there's a backoff period.
			s.Peers().RandomizeBackOff(peerInfo.ID)
			wg.Add(1)
			go func(info *peer.AddrInfo) {
				if err := s.connectWithPeer(s.ctx, *info); err != nil {
					log.WithError(err).Tracef("Could not connect with peer %s", info.String())
				}
				wg.Done()
			}(peerInfo)
		}
		wg.Wait()
	}
}

@@ -244,6 +274,14 @@ func (s *Service) createLocalNode(
		localNode.Set(quicEntry)
	}

	if features.Get().EnablePeerDAS {
		custodySubnetEntry := CustodySubnetCount(params.BeaconConfig().CustodyRequirement)
		if flags.Get().SubscribeToAllSubnets {
			custodySubnetEntry = CustodySubnetCount(params.BeaconConfig().DataColumnSidecarSubnetCount)
		}
		localNode.Set(custodySubnetEntry)
	}

	localNode.SetFallbackIP(ipAddr)
	localNode.SetFallbackUDP(udpPort)

@@ -332,6 +370,8 @@ func (s *Service) filterPeer(node *enode.Node) bool {

	// Ignore nodes that are already active.
	if s.peers.IsActive(peerData.ID) {
		// Constantly update the ENR for known peers.
		s.peers.UpdateENR(node.Record(), peerData.ID)
		return false
	}

@@ -384,6 +424,17 @@ func (s *Service) isPeerAtLimit(inbound bool) bool {
	return activePeers >= maxPeers || numOfConns >= maxPeers
}

func (s *Service) wantedPeerDials() int {
	maxPeers := int(s.cfg.MaxPeers)

	activePeers := len(s.Peers().Active())
	wantedCount := 0
	if maxPeers > activePeers {
		wantedCount = maxPeers - activePeers
	}
	return wantedCount
}
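The helper is plain surplus arithmetic: with MaxPeers = 70 and 64 active peers it returns 6, and it never goes negative when the node is over its limit. The caller in listenForNewNodes then optionally clamps the result to MaxConcurrentDials before reading that many nodes from the discovery iterator.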
|
||||
|
||||
// PeersFromStringAddrs converts peer raw ENRs into multiaddrs for p2p.
|
||||
func PeersFromStringAddrs(addrs []string) ([]ma.Multiaddr, error) {
|
||||
var allAddrs []ma.Multiaddr
|
||||
|
||||
@@ -20,6 +20,8 @@ import (
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
|
||||
mock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers"
|
||||
@@ -27,6 +29,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/scorers"
|
||||
testp2p "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/testing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/features"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/wrapper"
|
||||
leakybucket "github.com/prysmaticlabs/prysm/v5/container/leaky-bucket"
|
||||
@@ -37,7 +40,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
var discoveryWaitTime = 1 * time.Second
|
||||
@@ -131,6 +133,11 @@ func TestStartDiscV5_DiscoverAllPeers(t *testing.T) {
}

func TestCreateLocalNode(t *testing.T) {
	resetFn := features.InitWithReset(&features.Flags{
		EnablePeerDAS: true,
	})
	defer resetFn()

	testCases := []struct {
		name string
		cfg  *Config
@@ -227,6 +234,11 @@ func TestCreateLocalNode(t *testing.T) {
			syncSubnets := new([]byte)
			require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(syncCommsSubnetEnrKey, syncSubnets)))
			require.DeepSSZEqual(t, []byte{0}, *syncSubnets)

			// Check custody_subnet_count config.
			custodySubnetCount := new(uint64)
			require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(custodySubnetCountEnrKey, custodySubnetCount)))
			require.Equal(t, uint64(1), *custodySubnetCount)
		})
	}
}
@@ -601,7 +613,7 @@ func TestRefreshENR_ForkBoundaries(t *testing.T) {
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			s := tt.svcBuilder(t)
			s.RefreshENR()
			s.RefreshPersistentSubnets()
			tt.postValidation(t, s)
			s.dv5Listener.Close()
			cache.SubnetIDs.EmptyAllCaches()

@@ -121,7 +121,7 @@ func (s *Service) topicScoreParams(topic string) (*pubsub.TopicScoreParams, erro
		return defaultAttesterSlashingTopicParams(), nil
	case strings.Contains(topic, GossipBlsToExecutionChangeMessage):
		return defaultBlsToExecutionChangeTopicParams(), nil
	case strings.Contains(topic, GossipBlobSidecarMessage):
	case strings.Contains(topic, GossipBlobSidecarMessage), strings.Contains(topic, GossipDataColumnSidecarMessage):
		// TODO(Deneb): Using the default block scoring for now; this should be updated.
		return defaultBlockTopicParams(), nil
	default:

@@ -22,6 +22,7 @@ var gossipTopicMappings = map[string]proto.Message{
	SyncCommitteeSubnetTopicFormat:        &ethpb.SyncCommitteeMessage{},
	BlsToExecutionChangeSubnetTopicFormat: &ethpb.SignedBLSToExecutionChange{},
	BlobSubnetTopicFormat:                 &ethpb.BlobSidecar{},
	DataColumnSubnetTopicFormat:           &ethpb.DataColumnSidecar{},
}

// GossipTopicMappings is a function to return the assigned data type

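The new DataColumnSubnetTopicFormat key follows the same pattern as the other entries in this map: a format string with a fork digest placeholder and a subnet index placeholder, rendered into a concrete gossip topic at subscription time, with the mapped proto.Message telling the decoder what to deserialize. A rough sketch of the rendering step (the format string below is an assumption mirroring the blob topic naming, not the actual constant):

	package main

	import "fmt"

	// Assumed format string for illustration; the real constant lives
	// alongside the other topic formats in the p2p package.
	const dataColumnSubnetTopicFormat = "/eth2/%x/data_column_sidecar_%d"

	func main() {
		forkDigest := [4]byte{0x01, 0x02, 0x03, 0x04}
		subnet := uint64(7)
		topic := fmt.Sprintf(dataColumnSubnetTopicFormat, forkDigest, subnet)
		fmt.Println(topic) // /eth2/01020304/data_column_sidecar_7
	}
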
@@ -3,6 +3,7 @@ package p2p
import (
	"context"

	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/enr"
	pubsub "github.com/libp2p/go-libp2p-pubsub"
	"github.com/libp2p/go-libp2p/core/connmgr"
@@ -28,6 +29,12 @@ type P2P interface {
	ConnectionHandler
	PeersProvider
	MetadataProvider
	CustodyHandler
}

// Acceser combines the broadcasting and peer management abilities.
type Acceser interface {
	Broadcaster
	PeerManager
}

// Broadcaster broadcasts messages to peers over the p2p pubsub protocol.
@@ -36,6 +43,7 @@ type Broadcaster interface {
	BroadcastAttestation(ctx context.Context, subnet uint64, att ethpb.Att) error
	BroadcastSyncCommitteeMessage(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage) error
	BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.BlobSidecar) error
	BroadcastDataColumn(ctx context.Context, columnSubnet uint64, dataColumnSidecar *ethpb.DataColumnSidecar) error
}

// SetStreamHandler configures p2p to handle streams of a certain topic ID.
@@ -81,8 +89,9 @@ type PeerManager interface {
	PeerID() peer.ID
	Host() host.Host
	ENR() *enr.Record
	NodeID() enode.ID
	DiscoveryAddresses() ([]multiaddr.Multiaddr, error)
	RefreshENR()
	RefreshPersistentSubnets()
	FindPeersWithSubnet(ctx context.Context, topic string, subIndex uint64, threshold int) (bool, error)
	AddPingMethod(reqFunc func(ctx context.Context, id peer.ID) error)
}
@@ -102,3 +111,8 @@ type MetadataProvider interface {
	Metadata() metadata.Metadata
	MetadataSeq() uint64
}

// CustodyHandler exposes data column custody information about remote peers.
type CustodyHandler interface {
	CustodyCountFromRemotePeer(peer.ID) uint64
	GetValidCustodyPeers([]peer.ID) ([]peer.ID, error)
}

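The CustodyHandler interface added above is how PeerDAS consumers reason about which remote peers custody which data column subnets. As an illustration of combining its two methods (the filtering policy here is hypothetical, not Prysm code):

	import "github.com/libp2p/go-libp2p/core/peer"

	// pickCustodyPeers is a hypothetical helper: it keeps only candidates
	// that are valid custody peers and that custody at least min subnets.
	func pickCustodyPeers(h CustodyHandler, candidates []peer.ID, min uint64) ([]peer.ID, error) {
		valid, err := h.GetValidCustodyPeers(candidates)
		if err != nil {
			return nil, err
		}
		picked := make([]peer.ID, 0, len(valid))
		for _, pid := range valid {
			if h.CustodyCountFromRemotePeer(pid) >= min {
				picked = append(picked, pid)
			}
		}
		return picked, nil
	}
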
@@ -2,10 +2,14 @@ package p2p

import (
	"context"
	"runtime"
	"time"

	"github.com/ethereum/go-ethereum/p2p/enode"
)

// backOffCounter is how many consecutive non-viable nodes we tolerate
// before backing off.
const backOffCounter = 50

// filterNodes wraps an iterator such that Next only returns nodes for which
// the 'check' function returns true. This custom implementation also
// checks for context deadlines so that in the event the parent context has
@@ -24,13 +28,21 @@ type filterIter struct {
// Next looks for the next valid node according to our
// filter criteria.
func (f *filterIter) Next() bool {
	lookupCounter := 0
	for f.Iterator.Next() {
		// Back off if we keep receiving non-viable peers, so we do
		// not perform lookups needlessly.
		if lookupCounter > backOffCounter {
			lookupCounter = 0
			runtime.Gosched()
			time.Sleep(pollingPeriod)
		}
		if f.Context.Err() != nil {
			return false
		}
		if f.check(f.Node()) {
			return true
		}
		lookupCounter++
	}
	return false
}

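The backoff added to Next is deliberately coarse: after backOffCounter (50) consecutive non-viable nodes, it yields the scheduler and sleeps for pollingPeriod before continuing, so a discovery table full of unusable peers cannot busy-spin a core. A sketch of how such a filtered iterator might be consumed (the filterNodes wiring and the dial placeholder are assumptions based on the comments above):

	// drainIterator is a hypothetical consumer of the filtered iterator.
	func (s *Service) drainIterator(ctx context.Context, raw enode.Iterator) {
		// filterNodes is the wrapper described above; assumed signature:
		// filterNodes(ctx, iterator, check) enode.Iterator.
		it := filterNodes(ctx, raw, s.filterPeer)
		defer it.Close()
		for it.Next() {
			node := it.Node()
			_ = node // Dial the node here; Next guarantees it passed s.filterPeer.
		}
	}
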
@@ -60,17 +60,25 @@ var (
		"the subnet. The beacon node increments this counter when the broadcast is blocked " +
			"until a subnet peer can be found.",
	})
	blobSidecarCommitteeBroadcasts = promauto.NewCounter(prometheus.CounterOpts{
	blobSidecarBroadcasts = promauto.NewCounter(prometheus.CounterOpts{
		Name: "p2p_blob_sidecar_committee_broadcasts",
		Help: "The number of blob sidecar committee messages that were broadcast with no peer on.",
		Help: "The number of blob sidecar messages that were broadcast with no peer on.",
	})
	syncCommitteeBroadcastAttempts = promauto.NewCounter(prometheus.CounterOpts{
		Name: "p2p_sync_committee_subnet_attempted_broadcasts",
		Help: "The number of sync committee messages that were attempted to be broadcast.",
	})
	blobSidecarCommitteeBroadcastAttempts = promauto.NewCounter(prometheus.CounterOpts{
	blobSidecarBroadcastAttempts = promauto.NewCounter(prometheus.CounterOpts{
		Name: "p2p_blob_sidecar_committee_attempted_broadcasts",
		Help: "The number of blob sidecar committee messages that were attempted to be broadcast.",
		Help: "The number of blob sidecar messages that were attempted to be broadcast.",
	})
	dataColumnSidecarBroadcasts = promauto.NewCounter(prometheus.CounterOpts{
		Name: "p2p_data_column_sidecar_broadcasts",
		Help: "The number of data column sidecar messages that were broadcast.",
	})
	dataColumnSidecarBroadcastAttempts = promauto.NewCounter(prometheus.CounterOpts{
		Name: "p2p_data_column_sidecar_attempted_broadcasts",
		Help: "The number of data column sidecar messages that were attempted to be broadcast.",
	})

// Gossip Tracer Metrics

@@ -159,6 +159,14 @@ func (p *Status) Add(record *enr.Record, pid peer.ID, address ma.Multiaddr, dire
	p.addIpToTracker(pid)
}

// UpdateENR updates the stored ENR of the given peer, if we know that peer.
func (p *Status) UpdateENR(record *enr.Record, pid peer.ID) {
	p.store.Lock()
	defer p.store.Unlock()
	if peerData, ok := p.store.PeerData(pid); ok {
		peerData.Enr = record
	}
}

// Address returns the multiaddress of the given remote peer.
// This will error if the peer does not exist.
func (p *Status) Address(pid peer.ID) (ma.Multiaddr, error) {

@@ -165,14 +165,14 @@ func (s *Service) pubsubOptions() []pubsub.Option {
func parsePeersEnr(peers []string) ([]peer.AddrInfo, error) {
	addrs, err := PeersFromStringAddrs(peers)
	if err != nil {
		return nil, fmt.Errorf("Cannot convert peers raw ENRs into multiaddresses: %v", err)
		return nil, fmt.Errorf("cannot convert peers raw ENRs into multiaddresses: %v", err)
	}
	if len(addrs) == 0 {
		return nil, fmt.Errorf("Converting peers raw ENRs into multiaddresses resulted in an empty list")
		return nil, fmt.Errorf("converting peers raw ENRs into multiaddresses resulted in an empty list")
	}
	directAddrInfos, err := peer.AddrInfosFromP2pAddrs(addrs...)
	if err != nil {
		return nil, fmt.Errorf("Cannot convert peers multiaddresses into AddrInfos: %v", err)
		return nil, fmt.Errorf("cannot convert peers multiaddresses into AddrInfos: %v", err)
	}
	return directAddrInfos, nil
}

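A style note on this hunk: Go convention, enforced by linters such as staticcheck (rule ST1005), is that error strings start lowercase and carry no trailing punctuation, because callers routinely wrap them into longer messages; that is presumably the motivation for lowercasing these three errors.
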
@@ -90,7 +90,7 @@ func TestService_CanSubscribe(t *testing.T) {
	formatting := []interface{}{digest}

	// Special case for subnet topics, which have a second formatting placeholder.
	if topic == AttestationSubnetTopicFormat || topic == SyncCommitteeSubnetTopicFormat || topic == BlobSubnetTopicFormat {
	if topic == AttestationSubnetTopicFormat || topic == SyncCommitteeSubnetTopicFormat || topic == BlobSubnetTopicFormat || topic == DataColumnSubnetTopicFormat {
		formatting = append(formatting, 0 /* some subnet ID */)
	}

@@ -43,6 +43,12 @@ const BlobSidecarsByRangeName = "/blob_sidecars_by_range"
// BlobSidecarsByRootName is the name for the BlobSidecarsByRoot v1 message topic.
const BlobSidecarsByRootName = "/blob_sidecars_by_root"

// DataColumnSidecarsByRootName is the name for the DataColumnSidecarsByRoot v1 message topic.
const DataColumnSidecarsByRootName = "/data_column_sidecars_by_root"

// DataColumnSidecarsByRangeName is the name for the DataColumnSidecarsByRange v1 message topic.
const DataColumnSidecarsByRangeName = "/data_column_sidecars_by_range"

const (
	// V1 RPC Topics
	// RPCStatusTopicV1 defines the v1 topic for the status rpc method.
@@ -65,6 +71,12 @@ const (
	// RPCBlobSidecarsByRootTopicV1 is a topic for requesting blob sidecars by their block root. New in deneb.
	// /eth2/beacon_chain/req/blob_sidecars_by_root/1/
	RPCBlobSidecarsByRootTopicV1 = protocolPrefix + BlobSidecarsByRootName + SchemaVersionV1
	// RPCDataColumnSidecarsByRootTopicV1 is a topic for requesting data column sidecars by their block root. New in PeerDAS.
	// /eth2/beacon_chain/req/data_column_sidecars_by_root/1/
	RPCDataColumnSidecarsByRootTopicV1 = protocolPrefix + DataColumnSidecarsByRootName + SchemaVersionV1
	// RPCDataColumnSidecarsByRangeTopicV1 is a topic for requesting data column sidecars by their slot. New in PeerDAS.
	// /eth2/beacon_chain/req/data_column_sidecars_by_range/1/
	RPCDataColumnSidecarsByRangeTopicV1 = protocolPrefix + DataColumnSidecarsByRangeName + SchemaVersionV1

	// V2 RPC Topics
	// RPCBlocksByRangeTopicV2 defines the v2 topic for the blocks by range rpc method.

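Spelling out the composition used by these constants, with the prefix and version values implied by the comments above (stated here as assumptions), and noting that an encoding suffix such as "/ssz_snappy" is appended separately when the protocol ID goes on the wire:

	package main

	import "fmt"

	// Values implied by the comments above; assumptions for illustration.
	const (
		protocolPrefix               = "/eth2/beacon_chain/req"
		dataColumnSidecarsByRootName = "/data_column_sidecars_by_root"
		schemaVersionV1              = "/1"
	)

	func main() {
		topic := protocolPrefix + dataColumnSidecarsByRootName + schemaVersionV1
		fmt.Println(topic) // /eth2/beacon_chain/req/data_column_sidecars_by_root/1
	}
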
@@ -101,6 +113,10 @@ var RPCTopicMappings = map[string]interface{}{
	RPCBlobSidecarsByRangeTopicV1: new(pb.BlobSidecarsByRangeRequest),
	// BlobSidecarsByRoot v1 Message
	RPCBlobSidecarsByRootTopicV1: new(p2ptypes.BlobSidecarsByRootReq),
	// DataColumnSidecarsByRange v1 Message
	RPCDataColumnSidecarsByRangeTopicV1: new(pb.DataColumnSidecarsByRangeRequest),
	// DataColumnSidecarsByRoot v1 Message (note: this reuses the blob by-root request type)
	RPCDataColumnSidecarsByRootTopicV1: new(p2ptypes.BlobSidecarsByRootReq),
}

// Maps all registered protocol prefixes.
@@ -119,6 +135,8 @@ var messageMapping = map[string]bool{
	MetadataMessageName:           true,
	BlobSidecarsByRangeName:       true,
	BlobSidecarsByRootName:        true,
	DataColumnSidecarsByRootName:  true,
	DataColumnSidecarsByRangeName: true,
}

// Maps all the RPC messages which are to be updated in altair.

@@ -226,7 +226,7 @@ func (s *Service) Start() {
	}
	// Initialize metadata according to the
	// current epoch.
	s.RefreshENR()
	s.RefreshPersistentSubnets()

	// Periodic functions.
	async.RunEvery(s.ctx, params.BeaconConfig().TtfbTimeoutDuration(), func() {
@@ -234,7 +234,7 @@ func (s *Service) Start() {
	})
	async.RunEvery(s.ctx, 30*time.Minute, s.Peers().Prune)
	async.RunEvery(s.ctx, time.Duration(params.BeaconConfig().RespTimeout)*time.Second, s.updateMetrics)
	async.RunEvery(s.ctx, refreshRate, s.RefreshENR)
	async.RunEvery(s.ctx, refreshRate, s.RefreshPersistentSubnets)
	async.RunEvery(s.ctx, 1*time.Minute, func() {
		inboundQUICCount := len(s.peers.InboundConnectedWithProtocol(peers.QUIC))
		inboundTCPCount := len(s.peers.InboundConnectedWithProtocol(peers.TCP))

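async.RunEvery, used throughout this block, schedules a callback at a fixed interval until the service context is cancelled. For readers unfamiliar with the helper, a minimal ticker-based sketch of the same behavior (not Prysm's implementation):

	package main

	import (
		"context"
		"fmt"
		"time"
	)

	// runEvery invokes f every period until ctx is cancelled.
	func runEvery(ctx context.Context, period time.Duration, f func()) {
		go func() {
			ticker := time.NewTicker(period)
			defer ticker.Stop()
			for {
				select {
				case <-ticker.C:
					f()
				case <-ctx.Done():
					return
				}
			}
		}()
	}

	func main() {
		ctx, cancel := context.WithTimeout(context.Background(), time.Second)
		defer cancel()
		runEvery(ctx, 200*time.Millisecond, func() { fmt.Println("tick") })
		<-ctx.Done()
	}
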
@@ -358,6 +358,15 @@ func (s *Service) ENR() *enr.Record {
	return s.dv5Listener.Self().Record()
}

// NodeID returns the local node's discovery node ID.
func (s *Service) NodeID() enode.ID {
	if s.dv5Listener == nil {
		return [32]byte{}
	}
	return s.dv5Listener.Self().ID()
}

// DiscoveryAddresses represents our ENR addresses as multiaddresses.
func (s *Service) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
	if s.dv5Listener == nil {