Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-11 06:18:05 -05:00)

Compare commits: 58 commits (uncdr-patc ... nil-check-)
Commits (SHA1):
bf6e35dea3
2060f876b1
e226237590
314ef8e1bd
a428664eb5
7c3d89b25f
f4c004085b
c0e207eb47
7003d97061
957d853a2f
29695d8906
a8ba5e7dd8
0ad190c1fb
2e056b38da
966de59478
c8919bd233
baa2e2e340
7765d275d1
2fd7c926ed
21fc9853ee
642de455d5
d371cd6e89
c8a7f6f0bd
d7a7fa025d
6b6ac4c3fb
3b8651cbf4
9d29d2f4bf
3d205a387c
269b382229
80ebbcf03e
32ebe94515
d2f4a8cc7c
982de94428
a41025e5ec
3ce26ae7e2
1acedd5b01
a0679c70d3
7f53700306
3b69f7a196
72562dcf3a
cc23b8311a
cbe54fe3f9
1b6adca3ca
1651649e5a
56187edb98
ecad5bbffc
407182387b
ad0b0b503d
58f4ba758c
64f64f06bf
e70055733f
36e4f49af0
d98428dec4
00b92e01d3
ca5adbf7e4
a083b7a0a5
dd5995b665
6903d52dde
8 .github/workflows/go.yml (vendored)

@@ -67,13 +67,7 @@ jobs:
args: --print-issued-lines --sort-results --no-config --timeout=10m --disable-all -E deadcode -E errcheck -E gosimple --skip-files=validator/web/site_data.go --skip-dirs=proto --go=1.17
version: v1.45.2
skip-go-installation: true
- name: Horusec Downloader
run: curl -fsSL https://raw.githubusercontent.com/ZupIT/horusec/main/deployments/scripts/install.sh | bash -s latest

- name: Horusec Security Scanner
run: horusec start -p="./" -e="true" -i="**/crypto/bls/herumi/**, **/**/*_test.go, **/third_party/afl/**, **/crypto/keystore/key.go"

build:
name: Build
runs-on: ubuntu-latest
22 .github/workflows/horusec.yaml (vendored, new file)

@@ -0,0 +1,22 @@
name: Horusec Security Scan

on:
  schedule:
    # Runs cron at 16.00 UTC on
    - cron: '0 0 * * SUN'

jobs:
  Horusec_Scan:
    name: horusec-Scan
    runs-on: ubuntu-latest
    if: github.ref == 'refs/heads/develop'
    steps:
      - name: Check out code
        uses: actions/checkout@v2
        with: # Required when commit authors is enabled
          fetch-depth: 0

      - name: Running Security Scan
        run: |
          curl -fsSL https://raw.githubusercontent.com/ZupIT/horusec/main/deployments/scripts/install.sh | bash -s latest
          horusec start -t="10000" -p="./" -e="true" -i="**/crypto/bls/herumi/**, **/**/*_test.go, **/third_party/afl/**, **/crypto/keystore/key.go"
13 BUILD.bazel

@@ -115,18 +115,19 @@ nogo(
"@org_golang_x_tools//go/analysis/passes/assign:go_default_library",
"@org_golang_x_tools//go/analysis/passes/inspect:go_default_library",
"@org_golang_x_tools//go/analysis/passes/asmdecl:go_default_library",
"//tools/analyzers/maligned:go_default_library",
"//tools/analyzers/comparesame:go_default_library",
"//tools/analyzers/cryptorand:go_default_library",
"//tools/analyzers/errcheck:go_default_library",
"//tools/analyzers/featureconfig:go_default_library",
"//tools/analyzers/comparesame:go_default_library",
"//tools/analyzers/shadowpredecl:go_default_library",
"//tools/analyzers/nop:go_default_library",
"//tools/analyzers/slicedirect:go_default_library",
"//tools/analyzers/interfacechecker:go_default_library",
"//tools/analyzers/gocognit:go_default_library",
"//tools/analyzers/ineffassign:go_default_library",
"//tools/analyzers/interfacechecker:go_default_library",
"//tools/analyzers/maligned:go_default_library",
"//tools/analyzers/nop:go_default_library",
"//tools/analyzers/properpermissions:go_default_library",
"//tools/analyzers/recursivelock:go_default_library",
"//tools/analyzers/shadowpredecl:go_default_library",
"//tools/analyzers/slicedirect:go_default_library",
"//tools/analyzers/uintcast:go_default_library",
] + select({
# nogo checks that fail with coverage enabled.
@@ -176,7 +176,7 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe
go_rules_dependencies()

go_register_toolchains(
go_version = "1.17.6",
go_version = "1.17.9",
nogo = "@//:nogo",
)
@@ -6,7 +6,6 @@ import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/url"

@@ -369,7 +368,7 @@ type WeakSubjectivityData struct {
}

func non200Err(response *http.Response) error {
bodyBytes, err := ioutil.ReadAll(response.Body)
bodyBytes, err := io.ReadAll(response.Body)
var body string
if err != nil {
body = "(Unable to read response body.)"
@@ -3,6 +3,7 @@ package apimiddleware

import (
"encoding/base64"
"fmt"
"math/big"
"reflect"
"strconv"
"strings"

@@ -73,6 +74,10 @@ func processField(s interface{}, processors []fieldProcessor) error {
}

func hexToBase64Processor(v reflect.Value) error {
if v.String() == "0x" {
v.SetString("")
return nil
}
b, err := bytesutil.FromHexString(v.String())
if err != nil {
return err

@@ -83,6 +88,8 @@ func hexToBase64Processor(v reflect.Value) error {

func base64ToHexProcessor(v reflect.Value) error {
if v.String() == "" {
// Empty hex values are represented as "0x".
v.SetString("0x")
return nil
}
b, err := base64.StdEncoding.DecodeString(v.String())

@@ -93,6 +100,55 @@ func base64ToHexProcessor(v reflect.Value) error {
return nil
}

func base64ToUint256Processor(v reflect.Value) error {
if v.String() == "" {
return nil
}
littleEndian, err := base64.StdEncoding.DecodeString(v.String())
if err != nil {
return err
}
if len(littleEndian) != 32 {
return errors.New("invalid length for Uint256")
}

// Integers are stored as little-endian, but
// big.Int expects big-endian. So we need to reverse
// the byte order before decoding.
var bigEndian [32]byte
for i := 0; i < len(littleEndian); i++ {
bigEndian[i] = littleEndian[len(littleEndian)-1-i]
}
var uint256 big.Int
uint256.SetBytes(bigEndian[:])
v.SetString(uint256.String())
return nil
}

func uint256ToBase64Processor(v reflect.Value) error {
if v.String() == "" {
return nil
}
uint256, ok := new(big.Int).SetString(v.String(), 10)
if !ok {
return fmt.Errorf("could not parse Uint256")
}
bigEndian := uint256.Bytes()
if len(bigEndian) > 32 {
return fmt.Errorf("number too big for Uint256")
}

// Integers are stored as little-endian, but
// big.Int gives big-endian. So we need to reverse
// the byte order before encoding.
var littleEndian [32]byte
for i := 0; i < len(bigEndian); i++ {
littleEndian[i] = bigEndian[len(bigEndian)-1-i]
}
v.SetString(base64.StdEncoding.EncodeToString(littleEndian[:]))
return nil
}

func enumToLowercaseProcessor(v reflect.Value) error {
v.SetString(strings.ToLower(v.String()))
return nil
@@ -4,7 +4,6 @@ import (
"bytes"
"encoding/json"
"io"
"io/ioutil"
"net"
"net/http"
"strconv"

@@ -38,6 +37,10 @@ func ProcessRequestContainerFields(requestContainer interface{}) ErrorJson {
tag: "hex",
f: hexToBase64Processor,
},
{
tag: "uint256",
f: uint256ToBase64Processor,
},
}); err != nil {
return InternalServerErrorWithMessage(err, "could not process request data")
}

@@ -52,7 +55,7 @@ func SetRequestBodyToRequestContainer(requestContainer interface{}, req *http.Re
return InternalServerErrorWithMessage(err, "could not marshal request")
}
// Set the body to the new JSON.
req.Body = ioutil.NopCloser(bytes.NewReader(j))
req.Body = io.NopCloser(bytes.NewReader(j))
req.Header.Set("Content-Length", strconv.Itoa(len(j)))
req.ContentLength = int64(len(j))
return nil

@@ -93,7 +96,7 @@ func (m *ApiProxyMiddleware) ProxyRequest(req *http.Request) (*http.Response, Er

// ReadGrpcResponseBody reads the body from the grpc-gateway's response.
func ReadGrpcResponseBody(r io.Reader) ([]byte, ErrorJson) {
body, err := ioutil.ReadAll(r)
body, err := io.ReadAll(r)
if err != nil {
return nil, InternalServerErrorWithMessage(err, "could not read response body")
}

@@ -154,6 +157,10 @@ func ProcessMiddlewareResponseFields(responseContainer interface{}) ErrorJson {
tag: "time",
f: timeToUnixProcessor,
},
{
tag: "uint256",
f: base64ToUint256Processor,
},
}); err != nil {
return InternalServerErrorWithMessage(err, "could not process response data")
}

@@ -195,7 +202,7 @@ func WriteMiddlewareResponseHeadersAndBody(grpcResp *http.Response, responseJson
} else {
w.WriteHeader(grpcResp.StatusCode)
}
if _, err := io.Copy(w, ioutil.NopCloser(bytes.NewReader(responseJson))); err != nil {
if _, err := io.Copy(w, io.NopCloser(bytes.NewReader(responseJson))); err != nil {
return InternalServerErrorWithMessage(err, "could not write response message")
}
} else {

@@ -249,7 +256,7 @@ func WriteError(w http.ResponseWriter, errJson ErrorJson, responseHeader http.He
w.Header().Set("Content-Length", strconv.Itoa(len(j)))
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(errJson.StatusCode())
if _, err := io.Copy(w, ioutil.NopCloser(bytes.NewReader(j))); err != nil {
if _, err := io.Copy(w, io.NopCloser(bytes.NewReader(j))); err != nil {
log.WithError(err).Error("Could not write error message")
}
}
@@ -15,30 +15,40 @@ import (
|
||||
)
|
||||
|
||||
type testRequestContainer struct {
|
||||
TestString string
|
||||
TestHexString string `hex:"true"`
|
||||
TestString string
|
||||
TestHexString string `hex:"true"`
|
||||
TestEmptyHexString string `hex:"true"`
|
||||
TestUint256String string `uint256:"true"`
|
||||
}
|
||||
|
||||
func defaultRequestContainer() *testRequestContainer {
|
||||
return &testRequestContainer{
|
||||
TestString: "test string",
|
||||
TestHexString: "0x666F6F", // hex encoding of "foo"
|
||||
TestString: "test string",
|
||||
TestHexString: "0x666F6F", // hex encoding of "foo"
|
||||
TestEmptyHexString: "0x",
|
||||
TestUint256String: "4196",
|
||||
}
|
||||
}
|
||||
|
||||
type testResponseContainer struct {
|
||||
TestString string
|
||||
TestHex string `hex:"true"`
|
||||
TestEnum string `enum:"true"`
|
||||
TestTime string `time:"true"`
|
||||
TestString string
|
||||
TestHex string `hex:"true"`
|
||||
TestEmptyHex string `hex:"true"`
|
||||
TestUint256 string `uint256:"true"`
|
||||
TestEnum string `enum:"true"`
|
||||
TestTime string `time:"true"`
|
||||
}
|
||||
|
||||
func defaultResponseContainer() *testResponseContainer {
|
||||
return &testResponseContainer{
|
||||
TestString: "test string",
|
||||
TestHex: "Zm9v", // base64 encoding of "foo"
|
||||
TestEnum: "Test Enum",
|
||||
TestTime: "2006-01-02T15:04:05Z",
|
||||
TestString: "test string",
|
||||
TestHex: "Zm9v", // base64 encoding of "foo"
|
||||
TestEmptyHex: "",
|
||||
TestEnum: "Test Enum",
|
||||
TestTime: "2006-01-02T15:04:05Z",
|
||||
|
||||
// base64 encoding of 4196 in little-endian
|
||||
TestUint256: "ZBAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=",
|
||||
}
|
||||
}
|
||||
|
||||
@@ -106,6 +116,8 @@ func TestProcessRequestContainerFields(t *testing.T) {
|
||||
errJson := ProcessRequestContainerFields(container)
|
||||
require.Equal(t, true, errJson == nil)
|
||||
assert.Equal(t, "Zm9v", container.TestHexString)
|
||||
assert.Equal(t, "", container.TestEmptyHexString)
|
||||
assert.Equal(t, "ZBAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=", container.TestUint256String)
|
||||
})
|
||||
|
||||
t.Run("error", func(t *testing.T) {
|
||||
@@ -128,8 +140,8 @@ func TestSetRequestBodyToRequestContainer(t *testing.T) {
|
||||
contentLengthHeader, ok := request.Header["Content-Length"]
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, 1, len(contentLengthHeader), "wrong number of header values")
|
||||
assert.Equal(t, "55", contentLengthHeader[0])
|
||||
assert.Equal(t, int64(55), request.ContentLength)
|
||||
assert.Equal(t, "108", contentLengthHeader[0])
|
||||
assert.Equal(t, int64(108), request.ContentLength)
|
||||
}
|
||||
|
||||
func TestPrepareRequestForProxying(t *testing.T) {
|
||||
@@ -234,6 +246,8 @@ func TestProcessMiddlewareResponseFields(t *testing.T) {
|
||||
errJson := ProcessMiddlewareResponseFields(container)
|
||||
require.Equal(t, true, errJson == nil)
|
||||
assert.Equal(t, "0x666f6f", container.TestHex)
|
||||
assert.Equal(t, "0x", container.TestEmptyHex)
|
||||
assert.Equal(t, "4196", container.TestUint256)
|
||||
assert.Equal(t, "test enum", container.TestEnum)
|
||||
assert.Equal(t, "1136214245", container.TestTime)
|
||||
})
|
||||
@@ -278,7 +292,7 @@ func TestWriteMiddlewareResponseHeadersAndBody(t *testing.T) {
|
||||
v, ok = writer.Header()["Content-Length"]
|
||||
require.Equal(t, true, ok, "header not found")
|
||||
require.Equal(t, 1, len(v), "wrong number of header values")
|
||||
assert.Equal(t, "102", v[0])
|
||||
assert.Equal(t, "181", v[0])
|
||||
assert.Equal(t, 204, writer.Code)
|
||||
assert.DeepEqual(t, responseJson, writer.Body.Bytes())
|
||||
})
|
||||
|
||||
@@ -5,13 +5,13 @@ go_library(
srcs = [
"chain_info.go",
"error.go",
"execution_engine.go",
"head.go",
"head_sync_committee_info.go",
"init_sync_process_block.go",
"log.go",
"metrics.go",
"new_slot.go",
"optimistic_sync.go",
"options.go",
"pow_block.go",
"process_attestation.go",

@@ -66,6 +66,7 @@ go_library(
"//config/params:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//monitoring/tracing:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/eth/v1:go_default_library",

@@ -102,13 +103,13 @@ go_test(
"blockchain_test.go",
"chain_info_test.go",
"checktags_test.go",
"execution_engine_test.go",
"head_sync_committee_info_test.go",
"head_test.go",
"init_test.go",
"log_test.go",
"metrics_test.go",
"mock_test.go",
"optimistic_sync_test.go",
"pow_block_test.go",
"process_attestation_test.go",
"process_block_test.go",
@@ -1,7 +1,7 @@
package blockchain

import (
"io/ioutil"
"io"
"testing"

"github.com/sirupsen/logrus"

@@ -9,7 +9,7 @@ import (

func TestMain(m *testing.M) {
logrus.SetLevel(logrus.DebugLevel)
logrus.SetOutput(ioutil.Discard)
logrus.SetOutput(io.Discard)

m.Run()
}
@@ -50,7 +50,6 @@ type HeadFetcher interface {
HeadBlock(ctx context.Context) (block.SignedBeaconBlock, error)
HeadState(ctx context.Context) (state.BeaconState, error)
HeadValidatorsIndices(ctx context.Context, epoch types.Epoch) ([]types.ValidatorIndex, error)
HeadSeed(ctx context.Context, epoch types.Epoch) ([32]byte, error)
HeadGenesisValidatorsRoot() [32]byte
HeadETH1Data() *ethpb.Eth1Data
HeadPublicKeyToValidatorIndex(pubKey [fieldparams.BLSPubkeyLength]byte) (types.ValidatorIndex, bool)

@@ -60,18 +59,17 @@ type HeadFetcher interface {
IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error)
HeadSyncCommitteeFetcher
HeadDomainFetcher
ForkChoicer() forkchoice.ForkChoicer
}

// ForkFetcher retrieves the current fork information of the Ethereum beacon chain.
type ForkFetcher interface {
ForkChoicer() forkchoice.ForkChoicer
CurrentFork() *ethpb.Fork
}

// CanonicalFetcher retrieves the current chain's canonical information.
type CanonicalFetcher interface {
IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, error)
VerifyBlkDescendant(ctx context.Context, blockRoot [32]byte) error
}

// FinalizationFetcher defines a common interface for methods in blockchain service which

@@ -80,6 +78,7 @@ type FinalizationFetcher interface {
FinalizedCheckpt() *ethpb.Checkpoint
CurrentJustifiedCheckpt() *ethpb.Checkpoint
PreviousJustifiedCheckpt() *ethpb.Checkpoint
VerifyFinalizedBlkDescendant(ctx context.Context, blockRoot [32]byte) error
}

// FinalizedCheckpt returns the latest finalized checkpoint from chain store.

@@ -205,18 +204,6 @@ func (s *Service) HeadValidatorsIndices(ctx context.Context, epoch types.Epoch)
return helpers.ActiveValidatorIndices(ctx, s.headState(ctx), epoch)
}

// HeadSeed returns the seed from the head view of a given epoch.
func (s *Service) HeadSeed(ctx context.Context, epoch types.Epoch) ([32]byte, error) {
s.headLock.RLock()
defer s.headLock.RUnlock()

if !s.hasHeadState() {
return [32]byte{}, nil
}

return helpers.Seed(s.headState(ctx), epoch, params.BeaconConfig().DomainBeaconAttester)
}

// HeadGenesisValidatorsRoot returns genesis validators root of the head state.
func (s *Service) HeadGenesisValidatorsRoot() [32]byte {
s.headLock.RLock()

@@ -273,13 +260,13 @@ func (s *Service) CurrentFork() *ethpb.Fork {

// IsCanonical returns true if the input block root is part of the canonical chain.
func (s *Service) IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, error) {
// If the block has been finalized, the block will always be part of the canonical chain.
if s.cfg.BeaconDB.IsFinalizedBlock(ctx, blockRoot) {
return true, nil
// If the block has not been finalized, check fork choice store to see if the block is canonical
if s.cfg.ForkChoiceStore.HasNode(blockRoot) {
return s.cfg.ForkChoiceStore.IsCanonical(blockRoot), nil
}

// If the block has not been finalized, check fork choice store to see if the block is canonical
return s.cfg.ForkChoiceStore.IsCanonical(blockRoot), nil
// If the block has been finalized, the block will always be part of the canonical chain.
return s.cfg.BeaconDB.IsFinalizedBlock(ctx, blockRoot), nil
}

// ChainHeads returns all possible chain heads (leaves of fork choice tree).
@@ -6,7 +6,6 @@ import (
|
||||
"time"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
@@ -261,23 +260,6 @@ func TestService_HeadValidatorsIndices(t *testing.T) {
|
||||
require.Equal(t, 10, len(indices))
|
||||
}
|
||||
|
||||
func TestService_HeadSeed(t *testing.T) {
|
||||
s, _ := util.DeterministicGenesisState(t, 1)
|
||||
c := &Service{}
|
||||
seed, err := helpers.Seed(s, 0, params.BeaconConfig().DomainBeaconAttester)
|
||||
require.NoError(t, err)
|
||||
|
||||
c.head = &head{}
|
||||
root, err := c.HeadSeed(context.Background(), 0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, [32]byte{}, root)
|
||||
|
||||
c.head = &head{state: s}
|
||||
root, err = c.HeadSeed(context.Background(), 0)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, seed, root)
|
||||
}
|
||||
|
||||
func TestService_HeadGenesisValidatorsRoot(t *testing.T) {
|
||||
s, _ := util.DeterministicGenesisState(t, 1)
|
||||
c := &Service{}
|
||||
|
||||
@@ -24,6 +24,8 @@ import (
"go.opencensus.io/trace"
)

var ErrUndefinedExecutionEngineError = errors.New("received an undefined ee error")

// notifyForkchoiceUpdate signals execution engine the fork choice updates. Execution engine should:
// 1. Re-organizes the execution payload chain and corresponding state to make head_block_hash the head.
// 2. Applies finality to the execution state: it irreversibly persists the chain of all execution payloads and corresponding state, up to and including finalized_block_hash.

@@ -83,16 +85,18 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, headState state.Be
if err != nil {
switch err {
case powchain.ErrAcceptedSyncingPayloadStatus:
forkchoiceUpdatedOptimisticNodeCount.Inc()
log.WithFields(logrus.Fields{
"headSlot": headBlk.Slot(),
"headHash": fmt.Sprintf("%#x", bytesutil.Trunc(headPayload.BlockHash)),
"finalizedHash": fmt.Sprintf("%#x", bytesutil.Trunc(finalizedHash)),
"headSlot": headBlk.Slot(),
"headPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(headPayload.BlockHash)),
"finalizedPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(finalizedHash)),
}).Info("Called fork choice updated with optimistic block")
return payloadID, nil
default:
return nil, errors.Wrap(err, "could not notify forkchoice update from execution engine")
return nil, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
}
}
forkchoiceUpdatedValidNodeCount.Inc()
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, headRoot); err != nil {
return nil, errors.Wrap(err, "could not set block to valid")
}

@@ -106,8 +110,8 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, headState state.Be

// notifyForkchoiceUpdate signals execution engine on a new payload.
// It returns true if the EL has returned VALID for the block
func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion, postStateVersion int,
preStateHeader, postStateHeader *ethpb.ExecutionPayloadHeader, blk block.SignedBeaconBlock) (bool, error) {
func (s *Service) notifyNewPayload(ctx context.Context, postStateVersion int,
postStateHeader *ethpb.ExecutionPayloadHeader, blk block.SignedBeaconBlock) (bool, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.notifyNewPayload")
defer span.End()

@@ -132,46 +136,34 @@ func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion, postSta
return false, errors.Wrap(err, "could not get execution payload")
}
lastValidHash, err := s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload)
if err != nil {
switch err {
case powchain.ErrAcceptedSyncingPayloadStatus:
log.WithFields(logrus.Fields{
"slot": blk.Block().Slot(),
"blockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash)),
}).Info("Called new payload with optimistic block")
return false, nil
case powchain.ErrInvalidPayloadStatus:
root, err := blk.Block().HashTreeRoot()
if err != nil {
return false, err
}
invalidRoots, err := s.ForkChoicer().SetOptimisticToInvalid(ctx, root, bytesutil.ToBytes32(lastValidHash))
if err != nil {
return false, err
}
if err := s.removeInvalidBlockAndState(ctx, invalidRoots); err != nil {
return false, err
}
return false, errors.New("could not validate an INVALID payload from execution engine")
default:
return false, errors.Wrap(err, "could not validate execution payload from execution engine")
}
}

// During the transition event, the transition block should be verified for sanity.
if blocks.IsPreBellatrixVersion(preStateVersion) {
// Handle case where pre-state is Altair but block contains payload.
// To reach here, the block must have contained a valid payload.
return true, s.validateMergeBlock(ctx, blk)
}
atTransition, err := blocks.IsMergeTransitionBlockUsingPreStatePayloadHeader(preStateHeader, body)
if err != nil {
return true, errors.Wrap(err, "could not check if merge block is terminal")
}
if !atTransition {
switch err {
case nil:
newPayloadValidNodeCount.Inc()
return true, nil
case powchain.ErrAcceptedSyncingPayloadStatus:
newPayloadOptimisticNodeCount.Inc()
log.WithFields(logrus.Fields{
"slot": blk.Block().Slot(),
"payloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash)),
}).Info("Called new payload with optimistic block")
return false, nil
case powchain.ErrInvalidPayloadStatus:
newPayloadInvalidNodeCount.Inc()
root, err := blk.Block().HashTreeRoot()
if err != nil {
return false, err
}
invalidRoots, err := s.ForkChoicer().SetOptimisticToInvalid(ctx, root, bytesutil.ToBytes32(lastValidHash))
if err != nil {
return false, err
}
if err := s.removeInvalidBlockAndState(ctx, invalidRoots); err != nil {
return false, err
}
return false, errors.New("could not validate an INVALID payload from execution engine")
default:
return false, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
}
return true, s.validateMergeBlock(ctx, blk)
}

// optimisticCandidateBlock returns true if this block can be optimistically synced.
@@ -22,7 +22,6 @@ import (
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/testing/util"
|
||||
@@ -159,7 +158,7 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
|
||||
}(),
|
||||
newForkchoiceErr: powchain.ErrInvalidPayloadStatus,
|
||||
finalizedRoot: bellatrixBlkRoot,
|
||||
errString: "could not notify forkchoice update from execution engine: payload status is INVALID",
|
||||
errString: ErrUndefinedExecutionEngineError.Error(),
|
||||
},
|
||||
}
|
||||
|
||||
@@ -229,7 +228,6 @@ func Test_NotifyNewPayload(t *testing.T) {
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
preState state.BeaconState
|
||||
postState state.BeaconState
|
||||
isValidPayload bool
|
||||
blk block.SignedBeaconBlock
|
||||
@@ -239,26 +237,22 @@ func Test_NotifyNewPayload(t *testing.T) {
|
||||
{
|
||||
name: "phase 0 post state",
|
||||
postState: phase0State,
|
||||
preState: phase0State,
|
||||
isValidPayload: true,
|
||||
},
|
||||
{
|
||||
name: "altair post state",
|
||||
postState: altairState,
|
||||
preState: altairState,
|
||||
isValidPayload: true,
|
||||
},
|
||||
{
|
||||
name: "nil beacon block",
|
||||
postState: bellatrixState,
|
||||
preState: bellatrixState,
|
||||
errString: "signed beacon block can't be nil",
|
||||
isValidPayload: false,
|
||||
},
|
||||
{
|
||||
name: "new payload with optimistic block",
|
||||
postState: bellatrixState,
|
||||
preState: bellatrixState,
|
||||
blk: bellatrixBlk,
|
||||
newPayloadErr: powchain.ErrAcceptedSyncingPayloadStatus,
|
||||
isValidPayload: false,
|
||||
@@ -266,7 +260,6 @@ func Test_NotifyNewPayload(t *testing.T) {
|
||||
{
|
||||
name: "new payload with invalid block",
|
||||
postState: bellatrixState,
|
||||
preState: bellatrixState,
|
||||
blk: bellatrixBlk,
|
||||
newPayloadErr: powchain.ErrInvalidPayloadStatus,
|
||||
errString: "could not validate an INVALID payload from execution engine",
|
||||
@@ -275,14 +268,12 @@ func Test_NotifyNewPayload(t *testing.T) {
|
||||
{
|
||||
name: "altair pre state, altair block",
|
||||
postState: bellatrixState,
|
||||
preState: altairState,
|
||||
blk: altairBlk,
|
||||
isValidPayload: true,
|
||||
},
|
||||
{
|
||||
name: "altair pre state, happy case",
|
||||
postState: bellatrixState,
|
||||
preState: altairState,
|
||||
blk: func() block.SignedBeaconBlock {
|
||||
blk := ðpb.SignedBeaconBlockBellatrix{
|
||||
Block: ðpb.BeaconBlockBellatrix{
|
||||
@@ -299,18 +290,9 @@ func Test_NotifyNewPayload(t *testing.T) {
|
||||
}(),
|
||||
isValidPayload: true,
|
||||
},
|
||||
{
|
||||
name: "could not get merge block",
|
||||
postState: bellatrixState,
|
||||
preState: bellatrixState,
|
||||
blk: bellatrixBlk,
|
||||
errString: "could not get merge block parent hash and total difficulty",
|
||||
isValidPayload: false,
|
||||
},
|
||||
{
|
||||
name: "not at merge transition",
|
||||
postState: bellatrixState,
|
||||
preState: bellatrixState,
|
||||
blk: func() block.SignedBeaconBlock {
|
||||
blk := ðpb.SignedBeaconBlockBellatrix{
|
||||
Block: ðpb.BeaconBlockBellatrix{
|
||||
@@ -334,18 +316,9 @@ func Test_NotifyNewPayload(t *testing.T) {
|
||||
}(),
|
||||
isValidPayload: true,
|
||||
},
|
||||
{
|
||||
name: "could not get merge block",
|
||||
postState: bellatrixState,
|
||||
preState: bellatrixState,
|
||||
blk: bellatrixBlk,
|
||||
errString: "could not get merge block parent hash and total difficulty",
|
||||
isValidPayload: false,
|
||||
},
|
||||
{
|
||||
name: "happy case",
|
||||
postState: bellatrixState,
|
||||
preState: bellatrixState,
|
||||
blk: func() block.SignedBeaconBlock {
|
||||
blk := ðpb.SignedBeaconBlockBellatrix{
|
||||
Block: ðpb.BeaconBlockBellatrix{
|
||||
@@ -362,6 +335,26 @@ func Test_NotifyNewPayload(t *testing.T) {
|
||||
}(),
|
||||
isValidPayload: true,
|
||||
},
|
||||
{
|
||||
name: "undefined error from ee",
|
||||
postState: bellatrixState,
|
||||
blk: func() block.SignedBeaconBlock {
|
||||
blk := ðpb.SignedBeaconBlockBellatrix{
|
||||
Block: ðpb.BeaconBlockBellatrix{
|
||||
Body: ðpb.BeaconBlockBodyBellatrix{
|
||||
ExecutionPayload: &v1.ExecutionPayload{
|
||||
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
b, err := wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
return b
|
||||
}(),
|
||||
newPayloadErr: ErrUndefinedExecutionEngineError,
|
||||
errString: ErrUndefinedExecutionEngineError.Error(),
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
@@ -375,16 +368,11 @@ func Test_NotifyNewPayload(t *testing.T) {
|
||||
TotalDifficulty: "0x1",
|
||||
}
|
||||
service.cfg.ExecutionEngineCaller = e
|
||||
var payload *ethpb.ExecutionPayloadHeader
|
||||
if tt.preState.Version() == version.Bellatrix {
|
||||
payload, err = tt.preState.LatestExecutionPayloadHeader()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
root := [32]byte{'a'}
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 0, root, root, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
postVersion, postHeader, err := getStateVersionAndPayload(tt.postState)
|
||||
require.NoError(t, err)
|
||||
isValidPayload, err := service.notifyNewPayload(ctx, tt.preState.Version(), postVersion, payload, postHeader, tt.blk)
|
||||
isValidPayload, err := service.notifyNewPayload(ctx, postVersion, postHeader, tt.blk)
|
||||
if tt.errString != "" {
|
||||
require.ErrorContains(t, tt.errString, err)
|
||||
} else {
|
||||
@@ -431,11 +419,9 @@ func Test_NotifyNewPayload_SetOptimisticToValid(t *testing.T) {
|
||||
TotalDifficulty: "0x1",
|
||||
}
|
||||
service.cfg.ExecutionEngineCaller = e
|
||||
payload, err := bellatrixState.LatestExecutionPayloadHeader()
|
||||
require.NoError(t, err)
|
||||
postVersion, postHeader, err := getStateVersionAndPayload(bellatrixState)
|
||||
require.NoError(t, err)
|
||||
validated, err := service.notifyNewPayload(ctx, bellatrixState.Version(), postVersion, payload, postHeader, bellatrixBlk)
|
||||
validated, err := service.notifyNewPayload(ctx, postVersion, postHeader, bellatrixBlk)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, validated)
|
||||
}
|
||||
@@ -138,6 +138,26 @@ var (
Name: "state_balance_cache_miss",
Help: "Count the number of state balance cache hits.",
})
newPayloadValidNodeCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "new_payload_valid_node_count",
Help: "Count the number of valid nodes after newPayload EE call",
})
newPayloadOptimisticNodeCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "new_payload_optimistic_node_count",
Help: "Count the number of optimistic nodes after newPayload EE call",
})
newPayloadInvalidNodeCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "new_payload_invalid_node_count",
Help: "Count the number of invalid nodes after newPayload EE call",
})
forkchoiceUpdatedValidNodeCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "forkchoice_updated_valid_node_count",
Help: "Count the number of valid nodes after forkchoiceUpdated EE call",
})
forkchoiceUpdatedOptimisticNodeCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "forkchoice_updated_optimistic_node_count",
Help: "Count the number of optimistic nodes after forkchoiceUpdated EE call",
})
)

// reportSlotMetrics reports slot related metrics.
@@ -113,11 +113,15 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
if err != nil {
return err
}
isValidPayload, err := s.notifyNewPayload(ctx, preStateVersion, postStateVersion, preStateHeader, postStateHeader, signed)
isValidPayload, err := s.notifyNewPayload(ctx, postStateVersion, postStateHeader, signed)
if err != nil {
return errors.Wrap(err, "could not verify new payload")
}
if !isValidPayload {
if isValidPayload {
if err := s.validateMergeTransitionBlock(ctx, preStateVersion, preStateHeader, signed); err != nil {
return err
}
} else {
candidate, err := s.optimisticCandidateBlock(ctx, b)
if err != nil {
return errors.Wrap(err, "could not check if block is optimistic candidate")

@@ -127,8 +131,8 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
}
}

if err := s.savePostStateInfo(ctx, blockRoot, signed, postState, false /* reg sync */); err != nil {
return err
if err := s.insertBlockAndAttestationsToForkChoiceStore(ctx, signed.Block(), blockRoot, postState); err != nil {
return errors.Wrapf(err, "could not insert block %d to fork choice store", signed.Block().Slot())
}
if isValidPayload {
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, blockRoot); err != nil {

@@ -148,6 +152,9 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
return err
}

if err := s.savePostStateInfo(ctx, blockRoot, signed, postState); err != nil {
return err
}
// If slasher is configured, forward the attestations in the block via
// an event feed for processing.
if features.Get().EnableSlasher {

@@ -403,17 +410,18 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []block.SignedBeaconBlo

// blocks have been verified, add them to forkchoice and call the engine
for i, b := range blks {
s.saveInitSyncBlock(blockRoots[i], b)
isValidPayload, err := s.notifyNewPayload(ctx,
preVersionAndHeaders[i].version,
postVersionAndHeaders[i].version,
preVersionAndHeaders[i].header,
postVersionAndHeaders[i].header, b)

if err != nil {
return nil, nil, err
}
if !isValidPayload {
if isValidPayload {
if err := s.validateMergeTransitionBlock(ctx, preVersionAndHeaders[i].version,
preVersionAndHeaders[i].header, b); err != nil {
return nil, nil, err
}
} else {
candidate, err := s.optimisticCandidateBlock(ctx, b.Block())
if err != nil {
return nil, nil, errors.Wrap(err, "could not check if block is optimistic candidate")

@@ -431,10 +439,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []block.SignedBeaconBlo
return nil, nil, errors.Wrap(err, "could not set optimistic block to valid")
}
}

if _, err := s.notifyForkchoiceUpdate(ctx, preState, b.Block(), blockRoots[i], bytesutil.ToBytes32(fCheckpoints[i].Root)); err != nil {
return nil, nil, err
}
s.saveInitSyncBlock(blockRoots[i], b)
}

for r, st := range boundaries {

@@ -448,6 +453,10 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []block.SignedBeaconBlo
if err := s.cfg.StateGen.SaveState(ctx, lastBR, preState); err != nil {
return nil, nil, err
}
f := fCheckpoints[len(fCheckpoints)-1]
if _, err := s.notifyForkchoiceUpdate(ctx, preState, lastB.Block(), lastBR, bytesutil.ToBytes32(f.Root)); err != nil {
return nil, nil, err
}
if err := s.saveHeadNoDB(ctx, lastB, lastBR, preState); err != nil {
return nil, nil, err
}

@@ -598,20 +607,15 @@ func getBlockPayloadHash(blk block.BeaconBlock) ([32]byte, error) {

// This saves post state info to DB or cache. This also saves post state info to fork choice store.
// Post state info consists of processed block and state. Do not call this method unless the block and state are verified.
func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b block.SignedBeaconBlock, st state.BeaconState, initSync bool) error {
func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b block.SignedBeaconBlock, st state.BeaconState) error {
ctx, span := trace.StartSpan(ctx, "blockChain.savePostStateInfo")
defer span.End()
if initSync {
s.saveInitSyncBlock(r, b)
} else if err := s.cfg.BeaconDB.SaveBlock(ctx, b); err != nil {
if err := s.cfg.BeaconDB.SaveBlock(ctx, b); err != nil {
return errors.Wrapf(err, "could not save block from slot %d", b.Block().Slot())
}
if err := s.cfg.StateGen.SaveState(ctx, r, st); err != nil {
return errors.Wrap(err, "could not save state")
}
if err := s.insertBlockAndAttestationsToForkChoiceStore(ctx, b.Block(), r, st); err != nil {
return errors.Wrapf(err, "could not insert block %d to fork choice store", b.Block().Slot())
}
return nil
}

@@ -644,3 +648,36 @@ func (s *Service) pruneCanonicalAttsFromPool(ctx context.Context, r [32]byte, b
}
return nil
}

// validateMergeTransitionBlock validates the merge transition block.
func (s *Service) validateMergeTransitionBlock(ctx context.Context, stateVersion int, stateHeader *ethpb.ExecutionPayloadHeader, blk block.SignedBeaconBlock) error {
// Skip validation if block is older than Bellatrix.
if blocks.IsPreBellatrixVersion(blk.Block().Version()) {
return nil
}

// Skip validation if block has an empty payload.
payload, err := blk.Block().Body().ExecutionPayload()
if err != nil {
return err
}
if blocks.IsEmptyPayload(payload) {
return nil
}

// Handle case where pre-state is Altair but block contains payload.
// To reach here, the block must have contained a valid payload.
if blocks.IsPreBellatrixVersion(stateVersion) {
return s.validateMergeBlock(ctx, blk)
}

// Skip validation if the block is not a merge transition block.
atTransition, err := blocks.IsMergeTransitionBlockUsingPreStatePayloadHeader(stateHeader, blk.Block().Body())
if err != nil {
return errors.Wrap(err, "could not check if merge block is terminal")
}
if !atTransition {
return nil
}
return s.validateMergeBlock(ctx, blk)
}
@@ -11,6 +11,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
mathutil "github.com/prysmaticlabs/prysm/math"
"github.com/prysmaticlabs/prysm/monitoring/tracing"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"

@@ -69,7 +70,7 @@ func (s *Service) verifyBlkPreState(ctx context.Context, b block.BeaconBlock) er
return errors.New("could not reconstruct parent state")
}

if err := s.VerifyBlkDescendant(ctx, bytesutil.ToBytes32(b.ParentRoot())); err != nil {
if err := s.VerifyFinalizedBlkDescendant(ctx, bytesutil.ToBytes32(b.ParentRoot())); err != nil {
return err
}

@@ -86,10 +87,10 @@ func (s *Service) verifyBlkPreState(ctx context.Context, b block.BeaconBlock) er
return nil
}

// VerifyBlkDescendant validates input block root is a descendant of the
// VerifyFinalizedBlkDescendant validates if input block root is a descendant of the
// current finalized block root.
func (s *Service) VerifyBlkDescendant(ctx context.Context, root [32]byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.VerifyBlkDescendant")
func (s *Service) VerifyFinalizedBlkDescendant(ctx context.Context, root [32]byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.VerifyFinalizedBlkDescendant")
defer span.End()
finalized := s.store.FinalizedCheckpt()
if finalized == nil {

@@ -402,10 +403,17 @@ func (s *Service) insertFinalizedDeposits(ctx context.Context, fRoot [32]byte) e
// We update the cache up to the last deposit index in the finalized block's state.
// We can be confident that these deposits will be included in some block
// because the Eth1 follow distance makes such long-range reorgs extremely unlikely.
eth1DepositIndex := int64(finalizedState.Eth1Data().DepositCount - 1)
s.cfg.DepositCache.InsertFinalizedDeposits(ctx, eth1DepositIndex)
eth1DepositIndex, err := mathutil.Int(finalizedState.Eth1DepositIndex())
if err != nil {
return errors.Wrap(err, "could not cast eth1 deposit index")
}
// The deposit index in the state is always the index of the next deposit
// to be included(rather than the last one to be processed). This was most likely
// done as the state cannot represent signed integers.
eth1DepositIndex -= 1
s.cfg.DepositCache.InsertFinalizedDeposits(ctx, int64(eth1DepositIndex))
// Deposit proofs are only used during state transition and can be safely removed to save space.
if err = s.cfg.DepositCache.PruneProofs(ctx, eth1DepositIndex); err != nil {
if err = s.cfg.DepositCache.PruneProofs(ctx, int64(eth1DepositIndex)); err != nil {
return errors.Wrap(err, "could not prune deposit proofs")
}
return nil
@@ -11,6 +11,7 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
|
||||
@@ -19,6 +20,7 @@ import (
|
||||
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
|
||||
mockPOW "github.com/prysmaticlabs/prysm/beacon-chain/powchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
|
||||
@@ -26,6 +28,7 @@ import (
|
||||
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
|
||||
@@ -1340,7 +1343,7 @@ func TestVerifyBlkDescendant(t *testing.T) {
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
service.store.SetFinalizedCheckpt(ðpb.Checkpoint{Root: tt.args.finalizedRoot[:]})
|
||||
err = service.VerifyBlkDescendant(ctx, tt.args.parentRoot)
|
||||
err = service.VerifyFinalizedBlkDescendant(ctx, tt.args.parentRoot)
|
||||
if tt.wantedErr != "" {
|
||||
assert.ErrorContains(t, tt.wantedErr, err)
|
||||
} else if err != nil {
|
||||
@@ -1516,6 +1519,7 @@ func TestInsertFinalizedDeposits(t *testing.T) {
|
||||
service.store.SetFinalizedCheckpt(ðpb.Checkpoint{Root: gRoot[:]})
|
||||
gs = gs.Copy()
|
||||
assert.NoError(t, gs.SetEth1Data(ðpb.Eth1Data{DepositCount: 10}))
|
||||
assert.NoError(t, gs.SetEth1DepositIndex(8))
|
||||
assert.NoError(t, service.cfg.StateGen.SaveState(ctx, [32]byte{'m', 'o', 'c', 'k'}, gs))
|
||||
zeroSig := [96]byte{}
|
||||
for i := uint64(0); i < uint64(4*params.BeaconConfig().SlotsPerEpoch); i++ {
|
||||
@@ -1529,8 +1533,64 @@ func TestInsertFinalizedDeposits(t *testing.T) {
|
||||
}
|
||||
assert.NoError(t, service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'}))
|
||||
fDeposits := depositCache.FinalizedDeposits(ctx)
|
||||
assert.Equal(t, 9, int(fDeposits.MerkleTrieIndex), "Finalized deposits not inserted correctly")
|
||||
deps := depositCache.AllDeposits(ctx, big.NewInt(109))
|
||||
assert.Equal(t, 7, int(fDeposits.MerkleTrieIndex), "Finalized deposits not inserted correctly")
|
||||
deps := depositCache.AllDeposits(ctx, big.NewInt(107))
|
||||
for _, d := range deps {
|
||||
assert.DeepEqual(t, [][]byte(nil), d.Proof, "Proofs are not empty")
|
||||
}
|
||||
}
|
||||
|
||||
func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
opts := testServiceOptsWithDB(t)
|
||||
depositCache, err := depositcache.New()
|
||||
require.NoError(t, err)
|
||||
opts = append(opts, WithDepositCache(depositCache))
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
gs, _ := util.DeterministicGenesisState(t, 32)
|
||||
require.NoError(t, service.saveGenesisData(ctx, gs))
|
||||
gBlk, err := service.cfg.BeaconDB.GenesisBlock(ctx)
|
||||
require.NoError(t, err)
|
||||
gRoot, err := gBlk.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
service.store.SetFinalizedCheckpt(ðpb.Checkpoint{Root: gRoot[:]})
|
||||
gs = gs.Copy()
|
||||
assert.NoError(t, gs.SetEth1Data(ðpb.Eth1Data{DepositCount: 7}))
|
||||
assert.NoError(t, gs.SetEth1DepositIndex(6))
|
||||
assert.NoError(t, service.cfg.StateGen.SaveState(ctx, [32]byte{'m', 'o', 'c', 'k'}, gs))
|
||||
gs2 := gs.Copy()
|
||||
assert.NoError(t, gs2.SetEth1Data(ðpb.Eth1Data{DepositCount: 15}))
|
||||
assert.NoError(t, gs2.SetEth1DepositIndex(13))
|
||||
assert.NoError(t, service.cfg.StateGen.SaveState(ctx, [32]byte{'m', 'o', 'c', 'k', '2'}, gs2))
|
||||
zeroSig := [96]byte{}
|
||||
for i := uint64(0); i < uint64(4*params.BeaconConfig().SlotsPerEpoch); i++ {
|
||||
root := []byte(strconv.Itoa(int(i)))
|
||||
assert.NoError(t, depositCache.InsertDeposit(ctx, ðpb.Deposit{Data: ðpb.Deposit_Data{
|
||||
PublicKey: bytesutil.FromBytes48([fieldparams.BLSPubkeyLength]byte{}),
|
||||
WithdrawalCredentials: params.BeaconConfig().ZeroHash[:],
|
||||
Amount: 0,
|
||||
Signature: zeroSig[:],
|
||||
}, Proof: [][]byte{root}}, 100+i, int64(i), bytesutil.ToBytes32(root)))
|
||||
}
|
||||
// Insert 3 deposits before hand.
|
||||
depositCache.InsertFinalizedDeposits(ctx, 2)
|
||||
|
||||
assert.NoError(t, service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'}))
|
||||
fDeposits := depositCache.FinalizedDeposits(ctx)
|
||||
assert.Equal(t, 5, int(fDeposits.MerkleTrieIndex), "Finalized deposits not inserted correctly")
|
||||
|
||||
deps := depositCache.AllDeposits(ctx, big.NewInt(105))
|
||||
for _, d := range deps {
|
||||
assert.DeepEqual(t, [][]byte(nil), d.Proof, "Proofs are not empty")
|
||||
}
|
||||
|
||||
// Insert New Finalized State with higher deposit count.
|
||||
assert.NoError(t, service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k', '2'}))
|
||||
fDeposits = depositCache.FinalizedDeposits(ctx)
|
||||
assert.Equal(t, 12, int(fDeposits.MerkleTrieIndex), "Finalized deposits not inserted correctly")
|
||||
deps = depositCache.AllDeposits(ctx, big.NewInt(112))
|
||||
for _, d := range deps {
|
||||
assert.DeepEqual(t, [][]byte(nil), d.Proof, "Proofs are not empty")
|
||||
}
|
||||
@@ -1635,3 +1695,136 @@ func Test_getStateVersionAndPayload(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_validateMergeTransitionBlock(t *testing.T) {
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.TerminalTotalDifficulty = "2"
|
||||
cfg.TerminalBlockHash = params.BeaconConfig().ZeroHash
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
fcs := protoarray.New(0, 0, [32]byte{'a'})
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithForkChoiceStore(fcs),
|
||||
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
stateVersion int
|
||||
header *ethpb.ExecutionPayloadHeader
|
||||
payload *enginev1.ExecutionPayload
|
||||
errString string
|
||||
}{
|
||||
{
|
||||
name: "state older than Bellatrix, nil payload",
|
||||
stateVersion: 1,
|
||||
payload: nil,
|
||||
},
|
||||
{
|
||||
name: "state older than Bellatrix, empty payload",
|
||||
stateVersion: 1,
|
||||
payload: &enginev1.ExecutionPayload{
|
||||
ParentHash: make([]byte, fieldparams.RootLength),
|
||||
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
|
||||
StateRoot: make([]byte, fieldparams.RootLength),
|
||||
ReceiptsRoot: make([]byte, fieldparams.RootLength),
|
||||
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
|
||||
PrevRandao: make([]byte, fieldparams.RootLength),
|
||||
BaseFeePerGas: make([]byte, fieldparams.RootLength),
|
||||
BlockHash: make([]byte, fieldparams.RootLength),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "state older than Bellatrix, non empty payload",
|
||||
stateVersion: 1,
|
||||
payload: &enginev1.ExecutionPayload{
|
||||
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "state is Bellatrix, nil payload",
|
||||
stateVersion: 2,
|
||||
payload: nil,
|
||||
},
|
||||
{
|
||||
name: "state is Bellatrix, empty payload",
|
||||
stateVersion: 2,
|
||||
payload: &enginev1.ExecutionPayload{
|
||||
ParentHash: make([]byte, fieldparams.RootLength),
|
||||
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
|
||||
StateRoot: make([]byte, fieldparams.RootLength),
|
||||
ReceiptsRoot: make([]byte, fieldparams.RootLength),
|
||||
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
|
||||
PrevRandao: make([]byte, fieldparams.RootLength),
|
||||
BaseFeePerGas: make([]byte, fieldparams.RootLength),
|
||||
BlockHash: make([]byte, fieldparams.RootLength),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "state is Bellatrix, non empty payload, empty header",
|
||||
stateVersion: 2,
|
||||
payload: &enginev1.ExecutionPayload{
|
||||
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
|
||||
},
|
||||
header: ðpb.ExecutionPayloadHeader{
|
||||
ParentHash: make([]byte, fieldparams.RootLength),
|
||||
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
|
||||
StateRoot: make([]byte, fieldparams.RootLength),
|
||||
ReceiptsRoot: make([]byte, fieldparams.RootLength),
|
||||
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
|
||||
PrevRandao: make([]byte, fieldparams.RootLength),
|
||||
BaseFeePerGas: make([]byte, fieldparams.RootLength),
|
||||
BlockHash: make([]byte, fieldparams.RootLength),
|
||||
TransactionsRoot: make([]byte, fieldparams.RootLength),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "state is Bellatrix, non empty payload, non empty header",
|
||||
stateVersion: 2,
|
||||
payload: &enginev1.ExecutionPayload{
|
||||
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
|
||||
},
|
||||
header: ðpb.ExecutionPayloadHeader{
|
||||
BlockNumber: 1,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "state is Bellatrix, non empty payload, nil header",
|
||||
stateVersion: 2,
|
||||
payload: &enginev1.ExecutionPayload{
|
||||
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
|
||||
},
|
||||
errString: "nil header or block body",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
e := &mockPOW.EngineClient{BlockByHashMap: map[[32]byte]*enginev1.ExecutionBlock{}}
|
||||
e.BlockByHashMap[[32]byte{'a'}] = &enginev1.ExecutionBlock{
|
||||
ParentHash: bytesutil.PadTo([]byte{'b'}, fieldparams.RootLength),
|
||||
TotalDifficulty: "0x2",
|
||||
}
|
||||
e.BlockByHashMap[[32]byte{'b'}] = &enginev1.ExecutionBlock{
|
||||
ParentHash: bytesutil.PadTo([]byte{'3'}, fieldparams.RootLength),
|
||||
TotalDifficulty: "0x1",
|
||||
}
|
||||
service.cfg.ExecutionEngineCaller = e
|
||||
b := util.HydrateSignedBeaconBlockBellatrix(ðpb.SignedBeaconBlockBellatrix{})
|
||||
b.Block.Body.ExecutionPayload = tt.payload
|
||||
blk, err := wrapper.WrappedSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
err = service.validateMergeTransitionBlock(ctx, tt.stateVersion, tt.header, blk)
|
||||
if tt.errString != "" {
|
||||
require.ErrorContains(t, tt.errString, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -28,27 +28,10 @@ type AttestationStateFetcher interface {
// AttestationReceiver interface defines the methods of chain service receive and processing new attestations.
type AttestationReceiver interface {
AttestationStateFetcher
ReceiveAttestationNoPubsub(ctx context.Context, att *ethpb.Attestation) error
VerifyLmdFfgConsistency(ctx context.Context, att *ethpb.Attestation) error
VerifyFinalizedConsistency(ctx context.Context, root []byte) error
}

// ReceiveAttestationNoPubsub is a function that defines the operations that are performed on
// attestation that is received from regular sync. The operations consist of:
// 1. Validate attestation, update validator's latest vote
// 2. Apply fork choice to the processed attestation
// 3. Save latest head info
func (s *Service) ReceiveAttestationNoPubsub(ctx context.Context, att *ethpb.Attestation) error {
ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.ReceiveAttestationNoPubsub")
defer span.End()

if err := s.OnAttestation(ctx, att); err != nil {
return errors.Wrap(err, "could not process attestation")
}

return nil
}

// AttestationTargetState returns the pre state of attestation.
func (s *Service) AttestationTargetState(ctx context.Context, target *ethpb.Checkpoint) (state.BeaconState, error) {
ss, err := slots.EpochStart(target.Epoch)

@@ -83,7 +66,8 @@ func (s *Service) VerifyLmdFfgConsistency(ctx context.Context, a *ethpb.Attestat
func (s *Service) VerifyFinalizedConsistency(ctx context.Context, root []byte) error {
// A canonical root implies the root to has an ancestor that aligns with finalized check point.
// In this case, we could exit early to save on additional computation.
if s.cfg.ForkChoiceStore.IsCanonical(bytesutil.ToBytes32(root)) {
blockRoot := bytesutil.ToBytes32(root)
if s.cfg.ForkChoiceStore.HasNode(blockRoot) && s.cfg.ForkChoiceStore.IsCanonical(blockRoot) {
return nil
}

@@ -186,11 +170,23 @@ func (s *Service) notifyEngineIfChangedHead(ctx context.Context, newHeadRoot [32
log.WithError(err).Error("Could not get block from db")
return
}
if newHeadBlock == nil || newHeadBlock.IsNil() {
log.WithFields(logrus.Fields{
"root": fmt.Sprintf("%#x", newHeadRoot),
}).Error("Block with root not found in database when notifying engine of head change")
return
}
headState, err := s.cfg.StateGen.StateByRoot(ctx, newHeadRoot)
if err != nil {
log.WithError(err).Error("Could not get state from db")
return
}
if headState == nil || headState.IsNil() {
log.WithFields(logrus.Fields{
"root": fmt.Sprintf("%#x", newHeadRoot),
}).Error("State for block with root not found in database when notifying engine of head change")
return
}
_, err = s.notifyForkchoiceUpdate(s.ctx,
headState,
newHeadBlock.Block(),

@@ -231,7 +227,7 @@ func (s *Service) processAttestations(ctx context.Context) {
continue
}

if err := s.ReceiveAttestationNoPubsub(ctx, a); err != nil {
if err := s.receiveAttestationNoPubsub(ctx, a); err != nil {
log.WithFields(logrus.Fields{
"slot": a.Data.Slot,
"committeeIndex": a.Data.CommitteeIndex,

@@ -242,3 +238,19 @@ func (s *Service) processAttestations(ctx context.Context) {
}
}
}

// receiveAttestationNoPubsub is a function that defines the operations that are performed on
// attestation that is received from regular sync. The operations consist of:
// 1. Validate attestation, update validator's latest vote
// 2. Apply fork choice to the processed attestation
// 3. Save latest head info
func (s *Service) receiveAttestationNoPubsub(ctx context.Context, att *ethpb.Attestation) error {
ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.receiveAttestationNoPubsub")
defer span.End()

if err := s.OnAttestation(ctx, att); err != nil {
return errors.Wrap(err, "could not process attestation")
}

return nil
}
@@ -17,7 +17,7 @@ import (
// This defines how many epochs since finality the run time will begin to save hot state on to the DB.
var epochsSinceFinalitySaveHotStateDB = types.Epoch(100)

// BlockReceiver interface defines the methods of chain service receive and processing new blocks.
// BlockReceiver interface defines the methods of chain service for receiving and processing new blocks.
type BlockReceiver interface {
ReceiveBlock(ctx context.Context, block block.SignedBeaconBlock, blockRoot [32]byte) error
ReceiveBlockBatch(ctx context.Context, blocks []block.SignedBeaconBlock, blkRoots [][32]byte) error
@@ -78,7 +78,7 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []block.SignedBe
ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlockBatch")
defer span.End()

// Apply state transition on the incoming newly received blockCopy without verifying its BLS contents.
// Apply state transition on the incoming newly received block batches, one by one.
fCheckpoints, jCheckpoints, err := s.onBlockBatch(ctx, blocks, blkRoots)
if err != nil {
err := errors.Wrap(err, "could not process block in batch")

@@ -50,14 +50,13 @@ const headSyncMinEpochsAfterCheckpoint = 128
// Service represents a service that handles the internal
// logic of managing the full PoS beacon chain.
type Service struct {
cfg *config
ctx context.Context
cancel context.CancelFunc
genesisTime time.Time
head *head
headLock sync.RWMutex
// originBlockRoot is the genesis root, or weak subjectivity checkpoint root, depending on how the node is initialized
originBlockRoot [32]byte
cfg *config
ctx context.Context
cancel context.CancelFunc
genesisTime time.Time
head *head
headLock sync.RWMutex
originBlockRoot [32]byte // genesis root, or weak subjectivity checkpoint root, depending on how the node is initialized
nextEpochBoundarySlot types.Slot
boundaryRoots [][32]byte
checkpointStateCache *cache.CheckpointStateCache
@@ -160,11 +159,12 @@ func (s *Service) Status() error {
return errors.New("genesis state has not been created")
}
if runtime.NumGoroutine() > s.cfg.MaxRoutines {
return fmt.Errorf("too many goroutines %d", runtime.NumGoroutine())
return fmt.Errorf("too many goroutines (%d)", runtime.NumGoroutine())
}
return nil
}

// StartFromSavedState initializes the blockchain using a previously saved finalized checkpoint.
func (s *Service) StartFromSavedState(saved state.BeaconState) error {
log.Info("Blockchain data already exists in DB, initializing...")
s.genesisTime = time.Unix(int64(saved.GenesisTime()), 0) // lint:ignore uintcast -- Genesis time will not exceed int64 in your lifetime.
@@ -191,14 +191,14 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
}
s.store = store.New(justified, finalized)

var f f.ForkChoicer
var forkChoicer f.ForkChoicer
fRoot := bytesutil.ToBytes32(finalized.Root)
if features.Get().EnableForkChoiceDoublyLinkedTree {
f = doublylinkedtree.New(justified.Epoch, finalized.Epoch)
forkChoicer = doublylinkedtree.New(justified.Epoch, finalized.Epoch)
} else {
f = protoarray.New(justified.Epoch, finalized.Epoch, fRoot)
forkChoicer = protoarray.New(justified.Epoch, finalized.Epoch, fRoot)
}
s.cfg.ForkChoiceStore = f
s.cfg.ForkChoiceStore = forkChoicer
fb, err := s.cfg.BeaconDB.Block(s.ctx, s.ensureRootNotZeros(fRoot))
if err != nil {
return errors.Wrap(err, "could not get finalized checkpoint block")
@@ -211,7 +211,7 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
return errors.Wrap(err, "could not get execution payload hash")
}
fSlot := fb.Block().Slot()
if err := f.InsertOptimisticBlock(s.ctx, fSlot, fRoot, params.BeaconConfig().ZeroHash,
if err := forkChoicer.InsertOptimisticBlock(s.ctx, fSlot, fRoot, params.BeaconConfig().ZeroHash,
payloadHash, justified.Epoch, finalized.Epoch); err != nil {
return errors.Wrap(err, "could not insert finalized block to forkchoice")
}
@@ -221,7 +221,7 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
return errors.Wrap(err, "could not get last validated checkpoint")
}
if bytes.Equal(finalized.Root, lastValidatedCheckpoint.Root) {
if err := f.SetOptimisticToValid(s.ctx, fRoot); err != nil {
if err := forkChoicer.SetOptimisticToValid(s.ctx, fRoot); err != nil {
return errors.Wrap(err, "could not set finalized block as validated")
}
}
@@ -281,9 +281,9 @@ func (s *Service) originRootFromSavedState(ctx context.Context) ([32]byte, error
return genesisBlkRoot, nil
}

// initializeHeadFromDB uses the finalized checkpoint and head block found in the database to set the current head
// note that this may block until stategen replays blocks between the finalized and head blocks
// if the head sync flag was specified and the gap between the finalized and head blocks is at least 128 epochs long
// initializeHeadFromDB uses the finalized checkpoint and head block found in the database to set the current head.
// Note that this may block until stategen replays blocks between the finalized and head blocks
// if the head sync flag was specified and the gap between the finalized and head blocks is at least 128 epochs long.
func (s *Service) initializeHeadFromDB(ctx context.Context) error {
finalized, err := s.cfg.BeaconDB.FinalizedCheckpoint(ctx)
if err != nil {
@@ -2,7 +2,7 @@ package blockchain

import (
"context"
"io/ioutil"
"io"
"testing"

testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
@@ -14,7 +14,7 @@ import (

func init() {
logrus.SetLevel(logrus.DebugLevel)
logrus.SetOutput(ioutil.Discard)
logrus.SetOutput(io.Discard)
}

func TestChainService_SaveHead_DataRace(t *testing.T) {

@@ -4,6 +4,7 @@ import (
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)

// New creates a store object.
func New(justifiedCheckpt *ethpb.Checkpoint, finalizedCheckpt *ethpb.Checkpoint) *Store {
return &Store{
justifiedCheckpt: justifiedCheckpt,

@@ -301,11 +301,6 @@ func (_ *ChainService) ReceiveAttestation(_ context.Context, _ *ethpb.Attestatio
return nil
}

// ReceiveAttestationNoPubsub mocks ReceiveAttestationNoPubsub method in chain service.
func (_ *ChainService) ReceiveAttestationNoPubsub(context.Context, *ethpb.Attestation) error {
return nil
}

// AttestationTargetState mocks AttestationTargetState method in chain service.
func (s *ChainService) AttestationTargetState(_ context.Context, _ *ethpb.Checkpoint) (state.BeaconState, error) {
return s.State, nil
@@ -319,11 +314,6 @@ func (s *ChainService) HeadValidatorsIndices(ctx context.Context, epoch types.Ep
return helpers.ActiveValidatorIndices(ctx, s.State, epoch)
}

// HeadSeed mocks the same method in the chain service.
func (s *ChainService) HeadSeed(_ context.Context, epoch types.Epoch) ([32]byte, error) {
return helpers.Seed(s.State, epoch, params.BeaconConfig().DomainBeaconAttester)
}

// HeadETH1Data provides the current ETH1Data of the head state.
func (s *ChainService) HeadETH1Data() *ethpb.Eth1Data {
return s.ETH1Data
@@ -381,7 +371,7 @@ func (_ *ChainService) HeadGenesisValidatorsRoot() [32]byte {
}

// VerifyBlkDescendant mocks VerifyBlkDescendant and always returns nil.
func (s *ChainService) VerifyBlkDescendant(_ context.Context, _ [32]byte) error {
func (s *ChainService) VerifyFinalizedBlkDescendant(_ context.Context, _ [32]byte) error {
return s.VerifyBlkDescendantErr
}

@@ -32,9 +32,6 @@ type WeakSubjectivityVerifier struct {

// NewWeakSubjectivityVerifier validates a checkpoint, and if valid, uses it to initialize a weak subjectivity verifier
func NewWeakSubjectivityVerifier(wsc *ethpb.Checkpoint, db weakSubjectivityDB) (*WeakSubjectivityVerifier, error) {
// TODO(7342): Weak subjectivity checks are currently optional. When we require the flag to be specified
// per 7342, a nil checkpoint, zero-root or zero-epoch should all fail validation
// and return an error instead of creating a WeakSubjectivityVerifier that permits any chain history.
if wsc == nil || len(wsc.Root) == 0 || wsc.Epoch == 0 {
log.Warn("No valid weak subjectivity checkpoint specified, running without weak subjectivity verification")
return &WeakSubjectivityVerifier{
2
beacon-chain/cache/BUILD.bazel
vendored
2
beacon-chain/cache/BUILD.bazel
vendored
@@ -33,7 +33,6 @@ go_library(
deps = [
"//beacon-chain/state:go_default_library",
"//cache/lru:go_default_library",
"//config/features:go_default_library",
"//config/params:go_default_library",
"//container/slice:go_default_library",
"//crypto/hash:go_default_library",
@@ -77,7 +76,6 @@ go_test(
"//beacon-chain/state/v1:go_default_library",
"//beacon-chain/state/v2:go_default_library",
"//beacon-chain/state/v3:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//encoding/bytesutil:go_default_library",

7
beacon-chain/cache/active_balance.go
vendored
7
beacon-chain/cache/active_balance.go
vendored
@@ -14,7 +14,6 @@ import (
ethTypes "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
lruwrpr "github.com/prysmaticlabs/prysm/cache/lru"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
)

@@ -51,9 +50,6 @@ func NewEffectiveBalanceCache() *BalanceCache {

// AddTotalEffectiveBalance adds a new total effective balance entry for current balance for state `st` into the cache.
func (c *BalanceCache) AddTotalEffectiveBalance(st state.ReadOnlyBeaconState, balance uint64) error {
if !features.Get().EnableActiveBalanceCache {
return nil
}
key, err := balanceCacheKey(st)
if err != nil {
return err
@@ -68,9 +64,6 @@ func (c *BalanceCache) AddTotalEffectiveBalance(st state.ReadOnlyBeaconState, ba

// Get returns the current epoch's effective balance for state `st` in cache.
func (c *BalanceCache) Get(st state.ReadOnlyBeaconState) (uint64, error) {
if !features.Get().EnableActiveBalanceCache {
return 0, ErrNotFound
}
key, err := balanceCacheKey(st)
if err != nil {
return 0, err

6
beacon-chain/cache/active_balance_test.go
vendored
6
beacon-chain/cache/active_balance_test.go
vendored
@@ -7,18 +7,12 @@ import (

types "github.com/prysmaticlabs/eth2-types"
state "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/require"
)

func TestBalanceCache_AddGetBalance(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
EnableActiveBalanceCache: true,
})
defer resetCfg()

blockRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
for i := 0; i < len(blockRoots); i++ {
b := make([]byte, 8)
@@ -36,7 +36,7 @@ type DepositFetcher interface {
DepositByPubkey(ctx context.Context, pubKey []byte) (*ethpb.Deposit, *big.Int)
DepositsNumberAndRootAtHeight(ctx context.Context, blockHeight *big.Int) (uint64, [32]byte)
FinalizedDeposits(ctx context.Context) *FinalizedDeposits
NonFinalizedDeposits(ctx context.Context, untilBlk *big.Int) []*ethpb.Deposit
NonFinalizedDeposits(ctx context.Context, lastFinalizedIndex int64, untilBlk *big.Int) []*ethpb.Deposit
}

// FinalizedDeposits stores the trie of deposits that have been included
@@ -137,6 +137,22 @@ func (dc *DepositCache) InsertFinalizedDeposits(ctx context.Context, eth1Deposit

depositTrie := dc.finalizedDeposits.Deposits
insertIndex := int(dc.finalizedDeposits.MerkleTrieIndex + 1)

// Don't insert into finalized trie if there is no deposit to
// insert.
if len(dc.deposits) == 0 {
return
}
// In the event we have less deposits than we need to
// finalize we finalize till the index on which we do have it.
if len(dc.deposits) <= int(eth1DepositIndex) {
eth1DepositIndex = int64(len(dc.deposits)) - 1
}
// If we finalize to some lower deposit index, we
// ignore it.
if int(eth1DepositIndex) < insertIndex {
return
}
for _, d := range dc.deposits {
if d.Index <= dc.finalizedDeposits.MerkleTrieIndex {
continue
@@ -246,7 +262,7 @@ func (dc *DepositCache) FinalizedDeposits(ctx context.Context) *FinalizedDeposit

// NonFinalizedDeposits returns the list of non-finalized deposits until the given block number (inclusive).
// If no block is specified then this method returns all non-finalized deposits.
func (dc *DepositCache) NonFinalizedDeposits(ctx context.Context, untilBlk *big.Int) []*ethpb.Deposit {
func (dc *DepositCache) NonFinalizedDeposits(ctx context.Context, lastFinalizedIndex int64, untilBlk *big.Int) []*ethpb.Deposit {
ctx, span := trace.StartSpan(ctx, "DepositsCache.NonFinalizedDeposits")
defer span.End()
dc.depositsLock.RLock()
@@ -256,10 +272,9 @@ func (dc *DepositCache) NonFinalizedDeposits(ctx context.Context, untilBlk *big.
return dc.allDeposits(untilBlk)
}

lastFinalizedDepositIndex := dc.finalizedDeposits.MerkleTrieIndex
var deposits []*ethpb.Deposit
for _, d := range dc.deposits {
if (d.Index > lastFinalizedDepositIndex) && (untilBlk == nil || untilBlk.Uint64() >= d.Eth1BlockHeight) {
if (d.Index > lastFinalizedIndex) && (untilBlk == nil || untilBlk.Uint64() >= d.Eth1BlockHeight) {
deposits = append(deposits, d.Deposit)
}
}
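With this change the caller supplies the last finalized trie index instead of the cache reading its own index under the lock. A minimal usage sketch under that assumption (variable names here are illustrative, mirroring the updated tests further down):

fd := depositCache.FinalizedDeposits(ctx)
// Pass the finalized trie index explicitly; previously the cache looked up its own MerkleTrieIndex.
pending := depositCache.NonFinalizedDeposits(ctx, fd.MerkleTrieIndex, untilBlock)
_ = pending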

@@ -459,7 +459,7 @@ func TestFinalizedDeposits_UtilizesPreviouslyCachedDeposits(t *testing.T) {
Index: 1,
},
}
newFinalizedDeposit := ethpb.DepositContainer{
newFinalizedDeposit := &ethpb.DepositContainer{
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.PadTo([]byte{2}, 48),
@@ -471,17 +471,17 @@ func TestFinalizedDeposits_UtilizesPreviouslyCachedDeposits(t *testing.T) {
}
dc.deposits = oldFinalizedDeposits
dc.InsertFinalizedDeposits(context.Background(), 1)
// Artificially exclude old deposits so that they can only be retrieved from previously finalized deposits.
dc.deposits = []*ethpb.DepositContainer{&newFinalizedDeposit}

dc.InsertFinalizedDeposits(context.Background(), 2)

dc.deposits = append(dc.deposits, []*ethpb.DepositContainer{newFinalizedDeposit}...)

cachedDeposits := dc.FinalizedDeposits(context.Background())
require.NotNil(t, cachedDeposits, "Deposits not cached")
assert.Equal(t, int64(2), cachedDeposits.MerkleTrieIndex)
assert.Equal(t, int64(1), cachedDeposits.MerkleTrieIndex)

var deps [][]byte
for _, d := range append(oldFinalizedDeposits, &newFinalizedDeposit) {
for _, d := range oldFinalizedDeposits {
hash, err := d.Deposit.Data.HashTreeRoot()
require.NoError(t, err, "Could not hash deposit data")
deps = append(deps, hash[:])
@@ -491,6 +491,140 @@ func TestFinalizedDeposits_UtilizesPreviouslyCachedDeposits(t *testing.T) {
assert.Equal(t, trie.HashTreeRoot(), cachedDeposits.Deposits.HashTreeRoot())
}

func TestFinalizedDeposits_HandleZeroDeposits(t *testing.T) {
dc, err := New()
require.NoError(t, err)

dc.InsertFinalizedDeposits(context.Background(), 2)

cachedDeposits := dc.FinalizedDeposits(context.Background())
require.NotNil(t, cachedDeposits, "Deposits not cached")
assert.Equal(t, int64(-1), cachedDeposits.MerkleTrieIndex)
}

func TestFinalizedDeposits_HandleSmallerThanExpectedDeposits(t *testing.T) {
dc, err := New()
require.NoError(t, err)

finalizedDeposits := []*ethpb.DepositContainer{
{
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.PadTo([]byte{0}, 48),
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
},
},
Index: 0,
},
{
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.PadTo([]byte{1}, 48),
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
},
},
Index: 1,
},
{
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.PadTo([]byte{2}, 48),
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
},
},
Index: 2,
},
}
dc.deposits = finalizedDeposits

dc.InsertFinalizedDeposits(context.Background(), 5)

cachedDeposits := dc.FinalizedDeposits(context.Background())
require.NotNil(t, cachedDeposits, "Deposits not cached")
assert.Equal(t, int64(2), cachedDeposits.MerkleTrieIndex)
}
func TestFinalizedDeposits_HandleLowerEth1DepositIndex(t *testing.T) {
dc, err := New()
require.NoError(t, err)

finalizedDeposits := []*ethpb.DepositContainer{
{
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.PadTo([]byte{0}, 48),
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
},
},
Index: 0,
},
{
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.PadTo([]byte{1}, 48),
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
},
},
Index: 1,
},
{
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.PadTo([]byte{2}, 48),
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
},
},
Index: 2,
},
{
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.PadTo([]byte{3}, 48),
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
},
},
Index: 3,
},
{
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.PadTo([]byte{4}, 48),
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
},
},
Index: 4,
},
{
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.PadTo([]byte{5}, 48),
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
},
},
Index: 5,
},
}
dc.deposits = finalizedDeposits

dc.InsertFinalizedDeposits(context.Background(), 5)

// Reinsert finalized deposits with a lower index.
dc.InsertFinalizedDeposits(context.Background(), 2)

cachedDeposits := dc.FinalizedDeposits(context.Background())
require.NotNil(t, cachedDeposits, "Deposits not cached")
assert.Equal(t, int64(5), cachedDeposits.MerkleTrieIndex)
}

func TestFinalizedDeposits_InitializedCorrectly(t *testing.T) {
dc, err := New()
require.NoError(t, err)
@@ -554,7 +688,7 @@ func TestNonFinalizedDeposits_ReturnsAllNonFinalizedDeposits(t *testing.T) {
})
dc.InsertFinalizedDeposits(context.Background(), 1)

deps := dc.NonFinalizedDeposits(context.Background(), nil)
deps := dc.NonFinalizedDeposits(context.Background(), 1, nil)
assert.Equal(t, 2, len(deps))
}

@@ -611,10 +745,89 @@ func TestNonFinalizedDeposits_ReturnsNonFinalizedDepositsUpToBlockNumber(t *test
})
dc.InsertFinalizedDeposits(context.Background(), 1)

deps := dc.NonFinalizedDeposits(context.Background(), big.NewInt(10))
deps := dc.NonFinalizedDeposits(context.Background(), 1, big.NewInt(10))
assert.Equal(t, 1, len(deps))
}

func TestFinalizedDeposits_ReturnsTrieCorrectly(t *testing.T) {
dc, err := New()
require.NoError(t, err)

generateCtr := func(height uint64, index int64) *ethpb.DepositContainer {
return &ethpb.DepositContainer{
Eth1BlockHeight: height,
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.PadTo([]byte{uint8(index)}, 48),
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
},
},
Index: index,
}
}

finalizedDeposits := []*ethpb.DepositContainer{
generateCtr(10, 0),
generateCtr(11, 1),
generateCtr(12, 2),
generateCtr(12, 3),
generateCtr(13, 4),
generateCtr(13, 5),
generateCtr(13, 6),
generateCtr(14, 7),
}
dc.deposits = append(finalizedDeposits,
generateCtr(15, 8),
generateCtr(15, 9),
generateCtr(30, 10))
trieItems := make([][]byte, 0, len(dc.deposits))
for _, dep := range dc.allDeposits(big.NewInt(30)) {
depHash, err := dep.Data.HashTreeRoot()
assert.NoError(t, err)
trieItems = append(trieItems, depHash[:])
}
depositTrie, err := trie.GenerateTrieFromItems(trieItems, params.BeaconConfig().DepositContractTreeDepth)
assert.NoError(t, err)

// Perform this in a non-sensical ordering
dc.InsertFinalizedDeposits(context.Background(), 10)
dc.InsertFinalizedDeposits(context.Background(), 2)
dc.InsertFinalizedDeposits(context.Background(), 3)
dc.InsertFinalizedDeposits(context.Background(), 4)

// Mimick finalized deposit trie fetch.
fd := dc.FinalizedDeposits(context.Background())
deps := dc.NonFinalizedDeposits(context.Background(), fd.MerkleTrieIndex, big.NewInt(14))
insertIndex := fd.MerkleTrieIndex + 1

for _, dep := range deps {
depHash, err := dep.Data.HashTreeRoot()
assert.NoError(t, err)
if err = fd.Deposits.Insert(depHash[:], int(insertIndex)); err != nil {
assert.NoError(t, err)
}
insertIndex++
}
dc.InsertFinalizedDeposits(context.Background(), 15)
dc.InsertFinalizedDeposits(context.Background(), 15)
dc.InsertFinalizedDeposits(context.Background(), 14)

fd = dc.FinalizedDeposits(context.Background())
deps = dc.NonFinalizedDeposits(context.Background(), fd.MerkleTrieIndex, big.NewInt(30))
insertIndex = fd.MerkleTrieIndex + 1

for _, dep := range deps {
depHash, err := dep.Data.HashTreeRoot()
assert.NoError(t, err)
if err = fd.Deposits.Insert(depHash[:], int(insertIndex)); err != nil {
assert.NoError(t, err)
}
insertIndex++
}
assert.Equal(t, fd.Deposits.NumOfItems(), depositTrie.NumOfItems())
}

func TestPruneProofs_Ok(t *testing.T) {
dc, err := New()
require.NoError(t, err)
@@ -2,7 +2,7 @@ package blocks_test

import (
"context"
"io/ioutil"
"os"
"testing"

"github.com/prysmaticlabs/go-bitfield"
@@ -20,7 +20,7 @@ import (
// valid att.Data.Committee index would be 0, so this is an off by one error.
// See: https://github.com/sigp/beacon-fuzz/issues/78
func TestProcessAttestationNoVerifySignature_BeaconFuzzIssue78(t *testing.T) {
attData, err := ioutil.ReadFile("testdata/beaconfuzz_78_attestation.ssz")
attData, err := os.ReadFile("testdata/beaconfuzz_78_attestation.ssz")
if err != nil {
t.Fatal(err)
}
@@ -28,7 +28,7 @@ func TestProcessAttestationNoVerifySignature_BeaconFuzzIssue78(t *testing.T) {
if err := att.UnmarshalSSZ(attData); err != nil {
t.Fatal(err)
}
stateData, err := ioutil.ReadFile("testdata/beaconfuzz_78_beacon.ssz")
stateData, err := os.ReadFile("testdata/beaconfuzz_78_beacon.ssz")
if err != nil {
t.Fatal(err)
}

@@ -2,7 +2,7 @@ package blocks_test

import (
"context"
"io/ioutil"
"io"
"testing"

"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
@@ -23,7 +23,7 @@ import (
)

func init() {
logrus.SetOutput(ioutil.Discard) // Ignore "validator activated" logs
logrus.SetOutput(io.Discard) // Ignore "validator activated" logs
}

func TestProcessBlockHeader_ImproperBlockSlot(t *testing.T) {

@@ -18,6 +18,12 @@ import (
"github.com/prysmaticlabs/prysm/time/slots"
)

var (
ErrInvalidPayloadBlockHash = errors.New("invalid payload block hash")
ErrInvalidPayloadTimeStamp = errors.New("invalid payload timestamp")
ErrInvalidPayloadPrevRandao = errors.New("invalid payload previous randao")
)

// IsMergeTransitionComplete returns true if the transition to Bellatrix has completed.
// Meaning the payload header in beacon state is not `ExecutionPayloadHeader()` (i.e. not empty).
//
@@ -68,7 +74,7 @@ func IsExecutionBlock(body block.BeaconBlockBody) (bool, error) {
return false, err
default:
}
return !isEmptyPayload(payload), nil
return !IsEmptyPayload(payload), nil
}

// IsExecutionEnabled returns true if the beacon chain can begin executing.
@@ -146,14 +152,14 @@ func ValidatePayload(st state.BeaconState, payload *enginev1.ExecutionPayload) e
}

if !bytes.Equal(payload.PrevRandao, random) {
return errors.New("incorrect prev randao")
return ErrInvalidPayloadPrevRandao
}
t, err := slots.ToTime(st.GenesisTime(), st.Slot())
if err != nil {
return err
}
if payload.Timestamp != uint64(t.Unix()) {
return errors.New("incorrect timestamp")
return ErrInvalidPayloadTimeStamp
}
return nil
}
@@ -220,7 +226,7 @@ func PayloadToHeader(payload *enginev1.ExecutionPayload) (*ethpb.ExecutionPayloa
ParentHash: bytesutil.SafeCopyBytes(payload.ParentHash),
FeeRecipient: bytesutil.SafeCopyBytes(payload.FeeRecipient),
StateRoot: bytesutil.SafeCopyBytes(payload.StateRoot),
ReceiptRoot: bytesutil.SafeCopyBytes(payload.ReceiptsRoot),
ReceiptsRoot: bytesutil.SafeCopyBytes(payload.ReceiptsRoot),
LogsBloom: bytesutil.SafeCopyBytes(payload.LogsBloom),
PrevRandao: bytesutil.SafeCopyBytes(payload.PrevRandao),
BlockNumber: payload.BlockNumber,
@@ -234,7 +240,7 @@ func PayloadToHeader(payload *enginev1.ExecutionPayload) (*ethpb.ExecutionPayloa
}, nil
}

func isEmptyPayload(p *enginev1.ExecutionPayload) bool {
func IsEmptyPayload(p *enginev1.ExecutionPayload) bool {
if p == nil {
return true
}
@@ -293,7 +299,7 @@ func isEmptyHeader(h *ethpb.ExecutionPayloadHeader) bool {
if !bytes.Equal(h.StateRoot, make([]byte, fieldparams.RootLength)) {
return false
}
if !bytes.Equal(h.ReceiptRoot, make([]byte, fieldparams.RootLength)) {
if !bytes.Equal(h.ReceiptsRoot, make([]byte, fieldparams.RootLength)) {
return false
}
if !bytes.Equal(h.LogsBloom, make([]byte, fieldparams.LogsBloomLength)) {
@@ -328,3 +334,62 @@ func isEmptyHeader(h *ethpb.ExecutionPayloadHeader) bool {
}
return true
}

// ValidatePayloadHeaderWhenMergeCompletes validates the payload header when the merge completes.
func ValidatePayloadHeaderWhenMergeCompletes(st state.BeaconState, header *ethpb.ExecutionPayloadHeader) error {
// Skip validation if the state is not merge compatible.
complete, err := IsMergeTransitionComplete(st)
if err != nil {
return err
}
if !complete {
return nil
}
// Validate current header's parent hash matches state header's block hash.
h, err := st.LatestExecutionPayloadHeader()
if err != nil {
return err
}
if !bytes.Equal(header.ParentHash, h.BlockHash) {
return ErrInvalidPayloadBlockHash
}
return nil
}

// ValidatePayloadHeader validates the payload header.
func ValidatePayloadHeader(st state.BeaconState, header *ethpb.ExecutionPayloadHeader) error {
// Validate header's random mix matches with state in current epoch
random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
if err != nil {
return err
}
if !bytes.Equal(header.PrevRandao, random) {
return ErrInvalidPayloadPrevRandao
}

// Validate header's timestamp matches with state in current slot.
t, err := slots.ToTime(st.GenesisTime(), st.Slot())
if err != nil {
return err
}
if header.Timestamp != uint64(t.Unix()) {
return ErrInvalidPayloadTimeStamp
}
return nil
}

// ProcessPayloadHeader processes the payload header.
func ProcessPayloadHeader(st state.BeaconState, header *ethpb.ExecutionPayloadHeader) (state.BeaconState, error) {
if err := ValidatePayloadHeaderWhenMergeCompletes(st, header); err != nil {
return nil, err
}

if err := ValidatePayloadHeader(st, header); err != nil {
return nil, err
}

if err := st.SetLatestExecutionPayloadHeader(header); err != nil {
return nil, err
}
return st, nil
}
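A short sketch of how a caller is expected to use the new header path for blinded blocks, mirroring the ProcessBlockForStateRoot change further down; the surrounding variables are illustrative, not part of this hunk:

header, err := blk.Body().ExecutionPayloadHeader()
if err != nil {
return nil, err
}
// Validates the parent hash (only once the merge is complete), prev randao and timestamp,
// then stores the header as the latest execution payload header in state.
st, err = blocks.ProcessPayloadHeader(st, header)
if err != nil {
return nil, errors.Wrap(err, "could not process execution payload header")
}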

@@ -7,6 +7,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/encoding/ssz"
@@ -60,7 +61,7 @@ func Test_IsMergeComplete(t *testing.T) {
name: "has receipt root",
payload: func() *ethpb.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.ReceiptRoot = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
h.ReceiptsRoot = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
return h
}(),
want: true,
@@ -600,7 +601,7 @@ func Test_ValidatePayload(t *testing.T) {
{
name: "incorrect prev randao",
payload: emptyPayload(),
err: errors.New("incorrect prev randao"),
err: blocks.ErrInvalidPayloadPrevRandao,
},
{
name: "incorrect timestamp",
@@ -610,7 +611,7 @@ func Test_ValidatePayload(t *testing.T) {
h.Timestamp = 1
return h
}(),
err: errors.New("incorrect timestamp"),
err: blocks.ErrInvalidPayloadTimeStamp,
},
}
for _, tt := range tests {
@@ -648,7 +649,7 @@ func Test_ProcessPayload(t *testing.T) {
{
name: "incorrect prev randao",
payload: emptyPayload(),
err: errors.New("incorrect prev randao"),
err: blocks.ErrInvalidPayloadPrevRandao,
},
{
name: "incorrect timestamp",
@@ -658,7 +659,7 @@ func Test_ProcessPayload(t *testing.T) {
h.Timestamp = 1
return h
}(),
err: errors.New("incorrect timestamp"),
err: blocks.ErrInvalidPayloadTimeStamp,
},
}
for _, tt := range tests {
@@ -678,6 +679,149 @@ func Test_ProcessPayload(t *testing.T) {
}
}

func Test_ProcessPayloadHeader(t *testing.T) {
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
require.NoError(t, err)
ts, err := slots.ToTime(st.GenesisTime(), st.Slot())
require.NoError(t, err)
tests := []struct {
name string
header *ethpb.ExecutionPayloadHeader
err error
}{
{
name: "process passes",
header: func() *ethpb.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.PrevRandao = random
h.Timestamp = uint64(ts.Unix())
return h
}(), err: nil,
},
{
name: "incorrect prev randao",
header: emptyPayloadHeader(),
err: blocks.ErrInvalidPayloadPrevRandao,
},
{
name: "incorrect timestamp",
header: func() *ethpb.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.PrevRandao = random
h.Timestamp = 1
return h
}(),
err: blocks.ErrInvalidPayloadTimeStamp,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
st, err := blocks.ProcessPayloadHeader(st, tt.header)
if err != nil {
require.Equal(t, tt.err.Error(), err.Error())
} else {
require.Equal(t, tt.err, err)
got, err := st.LatestExecutionPayloadHeader()
require.NoError(t, err)
require.DeepSSZEqual(t, tt.header, got)
}
})
}
}
func Test_ValidatePayloadHeader(t *testing.T) {
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
require.NoError(t, err)
ts, err := slots.ToTime(st.GenesisTime(), st.Slot())
require.NoError(t, err)
tests := []struct {
name string
header *ethpb.ExecutionPayloadHeader
err error
}{
{
name: "process passes",
header: func() *ethpb.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.PrevRandao = random
h.Timestamp = uint64(ts.Unix())
return h
}(), err: nil,
},
{
name: "incorrect prev randao",
header: emptyPayloadHeader(),
err: blocks.ErrInvalidPayloadPrevRandao,
},
{
name: "incorrect timestamp",
header: func() *ethpb.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.PrevRandao = random
h.Timestamp = 1
return h
}(),
err: blocks.ErrInvalidPayloadTimeStamp,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := blocks.ValidatePayloadHeader(st, tt.header)
require.Equal(t, tt.err, err)
})
}
}

func Test_ValidatePayloadHeaderWhenMergeCompletes(t *testing.T) {
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
emptySt := st.Copy()
require.NoError(t, st.SetLatestExecutionPayloadHeader(&ethpb.ExecutionPayloadHeader{BlockHash: []byte{'a'}}))
tests := []struct {
name string
state state.BeaconState
header *ethpb.ExecutionPayloadHeader
err error
}{
{
name: "no merge",
header: func() *ethpb.ExecutionPayloadHeader {
h := emptyPayloadHeader()
return h
}(),
state: emptySt,
err: nil,
},
{
name: "process passes",
header: func() *ethpb.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.ParentHash = []byte{'a'}
return h
}(),
state: st,
err: nil,
},
{
name: "invalid block hash",
header: func() *ethpb.ExecutionPayloadHeader {
h := emptyPayloadHeader()
h.ParentHash = []byte{'b'}
return h
}(),
state: st,
err: blocks.ErrInvalidPayloadBlockHash,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := blocks.ValidatePayloadHeaderWhenMergeCompletes(tt.state, tt.header)
require.Equal(t, tt.err, err)
})
}
}

func Test_PayloadToHeader(t *testing.T) {
p := emptyPayload()
h, err := blocks.PayloadToHeader(p)
@@ -705,7 +849,7 @@ func Test_PayloadToHeader(t *testing.T) {
require.DeepSSZEqual(t, h.ParentHash, make([]byte, fieldparams.RootLength))
require.DeepSSZEqual(t, h.FeeRecipient, make([]byte, fieldparams.FeeRecipientLength))
require.DeepSSZEqual(t, h.StateRoot, make([]byte, fieldparams.RootLength))
require.DeepSSZEqual(t, h.ReceiptRoot, make([]byte, fieldparams.RootLength))
require.DeepSSZEqual(t, h.ReceiptsRoot, make([]byte, fieldparams.RootLength))
require.DeepSSZEqual(t, h.LogsBloom, make([]byte, fieldparams.LogsBloomLength))
require.DeepSSZEqual(t, h.PrevRandao, make([]byte, fieldparams.RootLength))
require.DeepSSZEqual(t, h.ExtraData, make([]byte, 0))
@@ -733,7 +877,7 @@ func emptyPayloadHeader() *ethpb.ExecutionPayloadHeader {
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
@@ -1,7 +1,7 @@
package blocks_test

import (
"io/ioutil"
"os"
"testing"

"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
@@ -16,7 +16,7 @@ import (
// when it was not in actuality.
// See: https://github.com/sigp/beacon-fuzz/issues/91
func TestVerifyProposerSlashing_BeaconFuzzIssue91(t *testing.T) {
file, err := ioutil.ReadFile("testdata/beaconfuzz_91_beacon.ssz")
file, err := os.ReadFile("testdata/beaconfuzz_91_beacon.ssz")
require.NoError(t, err)
rawState := &ethpb.BeaconState{}
err = rawState.UnmarshalSSZ(file)
@@ -25,7 +25,7 @@ func TestVerifyProposerSlashing_BeaconFuzzIssue91(t *testing.T) {
st, err := v1.InitializeFromProtoUnsafe(rawState)
require.NoError(t, err)

file, err = ioutil.ReadFile("testdata/beaconfuzz_91_proposer_slashing.ssz")
file, err = os.ReadFile("testdata/beaconfuzz_91_proposer_slashing.ssz")
require.NoError(t, err)
slashing := &ethpb.ProposerSlashing{}
err = slashing.UnmarshalSSZ(file)

@@ -13,7 +13,6 @@ go_library(
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/core/validators:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/features:go_default_library",
"//config/params:go_default_library",
"//math:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",

@@ -15,7 +15,6 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/beacon-chain/core/validators"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/math"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
@@ -250,6 +249,7 @@ func ProcessEffectiveBalanceUpdates(state state.BeaconState) (state.BeaconState,
upwardThreshold := hysteresisInc * params.BeaconConfig().HysteresisUpwardMultiplier

bals := state.Balances()

// Update effective balances with hysteresis.
validatorFunc := func(idx int, val *ethpb.Validator) (bool, *ethpb.Validator, error) {
if val == nil {
@@ -261,40 +261,18 @@ func ProcessEffectiveBalanceUpdates(state state.BeaconState) (state.BeaconState,
balance := bals[idx]

if balance+downwardThreshold < val.EffectiveBalance || val.EffectiveBalance+upwardThreshold < balance {
newVal := ethpb.CopyValidator(val)
newVal.EffectiveBalance = maxEffBalance
if newVal.EffectiveBalance > balance-balance%effBalanceInc {
newVal.EffectiveBalance = balance - balance%effBalanceInc
effectiveBal := maxEffBalance
if effectiveBal > balance-balance%effBalanceInc {
effectiveBal = balance - balance%effBalanceInc
}
return true, newVal, nil
}
return false, val, nil
}

if features.Get().EnableOptimizedBalanceUpdate {
validatorFunc = func(idx int, val *ethpb.Validator) (bool, *ethpb.Validator, error) {
if val == nil {
return false, nil, fmt.Errorf("validator %d is nil in state", idx)
}
if idx >= len(bals) {
return false, nil, fmt.Errorf("validator index exceeds validator length in state %d >= %d", idx, len(state.Balances()))
}
balance := bals[idx]

if balance+downwardThreshold < val.EffectiveBalance || val.EffectiveBalance+upwardThreshold < balance {
effectiveBal := maxEffBalance
if effectiveBal > balance-balance%effBalanceInc {
effectiveBal = balance - balance%effBalanceInc
}
if effectiveBal != val.EffectiveBalance {
newVal := ethpb.CopyValidator(val)
newVal.EffectiveBalance = effectiveBal
return true, newVal, nil
}
return false, val, nil
if effectiveBal != val.EffectiveBalance {
newVal := ethpb.CopyValidator(val)
newVal.EffectiveBalance = effectiveBal
return true, newVal, nil
}
return false, val, nil
}
return false, val, nil
}

if err := state.ApplyToEveryValidator(validatorFunc); err != nil {

@@ -69,7 +69,7 @@ func UpgradeToBellatrix(ctx context.Context, state state.BeaconState) (state.Bea
ParentHash: make([]byte, 32),
FeeRecipient: make([]byte, 20),
StateRoot: make([]byte, 32),
ReceiptRoot: make([]byte, 32),
ReceiptsRoot: make([]byte, 32),
LogsBloom: make([]byte, 256),
PrevRandao: make([]byte, 32),
BlockNumber: 0,

@@ -65,7 +65,7 @@ func TestUpgradeToBellatrix(t *testing.T) {
ParentHash: make([]byte, 32),
FeeRecipient: make([]byte, 20),
StateRoot: make([]byte, 32),
ReceiptRoot: make([]byte, 32),
ReceiptsRoot: make([]byte, 32),
LogsBloom: make([]byte, 256),
PrevRandao: make([]byte, 32),
BlockNumber: 0,
@@ -63,7 +63,6 @@ go_test(
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/v1:go_default_library",
"//beacon-chain/state/v2:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//container/slice:go_default_library",

@@ -374,6 +374,7 @@ func ClearCache() {
committeeCache = cache.NewCommitteesCache()
proposerIndicesCache = cache.NewProposerIndicesCache()
syncCommitteeCache = cache.NewSyncCommittee()
balanceCache = cache.NewEffectiveBalanceCache()
}

// computeCommittee returns the requested shuffled committee out of the total committees using

@@ -7,7 +7,6 @@ import (
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
@@ -76,11 +75,6 @@ func TestTotalActiveBalance(t *testing.T) {
}

func TestTotalActiveBalance_WithCache(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
EnableActiveBalanceCache: true,
})
defer resetCfg()

tests := []struct {
vCount int
wantCount int

@@ -11,6 +11,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/crypto/hash"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/time/slots"
log "github.com/sirupsen/logrus"
@@ -31,7 +32,7 @@ func IsCurrentPeriodSyncCommittee(
if err != nil {
return false, err
}
indices, err := syncCommitteeCache.CurrentPeriodIndexPosition(bytesutil.ToBytes32(root), valIdx)
indices, err := syncCommitteeCache.CurrentPeriodIndexPosition(root, valIdx)
if err == cache.ErrNonExistingSyncCommitteeKey {
val, err := st.ValidatorAtIndex(valIdx)
if err != nil {
@@ -44,7 +45,7 @@ func IsCurrentPeriodSyncCommittee(

// Fill in the cache on miss.
go func() {
if err := syncCommitteeCache.UpdatePositionsInCommittee(bytesutil.ToBytes32(root), st); err != nil {
if err := syncCommitteeCache.UpdatePositionsInCommittee(root, st); err != nil {
log.Errorf("Could not fill sync committee cache on miss: %v", err)
}
}()
@@ -68,7 +69,7 @@ func IsNextPeriodSyncCommittee(
if err != nil {
return false, err
}
indices, err := syncCommitteeCache.NextPeriodIndexPosition(bytesutil.ToBytes32(root), valIdx)
indices, err := syncCommitteeCache.NextPeriodIndexPosition(root, valIdx)
if err == cache.ErrNonExistingSyncCommitteeKey {
val, err := st.ValidatorAtIndex(valIdx)
if err != nil {
@@ -95,7 +96,7 @@ func CurrentPeriodSyncSubcommitteeIndices(
if err != nil {
return nil, err
}
indices, err := syncCommitteeCache.CurrentPeriodIndexPosition(bytesutil.ToBytes32(root), valIdx)
indices, err := syncCommitteeCache.CurrentPeriodIndexPosition(root, valIdx)
if err == cache.ErrNonExistingSyncCommitteeKey {
val, err := st.ValidatorAtIndex(valIdx)
if err != nil {
@@ -108,7 +109,7 @@ func CurrentPeriodSyncSubcommitteeIndices(

// Fill in the cache on miss.
go func() {
if err := syncCommitteeCache.UpdatePositionsInCommittee(bytesutil.ToBytes32(root), st); err != nil {
if err := syncCommitteeCache.UpdatePositionsInCommittee(root, st); err != nil {
log.Errorf("Could not fill sync committee cache on miss: %v", err)
}
}()
@@ -129,7 +130,7 @@ func NextPeriodSyncSubcommitteeIndices(
if err != nil {
return nil, err
}
indices, err := syncCommitteeCache.NextPeriodIndexPosition(bytesutil.ToBytes32(root), valIdx)
indices, err := syncCommitteeCache.NextPeriodIndexPosition(root, valIdx)
if err == cache.ErrNonExistingSyncCommitteeKey {
val, err := st.ValidatorAtIndex(valIdx)
if err != nil {
@@ -169,7 +170,7 @@ func UpdateSyncCommitteeCache(st state.BeaconStateAltair) error {
return err
}

return syncCommitteeCache.UpdatePositionsInCommittee(prevBlockRoot, st)
return syncCommitteeCache.UpdatePositionsInCommittee(combineRootAndSlot(prevBlockRoot[:], uint64(header.Slot)), st)
}

// Loop through `pubKeys` for matching `pubKey` and get the indices where it matches.
@@ -186,19 +187,19 @@ func findSubCommitteeIndices(pubKey []byte, pubKeys [][]byte) []types.CommitteeI
// Retrieve the current sync period boundary root by calculating sync period start epoch
// and calling `BlockRoot`.
// It uses the boundary slot - 1 for block root. (Ex: SlotsPerEpoch * EpochsPerSyncCommitteePeriod - 1)
func syncPeriodBoundaryRoot(st state.ReadOnlyBeaconState) ([]byte, error) {
func syncPeriodBoundaryRoot(st state.ReadOnlyBeaconState) ([32]byte, error) {
// Can't call `BlockRoot` until the first slot.
if st.Slot() == params.BeaconConfig().GenesisSlot {
return params.BeaconConfig().ZeroHash[:], nil
return params.BeaconConfig().ZeroHash, nil
}

startEpoch, err := slots.SyncCommitteePeriodStartEpoch(time.CurrentEpoch(st))
if err != nil {
return nil, err
return [32]byte{}, err
}
startEpochSlot, err := slots.EpochStart(startEpoch)
if err != nil {
return nil, err
return [32]byte{}, err
}

// Prevent underflow
@@ -206,5 +207,15 @@ func syncPeriodBoundaryRoot(st state.ReadOnlyBeaconState) ([]byte, error) {
startEpochSlot--
}

return BlockRootAtSlot(st, startEpochSlot)
root, err := BlockRootAtSlot(st, startEpochSlot)
if err != nil {
return [32]byte{}, err
}
return combineRootAndSlot(root, uint64(startEpochSlot)), nil
}

func combineRootAndSlot(root []byte, slot uint64) [32]byte {
slotBytes := bytesutil.Uint64ToBytesLittleEndian(slot)
keyHash := hash.Hash(append(root, slotBytes...))
return keyHash
}
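With this hunk the sync-committee cache is keyed by a hash of the boundary block root and its slot rather than by the raw root, so two committee periods that happen to share the same boundary block root no longer collide (the TestIsCurrentEpochSyncCommittee_SameBlockRoot case below exercises that). A minimal sketch of the key derivation under that reading, using the helper added above:

// Illustrative only: the cache key combines the boundary root with the boundary slot.
slotBytes := bytesutil.Uint64ToBytesLittleEndian(uint64(startEpochSlot))
key := hash.Hash(append(root, slotBytes...)) // same value combineRootAndSlot returns
indices, err := syncCommitteeCache.CurrentPeriodIndexPosition(key, valIdx)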

@@ -1,6 +1,7 @@
package helpers

import (
"math/rand"
"strconv"
"testing"
"time"
@@ -12,6 +13,7 @@ import (
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
)

@@ -234,7 +236,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
require.NoError(t, err)

// Test that cache was empty.
_, err = syncCommitteeCache.CurrentPeriodIndexPosition(bytesutil.ToBytes32(root), 0)
_, err = syncCommitteeCache.CurrentPeriodIndexPosition(root, 0)
require.Equal(t, cache.ErrNonExistingSyncCommitteeKey, err)

// Test that helper can retrieve the index given empty cache.
@@ -244,7 +246,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {

// Test that cache was able to fill on miss.
time.Sleep(100 * time.Millisecond)
index, err = syncCommitteeCache.CurrentPeriodIndexPosition(bytesutil.ToBytes32(root), 0)
index, err = syncCommitteeCache.CurrentPeriodIndexPosition(root, 0)
require.NoError(t, err)
require.DeepEqual(t, []types.CommitteeIndex{0}, index)
}
@@ -384,3 +386,46 @@ func TestUpdateSyncCommitteeCache_BadRoot(t *testing.T) {
err = UpdateSyncCommitteeCache(state)
require.ErrorContains(t, "zero hash state root can't be used to update cache", err)
}

func TestIsCurrentEpochSyncCommittee_SameBlockRoot(t *testing.T) {
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
}
for i := 0; i < len(validators); i++ {
k := make([]byte, 48)
copy(k, strconv.Itoa(i))
validators[i] = &ethpb.Validator{
PublicKey: k,
}
syncCommittee.Pubkeys = append(syncCommittee.Pubkeys, bytesutil.PadTo(k, 48))
}

blockRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
for i := range blockRoots {
blockRoots[i] = make([]byte, 32)
}
state, err := v2.InitializeFromProto(&ethpb.BeaconStateAltair{
Validators: validators,
BlockRoots: blockRoots,
})
require.NoError(t, err)
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))

ClearCache()
comIdxs, err := CurrentPeriodSyncSubcommitteeIndices(state, 200)
require.NoError(t, err)

wantedSlot := params.BeaconConfig().EpochsPerSyncCommitteePeriod.Mul(uint64(params.BeaconConfig().SlotsPerEpoch))
assert.NoError(t, state.SetSlot(types.Slot(wantedSlot)))
syncCommittee, err = state.CurrentSyncCommittee()
assert.NoError(t, err)
rand.Shuffle(len(syncCommittee.Pubkeys), func(i, j int) {
syncCommittee.Pubkeys[i], syncCommittee.Pubkeys[j] = syncCommittee.Pubkeys[j], syncCommittee.Pubkeys[i]
})
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
newIdxs, err := CurrentPeriodSyncSubcommitteeIndices(state, 200)
require.NoError(t, err)
require.DeepNotEqual(t, comIdxs, newIdxs)
}

@@ -290,13 +290,24 @@ func ProcessBlockForStateRoot(
return nil, errors.Wrap(err, "could not check if execution is enabled")
}
if enabled {
payload, err := blk.Body().ExecutionPayload()
if err != nil {
return nil, err
}
state, err = b.ProcessPayload(state, payload)
if err != nil {
return nil, errors.Wrap(err, "could not process execution payload")
if blk.IsBlinded() {
header, err := blk.Body().ExecutionPayloadHeader()
if err != nil {
return nil, err
}
state, err = b.ProcessPayloadHeader(state, header)
if err != nil {
return nil, errors.Wrap(err, "could not process execution payload header")
}
} else {
payload, err := blk.Body().ExecutionPayload()
if err != nil {
return nil, err
}
state, err = b.ProcessPayload(state, payload)
if err != nil {
return nil, errors.Wrap(err, "could not process execution payload")
}
}
}

@@ -2,7 +2,6 @@ package kv

import (
"context"
"io/ioutil"
"os"
"path/filepath"
"testing"
@@ -34,7 +33,7 @@ func TestStore_Backup(t *testing.T) {
require.NoError(t, db.Backup(ctx, "", false))

backupsPath := filepath.Join(db.databasePath, backupsDirectoryName)
files, err := ioutil.ReadDir(backupsPath)
files, err := os.ReadDir(backupsPath)
require.NoError(t, err)
require.NotEqual(t, 0, len(files), "No backups created")
require.NoError(t, db.Close(), "Failed to close database")
@@ -78,7 +77,7 @@ func TestStore_BackupMultipleBuckets(t *testing.T) {
require.NoError(t, db.Backup(ctx, "", false))

backupsPath := filepath.Join(db.databasePath, backupsDirectoryName)
files, err := ioutil.ReadDir(backupsPath)
files, err := os.ReadDir(backupsPath)
require.NoError(t, err)
require.NotEqual(t, 0, len(files), "No backups created")
require.NoError(t, db.Close(), "Failed to close database")

@@ -18,7 +18,7 @@ const batchSize = 10

var migrationStateValidatorsKey = []byte("migration_state_validator")

func migrateStateValidators(ctx context.Context, db *bolt.DB) error {
func shouldMigrateValidators(db *bolt.DB) (bool, error) {
migrateDB := false
if updateErr := db.View(func(tx *bolt.Tx) error {
mb := tx.Bucket(migrationsBucket)
@@ -46,11 +46,17 @@ func migrateStateValidators(ctx context.Context, db *bolt.DB) error {
return nil
}); updateErr != nil {
log.WithError(updateErr).Errorf("could not migrate bucket: %s", stateBucket)
return updateErr
return false, updateErr
}

// do not migrate the DB
if !migrateDB {
return migrateDB, nil
}

func migrateStateValidators(ctx context.Context, db *bolt.DB) error {
|
||||
if ok, err := shouldMigrateValidators(db); err != nil {
|
||||
return err
|
||||
} else if !ok {
|
||||
// A migration is not required.
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -151,6 +151,10 @@ func (s *Store) SaveStates(ctx context.Context, states []state.ReadOnlyBeaconSta
|
||||
})
|
||||
}
|
||||
|
||||
type withValidators interface {
|
||||
GetValidators() []*ethpb.Validator
|
||||
}
|
||||
|
||||
// SaveStatesEfficient stores multiple states to the db (new schema) using the provided corresponding roots.
|
||||
func (s *Store) SaveStatesEfficient(ctx context.Context, states []state.ReadOnlyBeaconState, blockRoots [][32]byte) error {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveStatesEfficient")
|
||||
@@ -161,29 +165,12 @@ func (s *Store) SaveStatesEfficient(ctx context.Context, states []state.ReadOnly
|
||||
validatorsEntries := make(map[string]*ethpb.Validator) // It's a map to make sure that you store only new validator entries.
|
||||
validatorKeys := make([][]byte, len(states)) // For every state, this stores a compressed list of validator keys.
|
||||
for i, st := range states {
|
||||
var validators []*ethpb.Validator
|
||||
switch st.InnerStateUnsafe().(type) {
|
||||
case *ethpb.BeaconState:
|
||||
pbState, err := v1.ProtobufBeaconState(st.InnerStateUnsafe())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
validators = pbState.Validators
|
||||
case *ethpb.BeaconStateAltair:
|
||||
pbState, err := v2.ProtobufBeaconState(st.InnerStateUnsafe())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
validators = pbState.Validators
|
||||
case *ethpb.BeaconStateBellatrix:
|
||||
pbState, err := v3.ProtobufBeaconState(st.InnerStateUnsafe())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
validators = pbState.Validators
|
||||
default:
|
||||
return errors.New("invalid state type")
|
||||
pb, ok := st.InnerStateUnsafe().(withValidators)
|
||||
if !ok {
|
||||
return errors.New("could not cast state to interface with GetValidators()")
|
||||
}
|
||||
validators := pb.GetValidators()
|
||||
|
||||
// yank out the validators and store them in separate table to save space.
|
||||
var hashes []byte
|
||||
for _, val := range validators {
|
||||
|
||||
@@ -3,7 +3,6 @@ package db
|
||||
import (
|
||||
"context"
|
||||
"flag"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"testing"
|
||||
@@ -55,7 +54,7 @@ func TestRestore(t *testing.T) {
|
||||
|
||||
assert.NoError(t, Restore(cliCtx))
|
||||
|
||||
files, err := ioutil.ReadDir(path.Join(restoreDir, kv.BeaconNodeDbDirName))
|
||||
files, err := os.ReadDir(path.Join(restoreDir, kv.BeaconNodeDbDirName))
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 1, len(files))
|
||||
assert.Equal(t, kv.DatabaseFileName, files[0].Name())
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
package slasherkv
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"testing"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
@@ -9,6 +9,6 @@ import (
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
logrus.SetLevel(logrus.DebugLevel)
|
||||
logrus.SetOutput(ioutil.Discard)
|
||||
logrus.SetOutput(io.Discard)
|
||||
m.Run()
|
||||
}
|
||||
|
||||
@@ -5,8 +5,8 @@ package interopcoldstart
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io/ioutil"
|
||||
"math/big"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
@@ -47,17 +47,21 @@ type Config struct {
|
||||
// into the beacon chain database and running services at start up. This service should not be used in production
|
||||
// as it does not have any value other than ease of use for testing purposes.
|
||||
func NewService(ctx context.Context, cfg *Config) *Service {
|
||||
log.Warn("Saving generated genesis state in database for interop testing")
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
|
||||
s := &Service{
|
||||
return &Service{
|
||||
cfg: cfg,
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
}
|
||||
}
|
||||
|
||||
// Start initializes the genesis state from configured flags.
|
||||
func (s *Service) Start() {
|
||||
log.Warn("Saving generated genesis state in database for interop testing")
|
||||
|
||||
if s.cfg.GenesisPath != "" {
|
||||
data, err := ioutil.ReadFile(s.cfg.GenesisPath)
|
||||
data, err := os.ReadFile(s.cfg.GenesisPath)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not read pre-loaded state: %v", err)
|
||||
}
|
||||
@@ -69,14 +73,14 @@ func NewService(ctx context.Context, cfg *Config) *Service {
|
||||
if err != nil {
|
||||
log.Fatalf("Could not get state trie: %v", err)
|
||||
}
|
||||
if err := s.saveGenesisState(ctx, genesisTrie); err != nil {
|
||||
if err := s.saveGenesisState(s.ctx, genesisTrie); err != nil {
|
||||
log.Fatalf("Could not save interop genesis state %v", err)
|
||||
}
|
||||
return s
|
||||
return
|
||||
}
|
||||
|
||||
// Save genesis state in db
|
||||
genesisState, _, err := interop.GenerateGenesisState(ctx, s.cfg.GenesisTime, s.cfg.NumValidators)
|
||||
genesisState, _, err := interop.GenerateGenesisState(s.ctx, s.cfg.GenesisTime, s.cfg.NumValidators)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not generate interop genesis state: %v", err)
|
||||
}
|
||||
@@ -92,17 +96,11 @@ func NewService(ctx context.Context, cfg *Config) *Service {
|
||||
if err != nil {
|
||||
log.Fatalf("Could not hash tree root genesis state: %v", err)
|
||||
}
|
||||
go slots.CountdownToGenesis(ctx, time.Unix(int64(s.cfg.GenesisTime), 0), s.cfg.NumValidators, gRoot)
|
||||
go slots.CountdownToGenesis(s.ctx, time.Unix(int64(s.cfg.GenesisTime), 0), s.cfg.NumValidators, gRoot)
|
||||
|
||||
if err := s.saveGenesisState(ctx, genesisTrie); err != nil {
|
||||
if err := s.saveGenesisState(s.ctx, genesisTrie); err != nil {
|
||||
log.Fatalf("Could not save interop genesis state %v", err)
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
// Start initializes the genesis state from configured flags.
|
||||
func (_ *Service) Start() {
|
||||
}
|
||||
|
||||
// Stop does nothing.
|
||||
@@ -155,7 +153,7 @@ func (_ *Service) FinalizedDeposits(_ context.Context) *depositcache.FinalizedDe
|
||||
}
|
||||
|
||||
// NonFinalizedDeposits mocks out the deposit cache functionality for interop.
|
||||
func (_ *Service) NonFinalizedDeposits(_ context.Context, _ *big.Int) []*ethpb.Deposit {
|
||||
func (_ *Service) NonFinalizedDeposits(_ context.Context, _ int64, _ *big.Int) []*ethpb.Deposit {
|
||||
return []*ethpb.Deposit{}
|
||||
}
|
||||
|
||||
|
||||
@@ -51,10 +51,4 @@ var (
|
||||
Help: "The number of times pruning happened.",
|
||||
},
|
||||
)
|
||||
validatedCount = promauto.NewGauge(
|
||||
prometheus.GaugeOpts{
|
||||
Name: "doublylinkedtree_validated_count",
|
||||
Help: "The number of blocks that have been fully validated.",
|
||||
},
|
||||
)
|
||||
)
|
||||
|
||||
@@ -120,7 +120,6 @@ func (n *Node) setNodeAndParentValidated(ctx context.Context) error {
|
||||
}
|
||||
|
||||
n.optimistic = false
|
||||
validatedCount.Inc()
|
||||
return n.parent.setNodeAndParentValidated(ctx)
|
||||
}
|
||||
|
||||
|
||||
@@ -51,10 +51,4 @@ var (
|
||||
Help: "The number of times pruning happened.",
|
||||
},
|
||||
)
|
||||
validatedNodesCount = promauto.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Name: "proto_array_validated_nodes_count",
|
||||
Help: "The number of nodes that have been fully validated.",
|
||||
},
|
||||
)
|
||||
)
|
||||
|
||||
@@ -43,7 +43,6 @@ func (f *ForkChoice) SetOptimisticToValid(ctx context.Context, root [32]byte) er
|
||||
if index == NonExistentNode {
|
||||
break
|
||||
}
|
||||
validatedNodesCount.Inc()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -614,16 +614,22 @@ func (s *Store) prune(ctx context.Context, finalizedRoot [32]byte) error {
|
||||
node := copyNode(s.nodes[idx])
|
||||
parentIdx, ok := canonicalNodesMap[node.parent]
|
||||
if ok {
|
||||
s.nodesIndices[node.root] = uint64(len(canonicalNodes))
|
||||
canonicalNodesMap[idx] = uint64(len(canonicalNodes))
|
||||
currentIndex := uint64(len(canonicalNodes))
|
||||
s.nodesIndices[node.root] = currentIndex
|
||||
s.payloadIndices[node.payloadHash] = currentIndex
|
||||
canonicalNodesMap[idx] = currentIndex
|
||||
node.parent = parentIdx
|
||||
canonicalNodes = append(canonicalNodes, node)
|
||||
} else {
|
||||
// Remove node and synced tip that is not part of finalized branch.
|
||||
// Remove node that is not part of finalized branch.
|
||||
delete(s.nodesIndices, node.root)
|
||||
delete(s.canonicalNodes, node.root)
|
||||
delete(s.payloadIndices, node.payloadHash)
|
||||
}
|
||||
}
|
||||
s.nodesIndices[finalizedRoot] = uint64(0)
|
||||
s.canonicalNodes[finalizedRoot] = true
|
||||
s.payloadIndices[finalizedNode.payloadHash] = uint64(0)
|
||||
|
||||
// Recompute the best child and descendant for each canonical nodes.
|
||||
for _, node := range canonicalNodes {
|
||||
|
||||
@@ -375,7 +375,7 @@ func TestStore_Prune_MoreThanThreshold(t *testing.T) {
|
||||
parent: uint64(numOfNodes - 2),
|
||||
})
|
||||
indices[indexToHash(uint64(numOfNodes-1))] = uint64(numOfNodes - 1)
|
||||
s := &Store{nodes: nodes, nodesIndices: indices}
|
||||
s := &Store{nodes: nodes, nodesIndices: indices, canonicalNodes: map[[32]byte]bool{}, payloadIndices: map[[32]byte]uint64{}}
|
||||
|
||||
// Finalized root is at index 99 so everything before 99 should be pruned.
|
||||
require.NoError(t, s.prune(context.Background(), indexToHash(99)))
|
||||
@@ -413,7 +413,7 @@ func TestStore_Prune_MoreThanOnce(t *testing.T) {
|
||||
parent: uint64(numOfNodes - 2),
|
||||
})
|
||||
|
||||
s := &Store{nodes: nodes, nodesIndices: indices}
|
||||
s := &Store{nodes: nodes, nodesIndices: indices, canonicalNodes: map[[32]byte]bool{}, payloadIndices: map[[32]byte]uint64{}}
|
||||
|
||||
// Finalized root is at index 11 so everything before 11 should be pruned.
|
||||
require.NoError(t, s.prune(context.Background(), indexToHash(10)))
|
||||
@@ -441,6 +441,7 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
|
||||
bestDescendant: 1,
|
||||
root: indexToHash(uint64(0)),
|
||||
parent: NonExistentNode,
|
||||
payloadHash: [32]byte{'A'},
|
||||
},
|
||||
{
|
||||
slot: 101,
|
||||
@@ -448,6 +449,7 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
|
||||
bestChild: NonExistentNode,
|
||||
bestDescendant: NonExistentNode,
|
||||
parent: 0,
|
||||
payloadHash: [32]byte{'B'},
|
||||
},
|
||||
{
|
||||
slot: 101,
|
||||
@@ -455,6 +457,7 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
|
||||
parent: 0,
|
||||
bestChild: NonExistentNode,
|
||||
bestDescendant: NonExistentNode,
|
||||
payloadHash: [32]byte{'C'},
|
||||
},
|
||||
}
|
||||
s := &Store{
|
||||
@@ -465,9 +468,22 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
|
||||
indexToHash(uint64(1)): 1,
|
||||
indexToHash(uint64(2)): 2,
|
||||
},
|
||||
canonicalNodes: map[[32]byte]bool{
|
||||
indexToHash(uint64(0)): true,
|
||||
indexToHash(uint64(1)): true,
|
||||
indexToHash(uint64(2)): true,
|
||||
},
|
||||
payloadIndices: map[[32]byte]uint64{
|
||||
[32]byte{'A'}: 0,
|
||||
[32]byte{'B'}: 1,
|
||||
[32]byte{'C'}: 2,
|
||||
},
|
||||
}
|
||||
require.NoError(t, s.prune(context.Background(), indexToHash(uint64(1))))
|
||||
require.Equal(t, len(s.nodes), 1)
|
||||
require.Equal(t, 1, len(s.nodes))
|
||||
require.Equal(t, 1, len(s.nodesIndices))
|
||||
require.Equal(t, 1, len(s.canonicalNodes))
|
||||
require.Equal(t, 1, len(s.payloadIndices))
|
||||
}
|
||||
|
||||
// This test starts with the following branching diagram
|
||||
@@ -482,25 +498,74 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
|
||||
// J -- K -- L
|
||||
//
|
||||
//
|
||||
func TestStore_PruneSyncedTips(t *testing.T) {
|
||||
func TestStore_PruneBranched(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
f := setup(1, 1)
|
||||
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
f.store.pruneThreshold = 0
|
||||
require.NoError(t, f.Prune(ctx, [32]byte{'f'}))
|
||||
require.Equal(t, 1, f.NodeCount())
|
||||
tests := []struct {
|
||||
finalizedRoot [32]byte
|
||||
wantedCanonical [32]byte
|
||||
wantedNonCanonical [32]byte
|
||||
canonicalCount int
|
||||
payloadHash [32]byte
|
||||
payloadIndex uint64
|
||||
nonExistentPayload [32]byte
|
||||
}{
|
||||
{
|
||||
[32]byte{'f'},
|
||||
[32]byte{'f'},
|
||||
[32]byte{'a'},
|
||||
1,
|
||||
[32]byte{'F'},
|
||||
0,
|
||||
[32]byte{'H'},
|
||||
},
|
||||
{
|
||||
[32]byte{'d'},
|
||||
[32]byte{'e'},
|
||||
[32]byte{'i'},
|
||||
3,
|
||||
[32]byte{'E'},
|
||||
1,
|
||||
[32]byte{'C'},
|
||||
},
|
||||
{
|
||||
[32]byte{'b'},
|
||||
[32]byte{'f'},
|
||||
[32]byte{'h'},
|
||||
5,
|
||||
[32]byte{'D'},
|
||||
3,
|
||||
[32]byte{'A'},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
f := setup(1, 1)
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, [32]byte{'J'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, [32]byte{'E'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, [32]byte{'G'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, [32]byte{'F'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, [32]byte{'H'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, [32]byte{'K'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, [32]byte{'I'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, [32]byte{'L'}, 1, 1))
|
||||
f.store.pruneThreshold = 0
|
||||
require.NoError(t, f.store.updateCanonicalNodes(ctx, [32]byte{'f'}))
|
||||
require.Equal(t, true, f.IsCanonical([32]byte{'a'}))
|
||||
require.Equal(t, true, f.IsCanonical([32]byte{'f'}))
|
||||
|
||||
require.NoError(t, f.Prune(ctx, tc.finalizedRoot))
|
||||
require.Equal(t, tc.canonicalCount, len(f.store.canonicalNodes))
|
||||
require.Equal(t, true, f.IsCanonical(tc.wantedCanonical))
|
||||
require.Equal(t, false, f.IsCanonical(tc.wantedNonCanonical))
|
||||
require.Equal(t, tc.payloadIndex, f.store.payloadIndices[tc.payloadHash])
|
||||
_, ok := f.store.payloadIndices[tc.nonExistentPayload]
|
||||
require.Equal(t, false, ok)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_LeadsToViableHead(t *testing.T) {
|
||||
|
||||
@@ -95,6 +95,7 @@ func NewService(ctx context.Context, config *ValidatorMonitorConfig, tracked []t
|
||||
latestPerformance: make(map[types.ValidatorIndex]ValidatorLatestPerformance),
|
||||
aggregatedPerformance: make(map[types.ValidatorIndex]ValidatorAggregatedPerformance),
|
||||
trackedSyncCommitteeIndices: make(map[types.ValidatorIndex][]types.CommitteeIndex),
|
||||
isLogging: false,
|
||||
}
|
||||
for _, idx := range tracked {
|
||||
r.TrackedValidators[idx] = true
|
||||
@@ -117,7 +118,6 @@ func (s *Service) Start() {
|
||||
"ValidatorIndices": tracked,
|
||||
}).Info("Starting service")
|
||||
|
||||
s.isLogging = false
|
||||
stateChannel := make(chan *feed.Event, 1)
|
||||
stateSub := s.config.StateNotifier.StateFeed().Subscribe(stateChannel)
|
||||
|
||||
|
||||
@@ -906,6 +906,7 @@ func (b *BeaconNode) registerDeterminsticGenesisService() error {
|
||||
DepositCache: b.depositCache,
|
||||
GenesisPath: genesisStatePath,
|
||||
})
|
||||
svc.Start()
|
||||
|
||||
// Register genesis state as start-up state when interop mode.
|
||||
// The start-up state gets reused across services.
|
||||
|
||||
@@ -3,7 +3,6 @@ package node
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
@@ -41,7 +40,6 @@ func TestNodeClose_OK(t *testing.T) {
|
||||
node.Close()
|
||||
|
||||
require.LogsContain(t, hook, "Stopping beacon node")
|
||||
require.NoError(t, os.RemoveAll(tmp))
|
||||
}
|
||||
|
||||
// TestClearDB tests clearing the database
|
||||
@@ -67,5 +65,4 @@ func TestClearDB(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
require.LogsContain(t, hook, "Removing database")
|
||||
require.NoError(t, os.RemoveAll(tmp))
|
||||
}
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
package registration
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/cmd"
|
||||
@@ -42,7 +42,7 @@ func P2PPreregistration(cliCtx *cli.Context) (bootstrapNodeAddrs []string, dataD
|
||||
}
|
||||
|
||||
func readbootNodes(fileName string) ([]string, error) {
|
||||
fileContent, err := ioutil.ReadFile(fileName) // #nosec G304
|
||||
fileContent, err := os.ReadFile(fileName) // #nosec G304
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -2,7 +2,7 @@ package registration
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/cmd"
|
||||
@@ -27,9 +27,9 @@ func TestP2PPreregistration(t *testing.T) {
|
||||
sampleNode := "- enr:-TESTNODE"
|
||||
testDataDir := "testDataDir"
|
||||
|
||||
file, err := ioutil.TempFile(t.TempDir(), "bootstrapFile*.yaml")
|
||||
file, err := os.CreateTemp(t.TempDir(), "bootstrapFile*.yaml")
|
||||
require.NoError(t, err)
|
||||
err = ioutil.WriteFile(file.Name(), []byte(sampleNode), 0644)
|
||||
err = os.WriteFile(file.Name(), []byte(sampleNode), 0644)
|
||||
require.NoError(t, err, "Error in WriteFile call")
|
||||
params.SetupTestConfigCleanup(t)
|
||||
config := params.BeaconNetworkConfig()
|
||||
@@ -49,7 +49,7 @@ func TestP2PPreregistration(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestBootStrapNodeFile(t *testing.T) {
|
||||
file, err := ioutil.TempFile(t.TempDir(), "bootstrapFile")
|
||||
file, err := os.CreateTemp(t.TempDir(), "bootstrapFile")
|
||||
require.NoError(t, err)
|
||||
|
||||
sampleNode0 := "- enr:-Ku4QMKVC_MowDsmEa20d5uGjrChI0h8_KsKXDmgVQbIbngZV0i" +
|
||||
@@ -58,7 +58,7 @@ func TestBootStrapNodeFile(t *testing.T) {
|
||||
"E1rtwzvGy40mq9eD66XfHPBWgIIN1ZHCCD6A"
|
||||
sampleNode1 := "- enr:-TESTNODE2"
|
||||
sampleNode2 := "- enr:-TESTNODE3"
|
||||
err = ioutil.WriteFile(file.Name(), []byte(sampleNode0+"\n"+sampleNode1+"\n"+sampleNode2), 0644)
|
||||
err = os.WriteFile(file.Name(), []byte(sampleNode0+"\n"+sampleNode1+"\n"+sampleNode2), 0644)
|
||||
require.NoError(t, err, "Error in WriteFile call")
|
||||
nodeList, err := readbootNodes(file.Name())
|
||||
require.NoError(t, err, "Error in readbootNodes call")
|
||||
|
||||
@@ -127,6 +127,7 @@ go_test(
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
flaky = True,
|
||||
tags = ["requires-network"],
|
||||
deps = [
|
||||
"//async/event:go_default_library",
|
||||
"//beacon-chain/blockchain/testing:go_default_library",
|
||||
|
||||
@@ -3,8 +3,8 @@ package p2p
|
||||
import (
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
gethCrypto "github.com/ethereum/go-ethereum/crypto"
|
||||
@@ -17,7 +17,7 @@ import (
|
||||
)
|
||||
|
||||
func TestPrivateKeyLoading(t *testing.T) {
|
||||
file, err := ioutil.TempFile(t.TempDir(), "key")
|
||||
file, err := os.CreateTemp(t.TempDir(), "key")
|
||||
require.NoError(t, err)
|
||||
key, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
|
||||
require.NoError(t, err, "Could not generate key")
|
||||
@@ -27,7 +27,7 @@ func TestPrivateKeyLoading(t *testing.T) {
|
||||
}
|
||||
out := hex.EncodeToString(raw)
|
||||
|
||||
err = ioutil.WriteFile(file.Name(), []byte(out), params.BeaconIoConfig().ReadWritePermissions)
|
||||
err = os.WriteFile(file.Name(), []byte(out), params.BeaconIoConfig().ReadWritePermissions)
|
||||
require.NoError(t, err, "Could not write key to file")
|
||||
log.WithField("file", file.Name()).WithField("key", out).Info("Wrote key to file")
|
||||
cfg := &Config{
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
package peers_test
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
|
||||
@@ -11,7 +11,7 @@ import (
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
logrus.SetLevel(logrus.DebugLevel)
|
||||
logrus.SetOutput(ioutil.Discard)
|
||||
logrus.SetOutput(io.Discard)
|
||||
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
EnablePeerScorer: true,
|
||||
|
||||
@@ -145,5 +145,7 @@ func (s *PeerStatusScorer) peerStatus(pid peer.ID) (*pb.Status, error) {
|
||||
|
||||
// SetHeadSlot updates known head slot.
|
||||
func (s *PeerStatusScorer) SetHeadSlot(slot types.Slot) {
|
||||
s.store.Lock()
|
||||
defer s.store.Unlock()
|
||||
s.ourHeadSlot = slot
|
||||
}
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
package scorers_test
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"math"
|
||||
"testing"
|
||||
|
||||
@@ -13,7 +13,7 @@ import (
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
logrus.SetLevel(logrus.DebugLevel)
|
||||
logrus.SetOutput(ioutil.Discard)
|
||||
logrus.SetOutput(io.Discard)
|
||||
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
EnablePeerScorer: true,
|
||||
|
||||
@@ -7,7 +7,6 @@ import (
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"os"
|
||||
"path"
|
||||
@@ -89,7 +88,7 @@ func privKey(cfg *Config) (*ecdsa.PrivateKey, error) {
|
||||
|
||||
// Retrieves a p2p networking private key from a file path.
|
||||
func privKeyFromFile(path string) (*ecdsa.PrivateKey, error) {
|
||||
src, err := ioutil.ReadFile(path) // #nosec G304
|
||||
src, err := os.ReadFile(path) // #nosec G304
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Error reading private key from file")
|
||||
return nil, err
|
||||
@@ -135,7 +134,7 @@ func metaDataFromConfig(cfg *Config) (metadata.Metadata, error) {
|
||||
if defaultMetadataExist && metaDataPath == "" {
|
||||
metaDataPath = defaultKeyPath
|
||||
}
|
||||
src, err := ioutil.ReadFile(metaDataPath) // #nosec G304
|
||||
src, err := os.ReadFile(metaDataPath) // #nosec G304
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Error reading metadata from file")
|
||||
return nil, err
|
||||
|
||||
@@ -3,7 +3,6 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"auth.go",
|
||||
"block_cache.go",
|
||||
"block_reader.go",
|
||||
"check_transition_config.go",
|
||||
@@ -16,6 +15,7 @@ go_library(
|
||||
"options.go",
|
||||
"prometheus.go",
|
||||
"provider.go",
|
||||
"rpc_connection.go",
|
||||
"service.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/powchain",
|
||||
@@ -59,7 +59,6 @@ go_library(
|
||||
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//ethclient:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//rpc:go_default_library",
|
||||
"@com_github_golang_jwt_jwt_v4//:go_default_library",
|
||||
"@com_github_holiman_uint256//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
@@ -74,7 +73,6 @@ go_test(
|
||||
name = "go_default_test",
|
||||
size = "medium",
|
||||
srcs = [
|
||||
"auth_test.go",
|
||||
"block_cache_test.go",
|
||||
"block_reader_test.go",
|
||||
"check_transition_config_test.go",
|
||||
@@ -125,7 +123,6 @@ go_test(
|
||||
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//rpc:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//trie:go_default_library",
|
||||
"@com_github_golang_jwt_jwt_v4//:go_default_library",
|
||||
"@com_github_holiman_uint256//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
|
||||
@@ -114,11 +114,13 @@ func (s *Service) BlockByTimestamp(ctx context.Context, time uint64) (*types.Hea
|
||||
ctx, span := trace.StartSpan(ctx, "beacon-chain.web3service.BlockByTimestamp")
|
||||
defer span.End()
|
||||
|
||||
s.latestEth1DataLock.RLock()
|
||||
latestBlkHeight := s.latestEth1Data.BlockHeight
|
||||
latestBlkTime := s.latestEth1Data.BlockTime
|
||||
s.latestEth1DataLock.RUnlock()
|
||||
|
||||
if time > latestBlkTime {
|
||||
return nil, errors.New("provided time is later than the current eth1 head")
|
||||
return nil, errors.Errorf("provided time is later than the current eth1 head. %d > %d", time, latestBlkTime)
|
||||
}
|
||||
// Initialize a pointer to eth1 chain's history to start our search
|
||||
// from.
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/network"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/engine/v1"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
@@ -81,7 +82,7 @@ func (s *Service) checkTransitionConfiguration(
|
||||
return
|
||||
}
|
||||
case tm := <-ticker.C:
|
||||
ctx, cancel := context.WithDeadline(ctx, tm.Add(DefaultRPCHTTPTimeout))
|
||||
ctx, cancel := context.WithDeadline(ctx, tm.Add(network.DefaultRPCHTTPTimeout))
|
||||
err = s.ExchangeTransitionConfiguration(ctx, cfg)
|
||||
s.handleExchangeConfigurationError(err)
|
||||
if !hasTtdReached {
|
||||
@@ -119,11 +120,16 @@ func (s *Service) handleExchangeConfigurationError(err error) {
|
||||
// Logs the terminal total difficulty status.
|
||||
func (s *Service) logTtdStatus(ctx context.Context, ttd *uint256.Int) (bool, error) {
|
||||
latest, err := s.LatestExecutionBlock(ctx)
|
||||
if err != nil {
|
||||
switch {
|
||||
case errors.Is(err, hexutil.ErrEmptyString):
|
||||
return false, nil
|
||||
case err != nil:
|
||||
return false, err
|
||||
}
|
||||
if latest == nil {
|
||||
case latest == nil:
|
||||
return false, errors.New("latest block is nil")
|
||||
case latest.TotalDifficulty == "":
|
||||
return false, nil
|
||||
default:
|
||||
}
|
||||
latestTtd, err := hexutil.DecodeBig(latest.TotalDifficulty)
|
||||
if err != nil {
|
||||
|
||||
@@ -190,6 +190,38 @@ func TestService_logTtdStatus(t *testing.T) {
|
||||
require.Equal(t, false, reached)
|
||||
}
|
||||
|
||||
func TestService_logTtdStatus_NotSyncedClient(t *testing.T) {
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
|
||||
resp := (*pb.ExecutionBlock)(nil) // Nil response when a client is not synced
|
||||
respJSON := map[string]interface{}{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": resp,
|
||||
}
|
||||
require.NoError(t, json.NewEncoder(w).Encode(respJSON))
|
||||
}))
|
||||
defer srv.Close()
|
||||
|
||||
rpcClient, err := rpc.DialHTTP(srv.URL)
|
||||
require.NoError(t, err)
|
||||
defer rpcClient.Close()
|
||||
|
||||
service := &Service{
|
||||
cfg: &config{},
|
||||
}
|
||||
service.rpcClient = rpcClient
|
||||
|
||||
ttd := new(uint256.Int)
|
||||
reached, err := service.logTtdStatus(context.Background(), ttd.SetUint64(24343))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, reached)
|
||||
}
|
||||
|
||||
func emptyPayload() *pb.ExecutionPayload {
|
||||
return &pb.ExecutionPayload{
|
||||
ParentHash: make([]byte, fieldparams.RootLength),
|
||||
|
||||
@@ -10,9 +10,12 @@ import (
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/holiman/uint256"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/engine/v1"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
@@ -49,8 +52,8 @@ type EngineCaller interface {
|
||||
ExchangeTransitionConfiguration(
|
||||
ctx context.Context, cfg *pb.TransitionConfiguration,
|
||||
) error
|
||||
LatestExecutionBlock(ctx context.Context) (*pb.ExecutionBlock, error)
|
||||
ExecutionBlockByHash(ctx context.Context, hash common.Hash) (*pb.ExecutionBlock, error)
|
||||
GetTerminalBlockHash(ctx context.Context) ([]byte, bool, error)
|
||||
}
|
||||
|
||||
// NewPayload calls the engine_newPayloadV1 method via JSON-RPC.
|
||||
@@ -174,6 +177,78 @@ func (s *Service) ExchangeTransitionConfiguration(
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetTerminalBlockHash returns the valid terminal block hash based on total difficulty.
|
||||
//
|
||||
// Spec code:
|
||||
// def get_pow_block_at_terminal_total_difficulty(pow_chain: Dict[Hash32, PowBlock]) -> Optional[PowBlock]:
|
||||
// # `pow_chain` abstractly represents all blocks in the PoW chain
|
||||
// for block in pow_chain:
|
||||
// parent = pow_chain[block.parent_hash]
|
||||
// block_reached_ttd = block.total_difficulty >= TERMINAL_TOTAL_DIFFICULTY
|
||||
// parent_reached_ttd = parent.total_difficulty >= TERMINAL_TOTAL_DIFFICULTY
|
||||
// if block_reached_ttd and not parent_reached_ttd:
|
||||
// return block
|
||||
//
|
||||
// return None
|
||||
func (s *Service) GetTerminalBlockHash(ctx context.Context) ([]byte, bool, error) {
|
||||
ttd := new(big.Int)
|
||||
ttd.SetString(params.BeaconConfig().TerminalTotalDifficulty, 10)
|
||||
terminalTotalDifficulty, overflows := uint256.FromBig(ttd)
|
||||
if overflows {
|
||||
return nil, false, errors.New("could not convert terminal total difficulty to uint256")
|
||||
}
|
||||
blk, err := s.LatestExecutionBlock(ctx)
|
||||
if err != nil {
|
||||
return nil, false, errors.Wrap(err, "could not get latest execution block")
|
||||
}
|
||||
if blk == nil {
|
||||
return nil, false, errors.New("latest execution block is nil")
|
||||
}
|
||||
|
||||
for {
|
||||
if ctx.Err() != nil {
|
||||
return nil, false, ctx.Err()
|
||||
}
|
||||
currentTotalDifficulty, err := tDStringToUint256(blk.TotalDifficulty)
|
||||
if err != nil {
|
||||
return nil, false, errors.Wrap(err, "could not convert total difficulty to uint256")
|
||||
}
|
||||
blockReachedTTD := currentTotalDifficulty.Cmp(terminalTotalDifficulty) >= 0
|
||||
|
||||
parentHash := bytesutil.ToBytes32(blk.ParentHash)
|
||||
if len(blk.ParentHash) == 0 || parentHash == params.BeaconConfig().ZeroHash {
|
||||
return nil, false, nil
|
||||
}
|
||||
parentBlk, err := s.ExecutionBlockByHash(ctx, parentHash)
|
||||
if err != nil {
|
||||
return nil, false, errors.Wrap(err, "could not get parent execution block")
|
||||
}
|
||||
if parentBlk == nil {
|
||||
return nil, false, errors.New("parent execution block is nil")
|
||||
}
|
||||
if blockReachedTTD {
|
||||
parentTotalDifficulty, err := tDStringToUint256(parentBlk.TotalDifficulty)
|
||||
if err != nil {
|
||||
return nil, false, errors.Wrap(err, "could not convert total difficulty to uint256")
|
||||
}
|
||||
parentReachedTTD := parentTotalDifficulty.Cmp(terminalTotalDifficulty) >= 0
|
||||
if !parentReachedTTD {
|
||||
log.WithFields(logrus.Fields{
|
||||
"number": blk.Number,
|
||||
"hash": fmt.Sprintf("%#x", bytesutil.Trunc(blk.Hash)),
|
||||
"td": blk.TotalDifficulty,
|
||||
"parentTd": parentBlk.TotalDifficulty,
|
||||
"ttd": terminalTotalDifficulty,
|
||||
}).Info("Retrieved terminal block hash")
|
||||
return blk.Hash, true, nil
|
||||
}
|
||||
} else {
|
||||
return nil, false, nil
|
||||
}
|
||||
blk = parentBlk
|
||||
}
|
||||
}
|
||||
|
||||
// LatestExecutionBlock fetches the latest execution engine block by calling
|
||||
// eth_blockByNumber via JSON-RPC.
|
||||
func (s *Service) LatestExecutionBlock(ctx context.Context) (*pb.ExecutionBlock, error) {
|
||||
@@ -251,3 +326,15 @@ func isTimeout(e error) bool {
|
||||
t, ok := e.(httpTimeoutError)
|
||||
return ok && t.Timeout()
|
||||
}
|
||||
|
||||
func tDStringToUint256(td string) (*uint256.Int, error) {
|
||||
b, err := hexutil.DecodeBig(td)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
i, overflows := uint256.FromBig(b)
|
||||
if overflows {
|
||||
return nil, errors.New("total difficulty overflowed")
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
@@ -4,7 +4,7 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"math/big"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
@@ -100,7 +100,7 @@ func TestClient_HTTP(t *testing.T) {
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
enc, err := ioutil.ReadAll(r.Body)
|
||||
enc, err := io.ReadAll(r.Body)
|
||||
require.NoError(t, err)
|
||||
jsonRequestString := string(enc)
|
||||
|
||||
@@ -350,7 +350,7 @@ func TestClient_HTTP(t *testing.T) {
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
enc, err := ioutil.ReadAll(r.Body)
|
||||
enc, err := io.ReadAll(r.Body)
|
||||
require.NoError(t, err)
|
||||
jsonRequestString := string(enc)
|
||||
// We expect the JSON string RPC request contains the right arguments.
|
||||
@@ -387,7 +387,7 @@ func TestClient_HTTP(t *testing.T) {
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
enc, err := ioutil.ReadAll(r.Body)
|
||||
enc, err := io.ReadAll(r.Body)
|
||||
require.NoError(t, err)
|
||||
jsonRequestString := string(enc)
|
||||
// We expect the JSON string RPC request contains the right arguments.
|
||||
@@ -418,6 +418,155 @@ func TestClient_HTTP(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestServer_getPowBlockHashAtTerminalTotalDifficulty(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
paramsTd string
|
||||
currentPowBlock *pb.ExecutionBlock
|
||||
parentPowBlock *pb.ExecutionBlock
|
||||
errLatestExecutionBlk error
|
||||
wantTerminalBlockHash []byte
|
||||
wantExists bool
|
||||
errString string
|
||||
}{
|
||||
{
|
||||
name: "config td overflows",
|
||||
paramsTd: "1115792089237316195423570985008687907853269984665640564039457584007913129638912",
|
||||
errString: "could not convert terminal total difficulty to uint256",
|
||||
},
|
||||
{
|
||||
name: "could not get latest execution block",
|
||||
paramsTd: "1",
|
||||
errLatestExecutionBlk: errors.New("blah"),
|
||||
errString: "could not get latest execution block",
|
||||
},
|
||||
{
|
||||
name: "nil latest execution block",
|
||||
paramsTd: "1",
|
||||
errString: "latest execution block is nil",
|
||||
},
|
||||
{
|
||||
name: "current execution block invalid TD",
|
||||
paramsTd: "1",
|
||||
currentPowBlock: &pb.ExecutionBlock{
|
||||
Hash: []byte{'a'},
|
||||
TotalDifficulty: "1115792089237316195423570985008687907853269984665640564039457584007913129638912",
|
||||
},
|
||||
errString: "could not convert total difficulty to uint256",
|
||||
},
|
||||
{
|
||||
name: "current execution block has zero hash parent",
|
||||
paramsTd: "2",
|
||||
currentPowBlock: &pb.ExecutionBlock{
|
||||
Hash: []byte{'a'},
|
||||
ParentHash: params.BeaconConfig().ZeroHash[:],
|
||||
TotalDifficulty: "0x3",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "could not get parent block",
|
||||
paramsTd: "2",
|
||||
currentPowBlock: &pb.ExecutionBlock{
|
||||
Hash: []byte{'a'},
|
||||
ParentHash: []byte{'b'},
|
||||
TotalDifficulty: "0x3",
|
||||
},
|
||||
errString: "could not get parent execution block",
|
||||
},
|
||||
{
|
||||
name: "parent execution block invalid TD",
|
||||
paramsTd: "2",
|
||||
currentPowBlock: &pb.ExecutionBlock{
|
||||
Hash: []byte{'a'},
|
||||
ParentHash: []byte{'b'},
|
||||
TotalDifficulty: "0x3",
|
||||
},
|
||||
parentPowBlock: &pb.ExecutionBlock{
|
||||
Hash: []byte{'b'},
|
||||
ParentHash: []byte{'c'},
|
||||
TotalDifficulty: "1",
|
||||
},
|
||||
errString: "could not convert total difficulty to uint256",
|
||||
},
|
||||
{
|
||||
name: "happy case",
|
||||
paramsTd: "2",
|
||||
currentPowBlock: &pb.ExecutionBlock{
|
||||
Hash: []byte{'a'},
|
||||
ParentHash: []byte{'b'},
|
||||
TotalDifficulty: "0x3",
|
||||
},
|
||||
parentPowBlock: &pb.ExecutionBlock{
|
||||
Hash: []byte{'b'},
|
||||
ParentHash: []byte{'c'},
|
||||
TotalDifficulty: "0x1",
|
||||
},
|
||||
wantExists: true,
|
||||
wantTerminalBlockHash: []byte{'a'},
|
||||
},
|
||||
{
|
||||
name: "ttd not reached",
|
||||
paramsTd: "3",
|
||||
currentPowBlock: &pb.ExecutionBlock{
|
||||
Hash: []byte{'a'},
|
||||
ParentHash: []byte{'b'},
|
||||
TotalDifficulty: "0x2",
|
||||
},
|
||||
parentPowBlock: &pb.ExecutionBlock{
|
||||
Hash: []byte{'b'},
|
||||
ParentHash: []byte{'c'},
|
||||
TotalDifficulty: "0x1",
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.TerminalTotalDifficulty = tt.paramsTd
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
var m map[[32]byte]*pb.ExecutionBlock
|
||||
if tt.parentPowBlock != nil {
|
||||
m = map[[32]byte]*pb.ExecutionBlock{
|
||||
bytesutil.ToBytes32(tt.parentPowBlock.Hash): tt.parentPowBlock,
|
||||
}
|
||||
}
|
||||
client := mocks.EngineClient{
|
||||
ErrLatestExecBlock: tt.errLatestExecutionBlk,
|
||||
ExecutionBlock: tt.currentPowBlock,
|
||||
BlockByHashMap: m,
|
||||
}
|
||||
b, e, err := client.GetTerminalBlockHash(context.Background())
|
||||
if tt.errString != "" {
|
||||
require.ErrorContains(t, tt.errString, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, tt.wantExists, e)
|
||||
require.DeepEqual(t, tt.wantTerminalBlockHash, b)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_tDStringToUint256(t *testing.T) {
|
||||
i, err := tDStringToUint256("0x0")
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, uint256.NewInt(0), i)
|
||||
|
||||
i, err = tDStringToUint256("0x10000")
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, uint256.NewInt(65536), i)
|
||||
|
||||
_, err = tDStringToUint256("100")
|
||||
require.ErrorContains(t, "hex string without 0x prefix", err)
|
||||
|
||||
_, err = tDStringToUint256("0xzzzzzz")
|
||||
require.ErrorContains(t, "invalid hex string", err)
|
||||
|
||||
_, err = tDStringToUint256("0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF" +
|
||||
"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF")
|
||||
require.ErrorContains(t, "hex number > 256 bits", err)
|
||||
}
|
||||
|
||||
func TestExchangeTransitionConfiguration(t *testing.T) {
|
||||
fix := fixtures()
|
||||
ctx := context.Background()
|
||||
@@ -823,7 +972,7 @@ func forkchoiceUpdateSetup(t *testing.T, fcs *pb.ForkchoiceState, att *pb.Payloa
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
enc, err := ioutil.ReadAll(r.Body)
|
||||
enc, err := io.ReadAll(r.Body)
|
||||
require.NoError(t, err)
|
||||
jsonRequestString := string(enc)
|
||||
|
||||
@@ -862,7 +1011,7 @@ func newPayloadSetup(t *testing.T, status *pb.PayloadStatus, payload *pb.Executi
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
enc, err := ioutil.ReadAll(r.Body)
|
||||
enc, err := io.ReadAll(r.Body)
|
||||
require.NoError(t, err)
|
||||
jsonRequestString := string(enc)
|
||||
|
||||
|
||||
@@ -342,7 +342,9 @@ func (s *Service) processPastLogs(ctx context.Context) error {
|
||||
return err
|
||||
}
|
||||
// set new block number after checking for chainstart for previous block.
|
||||
s.latestEth1DataLock.Lock()
|
||||
s.latestEth1Data.LastRequestedBlock = currentBlockNum
|
||||
s.latestEth1DataLock.Unlock()
|
||||
currentBlockNum = filterLog.BlockNumber
|
||||
}
|
||||
if err := s.ProcessLog(ctx, filterLog); err != nil {
|
||||
@@ -363,7 +365,9 @@ func (s *Service) processPastLogs(ctx context.Context) error {
|
||||
}
|
||||
}
|
||||
|
||||
s.latestEth1DataLock.Lock()
|
||||
s.latestEth1Data.LastRequestedBlock = currentBlockNum
|
||||
s.latestEth1DataLock.Unlock()
|
||||
|
||||
c, err := s.cfg.beaconDB.FinalizedCheckpoint(ctx)
|
||||
if err != nil {
|
||||
@@ -421,7 +425,9 @@ func (s *Service) requestBatchedHeadersAndLogs(ctx context.Context) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.latestEth1DataLock.Lock()
|
||||
s.latestEth1Data.LastRequestedBlock = i
|
||||
s.latestEth1DataLock.Unlock()
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
@@ -1,9 +1,6 @@
|
||||
package powchain
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
|
||||
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
|
||||
@@ -11,11 +8,9 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
"github.com/prysmaticlabs/prysm/network"
|
||||
"github.com/prysmaticlabs/prysm/network/authorization"
|
||||
)
|
||||
|
||||
// DefaultRPCHTTPTimeout for HTTP requests via an RPC connection to an execution node.
|
||||
const DefaultRPCHTTPTimeout = time.Second * 6
|
||||
|
||||
type Option func(s *Service) error
|
||||
|
||||
// WithHttpEndpoints deduplicates and parses http endpoints for the powchain service to use,
|
||||
@@ -38,20 +33,29 @@ func WithHttpEndpoints(endpointStrings []string) Option {
|
||||
}
|
||||
}
|
||||
|
||||
// WithJWTSecret for authenticating the execution node JSON-RPC endpoint.
|
||||
func WithJWTSecret(secret []byte) Option {
|
||||
return func(c *Service) error {
|
||||
// WithHttpEndpointsAndJWTSecret for authenticating the execution node JSON-RPC endpoint.
|
||||
func WithHttpEndpointsAndJWTSecret(endpointStrings []string, secret []byte) Option {
|
||||
return func(s *Service) error {
|
||||
if len(secret) == 0 {
|
||||
return nil
|
||||
}
|
||||
authTransport := &jwtTransport{
|
||||
underlyingTransport: http.DefaultTransport,
|
||||
jwtSecret: secret,
|
||||
stringEndpoints := dedupEndpoints(endpointStrings)
|
||||
endpoints := make([]network.Endpoint, len(stringEndpoints))
|
||||
// Overwrite authorization type for all endpoints to be of a bearer
|
||||
// type.
|
||||
for i, e := range stringEndpoints {
|
||||
hEndpoint := HttpEndpoint(e)
|
||||
hEndpoint.Auth.Method = authorization.Bearer
|
||||
hEndpoint.Auth.Value = string(secret)
|
||||
endpoints[i] = hEndpoint
|
||||
}
|
||||
c.cfg.httpRPCClient = &http.Client{
|
||||
Timeout: DefaultRPCHTTPTimeout,
|
||||
Transport: authTransport,
|
||||
// Select first http endpoint in the provided list.
|
||||
var currEndpoint network.Endpoint
|
||||
if len(endpointStrings) > 0 {
|
||||
currEndpoint = endpoints[0]
|
||||
}
|
||||
s.cfg.httpEndpoints = endpoints
|
||||
s.cfg.currHttpEndpoint = currEndpoint
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
package powchain
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"testing"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
@@ -9,7 +9,7 @@ import (
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
logrus.SetLevel(logrus.DebugLevel)
|
||||
logrus.SetOutput(ioutil.Discard)
|
||||
logrus.SetOutput(io.Discard)
|
||||
|
||||
m.Run()
|
||||
}
|
||||
|
||||
176
beacon-chain/powchain/rpc_connection.go
Normal file
176
beacon-chain/powchain/rpc_connection.go
Normal file
@@ -0,0 +1,176 @@
|
||||
package powchain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/ethclient"
|
||||
gethRPC "github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
contracts "github.com/prysmaticlabs/prysm/contracts/deposit"
|
||||
"github.com/prysmaticlabs/prysm/io/logs"
|
||||
"github.com/prysmaticlabs/prysm/network"
|
||||
"github.com/prysmaticlabs/prysm/network/authorization"
|
||||
)
|
||||
|
||||
func (s *Service) setupExecutionClientConnections(ctx context.Context, currEndpoint network.Endpoint) error {
|
||||
client, err := s.newRPCClientWithAuth(ctx, currEndpoint)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not dial execution node")
|
||||
}
|
||||
// Attach the clients to the service struct.
|
||||
fetcher := ethclient.NewClient(client)
|
||||
s.rpcClient = client
|
||||
s.httpLogger = fetcher
|
||||
s.eth1DataFetcher = fetcher
|
||||
|
||||
depositContractCaller, err := contracts.NewDepositContractCaller(s.cfg.depositContractAddr, fetcher)
|
||||
if err != nil {
|
||||
client.Close()
|
||||
return errors.Wrap(err, "could not initialize deposit contract caller")
|
||||
}
|
||||
s.depositContractCaller = depositContractCaller
|
||||
|
||||
// Ensure we have the correct chain and deposit IDs.
|
||||
if err := ensureCorrectExecutionChain(ctx, fetcher); err != nil {
|
||||
client.Close()
|
||||
return errors.Wrap(err, "could not make initial request to verify execution chain ID")
|
||||
}
|
||||
s.updateConnectedETH1(true)
|
||||
s.runError = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// Every N seconds, defined as a backoffPeriod, attempts to re-establish an execution client
|
||||
// connection and if this does not work, we fallback to the next endpoint if defined.
|
||||
func (s *Service) pollConnectionStatus(ctx context.Context) {
|
||||
// Use a custom logger to only log errors
|
||||
logCounter := 0
|
||||
errorLogger := func(err error, msg string) {
|
||||
if logCounter > logThreshold {
|
||||
log.Errorf("%s: %v", msg, err)
|
||||
logCounter = 0
|
||||
}
|
||||
logCounter++
|
||||
}
|
||||
ticker := time.NewTicker(backOffPeriod)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
log.Debugf("Trying to dial endpoint: %s", logs.MaskCredentialsLogging(s.cfg.currHttpEndpoint.Url))
|
||||
if err := s.setupExecutionClientConnections(ctx, s.cfg.currHttpEndpoint); err != nil {
|
||||
errorLogger(err, "Could not connect to execution client endpoint")
|
||||
s.runError = err
|
||||
s.fallbackToNextEndpoint()
|
||||
}
|
||||
case <-s.ctx.Done():
|
||||
log.Debug("Received cancelled context,closing existing powchain service")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Forces to retry an execution client connection.
|
||||
func (s *Service) retryExecutionClientConnection(ctx context.Context, err error) {
|
||||
s.runError = err
|
||||
s.updateConnectedETH1(false)
|
||||
// Back off for a while before redialing.
|
||||
time.Sleep(backOffPeriod)
|
||||
if err := s.setupExecutionClientConnections(ctx, s.cfg.currHttpEndpoint); err != nil {
|
||||
s.runError = err
|
||||
return
|
||||
}
|
||||
// Reset run error in the event of a successful connection.
|
||||
s.runError = nil
|
||||
}
|
||||
|
||||
// This performs a health check on our primary endpoint, and if it
|
||||
// is ready to serve we connect to it again. This method is only
|
||||
// relevant if we are on our backup endpoint.
|
||||
func (s *Service) checkDefaultEndpoint(ctx context.Context) {
|
||||
primaryEndpoint := s.cfg.httpEndpoints[0]
|
||||
// Return early if we are running on our primary
|
||||
// endpoint.
|
||||
if s.cfg.currHttpEndpoint.Equals(primaryEndpoint) {
|
||||
return
|
||||
}
|
||||
|
||||
if err := s.setupExecutionClientConnections(ctx, primaryEndpoint); err != nil {
|
||||
log.Debugf("Primary endpoint not ready: %v", err)
|
||||
return
|
||||
}
|
||||
s.updateCurrHttpEndpoint(primaryEndpoint)
|
||||
}
|
||||
|
||||
// This is an inefficient way to search for the next endpoint, but given N is
|
||||
// expected to be small, it is fine to search this way.
|
||||
func (s *Service) fallbackToNextEndpoint() {
|
||||
currEndpoint := s.cfg.currHttpEndpoint
|
||||
currIndex := 0
|
||||
totalEndpoints := len(s.cfg.httpEndpoints)
|
||||
|
||||
for i, endpoint := range s.cfg.httpEndpoints {
|
||||
if endpoint.Equals(currEndpoint) {
|
||||
currIndex = i
|
||||
break
|
||||
}
|
||||
}
|
||||
nextIndex := currIndex + 1
|
||||
if nextIndex >= totalEndpoints {
|
||||
nextIndex = 0
|
||||
}
|
||||
s.updateCurrHttpEndpoint(s.cfg.httpEndpoints[nextIndex])
|
||||
if nextIndex != currIndex {
|
||||
log.Infof("Falling back to alternative endpoint: %s", logs.MaskCredentialsLogging(s.cfg.currHttpEndpoint.Url))
|
||||
}
|
||||
}
|
||||
|
||||
// Initializes an RPC connection with authentication headers.
|
||||
func (s *Service) newRPCClientWithAuth(ctx context.Context, endpoint network.Endpoint) (*gethRPC.Client, error) {
|
||||
// Need to handle ipc and http
|
||||
var client *gethRPC.Client
|
||||
u, err := url.Parse(endpoint.Url)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch u.Scheme {
|
||||
case "http", "https":
|
||||
client, err = gethRPC.DialHTTPWithClient(endpoint.Url, endpoint.HttpClient())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case "":
|
||||
client, err = gethRPC.DialIPC(ctx, endpoint.Url)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("no known transport for URL scheme %q", u.Scheme)
|
||||
}
|
||||
if endpoint.Auth.Method != authorization.None {
|
||||
header, err := endpoint.Auth.ToHeaderValue()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client.SetHeader("Authorization", header)
|
||||
}
|
||||
return client, nil
|
||||
}
|
||||
|
||||
// Checks the chain ID of the execution client to ensure
|
||||
// it matches local parameters of what Prysm expects.
|
||||
func ensureCorrectExecutionChain(ctx context.Context, client *ethclient.Client) error {
|
||||
cID, err := client.ChainID(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
wantChainID := params.BeaconConfig().DepositChainID
|
||||
if cID.Uint64() != wantChainID {
|
||||
return fmt.Errorf("wanted chain ID %d, got %d", wantChainID, cID.Uint64())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -7,7 +7,6 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"runtime/debug"
|
||||
"sort"
|
||||
@@ -39,10 +38,8 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/container/trie"
|
||||
contracts "github.com/prysmaticlabs/prysm/contracts/deposit"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/io/logs"
|
||||
"github.com/prysmaticlabs/prysm/monitoring/clientstats"
|
||||
"github.com/prysmaticlabs/prysm/network"
|
||||
"github.com/prysmaticlabs/prysm/network/authorization"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
prysmTime "github.com/prysmaticlabs/prysm/time"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
@@ -114,6 +111,7 @@ type Chain interface {
|
||||
// RPCDataFetcher defines a subset of methods conformed to by ETH1.0 RPC clients for
|
||||
// fetching eth1 data from the clients.
|
||||
type RPCDataFetcher interface {
|
||||
Close()
|
||||
HeaderByNumber(ctx context.Context, number *big.Int) (*gethTypes.Header, error)
|
||||
HeaderByHash(ctx context.Context, hash common.Hash) (*gethTypes.Header, error)
|
||||
SyncProgress(ctx context.Context) (*ethereum.SyncProgress, error)
|
||||
@@ -121,6 +119,7 @@ type RPCDataFetcher interface {
|
||||
|
||||
// RPCClient defines the rpc methods required to interact with the eth1 node.
|
||||
type RPCClient interface {
|
||||
Close()
|
||||
BatchCall(b []gethRPC.BatchElem) error
|
||||
CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error
|
||||
}
|
||||
@@ -135,7 +134,6 @@ type config struct {
|
||||
eth1HeaderReqLimit uint64
|
||||
beaconNodeStatsUpdater BeaconNodeStatsUpdater
|
||||
httpEndpoints []network.Endpoint
|
||||
httpRPCClient *http.Client
|
||||
currHttpEndpoint network.Endpoint
|
||||
finalizedStateAtStartup state.BeaconState
|
||||
}
|
||||
@@ -150,6 +148,7 @@ type Service struct {
|
||||
connectedETH1 bool
|
||||
isRunning bool
|
||||
processingLock sync.RWMutex
|
||||
latestEth1DataLock sync.RWMutex
|
||||
cfg *config
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
@@ -228,14 +227,9 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
|
||||
|
||||
// Start a web3 service's main event loop.
|
||||
func (s *Service) Start() {
|
||||
|
||||
if err := s.connectToPowChain(); err != nil {
|
||||
log.WithError(err).Fatal("Could not connect to execution endpoint")
|
||||
if err := s.setupExecutionClientConnections(s.ctx, s.cfg.currHttpEndpoint); err != nil {
|
||||
log.WithError(err).Error("Could not connect to execution endpoint")
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"endpoint": logs.MaskCredentialsLogging(s.cfg.currHttpEndpoint.Url),
|
||||
}).Info("Connected to Ethereum execution client RPC")
|
||||
// If the chain has not started already and we don't have access to eth1 nodes, we will not be
|
||||
// able to generate the genesis state.
|
||||
if !s.chainStartData.Chainstarted && s.cfg.currHttpEndpoint.Url == "" {
|
||||
@@ -253,7 +247,7 @@ func (s *Service) Start() {
|
||||
s.isRunning = true
|
||||
|
||||
// Poll the execution client connection and fallback if errors occur.
|
||||
go s.pollConnectionStatus()
|
||||
go s.pollConnectionStatus(s.ctx)
|
||||
|
||||
// Check transition configuration for the engine API client in the background.
|
||||
go s.checkTransitionConfiguration(s.ctx, make(chan *feed.Event, 1))
|
||||
@@ -266,7 +260,12 @@ func (s *Service) Stop() error {
|
||||
if s.cancel != nil {
|
||||
defer s.cancel()
|
||||
}
|
||||
s.closeClients()
|
||||
if s.rpcClient != nil {
|
||||
s.rpcClient.Close()
|
||||
}
|
||||
if s.eth1DataFetcher != nil {
|
||||
s.eth1DataFetcher.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -338,10 +337,7 @@ func (s *Service) CurrentETH1Endpoint() string {
|
||||
|
||||
// CurrentETH1ConnectionError returns the error (if any) of the current connection.
|
||||
func (s *Service) CurrentETH1ConnectionError() error {
|
||||
httpClient, rpcClient, err := s.dialETH1Nodes(s.cfg.currHttpEndpoint)
|
||||
httpClient.Close()
|
||||
rpcClient.Close()
|
||||
return err
|
||||
return s.runError
|
||||
}
|
||||
|
||||
// ETH1Endpoints returns the slice of HTTP endpoint URLs (default is 0th element).
|
||||
@@ -358,10 +354,17 @@ func (s *Service) ETH1Endpoints() []string {
|
||||
func (s *Service) ETH1ConnectionErrors() []error {
|
||||
var errs []error
|
||||
for _, ep := range s.cfg.httpEndpoints {
|
||||
httpClient, rpcClient, err := s.dialETH1Nodes(ep)
|
||||
httpClient.Close()
|
||||
rpcClient.Close()
|
||||
errs = append(errs, err)
|
||||
client, err := s.newRPCClientWithAuth(s.ctx, ep)
|
||||
if err != nil {
|
||||
errs = append(errs, err)
|
||||
continue
|
||||
}
|
||||
if err := ensureCorrectExecutionChain(s.ctx, ethclient.NewClient(client)); err != nil {
|
||||
client.Close()
|
||||
errs = append(errs, err)
|
||||
continue
|
||||
}
|
||||
client.Close()
|
||||
}
|
||||
return errs
|
||||
}
|
||||
@@ -376,146 +379,6 @@ func (s *Service) followBlockHeight(_ context.Context) (uint64, error) {
|
||||
return latestValidBlock, nil
|
||||
}
|
||||
|
||||
func (s *Service) connectToPowChain() error {
|
||||
httpClient, rpcClient, err := s.dialETH1Nodes(s.cfg.currHttpEndpoint)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not dial execution node")
|
||||
}
|
||||
|
||||
depositContractCaller, err := contracts.NewDepositContractCaller(s.cfg.depositContractAddr, httpClient)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not initialize deposit contract caller")
|
||||
}
|
||||
|
||||
if httpClient == nil || rpcClient == nil || depositContractCaller == nil {
|
||||
return errors.New("execution client RPC is nil")
|
||||
}
|
||||
s.httpLogger = httpClient
|
||||
s.eth1DataFetcher = httpClient
|
||||
s.depositContractCaller = depositContractCaller
|
||||
s.rpcClient = rpcClient
|
||||
|
||||
s.updateConnectedETH1(true)
|
||||
s.runError = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) dialETH1Nodes(endpoint network.Endpoint) (*ethclient.Client, *gethRPC.Client, error) {
|
||||
httpRPCClient, err := gethRPC.Dial(endpoint.Url)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if endpoint.Auth.Method != authorization.None {
|
||||
header, err := endpoint.Auth.ToHeaderValue()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
httpRPCClient.SetHeader("Authorization", header)
|
||||
}
|
||||
httpClient := ethclient.NewClient(httpRPCClient)
|
||||
// Add a method to clean-up and close clients in the event
|
||||
// of any connection failure.
|
||||
closeClients := func() {
|
||||
httpRPCClient.Close()
|
||||
httpClient.Close()
|
||||
}
|
||||
// Make a simple call to ensure we are actually connected to a working node.
|
||||
cID, err := httpClient.ChainID(s.ctx)
|
||||
if err != nil {
|
||||
closeClients()
|
||||
return nil, nil, err
|
||||
}
|
||||
nID, err := httpClient.NetworkID(s.ctx)
|
||||
if err != nil {
|
||||
closeClients()
|
||||
return nil, nil, err
|
||||
}
|
||||
if cID.Uint64() != params.BeaconConfig().DepositChainID {
|
||||
closeClients()
|
||||
return nil, nil, fmt.Errorf("eth1 node using incorrect chain id, %d != %d", cID.Uint64(), params.BeaconConfig().DepositChainID)
|
||||
}
|
||||
if nID.Uint64() != params.BeaconConfig().DepositNetworkID {
|
||||
closeClients()
|
||||
return nil, nil, fmt.Errorf("eth1 node using incorrect network id, %d != %d", nID.Uint64(), params.BeaconConfig().DepositNetworkID)
|
||||
}
|
||||
|
||||
return httpClient, httpRPCClient, nil
|
||||
}
|
||||
|
||||
// closes down our active eth1 clients.
|
||||
func (s *Service) closeClients() {
|
||||
gethClient, ok := s.rpcClient.(*gethRPC.Client)
|
||||
if ok {
|
||||
gethClient.Close()
|
||||
}
|
||||
httpClient, ok := s.eth1DataFetcher.(*ethclient.Client)
|
||||
if ok {
|
||||
httpClient.Close()
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) pollConnectionStatus() {
|
||||
// Use a custom logger to only log errors
|
||||
logCounter := 0
|
||||
errorLogger := func(err error, msg string) {
|
||||
if logCounter > logThreshold {
|
||||
log.Errorf("%s: %v", msg, err)
|
||||
logCounter = 0
|
||||
}
|
||||
logCounter++
|
||||
}
|
||||
ticker := time.NewTicker(backOffPeriod)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
log.Debugf("Trying to dial endpoint: %s", logs.MaskCredentialsLogging(s.cfg.currHttpEndpoint.Url))
|
||||
errConnect := s.connectToPowChain()
|
||||
if errConnect != nil {
|
||||
errorLogger(errConnect, "Could not connect to powchain endpoint")
|
||||
s.runError = errConnect
|
||||
s.fallbackToNextEndpoint()
|
||||
continue
|
||||
}
|
||||
case <-s.ctx.Done():
|
||||
log.Debug("Received cancelled context,closing existing powchain service")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// checks if the eth1 node is healthy and ready to serve before
|
||||
// fetching data from it.
|
||||
func (s *Service) isEth1NodeSynced() (bool, error) {
|
||||
syncProg, err := s.eth1DataFetcher.SyncProgress(s.ctx)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if syncProg != nil {
|
||||
return false, nil
|
||||
}
|
||||
head, err := s.eth1DataFetcher.HeaderByNumber(s.ctx, nil)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return !eth1HeadIsBehind(head.Time), nil
|
||||
}
|
||||
|
||||
// Reconnect to eth1 node in case of any failure.
|
||||
func (s *Service) retryETH1Node(err error) {
|
||||
s.runError = err
|
||||
s.updateConnectedETH1(false)
|
||||
// Back off for a while before
|
||||
// resuming dialing the eth1 node.
|
||||
time.Sleep(backOffPeriod)
|
||||
if err := s.connectToPowChain(); err != nil {
|
||||
s.runError = err
|
||||
return
|
||||
}
|
||||
// Reset run error in the event of a successful connection.
|
||||
s.runError = nil
|
||||
}
|
||||
|
||||
func (s *Service) initDepositCaches(ctx context.Context, ctrs []*ethpb.DepositContainer) error {
|
||||
if len(ctrs) == 0 {
|
||||
return nil
|
||||
@@ -551,11 +414,14 @@ func (s *Service) initDepositCaches(ctx context.Context, ctrs []*ethpb.DepositCo
|
||||
// accumulates. we finalize them here before we are ready to receive a block.
|
||||
// Otherwise, the first few blocks will be slower to compute as we will
|
||||
// hold the lock and be busy finalizing the deposits.
|
||||
s.cfg.depositCache.InsertFinalizedDeposits(ctx, int64(currIndex)) // lint:ignore uintcast -- deposit index will not exceed int64 in your lifetime.
|
||||
// The deposit index in the state is always the index of the next deposit
|
||||
// to be included(rather than the last one to be processed). This was most likely
|
||||
// done as the state cannot represent signed integers.
|
||||
actualIndex := int64(currIndex) - 1 // lint:ignore uintcast -- deposit index will not exceed int64 in your lifetime.
|
||||
s.cfg.depositCache.InsertFinalizedDeposits(ctx, actualIndex)
|
||||
// Deposit proofs are only used during state transition and can be safely removed to save space.
|
||||
|
||||
// lint:ignore uintcast -- deposit index will not exceed int64 in your lifetime.
|
||||
if err = s.cfg.depositCache.PruneProofs(ctx, int64(currIndex)); err != nil {
|
||||
if err = s.cfg.depositCache.PruneProofs(ctx, actualIndex); err != nil {
|
||||
return errors.Wrap(err, "could not prune deposit proofs")
|
||||
}
|
||||
}
|
||||
@@ -575,9 +441,11 @@ func (s *Service) initDepositCaches(ctx context.Context, ctrs []*ethpb.DepositCo
|
||||
func (s *Service) processBlockHeader(header *gethTypes.Header) {
|
||||
defer safelyHandlePanic()
|
||||
blockNumberGauge.Set(float64(header.Number.Int64()))
|
||||
s.latestEth1DataLock.Lock()
|
||||
s.latestEth1Data.BlockHeight = header.Number.Uint64()
|
||||
s.latestEth1Data.BlockHash = header.Hash().Bytes()
|
||||
s.latestEth1Data.BlockTime = header.Time
|
||||
s.latestEth1DataLock.Unlock()
|
||||
log.WithFields(logrus.Fields{
|
||||
"blockNumber": s.latestEth1Data.BlockHeight,
|
||||
"blockHash": hexutil.Encode(s.latestEth1Data.BlockHash),
|
||||
@@ -650,7 +518,7 @@ func (s *Service) handleETH1FollowDistance() {
|
||||
fiveMinutesTimeout := prysmTime.Now().Add(-5 * time.Minute)
|
||||
// check that web3 client is syncing
|
||||
if time.Unix(int64(s.latestEth1Data.BlockTime), 0).Before(fiveMinutesTimeout) {
|
||||
log.Warn("eth1 client is not syncing")
|
||||
log.Warn("Execution client is not syncing")
|
||||
}
|
||||
if !s.chainStartData.Chainstarted {
|
||||
if err := s.checkBlockNumberForChainStart(ctx, big.NewInt(int64(s.latestEth1Data.LastRequestedBlock))); err != nil {
|
||||
@@ -680,6 +548,15 @@ func (s *Service) handleETH1FollowDistance() {
|
||||
}
|
||||
|
||||
func (s *Service) initPOWService() {
|
||||
// Use a custom logger to only log errors
|
||||
logCounter := 0
|
||||
errorLogger := func(err error, msg string) {
|
||||
if logCounter > logThreshold {
|
||||
log.Errorf("%s: %v", msg, err)
|
||||
logCounter = 0
|
||||
}
|
||||
logCounter++
|
||||
}
|
||||
|
||||
// Run in a select loop to retry in the event of any failures.
|
||||
for {
|
||||
@@ -690,24 +567,26 @@ func (s *Service) initPOWService() {
|
||||
ctx := s.ctx
|
||||
header, err := s.eth1DataFetcher.HeaderByNumber(ctx, nil)
|
||||
if err != nil {
|
||||
log.Errorf("Unable to retrieve latest ETH1.0 chain header: %v", err)
|
||||
s.retryETH1Node(err)
|
||||
s.retryExecutionClientConnection(ctx, err)
|
||||
errorLogger(err, "Unable to retrieve latest execution client header")
|
||||
continue
|
||||
}
|
||||
|
||||
s.latestEth1DataLock.Lock()
|
||||
s.latestEth1Data.BlockHeight = header.Number.Uint64()
|
||||
s.latestEth1Data.BlockHash = header.Hash().Bytes()
|
||||
s.latestEth1Data.BlockTime = header.Time
|
||||
s.latestEth1DataLock.Unlock()
|
||||
|
||||
if err := s.processPastLogs(ctx); err != nil {
|
||||
log.Errorf("Unable to process past logs %v", err)
|
||||
s.retryETH1Node(err)
|
||||
s.retryExecutionClientConnection(ctx, err)
|
||||
errorLogger(err, "Unable to process past deposit contract logs")
|
||||
continue
|
||||
}
|
||||
// Cache eth1 headers from our voting period.
|
||||
if err := s.cacheHeadersForEth1DataVote(ctx); err != nil {
|
||||
log.Errorf("Unable to process past headers %v", err)
|
||||
s.retryETH1Node(err)
|
||||
s.retryExecutionClientConnection(ctx, err)
|
||||
errorLogger(err, "Unable to cache headers for execution client votes")
|
||||
continue
|
||||
}
|
||||
// Handle edge case with embedded genesis state by fetching genesis header to determine
|
||||
@@ -720,15 +599,15 @@ func (s *Service) initPOWService() {
|
||||
if genHash != [32]byte{} {
|
||||
genHeader, err := s.eth1DataFetcher.HeaderByHash(ctx, genHash)
|
||||
if err != nil {
|
||||
log.Errorf("Unable to retrieve genesis ETH1.0 chain header: %v", err)
|
||||
s.retryETH1Node(err)
|
||||
s.retryExecutionClientConnection(ctx, err)
|
||||
errorLogger(err, "Unable to retrieve proof-of-stake genesis block data")
|
||||
continue
|
||||
}
|
||||
genBlock = genHeader.Number.Uint64()
|
||||
}
|
||||
s.chainStartData.GenesisBlock = genBlock
|
||||
if err := s.savePowchainData(ctx); err != nil {
|
||||
log.Errorf("Unable to save powchain data: %v", err)
|
||||
errorLogger(err, "Unable to save execution client data")
|
||||
}
|
||||
}
|
||||
return
|
||||
@@ -757,17 +636,16 @@ func (s *Service) run(done <-chan struct{}) {
|
||||
head, err := s.eth1DataFetcher.HeaderByNumber(s.ctx, nil)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Could not fetch latest eth1 header")
|
||||
s.retryETH1Node(err)
|
||||
continue
|
||||
}
|
||||
if eth1HeadIsBehind(head.Time) {
|
||||
s.retryExecutionClientConnection(s.ctx, err)
|
||||
log.WithError(errFarBehind).Debug("Could not get an up to date eth1 header")
|
||||
s.retryETH1Node(errFarBehind)
|
||||
continue
|
||||
}
|
||||
s.processBlockHeader(head)
|
||||
s.handleETH1FollowDistance()
|
||||
s.checkDefaultEndpoint()
|
||||
s.checkDefaultEndpoint(s.ctx)
|
||||
case <-chainstartTicker.C:
|
||||
if s.chainStartData.Chainstarted {
|
||||
chainstartTicker.Stop()
|
||||
@@ -854,59 +732,6 @@ func (s *Service) determineEarliestVotingBlock(ctx context.Context, followBlock
|
||||
return hdr.Number.Uint64(), nil
|
||||
}
|
||||
|
||||
// This performs a health check on our primary endpoint, and if it
|
||||
// is ready to serve we connect to it again. This method is only
|
||||
// relevant if we are on our backup endpoint.
|
||||
func (s *Service) checkDefaultEndpoint() {
|
||||
primaryEndpoint := s.cfg.httpEndpoints[0]
|
||||
// Return early if we are running on our primary
|
||||
// endpoint.
|
||||
if s.cfg.currHttpEndpoint.Equals(primaryEndpoint) {
|
||||
return
|
||||
}
|
||||
|
||||
httpClient, rpcClient, err := s.dialETH1Nodes(primaryEndpoint)
|
||||
if err != nil {
|
||||
log.Debugf("Primary endpoint not ready: %v", err)
|
||||
return
|
||||
}
|
||||
log.Info("Primary endpoint ready again, switching back to it")
|
||||
// Close the clients and let our main connection routine
|
||||
// properly connect with it.
|
||||
httpClient.Close()
|
||||
rpcClient.Close()
|
||||
// Close current active clients.
|
||||
s.closeClients()
|
||||
|
||||
// Switch back to primary endpoint and try connecting
|
||||
// to it again.
|
||||
s.updateCurrHttpEndpoint(primaryEndpoint)
|
||||
s.retryETH1Node(nil)
|
||||
}
|
||||
|
||||
// This is an inefficient way to search for the next endpoint, but given N is expected to be
|
||||
// small ( < 25), it is fine to search this way.
|
||||
func (s *Service) fallbackToNextEndpoint() {
|
||||
currEndpoint := s.cfg.currHttpEndpoint
|
||||
currIndex := 0
|
||||
totalEndpoints := len(s.cfg.httpEndpoints)
|
||||
|
||||
for i, endpoint := range s.cfg.httpEndpoints {
|
||||
if endpoint.Equals(currEndpoint) {
|
||||
currIndex = i
|
||||
break
|
||||
}
|
||||
}
|
||||
nextIndex := currIndex + 1
|
||||
if nextIndex >= totalEndpoints {
|
||||
nextIndex = 0
|
||||
}
|
||||
s.updateCurrHttpEndpoint(s.cfg.httpEndpoints[nextIndex])
|
||||
if nextIndex != currIndex {
|
||||
log.Infof("Falling back to alternative endpoint: %s", logs.MaskCredentialsLogging(s.cfg.currHttpEndpoint.Url))
|
||||
}
|
||||
}
|
||||
|
||||
// initializes our service from the provided eth1data object by initializing all the relevant
|
||||
// fields and data.
|
||||
func (s *Service) initializeEth1Data(ctx context.Context, eth1DataInDB *ethpb.ETH1ChainData) error {
|
||||
|
||||
@@ -42,6 +42,8 @@ type goodLogger struct {
|
||||
backend *backends.SimulatedBackend
|
||||
}
|
||||
|
||||
func (_ *goodLogger) Close() {}
|
||||
|
||||
func (g *goodLogger) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- gethTypes.Log) (ethereum.Subscription, error) {
|
||||
if g.backend == nil {
|
||||
return new(event.Feed).Subscribe(ch), nil
|
||||
@@ -80,6 +82,8 @@ type goodFetcher struct {
|
||||
backend *backends.SimulatedBackend
|
||||
}
|
||||
|
||||
func (_ *goodFetcher) Close() {}
|
||||
|
||||
func (g *goodFetcher) HeaderByHash(_ context.Context, hash common.Hash) (*gethTypes.Header, error) {
|
||||
if bytes.Equal(hash.Bytes(), common.BytesToHash([]byte{0}).Bytes()) {
|
||||
return nil, fmt.Errorf("expected block hash to be nonzero %v", hash)
|
||||
@@ -225,10 +229,6 @@ func TestService_Eth1Synced(t *testing.T) {
|
||||
now := time.Now()
|
||||
assert.NoError(t, testAcc.Backend.AdjustTime(now.Sub(time.Unix(int64(currTime), 0))))
|
||||
testAcc.Backend.Commit()
|
||||
|
||||
synced, err := web3Service.isEth1NodeSynced()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, synced, "Expected eth1 nodes to be synced")
|
||||
}
|
||||
|
||||
func TestFollowBlock_OK(t *testing.T) {
|
||||
@@ -472,7 +472,7 @@ func TestInitDepositCacheWithFinalization_OK(t *testing.T) {
|
||||
require.NoError(t, s.cfg.beaconDB.SaveState(context.Background(), emptyState, headRoot))
|
||||
require.NoError(t, stateGen.SaveState(context.Background(), headRoot, emptyState))
|
||||
s.cfg.stateGen = stateGen
|
||||
require.NoError(t, emptyState.SetEth1DepositIndex(2))
|
||||
require.NoError(t, emptyState.SetEth1DepositIndex(3))
|
||||
|
||||
ctx := context.Background()
|
||||
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, ðpb.Checkpoint{Epoch: slots.ToEpoch(0), Root: headRoot[:]}))
|
||||
@@ -480,8 +480,8 @@ func TestInitDepositCacheWithFinalization_OK(t *testing.T) {
|
||||
|
||||
s.chainStartData.Chainstarted = true
|
||||
require.NoError(t, s.initDepositCaches(context.Background(), ctrs))
|
||||
|
||||
deps := s.cfg.depositCache.NonFinalizedDeposits(context.Background(), nil)
|
||||
fDeposits := s.cfg.depositCache.FinalizedDeposits(ctx)
|
||||
deps := s.cfg.depositCache.NonFinalizedDeposits(context.Background(), fDeposits.MerkleTrieIndex, nil)
|
||||
assert.Equal(t, 0, len(deps))
|
||||
}
|
||||
|
||||
|
||||
@@ -26,6 +26,7 @@ go_library(
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//rpc:go_default_library",
|
||||
"@com_github_holiman_uint256//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -2,25 +2,32 @@ package testing
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math/big"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/holiman/uint256"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/engine/v1"
|
||||
)
|
||||
|
||||
// EngineClient --
|
||||
type EngineClient struct {
|
||||
NewPayloadResp []byte
|
||||
PayloadIDBytes *pb.PayloadIDBytes
|
||||
ForkChoiceUpdatedResp []byte
|
||||
ExecutionPayload *pb.ExecutionPayload
|
||||
ExecutionBlock *pb.ExecutionBlock
|
||||
Err error
|
||||
ErrLatestExecBlock error
|
||||
ErrExecBlockByHash error
|
||||
ErrForkchoiceUpdated error
|
||||
ErrNewPayload error
|
||||
BlockByHashMap map[[32]byte]*pb.ExecutionBlock
|
||||
NewPayloadResp []byte
|
||||
PayloadIDBytes *pb.PayloadIDBytes
|
||||
ForkChoiceUpdatedResp []byte
|
||||
ExecutionPayload *pb.ExecutionPayload
|
||||
ExecutionBlock *pb.ExecutionBlock
|
||||
Err error
|
||||
ErrLatestExecBlock error
|
||||
ErrExecBlockByHash error
|
||||
ErrForkchoiceUpdated error
|
||||
ErrNewPayload error
|
||||
BlockByHashMap map[[32]byte]*pb.ExecutionBlock
|
||||
TerminalBlockHash []byte
|
||||
TerminalBlockHashExists bool
|
||||
}
|
||||
|
||||
// NewPayload --
|
||||
@@ -58,3 +65,52 @@ func (e *EngineClient) ExecutionBlockByHash(_ context.Context, h common.Hash) (*
|
||||
}
|
||||
return b, e.ErrExecBlockByHash
|
||||
}
|
||||
|
||||
// GetTerminalBlockHash --
|
||||
func (e *EngineClient) GetTerminalBlockHash(ctx context.Context) ([]byte, bool, error) {
|
||||
ttd := new(big.Int)
|
||||
ttd.SetString(params.BeaconConfig().TerminalTotalDifficulty, 10)
|
||||
terminalTotalDifficulty, overflows := uint256.FromBig(ttd)
|
||||
if overflows {
|
||||
return nil, false, errors.New("could not convert terminal total difficulty to uint256")
|
||||
}
|
||||
blk, err := e.LatestExecutionBlock(ctx)
|
||||
if err != nil {
|
||||
return nil, false, errors.Wrap(err, "could not get latest execution block")
|
||||
}
|
||||
if blk == nil {
|
||||
return nil, false, errors.New("latest execution block is nil")
|
||||
}
|
||||
|
||||
for {
|
||||
b, err := hexutil.DecodeBig(blk.TotalDifficulty)
|
||||
if err != nil {
|
||||
return nil, false, errors.Wrap(err, "could not convert total difficulty to uint256")
|
||||
}
|
||||
currentTotalDifficulty, _ := uint256.FromBig(b)
|
||||
blockReachedTTD := currentTotalDifficulty.Cmp(terminalTotalDifficulty) >= 0
|
||||
|
||||
parentHash := bytesutil.ToBytes32(blk.ParentHash)
|
||||
if len(blk.ParentHash) == 0 || parentHash == params.BeaconConfig().ZeroHash {
|
||||
return nil, false, nil
|
||||
}
|
||||
parentBlk, err := e.ExecutionBlockByHash(ctx, parentHash)
|
||||
if err != nil {
|
||||
return nil, false, errors.Wrap(err, "could not get parent execution block")
|
||||
}
|
||||
if blockReachedTTD {
|
||||
b, err := hexutil.DecodeBig(parentBlk.TotalDifficulty)
|
||||
if err != nil {
|
||||
return nil, false, errors.Wrap(err, "could not convert total difficulty to uint256")
|
||||
}
|
||||
parentTotalDifficulty, _ := uint256.FromBig(b)
|
||||
parentReachedTTD := parentTotalDifficulty.Cmp(terminalTotalDifficulty) >= 0
|
||||
if !parentReachedTTD {
|
||||
return blk.Hash, true, nil
|
||||
}
|
||||
} else {
|
||||
return nil, false, nil
|
||||
}
|
||||
blk = parentBlk
|
||||
}
|
||||
}
|
||||
|
||||
@@ -144,6 +144,8 @@ type RPCClient struct {
|
||||
Backend *backends.SimulatedBackend
|
||||
}
|
||||
|
||||
func (_ *RPCClient) Close() {}
|
||||
|
||||
func (*RPCClient) CallContext(_ context.Context, _ interface{}, _ string, _ ...interface{}) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -179,7 +178,7 @@ func writeSSZResponseHeaderAndBody(grpcResp *http.Response, w http.ResponseWrite
|
||||
} else {
|
||||
w.WriteHeader(grpcResp.StatusCode)
|
||||
}
|
||||
if _, err := io.Copy(w, ioutil.NopCloser(bytes.NewReader(respSsz))); err != nil {
|
||||
if _, err := io.Copy(w, io.NopCloser(bytes.NewReader(respSsz))); err != nil {
|
||||
return apimiddleware.InternalServerErrorWithMessage(err, "could not write response message")
|
||||
}
|
||||
return nil
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strconv"
|
||||
@@ -36,7 +37,7 @@ func wrapFeeRecipientsArray(
|
||||
if err != nil {
|
||||
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not marshal wrapped body")
|
||||
}
|
||||
req.Body = ioutil.NopCloser(bytes.NewReader(b))
|
||||
req.Body = io.NopCloser(bytes.NewReader(b))
|
||||
return true, nil
|
||||
}
|
||||
|
||||
@@ -57,7 +58,7 @@ func wrapAttestationsArray(
|
||||
if err != nil {
|
||||
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not marshal wrapped body")
|
||||
}
|
||||
req.Body = ioutil.NopCloser(bytes.NewReader(b))
|
||||
req.Body = io.NopCloser(bytes.NewReader(b))
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
@@ -79,7 +80,7 @@ func wrapValidatorIndicesArray(
|
||||
if err != nil {
|
||||
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not marshal wrapped body")
|
||||
}
|
||||
req.Body = ioutil.NopCloser(bytes.NewReader(b))
|
||||
req.Body = io.NopCloser(bytes.NewReader(b))
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
@@ -101,7 +102,7 @@ func wrapSignedAggregateAndProofArray(
|
||||
if err != nil {
|
||||
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not marshal wrapped body")
|
||||
}
|
||||
req.Body = ioutil.NopCloser(bytes.NewReader(b))
|
||||
req.Body = io.NopCloser(bytes.NewReader(b))
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
@@ -123,7 +124,7 @@ func wrapBeaconCommitteeSubscriptionsArray(
|
||||
if err != nil {
|
||||
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not marshal wrapped body")
|
||||
}
|
||||
req.Body = ioutil.NopCloser(bytes.NewReader(b))
|
||||
req.Body = io.NopCloser(bytes.NewReader(b))
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
@@ -145,7 +146,7 @@ func wrapSyncCommitteeSubscriptionsArray(
|
||||
if err != nil {
|
||||
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not marshal wrapped body")
|
||||
}
|
||||
req.Body = ioutil.NopCloser(bytes.NewReader(b))
|
||||
req.Body = io.NopCloser(bytes.NewReader(b))
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
@@ -167,7 +168,7 @@ func wrapSyncCommitteeSignaturesArray(
|
||||
if err != nil {
|
||||
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not marshal wrapped body")
|
||||
}
|
||||
req.Body = ioutil.NopCloser(bytes.NewReader(b))
|
||||
req.Body = io.NopCloser(bytes.NewReader(b))
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
@@ -189,7 +190,7 @@ func wrapSignedContributionAndProofsArray(
|
||||
if err != nil {
|
||||
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not marshal wrapped body")
|
||||
}
|
||||
req.Body = ioutil.NopCloser(bytes.NewReader(b))
|
||||
req.Body = io.NopCloser(bytes.NewReader(b))
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
@@ -209,6 +210,11 @@ type bellatrixPublishBlockRequestJson struct {
|
||||
Signature string `json:"signature" hex:"true"`
|
||||
}
|
||||
|
||||
type bellatrixPublishBlindedBlockRequestJson struct {
|
||||
BellatrixBlock *blindedBeaconBlockBellatrixJson `json:"bellatrix_block"`
|
||||
Signature string `json:"signature" hex:"true"`
|
||||
}
|
||||
|
||||
// setInitialPublishBlockPostRequest is triggered before we deserialize the request JSON into a struct.
|
||||
// We don't know which version of the block got posted, but we can determine it from the slot.
|
||||
// We know that blocks of all versions have a Message field with a Slot field,
|
||||
@@ -224,7 +230,7 @@ func setInitialPublishBlockPostRequest(endpoint *apimiddleware.Endpoint,
|
||||
}
|
||||
}{}
|
||||
|
||||
buf, err := ioutil.ReadAll(req.Body)
|
||||
buf, err := io.ReadAll(req.Body)
|
||||
if err != nil {
|
||||
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not read body")
|
||||
}
|
||||
@@ -243,7 +249,7 @@ func setInitialPublishBlockPostRequest(endpoint *apimiddleware.Endpoint,
|
||||
} else {
|
||||
endpoint.PostRequest = &signedBeaconBlockBellatrixContainerJson{}
|
||||
}
|
||||
req.Body = ioutil.NopCloser(bytes.NewBuffer(buf))
|
||||
req.Body = io.NopCloser(bytes.NewBuffer(buf))
|
||||
return true, nil
|
||||
}
|
||||
|
||||
@@ -282,6 +288,79 @@ func preparePublishedBlock(endpoint *apimiddleware.Endpoint, _ http.ResponseWrit
|
||||
return apimiddleware.InternalServerError(errors.New("unsupported block type"))
|
||||
}
|
||||
|
||||
// setInitialPublishBlindedBlockPostRequest is triggered before we deserialize the request JSON into a struct.
|
||||
// We don't know which version of the block got posted, but we can determine it from the slot.
|
||||
// We know that blocks of all versions have a Message field with a Slot field,
|
||||
// so we deserialize the request into a struct s, which has the right fields, to obtain the slot.
|
||||
// Once we know the slot, we can determine what the PostRequest field of the endpoint should be, and we set it appropriately.
|
||||
func setInitialPublishBlindedBlockPostRequest(endpoint *apimiddleware.Endpoint,
|
||||
_ http.ResponseWriter,
|
||||
req *http.Request,
|
||||
) (apimiddleware.RunDefault, apimiddleware.ErrorJson) {
|
||||
s := struct {
|
||||
Message struct {
|
||||
Slot string
|
||||
}
|
||||
}{}
|
||||
|
||||
buf, err := ioutil.ReadAll(req.Body)
|
||||
if err != nil {
|
||||
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not read body")
|
||||
}
|
||||
if err := json.Unmarshal(buf, &s); err != nil {
|
||||
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not read slot from body")
|
||||
}
|
||||
slot, err := strconv.ParseUint(s.Message.Slot, 10, 64)
|
||||
if err != nil {
|
||||
return false, apimiddleware.InternalServerErrorWithMessage(err, "slot is not an unsigned integer")
|
||||
}
|
||||
currentEpoch := slots.ToEpoch(types.Slot(slot))
|
||||
if currentEpoch < params.BeaconConfig().AltairForkEpoch {
|
||||
endpoint.PostRequest = &signedBeaconBlockContainerJson{}
|
||||
} else if currentEpoch < params.BeaconConfig().BellatrixForkEpoch {
|
||||
endpoint.PostRequest = &signedBeaconBlockAltairContainerJson{}
|
||||
} else {
|
||||
endpoint.PostRequest = &signedBlindedBeaconBlockBellatrixContainerJson{}
|
||||
}
|
||||
req.Body = ioutil.NopCloser(bytes.NewBuffer(buf))
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// In preparePublishedBlindedBlock we transform the PostRequest.
|
||||
// gRPC expects either an XXX_block field in the JSON object, but we have a message field at this point.
|
||||
// We do a simple conversion depending on the type of endpoint.PostRequest
|
||||
// (which was filled out previously in setInitialPublishBlockPostRequest).
|
||||
func preparePublishedBlindedBlock(endpoint *apimiddleware.Endpoint, _ http.ResponseWriter, _ *http.Request) apimiddleware.ErrorJson {
|
||||
if block, ok := endpoint.PostRequest.(*signedBeaconBlockContainerJson); ok {
|
||||
// Prepare post request that can be properly decoded on gRPC side.
|
||||
actualPostReq := &phase0PublishBlockRequestJson{
|
||||
Phase0Block: block.Message,
|
||||
Signature: block.Signature,
|
||||
}
|
||||
endpoint.PostRequest = actualPostReq
|
||||
return nil
|
||||
}
|
||||
if block, ok := endpoint.PostRequest.(*signedBeaconBlockAltairContainerJson); ok {
|
||||
// Prepare post request that can be properly decoded on gRPC side.
|
||||
actualPostReq := &altairPublishBlockRequestJson{
|
||||
AltairBlock: block.Message,
|
||||
Signature: block.Signature,
|
||||
}
|
||||
endpoint.PostRequest = actualPostReq
|
||||
return nil
|
||||
}
|
||||
if block, ok := endpoint.PostRequest.(*signedBlindedBeaconBlockBellatrixContainerJson); ok {
|
||||
// Prepare post request that can be properly decoded on gRPC side.
|
||||
actualPostReq := &bellatrixPublishBlindedBlockRequestJson{
|
||||
BellatrixBlock: block.Message,
|
||||
Signature: block.Signature,
|
||||
}
|
||||
endpoint.PostRequest = actualPostReq
|
||||
return nil
|
||||
}
|
||||
return apimiddleware.InternalServerError(errors.New("unsupported block type"))
|
||||
}
|
||||
|
||||
type tempSyncCommitteesResponseJson struct {
|
||||
Data *tempSyncCommitteeValidatorsJson `json:"data"`
|
||||
}
|
||||
@@ -341,7 +420,8 @@ func serializeV2Block(response interface{}) (apimiddleware.RunDefault, []byte, a
|
||||
}
|
||||
|
||||
var actualRespContainer interface{}
|
||||
if strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_PHASE0.String())) {
|
||||
switch {
|
||||
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_PHASE0.String())):
|
||||
actualRespContainer = &phase0BlockResponseJson{
|
||||
Version: respContainer.Version,
|
||||
Data: &signedBeaconBlockContainerJson{
|
||||
@@ -349,7 +429,7 @@ func serializeV2Block(response interface{}) (apimiddleware.RunDefault, []byte, a
|
||||
Signature: respContainer.Data.Signature,
|
||||
},
|
||||
}
|
||||
} else if strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_ALTAIR.String())) {
|
||||
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_ALTAIR.String())):
|
||||
actualRespContainer = &altairBlockResponseJson{
|
||||
Version: respContainer.Version,
|
||||
Data: &signedBeaconBlockAltairContainerJson{
|
||||
@@ -357,7 +437,7 @@ func serializeV2Block(response interface{}) (apimiddleware.RunDefault, []byte, a
|
||||
Signature: respContainer.Data.Signature,
|
||||
},
|
||||
}
|
||||
} else if strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_BELLATRIX.String())) {
|
||||
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_BELLATRIX.String())):
|
||||
actualRespContainer = &bellatrixBlockResponseJson{
|
||||
Version: respContainer.Version,
|
||||
Data: &signedBeaconBlockBellatrixContainerJson{
|
||||
@@ -365,7 +445,7 @@ func serializeV2Block(response interface{}) (apimiddleware.RunDefault, []byte, a
|
||||
Signature: respContainer.Data.Signature,
|
||||
},
|
||||
}
|
||||
} else {
|
||||
default:
|
||||
return false, nil, apimiddleware.InternalServerError(fmt.Errorf("unsupported block version '%s'", respContainer.Version))
|
||||
}
|
||||
|
||||
@@ -440,6 +520,11 @@ type bellatrixProduceBlockResponseJson struct {
|
||||
Data *beaconBlockBellatrixJson `json:"data"`
|
||||
}
|
||||
|
||||
type bellatrixProduceBlindedBlockResponseJson struct {
|
||||
Version string `json:"version"`
|
||||
Data *blindedBeaconBlockBellatrixJson `json:"data"`
|
||||
}
|
||||
|
||||
func serializeProducedV2Block(response interface{}) (apimiddleware.RunDefault, []byte, apimiddleware.ErrorJson) {
|
||||
respContainer, ok := response.(*produceBlockResponseV2Json)
|
||||
if !ok {
|
||||
@@ -447,22 +532,57 @@ func serializeProducedV2Block(response interface{}) (apimiddleware.RunDefault, [
|
||||
}
|
||||
|
||||
var actualRespContainer interface{}
|
||||
if strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_PHASE0.String())) {
|
||||
switch {
|
||||
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_PHASE0.String())):
|
||||
actualRespContainer = &phase0ProduceBlockResponseJson{
|
||||
Version: respContainer.Version,
|
||||
Data: respContainer.Data.Phase0Block,
|
||||
}
|
||||
} else if strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_ALTAIR.String())) {
|
||||
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_ALTAIR.String())):
|
||||
actualRespContainer = &altairProduceBlockResponseJson{
|
||||
Version: respContainer.Version,
|
||||
Data: respContainer.Data.AltairBlock,
|
||||
}
|
||||
} else if strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_BELLATRIX.String())) {
|
||||
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_BELLATRIX.String())):
|
||||
actualRespContainer = &bellatrixProduceBlockResponseJson{
|
||||
Version: respContainer.Version,
|
||||
Data: respContainer.Data.BellatrixBlock,
|
||||
}
|
||||
} else {
|
||||
default:
|
||||
return false, nil, apimiddleware.InternalServerError(fmt.Errorf("unsupported block version '%s'", respContainer.Version))
|
||||
}
|
||||
|
||||
j, err := json.Marshal(actualRespContainer)
|
||||
if err != nil {
|
||||
return false, nil, apimiddleware.InternalServerErrorWithMessage(err, "could not marshal response")
|
||||
}
|
||||
return false, j, nil
|
||||
}
|
||||
|
||||
func serializeProducedBlindedBlock(response interface{}) (apimiddleware.RunDefault, []byte, apimiddleware.ErrorJson) {
|
||||
respContainer, ok := response.(*produceBlindedBlockResponseJson)
|
||||
if !ok {
|
||||
return false, nil, apimiddleware.InternalServerError(errors.New("container is not of the correct type"))
|
||||
}
|
||||
|
||||
var actualRespContainer interface{}
|
||||
switch {
|
||||
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_PHASE0.String())):
|
||||
actualRespContainer = &phase0ProduceBlockResponseJson{
|
||||
Version: respContainer.Version,
|
||||
Data: respContainer.Data.Phase0Block,
|
||||
}
|
||||
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_ALTAIR.String())):
|
||||
actualRespContainer = &altairProduceBlockResponseJson{
|
||||
Version: respContainer.Version,
|
||||
Data: respContainer.Data.AltairBlock,
|
||||
}
|
||||
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_BELLATRIX.String())):
|
||||
actualRespContainer = &bellatrixProduceBlindedBlockResponseJson{
|
||||
Version: respContainer.Version,
|
||||
Data: respContainer.Data.BellatrixBlock,
|
||||
}
|
||||
default:
|
||||
return false, nil, apimiddleware.InternalServerError(fmt.Errorf("unsupported block version '%s'", respContainer.Version))
|
||||
}
|
||||
|
||||
|
||||
@@ -468,6 +468,112 @@ func TestPreparePublishedBlock(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestSetInitialPublishBlindedBlockPostRequest(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.BellatrixForkEpoch = params.BeaconConfig().AltairForkEpoch + 1
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
endpoint := &apimiddleware.Endpoint{}
|
||||
s := struct {
|
||||
Message struct {
|
||||
Slot string
|
||||
}
|
||||
}{}
|
||||
t.Run("Phase 0", func(t *testing.T) {
|
||||
s.Message = struct{ Slot string }{Slot: "0"}
|
||||
j, err := json.Marshal(s)
|
||||
require.NoError(t, err)
|
||||
var body bytes.Buffer
|
||||
_, err = body.Write(j)
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest("POST", "http://foo.example", &body)
|
||||
runDefault, errJson := setInitialPublishBlindedBlockPostRequest(endpoint, nil, request)
|
||||
require.Equal(t, true, errJson == nil)
|
||||
assert.Equal(t, apimiddleware.RunDefault(true), runDefault)
|
||||
assert.Equal(t, reflect.TypeOf(signedBeaconBlockContainerJson{}).Name(), reflect.Indirect(reflect.ValueOf(endpoint.PostRequest)).Type().Name())
|
||||
})
|
||||
t.Run("Altair", func(t *testing.T) {
|
||||
slot, err := slots.EpochStart(params.BeaconConfig().AltairForkEpoch)
|
||||
require.NoError(t, err)
|
||||
s.Message = struct{ Slot string }{Slot: strconv.FormatUint(uint64(slot), 10)}
|
||||
j, err := json.Marshal(s)
|
||||
require.NoError(t, err)
|
||||
var body bytes.Buffer
|
||||
_, err = body.Write(j)
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest("POST", "http://foo.example", &body)
|
||||
runDefault, errJson := setInitialPublishBlindedBlockPostRequest(endpoint, nil, request)
|
||||
require.Equal(t, true, errJson == nil)
|
||||
assert.Equal(t, apimiddleware.RunDefault(true), runDefault)
|
||||
assert.Equal(t, reflect.TypeOf(signedBeaconBlockAltairContainerJson{}).Name(), reflect.Indirect(reflect.ValueOf(endpoint.PostRequest)).Type().Name())
|
||||
})
|
||||
t.Run("Bellatrix", func(t *testing.T) {
|
||||
slot, err := slots.EpochStart(params.BeaconConfig().BellatrixForkEpoch)
|
||||
require.NoError(t, err)
|
||||
s.Message = struct{ Slot string }{Slot: strconv.FormatUint(uint64(slot), 10)}
|
||||
j, err := json.Marshal(s)
|
||||
require.NoError(t, err)
|
||||
var body bytes.Buffer
|
||||
_, err = body.Write(j)
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest("POST", "http://foo.example", &body)
|
||||
runDefault, errJson := setInitialPublishBlindedBlockPostRequest(endpoint, nil, request)
|
||||
require.Equal(t, true, errJson == nil)
|
||||
assert.Equal(t, apimiddleware.RunDefault(true), runDefault)
|
||||
assert.Equal(t, reflect.TypeOf(signedBlindedBeaconBlockBellatrixContainerJson{}).Name(), reflect.Indirect(reflect.ValueOf(endpoint.PostRequest)).Type().Name())
|
||||
})
|
||||
}
|
||||
|
||||
func TestPreparePublishedBlindedBlock(t *testing.T) {
|
||||
t.Run("Phase 0", func(t *testing.T) {
|
||||
endpoint := &apimiddleware.Endpoint{
|
||||
PostRequest: &signedBeaconBlockContainerJson{
|
||||
Message: &beaconBlockJson{
|
||||
Body: &beaconBlockBodyJson{},
|
||||
},
|
||||
},
|
||||
}
|
||||
errJson := preparePublishedBlindedBlock(endpoint, nil, nil)
|
||||
require.Equal(t, true, errJson == nil)
|
||||
_, ok := endpoint.PostRequest.(*phase0PublishBlockRequestJson)
|
||||
assert.Equal(t, true, ok)
|
||||
})
|
||||
|
||||
t.Run("Altair", func(t *testing.T) {
|
||||
endpoint := &apimiddleware.Endpoint{
|
||||
PostRequest: &signedBeaconBlockAltairContainerJson{
|
||||
Message: &beaconBlockAltairJson{
|
||||
Body: &beaconBlockBodyAltairJson{},
|
||||
},
|
||||
},
|
||||
}
|
||||
errJson := preparePublishedBlindedBlock(endpoint, nil, nil)
|
||||
require.Equal(t, true, errJson == nil)
|
||||
_, ok := endpoint.PostRequest.(*altairPublishBlockRequestJson)
|
||||
assert.Equal(t, true, ok)
|
||||
})
|
||||
|
||||
t.Run("Bellatrix", func(t *testing.T) {
|
||||
endpoint := &apimiddleware.Endpoint{
|
||||
PostRequest: &signedBlindedBeaconBlockBellatrixContainerJson{
|
||||
Message: &blindedBeaconBlockBellatrixJson{
|
||||
Body: &blindedBeaconBlockBodyBellatrixJson{},
|
||||
},
|
||||
},
|
||||
}
|
||||
errJson := preparePublishedBlindedBlock(endpoint, nil, nil)
|
||||
require.Equal(t, true, errJson == nil)
|
||||
_, ok := endpoint.PostRequest.(*bellatrixPublishBlindedBlockRequestJson)
|
||||
assert.Equal(t, true, ok)
|
||||
})
|
||||
|
||||
t.Run("unsupported block type", func(t *testing.T) {
|
||||
errJson := preparePublishedBlock(&apimiddleware.Endpoint{}, nil, nil)
|
||||
assert.Equal(t, true, strings.Contains(errJson.Msg(), "unsupported block type"))
|
||||
})
|
||||
}
|
||||
|
||||
func TestPrepareValidatorAggregates(t *testing.T) {
|
||||
body := &tempSyncCommitteesResponseJson{
|
||||
Data: &tempSyncCommitteeValidatorsJson{
|
||||
@@ -779,3 +885,113 @@ func TestSerializeProducedV2Block(t *testing.T) {
|
||||
assert.Equal(t, true, strings.Contains(errJson.Msg(), "unsupported block version"))
|
||||
})
|
||||
}
|
||||
|
||||
func TestSerializeProduceBlindedBlock(t *testing.T) {
|
||||
t.Run("Phase 0", func(t *testing.T) {
|
||||
response := &produceBlindedBlockResponseJson{
|
||||
Version: ethpbv2.Version_PHASE0.String(),
|
||||
Data: &blindedBeaconBlockContainerJson{
|
||||
Phase0Block: &beaconBlockJson{
|
||||
Slot: "1",
|
||||
ProposerIndex: "1",
|
||||
ParentRoot: "root",
|
||||
StateRoot: "root",
|
||||
Body: &beaconBlockBodyJson{},
|
||||
},
|
||||
AltairBlock: nil,
|
||||
},
|
||||
}
|
||||
runDefault, j, errJson := serializeProducedBlindedBlock(response)
|
||||
require.Equal(t, nil, errJson)
|
||||
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
|
||||
require.NotNil(t, j)
|
||||
resp := &phase0ProduceBlockResponseJson{}
|
||||
require.NoError(t, json.Unmarshal(j, resp))
|
||||
require.NotNil(t, resp.Data)
|
||||
require.NotNil(t, resp.Data)
|
||||
beaconBlock := resp.Data
|
||||
assert.Equal(t, "1", beaconBlock.Slot)
|
||||
assert.Equal(t, "1", beaconBlock.ProposerIndex)
|
||||
assert.Equal(t, "root", beaconBlock.ParentRoot)
|
||||
assert.Equal(t, "root", beaconBlock.StateRoot)
|
||||
require.NotNil(t, beaconBlock.Body)
|
||||
})
|
||||
|
||||
t.Run("Altair", func(t *testing.T) {
|
||||
response := &produceBlindedBlockResponseJson{
|
||||
Version: ethpbv2.Version_ALTAIR.String(),
|
||||
Data: &blindedBeaconBlockContainerJson{
|
||||
AltairBlock: &beaconBlockAltairJson{
|
||||
Slot: "1",
|
||||
ProposerIndex: "1",
|
||||
ParentRoot: "root",
|
||||
StateRoot: "root",
|
||||
Body: &beaconBlockBodyAltairJson{},
|
||||
},
|
||||
},
|
||||
}
|
||||
runDefault, j, errJson := serializeProducedBlindedBlock(response)
|
||||
require.Equal(t, nil, errJson)
|
||||
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
|
||||
require.NotNil(t, j)
|
||||
resp := &altairProduceBlockResponseJson{}
|
||||
require.NoError(t, json.Unmarshal(j, resp))
|
||||
require.NotNil(t, resp.Data)
|
||||
require.NotNil(t, resp.Data)
|
||||
beaconBlock := resp.Data
|
||||
assert.Equal(t, "1", beaconBlock.Slot)
|
||||
assert.Equal(t, "1", beaconBlock.ProposerIndex)
|
||||
assert.Equal(t, "root", beaconBlock.ParentRoot)
|
||||
assert.Equal(t, "root", beaconBlock.StateRoot)
|
||||
require.NotNil(t, beaconBlock.Body)
|
||||
})
|
||||
|
||||
t.Run("Bellatrix", func(t *testing.T) {
|
||||
response := &produceBlindedBlockResponseJson{
|
||||
Version: ethpbv2.Version_BELLATRIX.String(),
|
||||
Data: &blindedBeaconBlockContainerJson{
|
||||
BellatrixBlock: &blindedBeaconBlockBellatrixJson{
|
||||
Slot: "1",
|
||||
ProposerIndex: "1",
|
||||
ParentRoot: "root",
|
||||
StateRoot: "root",
|
||||
Body: &blindedBeaconBlockBodyBellatrixJson{},
|
||||
},
|
||||
},
|
||||
}
|
||||
runDefault, j, errJson := serializeProducedBlindedBlock(response)
|
||||
require.Equal(t, nil, errJson)
|
||||
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
|
||||
require.NotNil(t, j)
|
||||
resp := &bellatrixProduceBlindedBlockResponseJson{}
|
||||
require.NoError(t, json.Unmarshal(j, resp))
|
||||
require.NotNil(t, resp.Data)
|
||||
require.NotNil(t, resp.Data)
|
||||
beaconBlock := resp.Data
|
||||
assert.Equal(t, "1", beaconBlock.Slot)
|
||||
assert.Equal(t, "1", beaconBlock.ProposerIndex)
|
||||
assert.Equal(t, "root", beaconBlock.ParentRoot)
|
||||
assert.Equal(t, "root", beaconBlock.StateRoot)
|
||||
require.NotNil(t, beaconBlock.Body)
|
||||
})
|
||||
|
||||
t.Run("incorrect response type", func(t *testing.T) {
|
||||
response := &types.Empty{}
|
||||
runDefault, j, errJson := serializeProducedV2Block(response)
|
||||
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
|
||||
require.Equal(t, 0, len(j))
|
||||
require.NotNil(t, errJson)
|
||||
assert.Equal(t, true, strings.Contains(errJson.Msg(), "container is not of the correct type"))
|
||||
})
|
||||
|
||||
t.Run("unsupported block version", func(t *testing.T) {
|
||||
response := &produceBlockResponseV2Json{
|
||||
Version: "unsupported",
|
||||
}
|
||||
runDefault, j, errJson := serializeProducedV2Block(response)
|
||||
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
|
||||
require.Equal(t, 0, len(j))
|
||||
require.NotNil(t, errJson)
|
||||
assert.Equal(t, true, strings.Contains(errJson.Msg(), "unsupported block version"))
|
||||
})
|
||||
}
|
||||
|
||||
@@ -28,6 +28,7 @@ func (_ *BeaconEndpointFactory) Paths() []string {
|
||||
"/eth/v1/beacon/headers",
|
||||
"/eth/v1/beacon/headers/{block_id}",
|
||||
"/eth/v1/beacon/blocks",
|
||||
"/eth/v1/beacon/blinded_blocks",
|
||||
"/eth/v1/beacon/blocks/{block_id}",
|
||||
"/eth/v2/beacon/blocks/{block_id}",
|
||||
"/eth/v1/beacon/blocks/{block_id}/root",
|
||||
@@ -58,6 +59,7 @@ func (_ *BeaconEndpointFactory) Paths() []string {
|
||||
"/eth/v1/validator/duties/sync/{epoch}",
|
||||
"/eth/v1/validator/blocks/{slot}",
|
||||
"/eth/v2/validator/blocks/{slot}",
|
||||
"/eth/v1/validator/blinded_blocks/{slot}",
|
||||
"/eth/v1/validator/attestation_data",
|
||||
"/eth/v1/validator/aggregate_attestation",
|
||||
"/eth/v1/validator/beacon_committee_subscriptions",
|
||||
@@ -108,6 +110,11 @@ func (_ *BeaconEndpointFactory) Create(path string) (*apimiddleware.Endpoint, er
|
||||
OnPreDeserializeRequestBodyIntoContainer: setInitialPublishBlockPostRequest,
|
||||
OnPostDeserializeRequestBodyIntoContainer: preparePublishedBlock,
|
||||
}
|
||||
case "/eth/v1/beacon/blinded_blocks":
|
||||
endpoint.Hooks = apimiddleware.HookCollection{
|
||||
OnPreDeserializeRequestBodyIntoContainer: setInitialPublishBlindedBlockPostRequest,
|
||||
OnPostDeserializeRequestBodyIntoContainer: preparePublishedBlindedBlock,
|
||||
}
|
||||
case "/eth/v1/beacon/blocks/{block_id}":
|
||||
endpoint.GetResponse = &blockResponseJson{}
|
||||
endpoint.CustomHandlers = []apimiddleware.CustomHandler{handleGetBeaconBlockSSZ}
|
||||
@@ -214,6 +221,13 @@ func (_ *BeaconEndpointFactory) Create(path string) (*apimiddleware.Endpoint, er
|
||||
endpoint.Hooks = apimiddleware.HookCollection{
|
||||
OnPreSerializeMiddlewareResponseIntoJson: serializeProducedV2Block,
|
||||
}
|
||||
case "/eth/v1/validator/blinded_blocks/{slot}":
|
||||
endpoint.GetResponse = &produceBlindedBlockResponseJson{}
|
||||
endpoint.RequestURLLiterals = []string{"slot"}
|
||||
endpoint.RequestQueryParams = []apimiddleware.QueryParam{{Name: "randao_reveal", Hex: true}, {Name: "graffiti", Hex: true}}
|
||||
endpoint.Hooks = apimiddleware.HookCollection{
|
||||
OnPreSerializeMiddlewareResponseIntoJson: serializeProducedBlindedBlock,
|
||||
}
|
||||
case "/eth/v1/validator/attestation_data":
|
||||
endpoint.GetResponse = &produceAttestationDataResponseJson{}
|
||||
endpoint.RequestQueryParams = []apimiddleware.QueryParam{{Name: "slot"}, {Name: "committee_index"}}
|
||||
|
||||
@@ -270,6 +270,12 @@ type produceBlockResponseV2Json struct {
|
||||
Data *beaconBlockContainerV2Json `json:"data"`
|
||||
}
|
||||
|
||||
// produceBlindedBlockResponseJson is used in /v1/validator/blinded_blocks/{slot} API endpoint.
|
||||
type produceBlindedBlockResponseJson struct {
|
||||
Version string `json:"version"`
|
||||
Data *blindedBeaconBlockContainerJson `json:"data"`
|
||||
}
|
||||
|
||||
// produceAttestationDataResponseJson is used in /validator/attestation_data API endpoint.
|
||||
type produceAttestationDataResponseJson struct {
|
||||
Data *attestationDataJson `json:"data"`
|
||||
@@ -371,6 +377,12 @@ type beaconBlockContainerV2Json struct {
|
||||
BellatrixBlock *beaconBlockBellatrixJson `json:"bellatrix_block"`
|
||||
}
|
||||
|
||||
type blindedBeaconBlockContainerJson struct {
|
||||
Phase0Block *beaconBlockJson `json:"phase0_block"`
|
||||
AltairBlock *beaconBlockAltairJson `json:"altair_block"`
|
||||
BellatrixBlock *blindedBeaconBlockBellatrixJson `json:"bellatrix_block"`
|
||||
}
|
||||
|
||||
type signedBeaconBlockAltairContainerJson struct {
|
||||
Message *beaconBlockAltairJson `json:"message"`
|
||||
Signature string `json:"signature" hex:"true"`
|
||||
@@ -381,6 +393,11 @@ type signedBeaconBlockBellatrixContainerJson struct {
|
||||
Signature string `json:"signature" hex:"true"`
|
||||
}
|
||||
|
||||
type signedBlindedBeaconBlockBellatrixContainerJson struct {
|
||||
Message *blindedBeaconBlockBellatrixJson `json:"message"`
|
||||
Signature string `json:"signature" hex:"true"`
|
||||
}
|
||||
|
||||
type beaconBlockAltairJson struct {
|
||||
Slot string `json:"slot"`
|
||||
ProposerIndex string `json:"proposer_index"`
|
||||
@@ -397,6 +414,14 @@ type beaconBlockBellatrixJson struct {
|
||||
Body *beaconBlockBodyBellatrixJson `json:"body"`
|
||||
}
|
||||
|
||||
type blindedBeaconBlockBellatrixJson struct {
|
||||
Slot string `json:"slot"`
|
||||
ProposerIndex string `json:"proposer_index"`
|
||||
ParentRoot string `json:"parent_root" hex:"true"`
|
||||
StateRoot string `json:"state_root" hex:"true"`
|
||||
Body *blindedBeaconBlockBodyBellatrixJson `json:"body"`
|
||||
}
|
||||
|
||||
type beaconBlockBodyAltairJson struct {
|
||||
RandaoReveal string `json:"randao_reveal" hex:"true"`
|
||||
Eth1Data *eth1DataJson `json:"eth1_data"`
|
||||
@@ -422,6 +447,19 @@ type beaconBlockBodyBellatrixJson struct {
|
||||
ExecutionPayload *executionPayloadJson `json:"execution_payload"`
|
||||
}
|
||||
|
||||
type blindedBeaconBlockBodyBellatrixJson struct {
|
||||
RandaoReveal string `json:"randao_reveal" hex:"true"`
|
||||
Eth1Data *eth1DataJson `json:"eth1_data"`
|
||||
Graffiti string `json:"graffiti" hex:"true"`
|
||||
ProposerSlashings []*proposerSlashingJson `json:"proposer_slashings"`
|
||||
AttesterSlashings []*attesterSlashingJson `json:"attester_slashings"`
|
||||
Attestations []*attestationJson `json:"attestations"`
|
||||
Deposits []*depositJson `json:"deposits"`
|
||||
VoluntaryExits []*signedVoluntaryExitJson `json:"voluntary_exits"`
|
||||
SyncAggregate *syncAggregateJson `json:"sync_aggregate"`
|
||||
ExecutionPayloadHeader *executionPayloadHeaderJson `json:"execution_payload"`
|
||||
}
|
||||
|
||||
type executionPayloadJson struct {
|
||||
ParentHash string `json:"parent_hash" hex:"true"`
|
||||
FeeRecipient string `json:"fee_recipient" hex:"true"`
|
||||
@@ -434,7 +472,7 @@ type executionPayloadJson struct {
|
||||
GasUsed string `json:"gas_used"`
|
||||
TimeStamp string `json:"timestamp"`
|
||||
ExtraData string `json:"extra_data" hex:"true"`
|
||||
BaseFeePerGas string `json:"base_fee_per_gas" hex:"true"`
|
||||
BaseFeePerGas string `json:"base_fee_per_gas" uint256:"true"`
|
||||
BlockHash string `json:"block_hash" hex:"true"`
|
||||
Transactions []string `json:"transactions" hex:"true"`
|
||||
}
|
||||
|
||||
@@ -37,6 +37,7 @@ go_library(
|
||||
"//beacon-chain/state/v1:go_default_library",
|
||||
"//beacon-chain/sync:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//crypto/bls:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
@@ -94,6 +95,7 @@ go_test(
|
||||
"//config/params:go_default_library",
|
||||
"//crypto/bls:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//encoding/ssz:go_default_library",
|
||||
"//proto/eth/service:go_default_library",
|
||||
"//proto/eth/v1:go_default_library",
|
||||
"//proto/eth/v2:go_default_library",
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db/filters"
|
||||
rpchelpers "github.com/prysmaticlabs/prysm/beacon-chain/rpc/eth/helpers"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
ethpbv1 "github.com/prysmaticlabs/prysm/proto/eth/v1"
|
||||
@@ -201,59 +202,58 @@ func (bs *Server) SubmitBlock(ctx context.Context, req *ethpbv2.SignedBeaconBloc
ctx, span := trace.StartSpan(ctx, "beacon.SubmitBlock")
defer span.End()

var wsb block.SignedBeaconBlock
var err error
var v1alpha1Blk interface{}

switch blk := req.Message.(type) {
case *ethpbv2.SignedBeaconBlockContainerV2_Phase0Block:
v1alpha1Blk, err = migration.V1ToV1Alpha1SignedBlock(&ethpbv1.SignedBeaconBlock{Block: blk.Phase0Block, Signature: req.Signature})
if err != nil {
return nil, status.Errorf(codes.InvalidArgument, "Could not convert block to v1 block: %v", err)
phase0BlkContainer, ok := req.Message.(*ethpbv2.SignedBeaconBlockContainerV2_Phase0Block)
if ok {
if err := bs.submitPhase0Block(ctx, phase0BlkContainer.Phase0Block, req.Signature); err != nil {
return nil, err
}
wsb, err = wrapper.WrappedSignedBeaconBlock(v1alpha1Blk)
case *ethpbv2.SignedBeaconBlockContainerV2_AltairBlock:
v1alpha1Blk, err = migration.AltairToV1Alpha1SignedBlock(&ethpbv2.SignedBeaconBlockAltair{Message: blk.AltairBlock, Signature: req.Signature})
if err != nil {
return nil, status.Errorf(codes.InvalidArgument, "Could not convert block to v1 block: %v", err)
}
altairBlkContainer, ok := req.Message.(*ethpbv2.SignedBeaconBlockContainerV2_AltairBlock)
if ok {
if err := bs.submitAltairBlock(ctx, altairBlkContainer.AltairBlock, req.Signature); err != nil {
return nil, err
}
wsb, err = wrapper.WrappedSignedBeaconBlock(v1alpha1Blk)
case *ethpbv2.SignedBeaconBlockContainerV2_BellatrixBlock:
v1alpha1Blk, err = migration.BellatrixToV1Alpha1SignedBlock(&ethpbv2.SignedBeaconBlockBellatrix{Message: blk.BellatrixBlock, Signature: req.Signature})
if err != nil {
return nil, status.Errorf(codes.InvalidArgument, "Could not convert block to v1 block: %v", err)
}
bellatrixBlkContainer, ok := req.Message.(*ethpbv2.SignedBeaconBlockContainerV2_BellatrixBlock)
if ok {
if err := bs.submitBellatrixBlock(ctx, bellatrixBlkContainer.BellatrixBlock, req.Signature); err != nil {
return nil, err
}
}
return &emptypb.Empty{}, nil
}

// SubmitBlindedBlock instructs the beacon node to use the components of the `SignedBlindedBeaconBlock` to construct
// and publish a `SignedBeaconBlock` by swapping out the `transactions_root` for the corresponding full list of `transactions`.
// The beacon node should broadcast a newly constructed `SignedBeaconBlock` to the beacon network,
// to be included in the beacon chain. The beacon node is not required to validate the signed
// `BeaconBlock`, and a successful response (20X) only indicates that the broadcast has been
// successful. The beacon node is expected to integrate the new block into its state, and
// therefore validate the block internally, however blocks which fail the validation are still
// broadcast but a different status code is returned (202).
func (bs *Server) SubmitBlindedBlock(ctx context.Context, req *ethpbv2.SignedBlindedBeaconBlockContainer) (*emptypb.Empty, error) {
ctx, span := trace.StartSpan(ctx, "beacon.SubmitBlindedBlock")
defer span.End()

bellatrixBlkContainer, ok := req.Message.(*ethpbv2.SignedBlindedBeaconBlockContainer_BellatrixBlock)
if ok {
if err := bs.submitBlindedBellatrixBlock(ctx, bellatrixBlkContainer.BellatrixBlock, req.Signature); err != nil {
return nil, err
}
wsb, err = wrapper.WrappedSignedBeaconBlock(v1alpha1Blk)
default:
return nil, status.Errorf(codes.InvalidArgument, "Unsupported block type %T", req.Message)
}

if err != nil {
return nil, status.Errorf(codes.InvalidArgument, "Could not wrap beacon block: %v", err)
// At the end we check forks that don't have blinded blocks.
phase0BlkContainer, ok := req.Message.(*ethpbv2.SignedBlindedBeaconBlockContainer_Phase0Block)
if ok {
if err := bs.submitPhase0Block(ctx, phase0BlkContainer.Phase0Block, req.Signature); err != nil {
return nil, err
}
}

root, err := wsb.Block().HashTreeRoot()
if err != nil {
return nil, status.Errorf(codes.InvalidArgument, "Could not tree hash block: %v", err)
}

// Do not block proposal critical path with debug logging or block feed updates.
defer func() {
log.WithField("blockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debugf(
"Block proposal received via RPC")
bs.BlockNotifier.BlockFeed().Send(&feed.Event{
Type: blockfeed.ReceivedBlock,
Data: &blockfeed.ReceivedBlockData{SignedBlock: wsb, IsOptimistic: false},
})
}()

// Broadcast the new block to the network.
if err := bs.Broadcaster.Broadcast(ctx, wsb.Proto()); err != nil {
return nil, status.Errorf(codes.Internal, "Could not broadcast block: %v", err)
}

if err := bs.BlockReceiver.ReceiveBlock(ctx, wsb, root); err != nil {
return nil, status.Errorf(codes.Internal, "Could not process beacon block: %v", err)
altairBlkContainer, ok := req.Message.(*ethpbv2.SignedBlindedBeaconBlockContainer_AltairBlock)
if ok {
if err := bs.submitAltairBlock(ctx, altairBlkContainer.AltairBlock, req.Signature); err != nil {
return nil, err
}
}

return &emptypb.Empty{}, nil
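For readers skimming the diff, here is a minimal, hedged sketch (not part of this change) of the request shape the new SubmitBlindedBlock handler consumes for a post-Bellatrix block. It assumes only the ethpbv2 container types that appear in this hunk and in the tests below; the package name and helper name are arbitrary.

package sketch

import (
	ethpbv2 "github.com/prysmaticlabs/prysm/proto/eth/v2"
)

// buildBlindedBellatrixRequest wraps a signed blinded Bellatrix block in the
// container type that SubmitBlindedBlock expects; the block carries only the
// execution payload header (transactions_root), not the full transaction list.
func buildBlindedBellatrixRequest(blk *ethpbv2.BlindedBeaconBlockBellatrix, sig []byte) *ethpbv2.SignedBlindedBeaconBlockContainer {
	return &ethpbv2.SignedBlindedBeaconBlockContainer{
		Message:   &ethpbv2.SignedBlindedBeaconBlockContainer_BellatrixBlock{BellatrixBlock: blk},
		Signature: sig,
	}
}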
@@ -706,3 +706,100 @@ func handleGetBlockError(blk block.SignedBeaconBlock, err error) error {
}
return nil
}

func (bs *Server) submitPhase0Block(ctx context.Context, phase0Blk *ethpbv1.BeaconBlock, sig []byte) error {
v1alpha1Blk, err := migration.V1ToV1Alpha1SignedBlock(&ethpbv1.SignedBeaconBlock{Block: phase0Blk, Signature: sig})
if err != nil {
return status.Errorf(codes.InvalidArgument, "Could not convert block to v1 block")
}
wrappedPhase0Blk, err := wrapper.WrappedSignedBeaconBlock(v1alpha1Blk)
if err != nil {
return status.Errorf(codes.InvalidArgument, "Could not prepare block")
}
root, err := phase0Blk.HashTreeRoot()
if err != nil {
return status.Errorf(codes.InvalidArgument, "Could not tree hash block: %v", err)
}

return bs.submitBlock(ctx, root, wrappedPhase0Blk)
}

func (bs *Server) submitAltairBlock(ctx context.Context, altairBlk *ethpbv2.BeaconBlockAltair, sig []byte) error {
v1alpha1Blk, err := migration.AltairToV1Alpha1SignedBlock(&ethpbv2.SignedBeaconBlockAltair{Message: altairBlk, Signature: sig})
if err != nil {
return status.Errorf(codes.InvalidArgument, "Could not convert block to v1 block")
}
wrappedAltairBlk, err := wrapper.WrappedSignedBeaconBlock(v1alpha1Blk)
if err != nil {
return status.Errorf(codes.InvalidArgument, "Could not prepare block")
}

root, err := altairBlk.HashTreeRoot()
if err != nil {
return status.Errorf(codes.InvalidArgument, "Could not tree hash block: %v", err)
}

return bs.submitBlock(ctx, root, wrappedAltairBlk)
}

func (bs *Server) submitBellatrixBlock(ctx context.Context, bellatrixBlk *ethpbv2.BeaconBlockBellatrix, sig []byte) error {
v1alpha1Blk, err := migration.BellatrixToV1Alpha1SignedBlock(&ethpbv2.SignedBeaconBlockBellatrix{Message: bellatrixBlk, Signature: sig})
if err != nil {
return status.Errorf(codes.InvalidArgument, "Could not convert block to v1 block")
}
wrappedBellatrixBlk, err := wrapper.WrappedSignedBeaconBlock(v1alpha1Blk)
if err != nil {
return status.Errorf(codes.InvalidArgument, "Could not prepare block")
}

root, err := bellatrixBlk.HashTreeRoot()
if err != nil {
return status.Errorf(codes.InvalidArgument, "Could not tree hash block: %v", err)
}

return bs.submitBlock(ctx, root, wrappedBellatrixBlk)
}

func (bs *Server) submitBlindedBellatrixBlock(ctx context.Context, blindedBellatrixBlk *ethpbv2.BlindedBeaconBlockBellatrix, sig []byte) error {
v1alpha1SignedBlk, err := migration.BlindedBellatrixToV1Alpha1SignedBlock(&ethpbv2.SignedBlindedBeaconBlockBellatrix{
Message: blindedBellatrixBlk,
Signature: sig,
})
if err != nil {
return status.Errorf(codes.Internal, "Could not get blinded block: %v", err)
}
wrappedBellatrixSignedBlk, err := wrapper.WrappedSignedBeaconBlock(v1alpha1SignedBlk)
if err != nil {
return status.Errorf(codes.Internal, "Could not get blinded block: %v", err)
}

root, err := blindedBellatrixBlk.HashTreeRoot()
if err != nil {
return status.Errorf(codes.InvalidArgument, "Could not tree hash block: %v", err)
}

return bs.submitBlock(ctx, root, wrappedBellatrixSignedBlk)
}

func (bs *Server) submitBlock(ctx context.Context, blockRoot [fieldparams.RootLength]byte, block block.SignedBeaconBlock) error {
// Do not block proposal critical path with debug logging or block feed updates.
defer func() {
log.WithField("blockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(blockRoot[:]))).Debugf(
"Block proposal received via RPC")
bs.BlockNotifier.BlockFeed().Send(&feed.Event{
Type: blockfeed.ReceivedBlock,
Data: &blockfeed.ReceivedBlockData{SignedBlock: block},
})
}()

// Broadcast the new block to the network.
if err := bs.Broadcaster.Broadcast(ctx, block.Proto()); err != nil {
return status.Errorf(codes.Internal, "Could not broadcast block: %v", err)
}

if err := bs.BlockReceiver.ReceiveBlock(ctx, block, blockRoot); err != nil {
return status.Errorf(codes.Internal, "Could not process beacon block: %v", err)
}

return nil
}
@@ -11,6 +11,7 @@ import (
dbTest "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
mockp2p "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/encoding/ssz"
ethpbv1 "github.com/prysmaticlabs/prysm/proto/eth/v1"
ethpbv2 "github.com/prysmaticlabs/prysm/proto/eth/v2"
"github.com/prysmaticlabs/prysm/proto/migration"
@@ -428,7 +429,7 @@ func TestServer_ListBlockHeaders(t *testing.T) {
})
}

func TestServer_ProposeBlock_OK(t *testing.T) {
func TestServer_SubmitBlock_OK(t *testing.T) {
t.Run("Phase 0", func(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
@@ -556,6 +557,140 @@ func TestServer_ProposeBlock_OK(t *testing.T) {
})
}

func TestSubmitBlindedBlock(t *testing.T) {
t.Run("Phase 0", func(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()

genesis := util.NewBeaconBlock()
blk, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), blk), "Could not save genesis block")

numDeposits := uint64(64)
beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
bsRoot, err := beaconState.HashTreeRoot(ctx)
require.NoError(t, err)
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")

c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
beaconChainServer := &Server{
BeaconDB: beaconDB,
BlockReceiver: c,
ChainInfoFetcher: c,
BlockNotifier: c.BlockNotifier(),
Broadcaster: mockp2p.NewTestP2P(t),
}
req := util.NewBeaconBlock()
req.Block.Slot = 5
req.Block.ParentRoot = bsRoot[:]
v1Block, err := migration.V1Alpha1ToV1SignedBlock(req)
require.NoError(t, err)
wrapped, err := wrapper.WrappedSignedBeaconBlock(req)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wrapped))
blockReq := &ethpbv2.SignedBlindedBeaconBlockContainer{
Message: &ethpbv2.SignedBlindedBeaconBlockContainer_Phase0Block{Phase0Block: v1Block.Block},
Signature: v1Block.Signature,
}
_, err = beaconChainServer.SubmitBlindedBlock(context.Background(), blockReq)
assert.NoError(t, err, "Could not propose block correctly")
})

t.Run("Altair", func(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()

genesis := util.NewBeaconBlockAltair()
blk, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), blk), "Could not save genesis block")

numDeposits := uint64(64)
beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
bsRoot, err := beaconState.HashTreeRoot(ctx)
require.NoError(t, err)
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")

c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
beaconChainServer := &Server{
BeaconDB: beaconDB,
BlockReceiver: c,
ChainInfoFetcher: c,
BlockNotifier: c.BlockNotifier(),
Broadcaster: mockp2p.NewTestP2P(t),
}
req := util.NewBeaconBlockAltair()
req.Block.Slot = 5
req.Block.ParentRoot = bsRoot[:]
v2Block, err := migration.V1Alpha1BeaconBlockAltairToV2(req.Block)
require.NoError(t, err)
wrapped, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wrapped))
blockReq := &ethpbv2.SignedBlindedBeaconBlockContainer{
Message: &ethpbv2.SignedBlindedBeaconBlockContainer_AltairBlock{AltairBlock: v2Block},
Signature: req.Signature,
}
_, err = beaconChainServer.SubmitBlindedBlock(context.Background(), blockReq)
assert.NoError(t, err, "Could not propose block correctly")
})

t.Run("Bellatrix - OK", func(t *testing.T) {
transactions := [][]byte{[]byte("transaction1"), []byte("transaction2")}
transactionsRoot, err := ssz.TransactionsRoot(transactions)
require.NoError(t, err)

beaconDB := dbTest.SetupDB(t)
ctx := context.Background()

genesis := util.NewBeaconBlockBellatrix()
wrapped, err := wrapper.WrappedSignedBeaconBlock(genesis)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapped), "Could not save genesis block")

numDeposits := uint64(64)
beaconState, _ := util.DeterministicGenesisState(t, numDeposits)
bsRoot, err := beaconState.HashTreeRoot(ctx)
require.NoError(t, err)
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")

c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
beaconChainServer := &Server{
BeaconDB: beaconDB,
BlockReceiver: c,
ChainInfoFetcher: c,
BlockNotifier: c.BlockNotifier(),
Broadcaster: mockp2p.NewTestP2P(t),
}

blk := util.NewBeaconBlockBellatrix()
blk.Block.Slot = 5
blk.Block.ParentRoot = bsRoot[:]
blk.Block.Body.ExecutionPayload.Transactions = transactions
blindedBlk := util.NewBlindedBeaconBlockBellatrix()
blindedBlk.Message.Slot = 5
blindedBlk.Message.ParentRoot = bsRoot[:]
blindedBlk.Message.Body.ExecutionPayloadHeader.TransactionsRoot = transactionsRoot[:]
wrapped, err = wrapper.WrappedSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wrapped))

blockReq := &ethpbv2.SignedBlindedBeaconBlockContainer{
Message: &ethpbv2.SignedBlindedBeaconBlockContainer_BellatrixBlock{BellatrixBlock: blindedBlk.Message},
Signature: blindedBlk.Signature,
}
_, err = beaconChainServer.SubmitBlindedBlock(context.Background(), blockReq)
assert.NoError(t, err)
})
}

func TestServer_GetBlock(t *testing.T) {
beaconDB := dbTest.SetupDB(t)
ctx := context.Background()
@@ -286,6 +286,7 @@ func (vs *Server) ProduceBlock(ctx context.Context, req *ethpbv1.ProduceBlockReq
return &ethpbv1.ProduceBlockResponse{Data: block}, nil
}

// ProduceBlockV2 requests the beacon node to produce a valid unsigned beacon block, which can then be signed by a proposer and submitted.
func (vs *Server) ProduceBlockV2(ctx context.Context, req *ethpbv1.ProduceBlockRequest) (*ethpbv2.ProduceBlockResponseV2, error) {
_, span := trace.StartSpan(ctx, "validator.ProduceBlockV2")
defer span.End()
@@ -347,6 +348,71 @@ func (vs *Server) ProduceBlockV2(ctx context.Context, req *ethpbv1.ProduceBlockR
return nil, status.Error(codes.InvalidArgument, "Unsupported block type")
}

// ProduceBlindedBlock requests the beacon node to produce a valid unsigned blinded beacon block,
// which can then be signed by a proposer and submitted.
//
// Pre-Bellatrix, this endpoint will return a regular block.
func (vs *Server) ProduceBlindedBlock(ctx context.Context, req *ethpbv1.ProduceBlockRequest) (*ethpbv2.ProduceBlindedBlockResponse, error) {
ctx, span := trace.StartSpan(ctx, "validator.ProduceBlindedBlock")
defer span.End()

if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher); err != nil {
// We simply return the error because it's already a gRPC error.
return nil, err
}

v1alpha1req := &ethpbalpha.BlockRequest{
Slot: req.Slot,
RandaoReveal: req.RandaoReveal,
Graffiti: req.Graffiti,
}
v1alpha1resp, err := vs.V1Alpha1Server.GetBeaconBlock(ctx, v1alpha1req)
if err != nil {
// We simply return err because it's already of a gRPC error type.
return nil, err
}
phase0Block, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Phase0)
if ok {
block, err := migration.V1Alpha1ToV1Block(phase0Block.Phase0)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
}
return &ethpbv2.ProduceBlindedBlockResponse{
Version: ethpbv2.Version_PHASE0,
Data: &ethpbv2.BlindedBeaconBlockContainer{
Block: &ethpbv2.BlindedBeaconBlockContainer_Phase0Block{Phase0Block: block},
},
}, nil
}
altairBlock, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Altair)
if ok {
block, err := migration.V1Alpha1BeaconBlockAltairToV2(altairBlock.Altair)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
}
return &ethpbv2.ProduceBlindedBlockResponse{
Version: ethpbv2.Version_ALTAIR,
Data: &ethpbv2.BlindedBeaconBlockContainer{
Block: &ethpbv2.BlindedBeaconBlockContainer_AltairBlock{AltairBlock: block},
},
}, nil
}
bellatrixBlock, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Bellatrix)
if ok {
block, err := migration.V1Alpha1BeaconBlockBellatrixToV2Blinded(bellatrixBlock.Bellatrix)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
}
return &ethpbv2.ProduceBlindedBlockResponse{
Version: ethpbv2.Version_BELLATRIX,
Data: &ethpbv2.BlindedBeaconBlockContainer{
Block: &ethpbv2.BlindedBeaconBlockContainer_BellatrixBlock{BellatrixBlock: block},
},
}, nil
}
return nil, status.Error(codes.InvalidArgument, "Unsupported block type")
}

// PrepareBeaconProposer caches and updates the fee recipient for the given proposer.
func (vs *Server) PrepareBeaconProposer(
ctx context.Context, request *ethpbv1.PrepareBeaconProposerRequest,
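As a rough sketch of the round trip these two endpoints enable (produce a blinded block, sign it externally, submit it back), the snippet below defines a hypothetical local interface matching the two handlers above. It is illustrative only: a real validator client would go through the generated gRPC/REST service, the interface and function names are made up, and only the Bellatrix case is handled.

package sketch

import (
	"context"
	"errors"

	types "github.com/prysmaticlabs/eth2-types"
	ethpbv1 "github.com/prysmaticlabs/prysm/proto/eth/v1"
	ethpbv2 "github.com/prysmaticlabs/prysm/proto/eth/v2"
	"google.golang.org/protobuf/types/known/emptypb"
)

// blindedBlockAPI is a hypothetical stand-in for the two handlers above.
type blindedBlockAPI interface {
	ProduceBlindedBlock(ctx context.Context, req *ethpbv1.ProduceBlockRequest) (*ethpbv2.ProduceBlindedBlockResponse, error)
	SubmitBlindedBlock(ctx context.Context, req *ethpbv2.SignedBlindedBeaconBlockContainer) (*emptypb.Empty, error)
}

// proposeBlinded produces a blinded block, lets the caller sign it, and submits it back.
// Pre-Bellatrix forks come back as regular blocks and are not handled in this sketch.
func proposeBlinded(ctx context.Context, api blindedBlockAPI, slot types.Slot, randao, graffiti []byte,
	sign func(*ethpbv2.BlindedBeaconBlockBellatrix) []byte) error {
	resp, err := api.ProduceBlindedBlock(ctx, &ethpbv1.ProduceBlockRequest{
		Slot:         slot,
		RandaoReveal: randao,
		Graffiti:     graffiti,
	})
	if err != nil {
		return err
	}
	blk, ok := resp.Data.Block.(*ethpbv2.BlindedBeaconBlockContainer_BellatrixBlock)
	if !ok {
		return errors.New("this sketch only handles Bellatrix blinded blocks")
	}
	_, err = api.SubmitBlindedBlock(ctx, &ethpbv2.SignedBlindedBeaconBlockContainer{
		Message:   &ethpbv2.SignedBlindedBeaconBlockContainer_BellatrixBlock{BellatrixBlock: blk.BellatrixBlock},
		Signature: sign(blk.BellatrixBlock),
	})
	return err
}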
@@ -1150,6 +1150,407 @@ func TestProduceBlockV2_SyncNotReady(t *testing.T) {
|
||||
assert.ErrorContains(t, "Syncing to latest head, not ready to respond", err)
|
||||
}
|
||||
|
||||
func TestProduceBlindedBlock(t *testing.T) {
|
||||
t.Run("Phase 0", func(t *testing.T) {
|
||||
db := dbutil.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
beaconState, privKeys := util.DeterministicGenesisState(t, 64)
|
||||
|
||||
stateRoot, err := beaconState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err, "Could not hash genesis state")
|
||||
|
||||
genesis := blocks.NewGenesisBlock(stateRoot[:])
|
||||
wrapped, err := wrapper.WrappedSignedBeaconBlock(genesis)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveBlock(ctx, wrapped), "Could not save genesis block")
|
||||
|
||||
parentRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
require.NoError(t, db.SaveState(ctx, beaconState, parentRoot), "Could not save genesis state")
|
||||
require.NoError(t, db.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
|
||||
|
||||
v1Alpha1Server := &v1alpha1validator.Server{
|
||||
HeadFetcher: &mockChain.ChainService{State: beaconState, Root: parentRoot[:]},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockReceiver: &mockChain.ChainService{},
|
||||
ChainStartFetcher: &mockPOW.POWChain{},
|
||||
Eth1InfoFetcher: &mockPOW.POWChain{},
|
||||
Eth1BlockFetcher: &mockPOW.POWChain{},
|
||||
MockEth1Votes: true,
|
||||
AttPool: attestations.NewPool(),
|
||||
SlashingsPool: slashings.NewPool(),
|
||||
ExitPool: voluntaryexits.NewPool(),
|
||||
StateGen: stategen.New(db),
|
||||
}
|
||||
|
||||
proposerSlashings := make([]*ethpbalpha.ProposerSlashing, params.BeaconConfig().MaxProposerSlashings)
|
||||
for i := types.ValidatorIndex(0); uint64(i) < params.BeaconConfig().MaxProposerSlashings; i++ {
|
||||
proposerSlashing, err := util.GenerateProposerSlashingForValidator(
|
||||
beaconState,
|
||||
privKeys[i],
|
||||
i, /* validator index */
|
||||
)
|
||||
require.NoError(t, err)
|
||||
proposerSlashings[i] = proposerSlashing
|
||||
err = v1Alpha1Server.SlashingsPool.InsertProposerSlashing(context.Background(), beaconState, proposerSlashing)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
attSlashings := make([]*ethpbalpha.AttesterSlashing, params.BeaconConfig().MaxAttesterSlashings)
|
||||
for i := uint64(0); i < params.BeaconConfig().MaxAttesterSlashings; i++ {
|
||||
attesterSlashing, err := util.GenerateAttesterSlashingForValidator(
|
||||
beaconState,
|
||||
privKeys[i+params.BeaconConfig().MaxProposerSlashings],
|
||||
types.ValidatorIndex(i+params.BeaconConfig().MaxProposerSlashings), /* validator index */
|
||||
)
|
||||
require.NoError(t, err)
|
||||
attSlashings[i] = attesterSlashing
|
||||
err = v1Alpha1Server.SlashingsPool.InsertAttesterSlashing(context.Background(), beaconState, attesterSlashing)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
v1Server := &Server{
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
V1Alpha1Server: v1Alpha1Server,
|
||||
}
|
||||
randaoReveal, err := util.RandaoReveal(beaconState, 0, privKeys)
|
||||
require.NoError(t, err)
|
||||
graffiti := bytesutil.ToBytes32([]byte("eth2"))
|
||||
req := ðpbv1.ProduceBlockRequest{
|
||||
Slot: 1,
|
||||
RandaoReveal: randaoReveal,
|
||||
Graffiti: graffiti[:],
|
||||
}
|
||||
resp, err := v1Server.ProduceBlindedBlock(ctx, req)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, ethpbv2.Version_PHASE0, resp.Version)
|
||||
|
||||
containerBlock, ok := resp.Data.Block.(*ethpbv2.BlindedBeaconBlockContainer_Phase0Block)
|
||||
require.Equal(t, true, ok)
|
||||
blk := containerBlock.Phase0Block
|
||||
assert.Equal(t, req.Slot, blk.Slot, "Expected block to have slot of 1")
|
||||
assert.DeepEqual(t, parentRoot[:], blk.ParentRoot, "Expected block to have correct parent root")
|
||||
assert.DeepEqual(t, randaoReveal, blk.Body.RandaoReveal, "Expected block to have correct randao reveal")
|
||||
assert.DeepEqual(t, req.Graffiti, blk.Body.Graffiti, "Expected block to have correct graffiti")
|
||||
assert.Equal(t, params.BeaconConfig().MaxProposerSlashings, uint64(len(blk.Body.ProposerSlashings)))
|
||||
expectedPropSlashings := make([]*ethpbv1.ProposerSlashing, len(proposerSlashings))
|
||||
for i, slash := range proposerSlashings {
|
||||
expectedPropSlashings[i] = migration.V1Alpha1ProposerSlashingToV1(slash)
|
||||
}
|
||||
assert.DeepEqual(t, expectedPropSlashings, blk.Body.ProposerSlashings)
|
||||
assert.Equal(t, params.BeaconConfig().MaxAttesterSlashings, uint64(len(blk.Body.AttesterSlashings)))
|
||||
expectedAttSlashings := make([]*ethpbv1.AttesterSlashing, len(attSlashings))
|
||||
for i, slash := range attSlashings {
|
||||
expectedAttSlashings[i] = migration.V1Alpha1AttSlashingToV1(slash)
|
||||
}
|
||||
assert.DeepEqual(t, expectedAttSlashings, blk.Body.AttesterSlashings)
|
||||
})
|
||||
|
||||
t.Run("Altair", func(t *testing.T) {
|
||||
db := dbutil.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
bc := params.BeaconConfig()
|
||||
bc.AltairForkEpoch = types.Epoch(0)
|
||||
params.OverrideBeaconConfig(bc)
|
||||
|
||||
beaconState, privKeys := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().SyncCommitteeSize)
|
||||
syncCommittee, err := altair.NextSyncCommittee(context.Background(), beaconState)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconState.SetCurrentSyncCommittee(syncCommittee))
|
||||
require.NoError(t, beaconState.SetNextSyncCommittee(syncCommittee))
|
||||
|
||||
stateRoot, err := beaconState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err, "Could not hash genesis state")
|
||||
genesisBlock := util.NewBeaconBlockAltair()
|
||||
genesisBlock.Block.StateRoot = stateRoot[:]
|
||||
wrappedAltairBlock, err := wrapper.WrappedSignedBeaconBlock(genesisBlock)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveBlock(ctx, wrappedAltairBlock))
|
||||
parentRoot, err := genesisBlock.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, db.SaveState(ctx, beaconState, parentRoot), "Could not save genesis state")
|
||||
require.NoError(t, db.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
|
||||
|
||||
v1Alpha1Server := &v1alpha1validator.Server{
|
||||
HeadFetcher: &mockChain.ChainService{State: beaconState, Root: parentRoot[:]},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockReceiver: &mockChain.ChainService{},
|
||||
ChainStartFetcher: &mockPOW.POWChain{},
|
||||
Eth1InfoFetcher: &mockPOW.POWChain{},
|
||||
Eth1BlockFetcher: &mockPOW.POWChain{},
|
||||
MockEth1Votes: true,
|
||||
AttPool: attestations.NewPool(),
|
||||
SlashingsPool: slashings.NewPool(),
|
||||
ExitPool: voluntaryexits.NewPool(),
|
||||
StateGen: stategen.New(db),
|
||||
SyncCommitteePool: synccommittee.NewStore(),
|
||||
}
|
||||
|
||||
proposerSlashings := make([]*ethpbalpha.ProposerSlashing, params.BeaconConfig().MaxProposerSlashings)
|
||||
for i := types.ValidatorIndex(0); uint64(i) < params.BeaconConfig().MaxProposerSlashings; i++ {
|
||||
proposerSlashing, err := util.GenerateProposerSlashingForValidator(
|
||||
beaconState,
|
||||
privKeys[i],
|
||||
i, /* validator index */
|
||||
)
|
||||
require.NoError(t, err)
|
||||
proposerSlashings[i] = proposerSlashing
|
||||
err = v1Alpha1Server.SlashingsPool.InsertProposerSlashing(context.Background(), beaconState, proposerSlashing)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
attSlashings := make([]*ethpbalpha.AttesterSlashing, params.BeaconConfig().MaxAttesterSlashings)
|
||||
for i := uint64(0); i < params.BeaconConfig().MaxAttesterSlashings; i++ {
|
||||
attesterSlashing, err := util.GenerateAttesterSlashingForValidator(
|
||||
beaconState,
|
||||
privKeys[i+params.BeaconConfig().MaxProposerSlashings],
|
||||
types.ValidatorIndex(i+params.BeaconConfig().MaxProposerSlashings), /* validator index */
|
||||
)
|
||||
require.NoError(t, err)
|
||||
attSlashings[i] = attesterSlashing
|
||||
err = v1Alpha1Server.SlashingsPool.InsertAttesterSlashing(context.Background(), beaconState, attesterSlashing)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
aggregationBits := bitfield.NewBitvector128()
|
||||
for i := range aggregationBits {
|
||||
aggregationBits[i] = 0xAA
|
||||
}
|
||||
|
||||
syncCommitteeIndices, err := altair.NextSyncCommitteeIndices(context.Background(), beaconState)
|
||||
require.NoError(t, err)
|
||||
sigs := make([]bls.Signature, 0, len(syncCommitteeIndices))
|
||||
for i, indice := range syncCommitteeIndices {
|
||||
if aggregationBits.BitAt(uint64(i)) {
|
||||
b := p2pType.SSZBytes(parentRoot[:])
|
||||
sb, err := signing.ComputeDomainAndSign(beaconState, coreTime.CurrentEpoch(beaconState), &b, params.BeaconConfig().DomainSyncCommittee, privKeys[indice])
|
||||
require.NoError(t, err)
|
||||
sig, err := bls.SignatureFromBytes(sb)
|
||||
require.NoError(t, err)
|
||||
sigs = append(sigs, sig)
|
||||
}
|
||||
}
|
||||
aggregatedSig := bls.AggregateSignatures(sigs).Marshal()
|
||||
contribution := ðpbalpha.SyncCommitteeContribution{
|
||||
Slot: 0,
|
||||
BlockRoot: parentRoot[:],
|
||||
SubcommitteeIndex: 0,
|
||||
AggregationBits: aggregationBits,
|
||||
Signature: aggregatedSig,
|
||||
}
|
||||
require.NoError(t, v1Alpha1Server.SyncCommitteePool.SaveSyncCommitteeContribution(contribution))
|
||||
|
||||
v1Server := &Server{
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
V1Alpha1Server: v1Alpha1Server,
|
||||
}
|
||||
randaoReveal, err := util.RandaoReveal(beaconState, 0, privKeys)
|
||||
require.NoError(t, err)
|
||||
graffiti := bytesutil.ToBytes32([]byte("eth2"))
|
||||
|
||||
req := ðpbv1.ProduceBlockRequest{
|
||||
Slot: 1,
|
||||
RandaoReveal: randaoReveal,
|
||||
Graffiti: graffiti[:],
|
||||
}
|
||||
resp, err := v1Server.ProduceBlindedBlock(ctx, req)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, ethpbv2.Version_ALTAIR, resp.Version)
|
||||
|
||||
containerBlock, ok := resp.Data.Block.(*ethpbv2.BlindedBeaconBlockContainer_AltairBlock)
|
||||
require.Equal(t, true, ok)
|
||||
blk := containerBlock.AltairBlock
|
||||
assert.Equal(t, req.Slot, blk.Slot, "Expected block to have slot of 1")
|
||||
assert.DeepEqual(t, parentRoot[:], blk.ParentRoot, "Expected block to have correct parent root")
|
||||
assert.DeepEqual(t, randaoReveal, blk.Body.RandaoReveal, "Expected block to have correct randao reveal")
|
||||
assert.DeepEqual(t, req.Graffiti, blk.Body.Graffiti, "Expected block to have correct graffiti")
|
||||
assert.Equal(t, params.BeaconConfig().MaxProposerSlashings, uint64(len(blk.Body.ProposerSlashings)))
|
||||
expectedPropSlashings := make([]*ethpbv1.ProposerSlashing, len(proposerSlashings))
|
||||
for i, slash := range proposerSlashings {
|
||||
expectedPropSlashings[i] = migration.V1Alpha1ProposerSlashingToV1(slash)
|
||||
}
|
||||
assert.DeepEqual(t, expectedPropSlashings, blk.Body.ProposerSlashings)
|
||||
assert.Equal(t, params.BeaconConfig().MaxAttesterSlashings, uint64(len(blk.Body.AttesterSlashings)))
|
||||
expectedAttSlashings := make([]*ethpbv1.AttesterSlashing, len(attSlashings))
|
||||
for i, slash := range attSlashings {
|
||||
expectedAttSlashings[i] = migration.V1Alpha1AttSlashingToV1(slash)
|
||||
}
|
||||
assert.DeepEqual(t, expectedAttSlashings, blk.Body.AttesterSlashings)
|
||||
expectedBits := bitfield.NewBitvector512()
|
||||
for i := 0; i <= 15; i++ {
|
||||
expectedBits[i] = 0xAA
|
||||
}
|
||||
assert.DeepEqual(t, expectedBits, blk.Body.SyncAggregate.SyncCommitteeBits)
|
||||
assert.DeepEqual(t, aggregatedSig, blk.Body.SyncAggregate.SyncCommitteeSignature)
|
||||
})
|
||||
|
||||
t.Run("Bellatrix", func(t *testing.T) {
|
||||
db := dbutil.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
bc := params.BeaconConfig()
|
||||
bc.AltairForkEpoch = types.Epoch(0)
|
||||
bc.BellatrixForkEpoch = types.Epoch(1)
|
||||
params.OverrideBeaconConfig(bc)
|
||||
|
||||
beaconState, privKeys := util.DeterministicGenesisStateBellatrix(t, params.BeaconConfig().SyncCommitteeSize)
|
||||
require.NoError(t, beaconState.SetSlot(params.BeaconConfig().SlotsPerEpoch))
|
||||
syncCommittee, err := altair.NextSyncCommittee(context.Background(), beaconState)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconState.SetCurrentSyncCommittee(syncCommittee))
|
||||
require.NoError(t, beaconState.SetNextSyncCommittee(syncCommittee))
|
||||
|
||||
stateRoot, err := beaconState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err, "Could not hash genesis state")
|
||||
genesisBlock := util.NewBeaconBlockBellatrix()
|
||||
genesisBlock.Block.StateRoot = stateRoot[:]
|
||||
wrappedBellatrixBlock, err := wrapper.WrappedSignedBeaconBlock(genesisBlock)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveBlock(ctx, wrappedBellatrixBlock))
|
||||
parentRoot, err := genesisBlock.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, db.SaveState(ctx, beaconState, parentRoot), "Could not save genesis state")
|
||||
require.NoError(t, db.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
|
||||
|
||||
v1Alpha1Server := &v1alpha1validator.Server{
|
||||
ExecutionEngineCaller: &mockPOW.EngineClient{
|
||||
ExecutionBlock: &enginev1.ExecutionBlock{
|
||||
TotalDifficulty: "0x1",
|
||||
},
|
||||
},
|
||||
TimeFetcher: &mockChain.ChainService{},
|
||||
HeadFetcher: &mockChain.ChainService{State: beaconState, Root: parentRoot[:]},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockReceiver: &mockChain.ChainService{},
|
||||
ChainStartFetcher: &mockPOW.POWChain{},
|
||||
Eth1InfoFetcher: &mockPOW.POWChain{},
|
||||
Eth1BlockFetcher: &mockPOW.POWChain{},
|
||||
MockEth1Votes: true,
|
||||
AttPool: attestations.NewPool(),
|
||||
SlashingsPool: slashings.NewPool(),
|
||||
ExitPool: voluntaryexits.NewPool(),
|
||||
StateGen: stategen.New(db),
|
||||
SyncCommitteePool: synccommittee.NewStore(),
|
||||
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
|
||||
}
|
||||
|
||||
proposerSlashings := make([]*ethpbalpha.ProposerSlashing, params.BeaconConfig().MaxProposerSlashings)
|
||||
for i := types.ValidatorIndex(0); uint64(i) < params.BeaconConfig().MaxProposerSlashings; i++ {
|
||||
proposerSlashing, err := util.GenerateProposerSlashingForValidator(
|
||||
beaconState,
|
||||
privKeys[i],
|
||||
i,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
proposerSlashings[i] = proposerSlashing
|
||||
err = v1Alpha1Server.SlashingsPool.InsertProposerSlashing(context.Background(), beaconState, proposerSlashing)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
attSlashings := make([]*ethpbalpha.AttesterSlashing, params.BeaconConfig().MaxAttesterSlashings)
|
||||
for i := uint64(0); i < params.BeaconConfig().MaxAttesterSlashings; i++ {
|
||||
attesterSlashing, err := util.GenerateAttesterSlashingForValidator(
|
||||
beaconState,
|
||||
privKeys[i+params.BeaconConfig().MaxProposerSlashings],
|
||||
types.ValidatorIndex(i+params.BeaconConfig().MaxProposerSlashings), /* validator index */
|
||||
)
|
||||
require.NoError(t, err)
|
||||
attSlashings[i] = attesterSlashing
|
||||
err = v1Alpha1Server.SlashingsPool.InsertAttesterSlashing(context.Background(), beaconState, attesterSlashing)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
aggregationBits := bitfield.NewBitvector128()
|
||||
for i := range aggregationBits {
|
||||
aggregationBits[i] = 0xAA
|
||||
}
|
||||
|
||||
syncCommitteeIndices, err := altair.NextSyncCommitteeIndices(context.Background(), beaconState)
|
||||
require.NoError(t, err)
|
||||
sigs := make([]bls.Signature, 0, len(syncCommitteeIndices))
|
||||
for i, indice := range syncCommitteeIndices {
|
||||
if aggregationBits.BitAt(uint64(i)) {
|
||||
b := p2pType.SSZBytes(parentRoot[:])
|
||||
sb, err := signing.ComputeDomainAndSign(beaconState, coreTime.CurrentEpoch(beaconState), &b, params.BeaconConfig().DomainSyncCommittee, privKeys[indice])
|
||||
require.NoError(t, err)
|
||||
sig, err := bls.SignatureFromBytes(sb)
|
||||
require.NoError(t, err)
|
||||
sigs = append(sigs, sig)
|
||||
}
|
||||
}
|
||||
aggregatedSig := bls.AggregateSignatures(sigs).Marshal()
|
||||
contribution := ðpbalpha.SyncCommitteeContribution{
|
||||
Slot: params.BeaconConfig().SlotsPerEpoch,
|
||||
BlockRoot: parentRoot[:],
|
||||
SubcommitteeIndex: 0,
|
||||
AggregationBits: aggregationBits,
|
||||
Signature: aggregatedSig,
|
||||
}
|
||||
require.NoError(t, v1Alpha1Server.SyncCommitteePool.SaveSyncCommitteeContribution(contribution))
|
||||
|
||||
v1Server := &Server{
|
||||
V1Alpha1Server: v1Alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
randaoReveal, err := util.RandaoReveal(beaconState, 1, privKeys)
|
||||
require.NoError(t, err)
|
||||
graffiti := bytesutil.ToBytes32([]byte("eth2"))
|
||||
|
||||
req := ðpbv1.ProduceBlockRequest{
|
||||
Slot: params.BeaconConfig().SlotsPerEpoch + 1,
|
||||
RandaoReveal: randaoReveal,
|
||||
Graffiti: graffiti[:],
|
||||
}
|
||||
resp, err := v1Server.ProduceBlindedBlock(ctx, req)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, ethpbv2.Version_BELLATRIX, resp.Version)
|
||||
|
||||
containerBlock, ok := resp.Data.Block.(*ethpbv2.BlindedBeaconBlockContainer_BellatrixBlock)
|
||||
require.Equal(t, true, ok)
|
||||
blk := containerBlock.BellatrixBlock
|
||||
assert.Equal(t, req.Slot, blk.Slot, "Expected block to have slot of 1")
|
||||
assert.DeepEqual(t, parentRoot[:], blk.ParentRoot, "Expected block to have correct parent root")
|
||||
assert.DeepEqual(t, randaoReveal, blk.Body.RandaoReveal, "Expected block to have correct randao reveal")
|
||||
assert.DeepEqual(t, req.Graffiti, blk.Body.Graffiti, "Expected block to have correct graffiti")
|
||||
assert.Equal(t, params.BeaconConfig().MaxProposerSlashings, uint64(len(blk.Body.ProposerSlashings)))
|
||||
expectedPropSlashings := make([]*ethpbv1.ProposerSlashing, len(proposerSlashings))
|
||||
for i, slash := range proposerSlashings {
|
||||
expectedPropSlashings[i] = migration.V1Alpha1ProposerSlashingToV1(slash)
|
||||
}
|
||||
assert.DeepEqual(t, expectedPropSlashings, blk.Body.ProposerSlashings)
|
||||
assert.Equal(t, params.BeaconConfig().MaxAttesterSlashings, uint64(len(blk.Body.AttesterSlashings)))
|
||||
expectedAttSlashings := make([]*ethpbv1.AttesterSlashing, len(attSlashings))
|
||||
for i, slash := range attSlashings {
|
||||
expectedAttSlashings[i] = migration.V1Alpha1AttSlashingToV1(slash)
|
||||
}
|
||||
assert.DeepEqual(t, expectedAttSlashings, blk.Body.AttesterSlashings)
|
||||
expectedBits := bitfield.NewBitvector512()
|
||||
for i := 0; i <= 15; i++ {
|
||||
expectedBits[i] = 0xAA
|
||||
}
|
||||
assert.DeepEqual(t, expectedBits, blk.Body.SyncAggregate.SyncCommitteeBits)
|
||||
assert.DeepEqual(t, aggregatedSig, blk.Body.SyncAggregate.SyncCommitteeSignature)
|
||||
})
|
||||
}
|
||||
|
||||
func TestProduceBlindedBlock_SyncNotReady(t *testing.T) {
chainService := &mockChain.ChainService{}
vs := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: true},
HeadFetcher: chainService,
TimeFetcher: chainService,
}
_, err := vs.ProduceBlindedBlock(context.Background(), &ethpbv1.ProduceBlockRequest{})
assert.ErrorContains(t, "Syncing to latest head, not ready to respond", err)
}

func TestProduceAttestationData(t *testing.T) {
block := util.NewBeaconBlock()
block.Block.Slot = 3*params.BeaconConfig().SlotsPerEpoch + 1
@@ -9,7 +9,7 @@ import (

// GetForkChoice returns a fork choice store.
func (ds *Server) GetForkChoice(_ context.Context, _ *empty.Empty) (*pbrpc.ForkChoiceResponse, error) {
store := ds.HeadFetcher.ForkChoicer()
store := ds.ForkFetcher.ForkChoicer()

return &pbrpc.ForkChoiceResponse{
JustifiedEpoch: store.JustifiedEpoch(),

@@ -13,7 +13,7 @@ import (

func TestServer_GetForkChoice_ProtoArray(t *testing.T) {
store := protoarray.New(0, 0, [32]byte{'a'})
bs := &Server{HeadFetcher: &mock.ChainService{ForkChoiceStore: store}}
bs := &Server{ForkFetcher: &mock.ChainService{ForkChoiceStore: store}}
res, err := bs.GetForkChoice(context.Background(), &empty.Empty{})
require.NoError(t, err)
assert.Equal(t, store.JustifiedEpoch(), res.JustifiedEpoch, "Did not get wanted justified epoch")

@@ -28,6 +28,7 @@ type Server struct {
GenesisTimeFetcher blockchain.TimeFetcher
StateGen *stategen.State
HeadFetcher blockchain.HeadFetcher
ForkFetcher blockchain.ForkFetcher
PeerManager p2p.PeerManager
PeersFetcher p2p.PeersProvider
ReplayerBuilder stategen.ReplayerBuilder

@@ -52,7 +52,6 @@ go_library(
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/sync:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//container/trie:go_default_library",
@@ -77,7 +76,6 @@ go_library(
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_ferranbt_fastssz//:go_default_library",
"@com_github_holiman_uint256//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
@@ -141,7 +139,6 @@ go_test(
"//beacon-chain/state/stategen/mock:go_default_library",
"//beacon-chain/state/v1:go_default_library",
"//beacon-chain/sync/initial-sync/testing:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//container/trie:go_default_library",
@@ -162,7 +159,6 @@ go_test(
"@com_github_d4l3k_messagediff//:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_golang_mock//gomock:go_default_library",
"@com_github_holiman_uint256//:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
@@ -37,17 +37,24 @@ func (vs *Server) StreamBlocksAltair(req *ethpb.StreamBlocksRequest, stream ethp
case version.Phase0:
phBlk, ok := data.SignedBlock.Proto().(*ethpb.SignedBeaconBlock)
if !ok {
log.Warn("Mismatch between version and block type, was expecting *ethpb.SignedBeaconBlock")
log.Warn("Mismatch between version and block type, was expecting SignedBeaconBlock")
continue
}
b.Block = &ethpb.StreamBlocksResponse_Phase0Block{Phase0Block: phBlk}
case version.Altair:
phBlk, ok := data.SignedBlock.Proto().(*ethpb.SignedBeaconBlockAltair)
if !ok {
log.Warn("Mismatch between version and block type, was expecting *v2.SignedBeaconBlockAltair")
log.Warn("Mismatch between version and block type, was expecting SignedBeaconBlockAltair")
continue
}
b.Block = &ethpb.StreamBlocksResponse_AltairBlock{AltairBlock: phBlk}
case version.Bellatrix:
phBlk, ok := data.SignedBlock.Proto().(*ethpb.SignedBeaconBlockBellatrix)
if !ok {
log.Warn("Mismatch between version and block type, was expecting SignedBeaconBlockBellatrix")
continue
}
b.Block = &ethpb.StreamBlocksResponse_BellatrixBlock{BellatrixBlock: phBlk}
}

if err := stream.Send(b); err != nil {
@@ -11,7 +11,6 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/config/features"
"github.com/prysmaticlabs/prysm/config/params"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/attestation/aggregation"
@@ -117,16 +116,7 @@ func (a proposerAtts) sortByProfitability() (proposerAtts, error) {
if len(a) < 2 {
return a, nil
}
if features.Get().ProposerAttsSelectionUsingMaxCover {
return a.sortByProfitabilityUsingMaxCover()
}
sort.Slice(a, func(i, j int) bool {
if a[i].Data.Slot == a[j].Data.Slot {
return a[i].AggregationBits.Count() > a[j].AggregationBits.Count()
}
return a[i].Data.Slot > a[j].Data.Slot
})
return a, nil
return a.sortByProfitabilityUsingMaxCover()
}

// sortByProfitabilityUsingMaxCover orders attestations by highest slot and by highest aggregation bit count.
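To make the max-cover intuition concrete (it is also spelled out in the test comment further down), here is a toy, standalone sketch, not Prysm's implementation, that greedily orders bitmasks by how many not-yet-covered bits each one adds:

package sketch

import "math/bits"

// greedyMaxCover orders candidate aggregation masks by the number of new bits
// each adds to the running coverage; masks that add nothing are appended last.
func greedyMaxCover(candidates []uint8) []uint8 {
	var covered uint8
	ordered := make([]uint8, 0, len(candidates))
	remaining := append([]uint8(nil), candidates...)
	for len(remaining) > 0 {
		best, bestGain := -1, 0
		for i, c := range remaining {
			if gain := bits.OnesCount8(c &^ covered); gain > bestGain {
				best, bestGain = i, gain
			}
		}
		if best == -1 {
			ordered = append(ordered, remaining...) // nothing left adds new bits
			break
		}
		covered |= remaining[best]
		ordered = append(ordered, remaining[best])
		remaining = append(remaining[:best], remaining[best+1:]...)
	}
	return ordered
}

On the inputs used in the test below (0b11000011, 0b11001000, 0b00001100) this picks 0b11000011 first and then 0b00001100, which contributes two uncovered bits, ahead of 0b11001000, which contributes only one; a plain bit-count sort gets that second pick wrong.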
@@ -7,7 +7,6 @@ import (

types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/config/features"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
@@ -39,11 +38,6 @@ func TestProposer_ProposerAtts_sortByProfitability(t *testing.T) {
}

func TestProposer_ProposerAtts_sortByProfitabilityUsingMaxCover(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
ProposerAttsSelectionUsingMaxCover: true,
})
defer resetCfg()

type testData struct {
slot types.Slot
bits bitfield.Bitlist
@@ -116,38 +110,9 @@ func TestProposer_ProposerAtts_sortByProfitabilityUsingMaxCover(t *testing.T) {
})

t.Run("compare to native sort", func(t *testing.T) {
// The naive sort will end up with 0b11001000 being selected second (which is not optimal
// as it only contains a single unknown bit).
// The max-cover based approach will select 0b00001100 instead, despite lower bit count
// (since it has two new/unknown bits).
t.Run("naive", func(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
ProposerAttsSelectionUsingMaxCover: false,
})
defer resetCfg()

atts := getAtts([]testData{
{1, bitfield.Bitlist{0b11000011, 0b1}},
{1, bitfield.Bitlist{0b11001000, 0b1}},
{1, bitfield.Bitlist{0b00001100, 0b1}},
})
want := getAtts([]testData{
{1, bitfield.Bitlist{0b11000011, 0b1}},
{1, bitfield.Bitlist{0b11001000, 0b1}},
{1, bitfield.Bitlist{0b00001100, 0b1}},
})
atts, err := atts.sortByProfitability()
if err != nil {
t.Error(err)
}
require.DeepEqual(t, want, atts)
})
t.Run("max-cover", func(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
ProposerAttsSelectionUsingMaxCover: true,
})
defer resetCfg()

atts := getAtts([]testData{
{1, bitfield.Bitlist{0b11000011, 0b1}},
{1, bitfield.Bitlist{0b11001000, 0b1}},
Some files were not shown because too many files have changed in this diff.