Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 05:47:59 -05:00)

Comparing 26 commits: disallow-l ... v2.1.3
| SHA1 |
|---|
| 4de92bafc4 |
| 69438583e5 |
| e81f3fed01 |
| 1b2a5fb4a5 |
| 6c878b1665 |
| 838963c9f7 |
| 7b38f8b8fc |
| 23e8e695cc |
| ce9eaae22e |
| 7010e8dec8 |
| 9e4ba75e71 |
| 044a4ad5a3 |
| 690084cab6 |
| 88db7117d2 |
| 1faa292615 |
| 434018a4b9 |
| 54624569bf |
| b55ddb5a34 |
| a38de90435 |
| d454d30f19 |
| b04dd9fe5c |
| 8140a1a7e0 |
| cab9917317 |
| 4c4fb9f2c0 |
| 80f4f22401 |
| dd296cbd8a |
.github/CODEOWNERS (vendored, 10 changed lines)
```diff
@@ -9,8 +9,8 @@ deps.bzl @prysmaticlabs/core-team
 # Radek and Nishant are responsible for changes that can affect the native state feature.
 # See https://www.notion.so/prysmaticlabs/Native-Beacon-State-Redesign-6cc9744b4ec1439bb34fa829b36a35c1
-/beacon-chain/state/fieldtrie/ @rkapka @nisdas
-/beacon-chain/state/v1/ @rkapka @nisdas
-/beacon-chain/state/v2/ @rkapka @nisdas
-/beacon-chain/state/v3/ @rkapka @nisdas
-/beacon-chain/state/state-native/ @rkapka @nisdas
+/beacon-chain/state/fieldtrie/ @rkapka @nisdas @rauljordan
+/beacon-chain/state/v1/ @rkapka @nisdas @rauljordan
+/beacon-chain/state/v2/ @rkapka @nisdas @rauljordan
+/beacon-chain/state/v3/ @rkapka @nisdas @rauljordan
+/beacon-chain/state/state-native/ @rkapka @nisdas @rauljordan
```
.github/workflows/go.yml (vendored, 6 changed lines)
```
@@ -64,7 +64,6 @@ jobs:
      - name: Golangci-lint
        uses: golangci/golangci-lint-action@v2
        with:
          args: --print-issued-lines --sort-results --no-config --timeout=10m --disable-all -E deadcode -E errcheck -E gosimple --skip-files=validator/web/site_data.go --skip-dirs=proto --go=1.18
          version: v1.45.2
          skip-go-installation: true

@@ -88,11 +87,14 @@ jobs:
      - name: Build
        # Use blst tag to allow go and bazel builds for blst.
        run: go build -v ./...
        env:
          CGO_CFLAGS: "-O -D__BLST_PORTABLE__"
      # fuzz leverage go tag based stubs at compile time.
      # Building and testing with these tags should be checked and enforced at pre-submit.
      - name: Test for fuzzing
        run: go test -tags=fuzz,develop ./... -test.run=^Fuzz
        env:
          CGO_CFLAGS: "-O -D__BLST_PORTABLE__"

      # Tests run via Bazel for now...
      # - name: Test
```
```
@@ -1,69 +1,26 @@
linters-settings:
govet:
check-shadowing: true
settings:
printf:
funcs:
- (github.com/golangci/golangci-lint/pkg/logutils.Log).Infof
- (github.com/golangci/golangci-lint/pkg/logutils.Log).Warnf
- (github.com/golangci/golangci-lint/pkg/logutils.Log).Errorf
- (github.com/golangci/golangci-lint/pkg/logutils.Log).Fatalf
golint:
min-confidence: 0
gocyclo:
min-complexity: 10
maligned:
suggest-new: true
dupl:
threshold: 100
goconst:
min-len: 2
min-occurrences: 2
depguard:
list-type: blacklist
packages:
# logging is allowed only by logutils.Log, logrus
# is allowed to use only in logutils package
- github.com/sirupsen/logrus
misspell:
locale: US
lll:
line-length: 140
goimports:
local-prefixes: github.com/golangci/golangci-lint
gocritic:
enabled-tags:
- performance
- style
- experimental
disabled-checks:
- wrapperFunc
run:
skip-files:
- validator/web/site_data.go
- .*_test.go
skip-dirs:
- proto
- tools/analyzers
timeout: 10m
go: '1.18'

linters:
disable-all: true
enable:
- deadcode
- goconst
- goimports
- golint
- gosec
- misspell
- structcheck
- typecheck
- unparam
- varcheck
- gofmt
- unused
disable-all: true
- errcheck
- gosimple
- gocognit

run:
skip-dirs:
- proto/
- ^contracts/
deadline: 10m
linters-settings:
gocognit:
# TODO: We should target for < 50
min-complexity: 97

# golangci.com configuration
# https://github.com/golangci/golangci/wiki/Configuration
service:
golangci-lint-version: 1.15.0 # use the fixed version to not introduce new linters unexpectedly
prepare:
- echo "here I can run custom commands, but no preparation needed for this repo"
output:
print-issued-lines: true
sort-results: true
```
```diff
@@ -28,6 +28,7 @@ const (
 )
 
 var errMalformedHostname = errors.New("hostname must include port, separated by one colon, like example.com:3500")
+var errMalformedRequest = errors.New("required request data are missing")
 
 // ClientOpt is a functional option for the Client type (http.Client wrapper)
 type ClientOpt func(*Client)
```
```diff
@@ -199,9 +200,15 @@ func (c *Client) GetHeader(ctx context.Context, slot types.Slot, parentHash [32]
 
 // RegisterValidator encodes the SignedValidatorRegistrationV1 message to json (including hex-encoding the byte
 // fields with 0x prefixes) and posts to the builder validator registration endpoint.
-func (c *Client) RegisterValidator(ctx context.Context, svr *ethpb.SignedValidatorRegistrationV1) error {
-    v := &SignedValidatorRegistration{SignedValidatorRegistrationV1: svr}
-    body, err := json.Marshal(v)
+func (c *Client) RegisterValidator(ctx context.Context, svr []*ethpb.SignedValidatorRegistrationV1) error {
+    if len(svr) == 0 {
+        return errors.Wrap(errMalformedRequest, "empty validator registration list")
+    }
+    vs := make([]*SignedValidatorRegistration, len(svr))
+    for i := 0; i < len(svr); i++ {
+        vs[i] = &SignedValidatorRegistration{SignedValidatorRegistrationV1: svr[i]}
+    }
+    body, err := json.Marshal(vs)
     if err != nil {
         return errors.Wrap(err, "error encoding the SignedValidatorRegistration value body in RegisterValidator")
     }
```
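A minimal sketch of how a caller can adapt to the new slice-based signature. The wrapper function, its name, and the local interface are illustrative; only the `RegisterValidator` signature and the proto import path come from the diff above.

```go
// Sketch only: batching registrations for the slice-based RegisterValidator.
// Any type with this method (the builder client or the builder service changed
// in this comparison) satisfies the local registrar interface.
package example

import (
	"context"

	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)

type registrar interface {
	RegisterValidator(ctx context.Context, svr []*ethpb.SignedValidatorRegistrationV1) error
}

func registerAll(ctx context.Context, r registrar, regs []*ethpb.SignedValidatorRegistrationV1) error {
	// An empty slice is now rejected with errMalformedRequest, so skip the
	// call entirely when there is nothing to register.
	if len(regs) == 0 {
		return nil
	}
	return r.RegisterValidator(ctx, regs)
}
```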
```diff
@@ -73,7 +73,7 @@ func TestClient_Status(t *testing.T) {
 
 func TestClient_RegisterValidator(t *testing.T) {
     ctx := context.Background()
-    expectedBody := `{"message":{"fee_recipient":"0x0000000000000000000000000000000000000000","gas_limit":"23","timestamp":"42","pubkey":"0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"}}`
+    expectedBody := `[{"message":{"fee_recipient":"0x0000000000000000000000000000000000000000","gas_limit":"23","timestamp":"42","pubkey":"0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"}}]`
     expectedPath := "/eth/v1/builder/validators"
     hc := &http.Client{
         Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
```
```diff
@@ -104,7 +104,7 @@ func TestClient_RegisterValidator(t *testing.T) {
             Pubkey: ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"),
         },
     }
-    require.NoError(t, c.RegisterValidator(ctx, reg))
+    require.NoError(t, c.RegisterValidator(ctx, []*eth.SignedValidatorRegistrationV1{reg}))
 }
 
 func TestClient_GetHeader(t *testing.T) {
```
```diff
@@ -63,7 +63,7 @@ func sszBytesToUint256(b []byte) Uint256 {
 
 // SSZBytes creates an ssz-style (little-endian byte slice) representation of the Uint256
 func (s Uint256) SSZBytes() []byte {
-    return bytesutil.ReverseByteOrder(s.Int.Bytes())
+    return bytesutil.PadTo(bytesutil.ReverseByteOrder(s.Int.Bytes()), 32)
 }
 
 var errUnmarshalUint256Failed = errors.New("unable to UnmarshalText into a Uint256 value")
@@ -694,9 +694,10 @@ func TestMarshalBlindedBeaconBlockBodyBellatrix(t *testing.T) {
 }
 
 func TestRoundTripUint256(t *testing.T) {
-    vs := "452312848583266388373324160190187140051835877600158453279131187530910662656"
+    vs := "4523128485832663883733241601901871400518358776001584532791311875309106626"
     u := stringToUint256(vs)
     sb := u.SSZBytes()
+    require.Equal(t, 32, len(sb))
     uu := sszBytesToUint256(sb)
     require.Equal(t, true, bytes.Equal(u.SSZBytes(), uu.SSZBytes()))
     require.Equal(t, vs, uu.String())
```
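The padding change matters because big.Int.Bytes() returns a big-endian slice with leading zero bytes stripped, while an SSZ uint256 is a fixed 32-byte little-endian value. A self-contained sketch of that behavior; the reverse and padTo helpers are stand-ins for Prysm's bytesutil, written out here so the example runs on its own.

```go
// Why SSZBytes needs PadTo: small values reverse to fewer than 32 bytes.
package main

import (
	"fmt"
	"math/big"
)

// reverse returns a copy of b with the byte order flipped (big- to little-endian).
func reverse(b []byte) []byte {
	out := make([]byte, len(b))
	for i := range b {
		out[len(b)-1-i] = b[i]
	}
	return out
}

// padTo appends zero bytes so the little-endian value reaches the fixed SSZ size.
func padTo(b []byte, size int) []byte {
	if len(b) >= size {
		return b
	}
	return append(b, make([]byte, size-len(b))...)
}

func main() {
	v := big.NewInt(23) // big-endian Bytes() is just {0x17}
	le := reverse(v.Bytes())
	fmt.Println(len(le))            // 1: too short for an SSZ uint256
	fmt.Println(len(padTo(le, 32))) // 32: trailing zeros carry the high-order bytes
}
```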
```
@@ -31,26 +31,26 @@ func processField(s interface{}, processors []fieldProcessor) error {
sliceElem := t.Field(i).Type.Elem()
kind := sliceElem.Kind()
// Recursively process slices to struct pointers.
if kind == reflect.Ptr && sliceElem.Elem().Kind() == reflect.Struct {
switch {
case kind == reflect.Ptr && sliceElem.Elem().Kind() == reflect.Struct:
for j := 0; j < v.Field(i).Len(); j++ {
if err := processField(v.Field(i).Index(j).Interface(), processors); err != nil {
return errors.Wrapf(err, "could not process field '%s'", t.Field(i).Name)
}
}
}
// Process each string in string slices.
if kind == reflect.String {
case kind == reflect.String:
for _, proc := range processors {
_, hasTag := t.Field(i).Tag.Lookup(proc.tag)
if hasTag {
for j := 0; j < v.Field(i).Len(); j++ {
if err := proc.f(v.Field(i).Index(j)); err != nil {
return errors.Wrapf(err, "could not process field '%s'", t.Field(i).Name)
}
if !hasTag {
continue
}
for j := 0; j < v.Field(i).Len(); j++ {
if err := proc.f(v.Field(i).Index(j)); err != nil {
return errors.Wrapf(err, "could not process field '%s'", t.Field(i).Name)
}
}
}

}
// Recursively process struct pointers.
case reflect.Ptr:
```
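The refactor above flattens nested conditionals into a switch with an early continue when a field does not carry the processor's tag. A stripped-down sketch of the same reflection-plus-struct-tag mechanism; the struct, field names, and the uppercasing handler are invented for illustration, only the `hex:"true"` tag style mirrors the middleware structs elsewhere in this comparison.

```go
// Illustrative sketch: walk a struct with reflect, act only on tagged fields,
// and skip untagged ones with an early continue, as in the refactor above.
package main

import (
	"fmt"
	"reflect"
	"strings"
)

type payload struct {
	Pubkey string   `hex:"true"`
	Note   string   // no tag: left alone
	Roots  []string `hex:"true"`
}

func upperHexTagged(s interface{}) {
	v := reflect.ValueOf(s).Elem()
	t := v.Type()
	for i := 0; i < t.NumField(); i++ {
		if _, hasTag := t.Field(i).Tag.Lookup("hex"); !hasTag {
			continue // untagged fields are skipped up front
		}
		switch v.Field(i).Kind() {
		case reflect.String:
			v.Field(i).SetString(strings.ToUpper(v.Field(i).String()))
		case reflect.Slice:
			for j := 0; j < v.Field(i).Len(); j++ {
				elem := v.Field(i).Index(j)
				elem.SetString(strings.ToUpper(elem.String()))
			}
		}
	}
}

func main() {
	p := &payload{Pubkey: "0xabc", Note: "keep", Roots: []string{"0xdef"}}
	upperHexTagged(p)
	fmt.Println(p.Pubkey, p.Note, p.Roots) // 0XABC keep [0XDEF]
}
```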
```diff
@@ -121,8 +121,9 @@ func (g *Gateway) Start() {
     }
 
     g.server = &http.Server{
-        Addr:    g.cfg.gatewayAddr,
-        Handler: corsMux,
+        Addr:              g.cfg.gatewayAddr,
+        Handler:           corsMux,
+        ReadHeaderTimeout: time.Second,
     }
 
     go func() {
```
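Setting ReadHeaderTimeout bounds how long the gateway waits for a client to finish sending request headers, which protects the server from connections that trickle headers indefinitely. A minimal standalone sketch with a placeholder address and handler:

```go
// Minimal sketch of an http.Server with ReadHeaderTimeout set, mirroring the
// gateway change above. Address and handler are placeholders.
package main

import (
	"log"
	"net/http"
	"time"
)

func main() {
	srv := &http.Server{
		Addr:              "127.0.0.1:8080",
		Handler:           http.NewServeMux(),
		ReadHeaderTimeout: time.Second, // connections that stall while sending headers are closed
	}
	log.Fatal(srv.ListenAndServe())
}
```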
```diff
@@ -10,6 +10,7 @@ go_library(
         "head_sync_committee_info.go",
         "init_sync_process_block.go",
         "log.go",
+        "merge_ascii_art.go",
         "metrics.go",
         "new_slot.go",
         "options.go",
```
```diff
@@ -1,5 +1,4 @@
 //go:build !develop
-// +build !develop
 
 package blockchain
 
```
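Several hunks in this comparison drop the legacy `// +build` mirror line and keep only the `//go:build` form introduced in Go 1.17; a constrained file then needs just a single line before the package clause, as in this minimal sketch matching the change above:

```go
//go:build !develop

// Since Go 1.17 the //go:build line alone expresses the constraint; the legacy
// "// +build" mirror is only needed when pre-1.17 toolchains must still parse
// the file, and the CI workflow above already targets Go 1.18.
package blockchain
```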
```diff
@@ -5,6 +5,8 @@ import (
     "fmt"
     "time"
 
+    "github.com/pkg/errors"
+    "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
     "github.com/prysmaticlabs/prysm/config/params"
     "github.com/prysmaticlabs/prysm/consensus-types/interfaces"
     "github.com/prysmaticlabs/prysm/encoding/bytesutil"
@@ -80,3 +82,30 @@ func logBlockSyncStatus(block interfaces.BeaconBlock, blockRoot [32]byte, justif
     }).Info("Synced new block")
     return nil
 }
+
+// logs payload related data every slot.
+func logPayload(block interfaces.BeaconBlock) error {
+    isExecutionBlk, err := blocks.IsExecutionBlock(block.Body())
+    if err != nil {
+        return errors.Wrap(err, "could not determine if block is execution block")
+    }
+    if !isExecutionBlk {
+        return nil
+    }
+    payload, err := block.Body().ExecutionPayload()
+    if err != nil {
+        return err
+    }
+    if payload.GasLimit == 0 {
+        return errors.New("gas limit should not be 0")
+    }
+    gasUtilized := float64(payload.GasUsed) / float64(payload.GasLimit)
+
+    log.WithFields(logrus.Fields{
+        "blockHash":   fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash)),
+        "parentHash":  fmt.Sprintf("%#x", bytesutil.Trunc(payload.ParentHash)),
+        "blockNumber": payload.BlockNumber,
+        "gasUtilized": fmt.Sprintf("%.2f", gasUtilized),
+    }).Debug("Synced new payload")
+    return nil
+}
```
beacon-chain/blockchain/merge_ascii_art.go (new file, 81 lines)
@@ -0,0 +1,81 @@
|
||||
package blockchain
|
||||
|
||||
var mergeAsciiArt = `
|
||||
|
||||
+?$$$$$$?*; ;*?$$$$?*; +!$$$$$$?!;
|
||||
!##$???$@##$+ !&#@$??$&#@* +@#&$????$##+
|
||||
!##; +@#&; !##* ;$##* @#$ ;&#+
|
||||
!##; !##+ ;##$ @#@ $#&* ++;
|
||||
!##; ;@#&; *##* ?##; ;$##&$!+;
|
||||
!##?!!!?$##$+ !##+ !##+ ;!$@##&$!;
|
||||
!##@@@@$$!+ *##* ?##; ;*?@#&!
|
||||
!##; ;##$ @#@ ;?$; ?##+
|
||||
!##; ?##! ;?##+ ;##+ ;$#&;
|
||||
!##; !&#&$??$&#@* ;&#&$$??$$&#@+
|
||||
+??; ;*?$$$$?+ ;+!?$$$$$!+
|
||||
;;;;
|
||||
;+!??$$$?!*+; ;*?$@&&&&&@@$!*;
|
||||
*?@############&$?+ ;!@###############&$!;
|
||||
;!@####&@$????$$@#####@! ;?&####$?*++;++*!?@####&?;
|
||||
*@###&$*; ;*$&###@* *&###@!; ;!@###&!
|
||||
!###&!; ;?&###? *####! *&###?
|
||||
!###@+ ;$###$ +###&+ ;$###?
|
||||
;###&; $###? ;;+*!??$$$$$$$$??!$###* ;@###*
|
||||
!###! &###?$@&#####################@$?!+; +###$
|
||||
$###+ ;*?&#################################&$?*; &##&;
|
||||
$###+ ;!$&########################################&$!; &###;
|
||||
*###? ;!$################################################$*; +###@
|
||||
;@###+ +$&####################################################&?; $###!
|
||||
+###&+ *$##########################################################$+ ;$###$;
|
||||
*&###?; +$##############################################################?*@###$;
|
||||
+$###&?+ ;$#####################################################################?;
|
||||
*@####@&#####################################################################*
|
||||
+$&##################@?!*++*!$&###################&$?*++*!?$###############&*
|
||||
$###############&?+ ;!@###############@!; ;!@##############!
|
||||
;$##############&!; *&###########&!; !&#############!
|
||||
$##############@+ +@* ;$#########$; +@* ;$#############!
|
||||
?##############$; *###* $#######$; +&##! ?#############+
|
||||
+##############$ !#####! $#####@; *#####? ?############@;
|
||||
@#############@; !#######! ;&####+ *#######? $############?
|
||||
+##############+ ?#########? $###$ !#########$ +############&;
|
||||
$#############$ ;$###########? !###? !###########$; $############!
|
||||
@#############* !#############! ?###$ *#############? +############@
|
||||
;&############&; +?@#######&$+; ;&####; ;+?@#######&?+; @############;
|
||||
;#############@ +$&#&$*; ;$#####@; +$&#&$*; $############+
|
||||
;#############$ *+ ;+; ;*; *&#######&! ;*; ;+; +*; $############*
|
||||
&############@ ;$@!; +$@! ;?###########$; *@$* ;*$@+ $############*
|
||||
$############&; ;$#&$*+!@##* +@#############@+ +&#@?++$&#@; @############+
|
||||
*#############* $######&+ !#################? +@######$; +#############;
|
||||
@############$ ?####@+ ;$###################$; ;@####$ $############@
|
||||
*#############* !##@; +@#####################&* ;$##? +#############!
|
||||
$#############+ *$; ?#########################?; $! +&############&;
|
||||
;&#############! +$###########################@+ *&#############?
|
||||
+##############$*; ;?###############################$+ *$##############@;
|
||||
*###############&$?!!$###################################$?!?$&################+
|
||||
*###################################&@$$$$@&#################################!
|
||||
+&##############################&?+; ;+?&#############################?
|
||||
;$############################@; ;@###########################!
|
||||
?###########################* *##########################*
|
||||
+@#############&$!+$#######? ?########$+!$&###########@+
|
||||
!&###########&; $#######$+ +$########? +&##########?;
|
||||
;?###########&* *@#######@$!; ;$@########@* *##########$+
|
||||
;?&##########?; ;*$&####&$* ;!$&####&$* ;$#########@*
|
||||
;!@#########@!; ;++*+; ;*; ;+*++; ;!&########$*
|
||||
*$&########&$*; ;*$&#&$*; +*$&#######&?+
|
||||
;*$&#########&@$$@&#########&@$$@&########&$*;
|
||||
;+?$&##############################&$?+;
|
||||
;+!?$@&###################&$$!+;
|
||||
;++*!??$$$$$$$?!!*+;
|
||||
|
||||
;;; ;;+*++; ;;;++;;;++;;; ;+++;;;++++; ;;; ;; ;;; ;;;++;;;+++;; ;;;+++++++; ;+++++;
|
||||
;@#&+ +$&&@$@&#? @#@@@&#&@@@#$ ;$@@@@#&@@@@! !#@ !#$ +&#&; !#&@@@#&@@@&&; !#&@@@@@@@* $#&@@@&&$+
|
||||
$#?#@; ?#&!; *#$ &&;;;$#!;;*#$ ;;;*#@;;;; $#? +#&; ;@#?#$ !#!;;*#@;;;@#; ?#$;;;;;;; @#* ;!&#?
|
||||
*#$ ?#$ *#&; ;+; ++ $#! ;+; *#$ ;&#+ $#* ?#? $#! ;+; +#@ ++ ?#$ $#* ;&#*
|
||||
;&&; @#* $#$ $#! *#$ !#@; !#$ +#@ ;&#; +#@ ?#@??????! $#* $#$
|
||||
$#!;;;*#&; $#? $#! *#$ $#? ;&&; ;@#*;;;!#$ +#@ ?#@??????! $#* $#$
|
||||
!##@@@@@&#$ !#@; $#! *#$ ;&#+ $#* ?#&@@@@@##! +#@ ?#$ $#* @#*
|
||||
;&&+;;;;;;@#* ;$#$+ @$ $#! *#$ *#@!#? *#@;;;;;;+##+ +#@ ?#$ $#* +$#$
|
||||
$#* +#&; ;?&#@$$$$#$ +$$&#@$$; ;$$$$@##$$$$* $##@; ;&#+ !#@; $$@##$$* ?#&$$$$$$$! @#@$$$@&@!
|
||||
;** +*; +*!?!*+; ;******* ;***********+ ;**; ;*+ **; *******+ ;*********+ +*!!!!*;
|
||||
|
||||
`
|
||||
```diff
@@ -78,6 +78,8 @@ func (s *Service) validateMergeBlock(ctx context.Context, b interfaces.SignedBea
         "mergeBlockParentTotalDifficulty": mergeBlockParentTD,
     }).Info("Validated terminal block")
 
+    log.Info(mergeAsciiArt)
+
     return nil
 }
```
```diff
@@ -659,10 +659,6 @@ func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b interface
 // This removes the attestations from the mem pool. It will only remove the attestations if input root `r` is canonical,
 // meaning the block `b` is part of the canonical chain.
 func (s *Service) pruneCanonicalAttsFromPool(ctx context.Context, r [32]byte, b interfaces.SignedBeaconBlock) error {
-    if !features.Get().CorrectlyPruneCanonicalAtts {
-        return nil
-    }
-
     canonical, err := s.IsCanonical(ctx, r)
     if err != nil {
         return err
```
```diff
@@ -21,11 +21,11 @@ import (
     doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
     "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
     forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
     "github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
     mockPOW "github.com/prysmaticlabs/prysm/beacon-chain/powchain/testing"
     "github.com/prysmaticlabs/prysm/beacon-chain/state"
     "github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
     v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
     "github.com/prysmaticlabs/prysm/config/features"
     fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
     "github.com/prysmaticlabs/prysm/config/params"
     "github.com/prysmaticlabs/prysm/consensus-types/interfaces"
@@ -1579,6 +1579,7 @@ func TestOnBlock_CanFinalize(t *testing.T) {
         WithForkChoiceStore(fcs),
         WithDepositCache(depositCache),
         WithStateNotifier(&mock.MockStateNotifier{}),
+        WithAttestationPool(attestations.NewPool()),
     }
     service, err := NewService(ctx, opts...)
     require.NoError(t, err)
@@ -1695,6 +1696,7 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
         WithForkChoiceStore(fcs),
         WithDepositCache(depositCache),
         WithStateNotifier(&mock.MockStateNotifier{}),
+        WithAttestationPool(attestations.NewPool()),
     }
     service, err := NewService(ctx, opts...)
     require.NoError(t, err)
@@ -1817,11 +1819,6 @@ func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
 }
 
 func TestRemoveBlockAttestationsInPool_Canonical(t *testing.T) {
-    resetCfg := features.InitWithReset(&features.Flags{
-        CorrectlyPruneCanonicalAtts: true,
-    })
-    defer resetCfg()
-
     genesis, keys := util.DeterministicGenesisState(t, 64)
     b, err := util.GenerateFullBlock(genesis, keys, util.DefaultBlockGenConfig(), 1)
     assert.NoError(t, err)
@@ -1843,11 +1840,6 @@ func TestRemoveBlockAttestationsInPool_Canonical(t *testing.T) {
 }
 
 func TestRemoveBlockAttestationsInPool_NonCanonical(t *testing.T) {
-    resetCfg := features.InitWithReset(&features.Flags{
-        CorrectlyPruneCanonicalAtts: true,
-    })
-    defer resetCfg()
-
     genesis, keys := util.DeterministicGenesisState(t, 64)
     b, err := util.GenerateFullBlock(genesis, keys, util.DefaultBlockGenConfig(), 1)
     assert.NoError(t, err)
@@ -1930,6 +1922,7 @@ func Test_validateMergeTransitionBlock(t *testing.T) {
         WithStateGen(stategen.New(beaconDB)),
         WithForkChoiceStore(fcs),
         WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
+        WithAttestationPool(attestations.NewPool()),
     }
     service, err := NewService(ctx, opts...)
     require.NoError(t, err)
@@ -2112,6 +2105,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
         WithForkChoiceStore(fcs),
         WithDepositCache(depositCache),
         WithStateNotifier(&mock.MockStateNotifier{}),
+        WithAttestationPool(attestations.NewPool()),
     }
     service, err := NewService(ctx, opts...)
     require.NoError(t, err)
```
```diff
@@ -71,11 +71,15 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.SignedBeaco
 
     // Log block sync status.
     if err := logBlockSyncStatus(blockCopy.Block(), blockRoot, justified, finalized, receivedTime, uint64(s.genesisTime.Unix())); err != nil {
-        return err
+        log.WithError(err).Error("Unable to log block sync status")
     }
+    // Log payload data
+    if err := logPayload(blockCopy.Block()); err != nil {
+        log.WithError(err).Error("Unable to log debug block payload data")
+    }
     // Log state transition data.
     if err := logStateTransitionData(blockCopy.Block()); err != nil {
-        return err
+        log.WithError(err).Error("Unable to log state transition data")
     }
 
     return nil
```
```diff
@@ -30,8 +30,8 @@ type WeakSubjectivityVerifier struct {
 // NewWeakSubjectivityVerifier validates a checkpoint, and if valid, uses it to initialize a weak subjectivity verifier.
 func NewWeakSubjectivityVerifier(wsc *ethpb.Checkpoint, db weakSubjectivityDB) (*WeakSubjectivityVerifier, error) {
     if wsc == nil || len(wsc.Root) == 0 || wsc.Epoch == 0 {
-        log.Info("No checkpoint for syncing provided, node will begin syncing from genesis. Checkpoint Sync is an optional feature that allows your node to sync from a more recent checkpoint, " +
-            "which enhances the security of your local beacon node and the broader network. See https://docs.prylabs.network/docs/next/prysm-usage/checkpoint-sync/ to learn how to configure Checkpoint Sync.")
+        log.Info("--weak-subjectivity-checkpoint not provided. Prysm recommends providing a weak subjectivity checkpoint" +
+            "for nodes synced from genesis, or manual verification of block and state roots for checkpoint sync nodes.")
         return &WeakSubjectivityVerifier{
             enabled: false,
         }, nil
```
```diff
@@ -24,6 +24,7 @@ go_library(
         "@com_github_pkg_errors//:go_default_library",
         "@com_github_prometheus_client_golang//prometheus:go_default_library",
         "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
+        "@com_github_sirupsen_logrus//:go_default_library",
         "@com_github_urfave_cli_v2//:go_default_library",
         "@io_opencensus_go//trace:go_default_library",
     ],
@@ -13,6 +13,7 @@ import (
     "github.com/prysmaticlabs/prysm/network"
     v1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
     ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
+    log "github.com/sirupsen/logrus"
     "go.opencensus.io/trace"
 )
```
```diff
@@ -101,7 +102,7 @@ func (s *Service) Status(ctx context.Context) error {
 
 // RegisterValidator registers a validator with the builder relay network.
 // It also saves the registration object to the DB.
-func (s *Service) RegisterValidator(ctx context.Context, reg *ethpb.SignedValidatorRegistrationV1) error {
+func (s *Service) RegisterValidator(ctx context.Context, reg []*ethpb.SignedValidatorRegistrationV1) error {
     ctx, span := trace.StartSpan(ctx, "builder.RegisterValidator")
     defer span.End()
     start := time.Now()
@@ -109,13 +110,25 @@ func (s *Service) RegisterValidator(ctx context.Context, reg *ethpb.SignedValida
         registerValidatorLatency.Observe(float64(time.Since(start).Milliseconds()))
     }()
 
-    idx, exists := s.cfg.headFetcher.HeadPublicKeyToValidatorIndex(bytesutil.ToBytes48(reg.Message.Pubkey))
-    if !exists {
-        return nil // If the pubkey is not found, it is not a validator. Do nothing.
+    idxs := make([]types.ValidatorIndex, 0)
+    msgs := make([]*ethpb.ValidatorRegistrationV1, 0)
+    valid := make([]*ethpb.SignedValidatorRegistrationV1, 0)
+    for i := 0; i < len(reg); i++ {
+        r := reg[i]
+        nx, exists := s.cfg.headFetcher.HeadPublicKeyToValidatorIndex(bytesutil.ToBytes48(r.Message.Pubkey))
+        if !exists {
+            // we want to allow validators to set up keys that haven't been added to the beaconstate validator list yet,
+            // so we should tolerate keys that do not seem to be valid by skipping past them.
+            log.Warnf("Skipping validator registration for pubkey=%#x - not in current validator set.", r.Message.Pubkey)
+            continue
+        }
+        idxs = append(idxs, nx)
+        msgs = append(msgs, r.Message)
+        valid = append(valid, r)
     }
-    if err := s.c.RegisterValidator(ctx, reg); err != nil {
-        return errors.Wrap(err, "could not register validator")
+    if err := s.c.RegisterValidator(ctx, valid); err != nil {
+        return errors.Wrap(err, "could not register validator(s)")
     }
 
-    return s.cfg.beaconDB.SaveRegistrationsByValidatorIDs(ctx, []types.ValidatorIndex{idx}, []*ethpb.ValidatorRegistrationV1{reg.Message})
+    return s.cfg.beaconDB.SaveRegistrationsByValidatorIDs(ctx, idxs, msgs)
 }
```
beacon-chain/cache/active_balance.go (vendored, 1 changed line)
@@ -1,5 +1,4 @@
|
||||
//go:build !fuzz
|
||||
// +build !fuzz
|
||||
|
||||
package cache
|
||||
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
//go:build fuzz
|
||||
// +build fuzz
|
||||
|
||||
package cache
|
||||
|
||||
|
||||
beacon-chain/cache/active_balance_test.go (vendored, 1 changed line)
@@ -1,5 +1,4 @@
|
||||
//go:build !fuzz
|
||||
// +build !fuzz
|
||||
|
||||
package cache
|
||||
|
||||
|
||||
beacon-chain/cache/committee.go (vendored, 1 changed line)
@@ -1,5 +1,4 @@
|
||||
//go:build !fuzz
|
||||
// +build !fuzz
|
||||
|
||||
package cache
|
||||
|
||||
|
||||
beacon-chain/cache/committee_disabled.go (vendored, 1 changed line)

```diff
@@ -1,5 +1,4 @@
 //go:build fuzz
-// +build fuzz
 
 // This file is used in fuzzer builds to bypass global committee caches.
 package cache
```
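These cache files come in pairs selected by the fuzz build tag: the real cache under `//go:build !fuzz` and a pass-through stub under `//go:build fuzz`, so the fuzz-tagged `go test` step in the CI workflow above compiles against stubs that cannot leak global state between iterations. A minimal sketch of the pattern, with invented file, function, and variable names:

```go
// value_cache.go: the real implementation, excluded from fuzz builds (names invented).
//go:build !fuzz

package cache

var store = map[string]int{}

// Set stores a value for key.
func Set(key string, v int) { store[key] = v }

// Get returns the cached value for key.
func Get(key string) (int, bool) {
	v, ok := store[key]
	return v, ok
}
```

```go
// value_cache_disabled.go: fuzz builds swap in a no-op stub with the same API (names invented).
//go:build fuzz

package cache

// Set is a no-op under the fuzz tag.
func Set(string, int) {}

// Get always misses under the fuzz tag.
func Get(string) (int, bool) { return 0, false }
```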
beacon-chain/cache/committee_fuzz_test.go (vendored, 1 changed line)
@@ -1,5 +1,4 @@
|
||||
//go:build !fuzz
|
||||
// +build !fuzz
|
||||
|
||||
package cache
|
||||
|
||||
|
||||
beacon-chain/cache/committee_test.go (vendored, 1 changed line)
@@ -1,5 +1,4 @@
|
||||
//go:build !fuzz
|
||||
// +build !fuzz
|
||||
|
||||
package cache
|
||||
|
||||
|
||||
beacon-chain/cache/proposer_indices.go (vendored, 1 changed line)
@@ -1,5 +1,4 @@
|
||||
//go:build !fuzz
|
||||
// +build !fuzz
|
||||
|
||||
package cache
|
||||
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
//go:build fuzz
|
||||
// +build fuzz
|
||||
|
||||
// This file is used in fuzzer builds to bypass proposer indices caches.
|
||||
package cache
|
||||
|
||||
beacon-chain/cache/proposer_indices_test.go (vendored, 1 changed line)
@@ -1,5 +1,4 @@
|
||||
//go:build !fuzz
|
||||
// +build !fuzz
|
||||
|
||||
package cache
|
||||
|
||||
|
||||
beacon-chain/cache/sync_committee.go (vendored, 1 changed line)
@@ -1,5 +1,4 @@
|
||||
//go:build !fuzz
|
||||
// +build !fuzz
|
||||
|
||||
package cache
|
||||
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
//go:build fuzz
|
||||
// +build fuzz
|
||||
|
||||
package cache
|
||||
|
||||
|
||||
```diff
@@ -39,6 +39,7 @@ go_test(
         "attestation_test.go",
         "justification_finalization_test.go",
         "new_test.go",
+        "precompute_test.go",
         "reward_penalty_test.go",
         "slashing_test.go",
     ],
```
```diff
@@ -149,14 +149,14 @@ func computeCheckpoints(state state.BeaconState, newBits bitfield.Bitvector4) (*
     finalizedCheckpoint := state.FinalizedCheckpoint()
 
     // If 2/3 or more of the total balance attested in the current epoch.
-    if newBits.BitAt(0) {
+    if newBits.BitAt(0) && currentEpoch >= justifiedCheckpoint.Epoch {
         blockRoot, err := helpers.BlockRoot(state, currentEpoch)
         if err != nil {
             return nil, nil, errors.Wrapf(err, "could not get block root for current epoch %d", currentEpoch)
         }
         justifiedCheckpoint.Epoch = currentEpoch
         justifiedCheckpoint.Root = blockRoot
-    } else if newBits.BitAt(1) {
+    } else if newBits.BitAt(1) && prevEpoch >= justifiedCheckpoint.Epoch {
         // If 2/3 or more of total balance attested in the previous epoch.
         blockRoot, err := helpers.BlockRoot(state, prevEpoch)
         if err != nil {
@@ -250,3 +250,18 @@ func TestUnrealizedCheckpoints(t *testing.T) {
         })
     }
 }
+
+func Test_ComputeCheckpoints_CantUpdateToLower(t *testing.T) {
+    st, err := v2.InitializeFromProto(&ethpb.BeaconStateAltair{
+        Slot: params.BeaconConfig().SlotsPerEpoch * 2,
+        CurrentJustifiedCheckpoint: &ethpb.Checkpoint{
+            Epoch: 2,
+        },
+    })
+    require.NoError(t, err)
+    jb := make(bitfield.Bitvector4, 1)
+    jb.SetBitAt(1, true)
+    cp, _, err := precompute.ComputeCheckpoints(st, jb)
+    require.NoError(t, err)
+    require.Equal(t, types.Epoch(2), cp.Epoch)
+}
```
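The guard added above only lets the unrealized justified checkpoint move forward: a current- or previous-epoch root is adopted only when that epoch is not lower than the checkpoint already held. A stripped-down, hedged distillation of just that rule, with illustrative types rather than Prysm's:

```go
// Illustrative distillation of the computeCheckpoints guard: a candidate
// justification can never lower the stored checkpoint epoch.
package main

import "fmt"

type checkpoint struct {
	Epoch uint64
	Root  [32]byte
}

func maybeJustify(cp checkpoint, candidateEpoch uint64, candidateRoot [32]byte) checkpoint {
	if candidateEpoch >= cp.Epoch {
		cp.Epoch = candidateEpoch
		cp.Root = candidateRoot
	}
	return cp
}

func main() {
	cp := checkpoint{Epoch: 2}
	cp = maybeJustify(cp, 1, [32]byte{0xaa}) // ignored: would move the checkpoint backwards
	fmt.Println(cp.Epoch)                    // 2, matching Test_ComputeCheckpoints_CantUpdateToLower
}
```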
beacon-chain/core/epoch/precompute/precompute_test.go (new file, 3 lines)

```diff
@@ -0,0 +1,3 @@
+package precompute
+
+var ComputeCheckpoints = computeCheckpoints
```
|
||||
@@ -105,7 +105,6 @@ func NewKVStore(ctx context.Context, dirPath string, config *Config) (*Store, er
|
||||
}
|
||||
}
|
||||
datafile := KVStoreDatafilePath(dirPath)
|
||||
start := time.Now()
|
||||
log.Infof("Opening Bolt DB at %s", datafile)
|
||||
boltDB, err := bolt.Open(
|
||||
datafile,
|
||||
@@ -116,40 +115,29 @@ func NewKVStore(ctx context.Context, dirPath string, config *Config) (*Store, er
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
log.WithField("elapsed", time.Since(start)).Error("Failed to open Bolt DB")
|
||||
if errors.Is(err, bolt.ErrTimeout) {
|
||||
return nil, errors.New("cannot obtain database lock, database may be in use by another process")
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
log.WithField("elapsed", time.Since(start)).Info("Opened Bolt DB")
|
||||
|
||||
boltDB.AllocSize = boltAllocSize
|
||||
start = time.Now()
|
||||
log.Infof("Creating block cache...")
|
||||
blockCache, err := ristretto.NewCache(&ristretto.Config{
|
||||
NumCounters: 1000, // number of keys to track frequency of (1000).
|
||||
MaxCost: BlockCacheSize, // maximum cost of cache (1000 Blocks).
|
||||
BufferItems: 64, // number of keys per Get buffer.
|
||||
})
|
||||
if err != nil {
|
||||
log.WithField("elapsed", time.Since(start)).Error("Failed to create block cache")
|
||||
return nil, err
|
||||
}
|
||||
log.WithField("elapsed", time.Since(start)).Info("Created block cache")
|
||||
|
||||
start = time.Now()
|
||||
log.Infof("Creating validator cache...")
|
||||
validatorCache, err := ristretto.NewCache(&ristretto.Config{
|
||||
NumCounters: NumOfValidatorEntries, // number of entries in cache (2 Million).
|
||||
MaxCost: ValidatorEntryMaxCost, // maximum size of the cache (64Mb)
|
||||
BufferItems: 64, // number of keys per Get buffer.
|
||||
})
|
||||
if err != nil {
|
||||
log.WithField("elapsed", time.Since(start)).Error("Failed to to create validator cache")
|
||||
return nil, err
|
||||
}
|
||||
log.WithField("elapsed", time.Since(start)).Info("Created validator cache")
|
||||
|
||||
kv := &Store{
|
||||
db: boltDB,
|
||||
@@ -159,8 +147,6 @@ func NewKVStore(ctx context.Context, dirPath string, config *Config) (*Store, er
|
||||
stateSummaryCache: newStateSummaryCache(),
|
||||
ctx: ctx,
|
||||
}
|
||||
start = time.Now()
|
||||
log.Infof("Updating DB and creating buckets...")
|
||||
if err := kv.db.Update(func(tx *bolt.Tx) error {
|
||||
return createBuckets(
|
||||
tx,
|
||||
@@ -195,13 +181,9 @@ func NewKVStore(ctx context.Context, dirPath string, config *Config) (*Store, er
|
||||
registrationBucket,
|
||||
)
|
||||
}); err != nil {
|
||||
log.WithField("elapsed", time.Since(start)).Error("Failed to update db and create buckets")
|
||||
return nil, err
|
||||
}
|
||||
log.WithField("elapsed", time.Since(start)).Info("Updated db and created buckets")
|
||||
|
||||
err = prometheus.Register(createBoltCollector(kv.db))
|
||||
|
||||
return kv, err
|
||||
}
|
||||
|
||||
|
||||
@@ -162,12 +162,27 @@ func (s *Store) SaveStatesEfficient(ctx context.Context, states []state.ReadOnly
|
||||
if states == nil {
|
||||
return errors.New("nil state")
|
||||
}
|
||||
validatorKeys, validatorsEntries, err := getValidators(states)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := s.db.Update(func(tx *bolt.Tx) error {
|
||||
return s.saveStatesEfficientInternal(ctx, tx, blockRoots, states, validatorKeys, validatorsEntries)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getValidators(states []state.ReadOnlyBeaconState) ([][]byte, map[string]*ethpb.Validator, error) {
|
||||
validatorsEntries := make(map[string]*ethpb.Validator) // It's a map to make sure that you store only new validator entries.
|
||||
validatorKeys := make([][]byte, len(states)) // For every state, this stores a compressed list of validator keys.
|
||||
for i, st := range states {
|
||||
pb, ok := st.InnerStateUnsafe().(withValidators)
|
||||
if !ok {
|
||||
return errors.New("could not cast state to interface with GetValidators()")
|
||||
return nil, nil, errors.New("could not cast state to interface with GetValidators()")
|
||||
}
|
||||
validators := pb.GetValidators()
|
||||
|
||||
@@ -177,7 +192,7 @@ func (s *Store) SaveStatesEfficient(ctx context.Context, states []state.ReadOnly
|
||||
// create the unique hash for that validator entry.
|
||||
hash, hashErr := val.HashTreeRoot()
|
||||
if hashErr != nil {
|
||||
return hashErr
|
||||
return nil, nil, hashErr
|
||||
}
|
||||
hashes = append(hashes, hash[:]...)
|
||||
|
||||
@@ -187,117 +202,113 @@ func (s *Store) SaveStatesEfficient(ctx context.Context, states []state.ReadOnly
|
||||
}
|
||||
validatorKeys[i] = snappy.Encode(nil, hashes)
|
||||
}
|
||||
return validatorKeys, validatorsEntries, nil
|
||||
}
|
||||
|
||||
if err := s.db.Update(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket(stateBucket)
|
||||
valIdxBkt := tx.Bucket(blockRootValidatorHashesBucket)
|
||||
for i, rt := range blockRoots {
|
||||
indicesByBucket := createStateIndicesFromStateSlot(ctx, states[i].Slot())
|
||||
if err := updateValueForIndices(ctx, indicesByBucket, rt[:], tx); err != nil {
|
||||
return errors.Wrap(err, "could not update DB indices")
|
||||
}
|
||||
|
||||
// There is a gap when the states that are passed are used outside this
|
||||
// thread. But while storing the state object, we should not store the
|
||||
// validator entries.To bring the gap closer, we empty the validators
|
||||
// just before Put() and repopulate that state with original validators.
|
||||
// look at issue https://github.com/prysmaticlabs/prysm/issues/9262.
|
||||
switch rawType := states[i].InnerStateUnsafe().(type) {
|
||||
case *ethpb.BeaconState:
|
||||
var pbState *ethpb.BeaconState
|
||||
var err error
|
||||
if features.Get().EnableNativeState {
|
||||
pbState, err = state_native.ProtobufBeaconStatePhase0(rawType)
|
||||
} else {
|
||||
pbState, err = v1.ProtobufBeaconState(rawType)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if pbState == nil {
|
||||
return errors.New("nil state")
|
||||
}
|
||||
valEntries := pbState.Validators
|
||||
pbState.Validators = make([]*ethpb.Validator, 0)
|
||||
encodedState, err := encode(ctx, pbState)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := bucket.Put(rt[:], encodedState); err != nil {
|
||||
return err
|
||||
}
|
||||
pbState.Validators = valEntries
|
||||
if err := valIdxBkt.Put(rt[:], validatorKeys[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
case *ethpb.BeaconStateAltair:
|
||||
var pbState *ethpb.BeaconStateAltair
|
||||
var err error
|
||||
if features.Get().EnableNativeState {
|
||||
pbState, err = state_native.ProtobufBeaconStateAltair(rawType)
|
||||
} else {
|
||||
pbState, err = v2.ProtobufBeaconState(rawType)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if pbState == nil {
|
||||
return errors.New("nil state")
|
||||
}
|
||||
valEntries := pbState.Validators
|
||||
pbState.Validators = make([]*ethpb.Validator, 0)
|
||||
rawObj, err := pbState.MarshalSSZ()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
encodedState := snappy.Encode(nil, append(altairKey, rawObj...))
|
||||
if err := bucket.Put(rt[:], encodedState); err != nil {
|
||||
return err
|
||||
}
|
||||
pbState.Validators = valEntries
|
||||
if err := valIdxBkt.Put(rt[:], validatorKeys[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
case *ethpb.BeaconStateBellatrix:
|
||||
var pbState *ethpb.BeaconStateBellatrix
|
||||
var err error
|
||||
if features.Get().EnableNativeState {
|
||||
pbState, err = state_native.ProtobufBeaconStateBellatrix(rawType)
|
||||
} else {
|
||||
pbState, err = v3.ProtobufBeaconState(rawType)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if pbState == nil {
|
||||
return errors.New("nil state")
|
||||
}
|
||||
valEntries := pbState.Validators
|
||||
pbState.Validators = make([]*ethpb.Validator, 0)
|
||||
rawObj, err := pbState.MarshalSSZ()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
encodedState := snappy.Encode(nil, append(bellatrixKey, rawObj...))
|
||||
if err := bucket.Put(rt[:], encodedState); err != nil {
|
||||
return err
|
||||
}
|
||||
pbState.Validators = valEntries
|
||||
if err := valIdxBkt.Put(rt[:], validatorKeys[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
return errors.New("invalid state type")
|
||||
}
|
||||
func (s *Store) saveStatesEfficientInternal(ctx context.Context, tx *bolt.Tx, blockRoots [][32]byte, states []state.ReadOnlyBeaconState, validatorKeys [][]byte, validatorsEntries map[string]*ethpb.Validator) error {
|
||||
bucket := tx.Bucket(stateBucket)
|
||||
valIdxBkt := tx.Bucket(blockRootValidatorHashesBucket)
|
||||
for i, rt := range blockRoots {
|
||||
indicesByBucket := createStateIndicesFromStateSlot(ctx, states[i].Slot())
|
||||
if err := updateValueForIndices(ctx, indicesByBucket, rt[:], tx); err != nil {
|
||||
return errors.Wrap(err, "could not update DB indices")
|
||||
}
|
||||
|
||||
// store the validator entries separately to save space.
|
||||
return s.storeValidatorEntriesSeparately(ctx, tx, validatorsEntries)
|
||||
}); err != nil {
|
||||
return err
|
||||
// There is a gap when the states that are passed are used outside this
|
||||
// thread. But while storing the state object, we should not store the
|
||||
// validator entries.To bring the gap closer, we empty the validators
|
||||
// just before Put() and repopulate that state with original validators.
|
||||
// look at issue https://github.com/prysmaticlabs/prysm/issues/9262.
|
||||
switch rawType := states[i].InnerStateUnsafe().(type) {
|
||||
case *ethpb.BeaconState:
|
||||
var pbState *ethpb.BeaconState
|
||||
var err error
|
||||
if features.Get().EnableNativeState {
|
||||
pbState, err = state_native.ProtobufBeaconStatePhase0(rawType)
|
||||
} else {
|
||||
pbState, err = v1.ProtobufBeaconState(rawType)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if pbState == nil {
|
||||
return errors.New("nil state")
|
||||
}
|
||||
valEntries := pbState.Validators
|
||||
pbState.Validators = make([]*ethpb.Validator, 0)
|
||||
encodedState, err := encode(ctx, pbState)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := bucket.Put(rt[:], encodedState); err != nil {
|
||||
return err
|
||||
}
|
||||
pbState.Validators = valEntries
|
||||
if err := valIdxBkt.Put(rt[:], validatorKeys[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
case *ethpb.BeaconStateAltair:
|
||||
var pbState *ethpb.BeaconStateAltair
|
||||
var err error
|
||||
if features.Get().EnableNativeState {
|
||||
pbState, err = state_native.ProtobufBeaconStateAltair(rawType)
|
||||
} else {
|
||||
pbState, err = v2.ProtobufBeaconState(rawType)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if pbState == nil {
|
||||
return errors.New("nil state")
|
||||
}
|
||||
valEntries := pbState.Validators
|
||||
pbState.Validators = make([]*ethpb.Validator, 0)
|
||||
rawObj, err := pbState.MarshalSSZ()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
encodedState := snappy.Encode(nil, append(altairKey, rawObj...))
|
||||
if err := bucket.Put(rt[:], encodedState); err != nil {
|
||||
return err
|
||||
}
|
||||
pbState.Validators = valEntries
|
||||
if err := valIdxBkt.Put(rt[:], validatorKeys[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
case *ethpb.BeaconStateBellatrix:
|
||||
var pbState *ethpb.BeaconStateBellatrix
|
||||
var err error
|
||||
if features.Get().EnableNativeState {
|
||||
pbState, err = state_native.ProtobufBeaconStateBellatrix(rawType)
|
||||
} else {
|
||||
pbState, err = v3.ProtobufBeaconState(rawType)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if pbState == nil {
|
||||
return errors.New("nil state")
|
||||
}
|
||||
valEntries := pbState.Validators
|
||||
pbState.Validators = make([]*ethpb.Validator, 0)
|
||||
rawObj, err := pbState.MarshalSSZ()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
encodedState := snappy.Encode(nil, append(bellatrixKey, rawObj...))
|
||||
if err := bucket.Put(rt[:], encodedState); err != nil {
|
||||
return err
|
||||
}
|
||||
pbState.Validators = valEntries
|
||||
if err := valIdxBkt.Put(rt[:], validatorKeys[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
return errors.New("invalid state type")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
// store the validator entries separately to save space.
|
||||
return s.storeValidatorEntriesSeparately(ctx, tx, validatorsEntries)
|
||||
}
|
||||
|
||||
func (s *Store) storeValidatorEntriesSeparately(ctx context.Context, tx *bolt.Tx, validatorsEntries map[string]*ethpb.Validator) error {
|
||||
|
||||
```diff
@@ -1,5 +1,4 @@
 //go:build go1.18
-// +build go1.18
 
 package p2p_test
 
```
@@ -15,6 +15,7 @@ go_library(
|
||||
"//api/gateway/apimiddleware:go_default_library",
|
||||
"//api/grpc:go_default_library",
|
||||
"//beacon-chain/rpc/eth/events:go_default_library",
|
||||
"//beacon-chain/rpc/eth/helpers:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//proto/eth/v2:go_default_library",
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/api/gateway/apimiddleware"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/rpc/eth/helpers"
|
||||
ethpbv2 "github.com/prysmaticlabs/prysm/proto/eth/v2"
|
||||
)
|
||||
|
||||
@@ -198,7 +199,7 @@ type versionResponseJson struct {
|
||||
|
||||
// syncingResponseJson is used in /node/syncing API endpoint.
|
||||
type syncingResponseJson struct {
|
||||
Data *syncInfoJson `json:"data"`
|
||||
Data *helpers.SyncDetailsJson `json:"data"`
|
||||
}
|
||||
|
||||
// beaconStateResponseJson is used in /debug/beacon/states/{state_id} API endpoint.
|
||||
@@ -775,12 +776,6 @@ type depositContractJson struct {
|
||||
Address string `json:"address"`
|
||||
}
|
||||
|
||||
type syncInfoJson struct {
|
||||
HeadSlot string `json:"head_slot"`
|
||||
SyncDistance string `json:"sync_distance"`
|
||||
IsSyncing bool `json:"is_syncing"`
|
||||
}
|
||||
|
||||
type attesterDutyJson struct {
|
||||
Pubkey string `json:"pubkey" hex:"true"`
|
||||
ValidatorIndex string `json:"validator_index"`
|
||||
@@ -932,16 +927,10 @@ type singleIndexedVerificationFailureJson struct {
|
||||
|
||||
type nodeSyncDetailsErrorJson struct {
|
||||
apimiddleware.DefaultErrorJson
|
||||
SyncDetails syncDetails `json:"sync_details"`
|
||||
SyncDetails helpers.SyncDetailsJson `json:"sync_details"`
|
||||
}
|
||||
|
||||
type eventErrorJson struct {
|
||||
StatusCode int `json:"status_code"`
|
||||
Message string `json:"message"`
|
||||
}
|
||||
|
||||
type syncDetails struct {
|
||||
HeadSlot string `json:"head_slot"`
|
||||
SyncDistance string `json:"sync_distance"`
|
||||
IsSyncing bool `json:"is_syncing"`
|
||||
}
|
||||
|
||||
@@ -53,7 +53,7 @@ func (e *blockIdParseError) Error() string {
|
||||
// GetWeakSubjectivity computes the starting epoch of the current weak subjectivity period, and then also
|
||||
// determines the best block root and state root to use for a Checkpoint Sync starting from that point.
|
||||
func (bs *Server) GetWeakSubjectivity(ctx context.Context, _ *empty.Empty) (*ethpbv1.WeakSubjectivityResponse, error) {
|
||||
if err := rpchelpers.ValidateSync(ctx, bs.SyncChecker, bs.HeadFetcher, bs.GenesisTimeFetcher); err != nil {
|
||||
if err := rpchelpers.ValidateSync(ctx, bs.SyncChecker, bs.HeadFetcher, bs.GenesisTimeFetcher, bs.OptimisticModeFetcher); err != nil {
|
||||
// This is already a grpc error, so we can't wrap it any further
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -8,7 +8,7 @@ go_library(
|
||||
"validator_status.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/rpc/eth/helpers",
|
||||
visibility = ["//beacon-chain/rpc/eth:__subpackages__"],
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//api/grpc:go_default_library",
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
|
||||
@@ -34,15 +34,3 @@ type SingleIndexedVerificationFailure struct {
|
||||
Index int `json:"index"`
|
||||
Message string `json:"message"`
|
||||
}
|
||||
|
||||
// SyncDetails contain details about sync status.
|
||||
type SyncDetails struct {
|
||||
HeadSlot string `json:"head_slot"`
|
||||
SyncDistance string `json:"sync_distance"`
|
||||
IsSyncing bool `json:"is_syncing"`
|
||||
}
|
||||
|
||||
// SyncDetailsContainer is a wrapper for SyncDetails.
|
||||
type SyncDetailsContainer struct {
|
||||
SyncDetails *SyncDetails `json:"sync_details"`
|
||||
}
|
||||
|
||||
@@ -17,19 +17,38 @@ import (
|
||||
|
||||
// ValidateSync checks whether the node is currently syncing and returns an error if it is.
|
||||
// It also appends syncing info to gRPC headers.
|
||||
func ValidateSync(ctx context.Context, syncChecker sync.Checker, headFetcher blockchain.HeadFetcher, timeFetcher blockchain.TimeFetcher) error {
|
||||
func ValidateSync(
|
||||
ctx context.Context,
|
||||
syncChecker sync.Checker,
|
||||
headFetcher blockchain.HeadFetcher,
|
||||
timeFetcher blockchain.TimeFetcher,
|
||||
optimisticModeFetcher blockchain.OptimisticModeFetcher,
|
||||
) error {
|
||||
if !syncChecker.Syncing() {
|
||||
return nil
|
||||
}
|
||||
headSlot := headFetcher.HeadSlot()
|
||||
syncDetailsContainer := &SyncDetailsContainer{
|
||||
SyncDetails: &SyncDetails{
|
||||
isOptimistic := false
|
||||
|
||||
headState, err := headFetcher.HeadState(ctx)
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Internal, "Could not get head state: %v", err)
|
||||
}
|
||||
isOptimistic, err = IsOptimistic(ctx, headState, optimisticModeFetcher)
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Internal, "Could not check optimistic status: %v", err)
|
||||
}
|
||||
|
||||
syncDetailsContainer := &syncDetailsContainer{
|
||||
SyncDetails: &SyncDetailsJson{
|
||||
HeadSlot: strconv.FormatUint(uint64(headSlot), 10),
|
||||
SyncDistance: strconv.FormatUint(uint64(timeFetcher.CurrentSlot()-headSlot), 10),
|
||||
IsSyncing: true,
|
||||
IsOptimistic: isOptimistic,
|
||||
},
|
||||
}
|
||||
err := grpc.AppendCustomErrorHeader(ctx, syncDetailsContainer)
|
||||
|
||||
err = grpc.AppendCustomErrorHeader(ctx, syncDetailsContainer)
|
||||
if err != nil {
|
||||
return status.Errorf(
|
||||
codes.Internal,
|
||||
@@ -41,7 +60,7 @@ func ValidateSync(ctx context.Context, syncChecker sync.Checker, headFetcher blo
|
||||
}
|
||||
|
||||
// IsOptimistic checks whether the latest block header of the passed in beacon state is the header of an optimistic block.
|
||||
func IsOptimistic(ctx context.Context, st state.BeaconState, optimisticSyncFetcher blockchain.OptimisticModeFetcher) (bool, error) {
|
||||
func IsOptimistic(ctx context.Context, st state.BeaconState, optimisticModeFetcher blockchain.OptimisticModeFetcher) (bool, error) {
|
||||
header := st.LatestBlockHeader()
|
||||
// This happens when the block at the state's slot is not missing.
|
||||
if bytes.Equal(header.StateRoot, params.BeaconConfig().ZeroHash[:]) {
|
||||
@@ -55,9 +74,22 @@ func IsOptimistic(ctx context.Context, st state.BeaconState, optimisticSyncFetch
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "could not get header root")
|
||||
}
|
||||
isOptimistic, err := optimisticSyncFetcher.IsOptimisticForRoot(ctx, headRoot)
|
||||
isOptimistic, err := optimisticModeFetcher.IsOptimisticForRoot(ctx, headRoot)
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "could not check if block is optimistic")
|
||||
}
|
||||
return isOptimistic, nil
|
||||
}
|
||||
|
||||
// SyncDetailsJson contains information about node sync status.
|
||||
type SyncDetailsJson struct {
|
||||
HeadSlot string `json:"head_slot"`
|
||||
SyncDistance string `json:"sync_distance"`
|
||||
IsSyncing bool `json:"is_syncing"`
|
||||
IsOptimistic bool `json:"is_optimistic"`
|
||||
}
|
||||
|
||||
// SyncDetailsContainer is a wrapper for SyncDetails.
|
||||
type syncDetailsContainer struct {
|
||||
SyncDetails *SyncDetailsJson `json:"sync_details"`
|
||||
}
|
||||
|
||||
@@ -30,7 +30,7 @@ func TestValidateSync(t *testing.T) {
|
||||
Slot: &headSlot,
|
||||
State: st,
|
||||
}
|
||||
err = ValidateSync(ctx, syncChecker, chainService, chainService)
|
||||
err = ValidateSync(ctx, syncChecker, chainService, chainService, chainService)
|
||||
require.NotNil(t, err)
|
||||
sts, ok := grpc.ServerTransportStreamFromContext(ctx).(*runtime.ServerTransportStream)
|
||||
require.Equal(t, true, ok, "type assertion failed")
|
||||
@@ -39,7 +39,7 @@ func TestValidateSync(t *testing.T) {
|
||||
require.Equal(t, true, ok, "could not retrieve custom error metadata value")
|
||||
assert.DeepEqual(
|
||||
t,
|
||||
[]string{"{\"sync_details\":{\"head_slot\":\"50\",\"sync_distance\":\"50\",\"is_syncing\":true}}"},
|
||||
[]string{"{\"sync_details\":{\"head_slot\":\"50\",\"sync_distance\":\"50\",\"is_syncing\":true,\"is_optimistic\":false}}"},
|
||||
v,
|
||||
)
|
||||
})
|
||||
@@ -47,7 +47,15 @@ func TestValidateSync(t *testing.T) {
|
||||
syncChecker := &syncmock.Sync{
|
||||
IsSyncing: false,
|
||||
}
|
||||
err := ValidateSync(ctx, syncChecker, nil, nil)
|
||||
headSlot := types.Slot(100)
|
||||
st, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetSlot(50))
|
||||
chainService := &chainmock.ChainService{
|
||||
Slot: &headSlot,
|
||||
State: st,
|
||||
}
|
||||
err = ValidateSync(ctx, syncChecker, nil, nil, chainService)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -15,6 +15,7 @@ go_library(
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
"//beacon-chain/p2p/peers:go_default_library",
|
||||
"//beacon-chain/p2p/peers/peerdata:go_default_library",
|
||||
"//beacon-chain/rpc/eth/helpers:go_default_library",
|
||||
"//beacon-chain/sync:go_default_library",
|
||||
"//proto/eth/v1:go_default_library",
|
||||
"//proto/migration:go_default_library",
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers/peerdata"
|
||||
rpchelpers "github.com/prysmaticlabs/prysm/beacon-chain/rpc/eth/helpers"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1"
|
||||
"github.com/prysmaticlabs/prysm/proto/migration"
|
||||
eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
@@ -266,11 +267,23 @@ func (ns *Server) GetSyncStatus(ctx context.Context, _ *emptypb.Empty) (*ethpb.S
|
||||
defer span.End()
|
||||
|
||||
headSlot := ns.HeadFetcher.HeadSlot()
|
||||
|
||||
headState, err := ns.HeadFetcher.HeadState(ctx)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get head state: %v", err)
|
||||
}
|
||||
|
||||
isOptimistic, err := rpchelpers.IsOptimistic(ctx, headState, ns.OptimisticModeFetcher)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not check optimistic status: %v", err)
|
||||
}
|
||||
|
||||
return ðpb.SyncingResponse{
|
||||
Data: ðpb.SyncInfo{
|
||||
HeadSlot: headSlot,
|
||||
SyncDistance: ns.GenesisTimeFetcher.CurrentSlot() - headSlot,
|
||||
IsSyncing: ns.SyncChecker.Syncing(),
|
||||
IsOptimistic: isOptimistic,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -167,20 +167,22 @@ func TestSyncStatus(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
err = state.SetSlot(100)
|
||||
require.NoError(t, err)
|
||||
chainService := &mock.ChainService{Slot: currentSlot, State: state}
|
||||
chainService := &mock.ChainService{Slot: currentSlot, State: state, Optimistic: true}
|
||||
syncChecker := &syncmock.Sync{}
|
||||
syncChecker.IsSyncing = true
|
||||
|
||||
s := &Server{
|
||||
HeadFetcher: chainService,
|
||||
GenesisTimeFetcher: chainService,
|
||||
SyncChecker: syncChecker,
|
||||
HeadFetcher: chainService,
|
||||
GenesisTimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
SyncChecker: syncChecker,
|
||||
}
|
||||
resp, err := s.GetSyncStatus(context.Background(), &emptypb.Empty{})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, types.Slot(100), resp.Data.HeadSlot)
|
||||
assert.Equal(t, types.Slot(10), resp.Data.SyncDistance)
|
||||
assert.Equal(t, true, resp.Data.IsSyncing)
|
||||
assert.Equal(t, true, resp.Data.IsOptimistic)
|
||||
}
|
||||
|
||||
func TestGetPeer(t *testing.T) {
|
||||
|
||||
@@ -15,12 +15,13 @@ import (
|
||||
// providing RPC endpoints for verifying a beacon node's sync status, genesis and
|
||||
// version information.
|
||||
type Server struct {
|
||||
SyncChecker sync.Checker
|
||||
Server *grpc.Server
|
||||
BeaconDB db.ReadOnlyDatabase
|
||||
PeersFetcher p2p.PeersProvider
|
||||
PeerManager p2p.PeerManager
|
||||
MetadataProvider p2p.MetadataProvider
|
||||
GenesisTimeFetcher blockchain.TimeFetcher
|
||||
HeadFetcher blockchain.HeadFetcher
|
||||
SyncChecker sync.Checker
|
||||
OptimisticModeFetcher blockchain.OptimisticModeFetcher
|
||||
Server *grpc.Server
|
||||
BeaconDB db.ReadOnlyDatabase
|
||||
PeersFetcher p2p.PeersProvider
|
||||
PeerManager p2p.PeerManager
|
||||
MetadataProvider p2p.MetadataProvider
|
||||
GenesisTimeFetcher blockchain.TimeFetcher
|
||||
HeadFetcher blockchain.HeadFetcher
|
||||
}
|
||||
|
||||
@@ -42,7 +42,7 @@ func (vs *Server) GetAttesterDuties(ctx context.Context, req *ethpbv1.AttesterDu
|
||||
ctx, span := trace.StartSpan(ctx, "validator.GetAttesterDuties")
|
||||
defer span.End()
|
||||
|
||||
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher); err != nil {
|
||||
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
|
||||
// We simply return the error because it's already a gRPC error.
|
||||
return nil, err
|
||||
}
|
||||
@@ -126,7 +126,7 @@ func (vs *Server) GetProposerDuties(ctx context.Context, req *ethpbv1.ProposerDu
|
||||
ctx, span := trace.StartSpan(ctx, "validator.GetProposerDuties")
|
||||
defer span.End()
|
||||
|
||||
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher); err != nil {
|
||||
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
|
||||
// We simply return the error because it's already a gRPC error.
|
||||
return nil, err
|
||||
}
|
||||
@@ -204,7 +204,7 @@ func (vs *Server) GetSyncCommitteeDuties(ctx context.Context, req *ethpbv2.SyncC
|
||||
ctx, span := trace.StartSpan(ctx, "validator.GetSyncCommitteeDuties")
|
||||
defer span.End()
|
||||
|
||||
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher); err != nil {
|
||||
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
|
||||
// We simply return the error because it's already a gRPC error.
|
||||
return nil, err
|
||||
}
|
||||
@@ -274,7 +274,7 @@ func (vs *Server) ProduceBlock(ctx context.Context, req *ethpbv1.ProduceBlockReq
|
||||
ctx, span := trace.StartSpan(ctx, "validator.ProduceBlock")
|
||||
defer span.End()
|
||||
|
||||
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher); err != nil {
|
||||
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
|
||||
// We simply return the error because it's already a gRPC error.
|
||||
return nil, err
|
||||
}
|
||||
@@ -291,7 +291,7 @@ func (vs *Server) ProduceBlockV2(ctx context.Context, req *ethpbv1.ProduceBlockR
|
||||
_, span := trace.StartSpan(ctx, "validator.ProduceBlockV2")
|
||||
defer span.End()
|
||||
|
||||
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher); err != nil {
|
||||
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
|
||||
// We simply return the error because it's already a gRPC error.
|
||||
return nil, err
|
||||
}
|
||||
@@ -355,7 +355,7 @@ func (vs *Server) ProduceBlockV2SSZ(ctx context.Context, req *ethpbv1.ProduceBlo
|
||||
_, span := trace.StartSpan(ctx, "validator.ProduceBlockV2SSZ")
|
||||
defer span.End()
|
||||
|
||||
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher); err != nil {
|
||||
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
|
||||
// We simply return the error because it's already a gRPC error.
|
||||
return nil, err
|
||||
}
|
||||
@@ -426,7 +426,7 @@ func (vs *Server) ProduceBlindedBlock(ctx context.Context, req *ethpbv1.ProduceB
|
||||
ctx, span := trace.StartSpan(ctx, "validator.ProduceBlindedBlock")
|
||||
defer span.End()
|
||||
|
||||
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher); err != nil {
|
||||
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
|
||||
// We simply return the error because it's already a gRPC error.
|
||||
return nil, err
|
||||
}
|
||||
@@ -493,7 +493,7 @@ func (vs *Server) ProduceBlindedBlockSSZ(ctx context.Context, req *ethpbv1.Produ
|
||||
ctx, span := trace.StartSpan(ctx, "validator.ProduceBlindedBlockSSZ")
|
||||
defer span.End()
|
||||
|
||||
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher); err != nil {
|
||||
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
|
||||
// We simply return the error because it's already a gRPC error.
|
||||
return nil, err
|
||||
}
|
||||
@@ -685,7 +685,7 @@ func (vs *Server) SubmitBeaconCommitteeSubscription(ctx context.Context, req *et
|
||||
ctx, span := trace.StartSpan(ctx, "validator.SubmitBeaconCommitteeSubscription")
|
||||
defer span.End()
|
||||
|
||||
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher); err != nil {
|
||||
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
|
||||
// We simply return the error because it's already a gRPC error.
|
||||
return nil, err
|
||||
}
|
||||
@@ -762,7 +762,7 @@ func (vs *Server) SubmitSyncCommitteeSubscription(ctx context.Context, req *ethp
|
||||
ctx, span := trace.StartSpan(ctx, "validator.SubmitSyncCommitteeSubscription")
|
||||
defer span.End()
|
||||
|
||||
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher); err != nil {
|
||||
if err := rpchelpers.ValidateSync(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
|
||||
// We simply return the error because it's already a gRPC error.
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -249,13 +249,16 @@ func TestGetAttesterDuties(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestGetAttesterDuties_SyncNotReady(t *testing.T) {
|
||||
chainService := &mockChain.ChainService{}
|
||||
st, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
chainService := &mockChain.ChainService{State: st}
|
||||
vs := &Server{
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: true},
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: true},
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
}
|
||||
_, err := vs.GetAttesterDuties(context.Background(), &ethpbv1.AttesterDutiesRequest{})
_, err = vs.GetAttesterDuties(context.Background(), &ethpbv1.AttesterDutiesRequest{})
|
||||
assert.ErrorContains(t, "Syncing to latest head, not ready to respond", err)
|
||||
}
|
||||
|
||||
@@ -406,13 +409,16 @@ func TestGetProposerDuties(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestGetProposerDuties_SyncNotReady(t *testing.T) {
|
||||
chainService := &mockChain.ChainService{}
|
||||
st, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
chainService := &mockChain.ChainService{State: st}
|
||||
vs := &Server{
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: true},
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: true},
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
}
|
||||
_, err := vs.GetProposerDuties(context.Background(), &ethpbv1.ProposerDutiesRequest{})
_, err = vs.GetProposerDuties(context.Background(), &ethpbv1.ProposerDutiesRequest{})
|
||||
assert.ErrorContains(t, "Syncing to latest head, not ready to respond", err)
|
||||
}
|
||||
|
||||
@@ -631,14 +637,16 @@ func TestGetSyncCommitteeDuties(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestGetSyncCommitteeDuties_SyncNotReady(t *testing.T) {
|
||||
chainService := &mockChain.ChainService{}
|
||||
st, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
chainService := &mockChain.ChainService{State: st}
|
||||
vs := &Server{
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: true},
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
}
|
||||
_, err := vs.GetSyncCommitteeDuties(context.Background(), &ethpbv2.SyncCommitteeDutiesRequest{})
_, err = vs.GetSyncCommitteeDuties(context.Background(), &ethpbv2.SyncCommitteeDutiesRequest{})
|
||||
assert.ErrorContains(t, "Syncing to latest head, not ready to respond", err)
|
||||
}
|
||||
|
||||
@@ -751,13 +759,16 @@ func TestProduceBlock(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestProduceBlock_SyncNotReady(t *testing.T) {
|
||||
chainService := &mockChain.ChainService{}
|
||||
st, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
chainService := &mockChain.ChainService{State: st}
|
||||
vs := &Server{
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: true},
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: true},
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
}
|
||||
_, err := vs.ProduceBlock(context.Background(), &ethpbv1.ProduceBlockRequest{})
_, err = vs.ProduceBlock(context.Background(), &ethpbv1.ProduceBlockRequest{})
|
||||
assert.ErrorContains(t, "Syncing to latest head, not ready to respond", err)
|
||||
}
|
||||
|
||||
@@ -1728,24 +1739,30 @@ func TestProduceBlockV2SSZ(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestProduceBlockV2_SyncNotReady(t *testing.T) {
|
||||
chainService := &mockChain.ChainService{}
|
||||
st, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
chainService := &mockChain.ChainService{State: st}
|
||||
vs := &Server{
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: true},
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: true},
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
}
|
||||
_, err := vs.ProduceBlockV2(context.Background(), &ethpbv1.ProduceBlockRequest{})
_, err = vs.ProduceBlockV2(context.Background(), &ethpbv1.ProduceBlockRequest{})
|
||||
assert.ErrorContains(t, "Syncing to latest head, not ready to respond", err)
|
||||
}
|
||||
|
||||
func TestProduceBlockV2SSZ_SyncNotReady(t *testing.T) {
|
||||
chainService := &mockChain.ChainService{}
|
||||
st, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
chainService := &mockChain.ChainService{State: st}
|
||||
vs := &Server{
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: true},
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: true},
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
}
|
||||
_, err := vs.ProduceBlockV2SSZ(context.Background(), &ethpbv1.ProduceBlockRequest{})
_, err = vs.ProduceBlockV2SSZ(context.Background(), &ethpbv1.ProduceBlockRequest{})
|
||||
assert.ErrorContains(t, "Syncing to latest head, not ready to respond", err)
|
||||
}
|
||||
|
||||
@@ -2717,24 +2734,30 @@ func TestProduceBlindedBlockSSZ(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestProduceBlindedBlock_SyncNotReady(t *testing.T) {
|
||||
chainService := &mockChain.ChainService{}
|
||||
st, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
chainService := &mockChain.ChainService{State: st}
|
||||
vs := &Server{
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: true},
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: true},
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
}
|
||||
_, err := vs.ProduceBlindedBlock(context.Background(), &ethpbv1.ProduceBlockRequest{})
_, err = vs.ProduceBlindedBlock(context.Background(), &ethpbv1.ProduceBlockRequest{})
|
||||
assert.ErrorContains(t, "Syncing to latest head, not ready to respond", err)
|
||||
}
|
||||
|
||||
func TestProduceBlindedBlockSSZ_SyncNotReady(t *testing.T) {
|
||||
chainService := &mockChain.ChainService{}
|
||||
st, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
chainService := &mockChain.ChainService{State: st}
|
||||
vs := &Server{
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: true},
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: true},
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
}
|
||||
_, err := vs.ProduceBlindedBlockSSZ(context.Background(), &ethpbv1.ProduceBlockRequest{})
_, err = vs.ProduceBlindedBlockSSZ(context.Background(), &ethpbv1.ProduceBlockRequest{})
|
||||
assert.ErrorContains(t, "Syncing to latest head, not ready to respond", err)
|
||||
}
|
||||
|
||||
@@ -3100,13 +3123,16 @@ func TestSubmitBeaconCommitteeSubscription(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSubmitBeaconCommitteeSubscription_SyncNotReady(t *testing.T) {
|
||||
chainService := &mockChain.ChainService{}
|
||||
st, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
chainService := &mockChain.ChainService{State: st}
|
||||
vs := &Server{
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: true},
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: true},
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
}
|
||||
_, err := vs.SubmitBeaconCommitteeSubscription(context.Background(), &ethpbv1.SubmitBeaconCommitteeSubscriptionsRequest{})
_, err = vs.SubmitBeaconCommitteeSubscription(context.Background(), &ethpbv1.SubmitBeaconCommitteeSubscriptionsRequest{})
|
||||
assert.ErrorContains(t, "Syncing to latest head, not ready to respond", err)
|
||||
}
|
||||
|
||||
@@ -3256,13 +3282,16 @@ func TestSubmitSyncCommitteeSubscription(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSubmitSyncCommitteeSubscription_SyncNotReady(t *testing.T) {
|
||||
chainService := &mockChain.ChainService{}
|
||||
st, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
chainService := &mockChain.ChainService{State: st}
|
||||
vs := &Server{
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: true},
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: true},
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
}
|
||||
_, err := vs.SubmitSyncCommitteeSubscription(context.Background(), &ethpbv2.SubmitSyncCommitteeSubscriptionsRequest{})
_, err = vs.SubmitSyncCommitteeSubscription(context.Background(), &ethpbv2.SubmitSyncCommitteeSubscriptionsRequest{})
|
||||
assert.ErrorContains(t, "Syncing to latest head, not ready to respond", err)
|
||||
}
|
||||
|
||||
|
||||
@@ -218,14 +218,15 @@ func (s *Service) Start() {
|
||||
ProposerSlotIndexCache: s.cfg.ProposerIdsCache,
|
||||
}
|
||||
validatorServerV1 := &validator.Server{
|
||||
HeadFetcher: s.cfg.HeadFetcher,
|
||||
HeadUpdater: s.cfg.HeadUpdater,
|
||||
TimeFetcher: s.cfg.GenesisTimeFetcher,
|
||||
SyncChecker: s.cfg.SyncService,
|
||||
AttestationsPool: s.cfg.AttestationsPool,
|
||||
PeerManager: s.cfg.PeerManager,
|
||||
Broadcaster: s.cfg.Broadcaster,
|
||||
V1Alpha1Server: validatorServer,
|
||||
HeadFetcher: s.cfg.HeadFetcher,
|
||||
HeadUpdater: s.cfg.HeadUpdater,
|
||||
TimeFetcher: s.cfg.GenesisTimeFetcher,
|
||||
SyncChecker: s.cfg.SyncService,
|
||||
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
|
||||
AttestationsPool: s.cfg.AttestationsPool,
|
||||
PeerManager: s.cfg.PeerManager,
|
||||
Broadcaster: s.cfg.Broadcaster,
|
||||
V1Alpha1Server: validatorServer,
|
||||
StateFetcher: &statefetcher.StateProvider{
|
||||
BeaconDB: s.cfg.BeaconDB,
|
||||
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
|
||||
@@ -233,8 +234,7 @@ func (s *Service) Start() {
|
||||
StateGenService: s.cfg.StateGen,
|
||||
ReplayerBuilder: ch,
|
||||
},
|
||||
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
|
||||
SyncCommitteePool: s.cfg.SyncCommitteeObjectPool,
|
||||
SyncCommitteePool: s.cfg.SyncCommitteeObjectPool,
|
||||
}
|
||||
|
||||
nodeServer := &nodev1alpha1.Server{
|
||||
@@ -252,14 +252,15 @@ func (s *Service) Start() {
|
||||
BeaconMonitoringPort: s.cfg.BeaconMonitoringPort,
|
||||
}
|
||||
nodeServerV1 := &node.Server{
|
||||
BeaconDB: s.cfg.BeaconDB,
|
||||
Server: s.grpcServer,
|
||||
SyncChecker: s.cfg.SyncService,
|
||||
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
|
||||
PeersFetcher: s.cfg.PeersFetcher,
|
||||
PeerManager: s.cfg.PeerManager,
|
||||
MetadataProvider: s.cfg.MetadataProvider,
|
||||
HeadFetcher: s.cfg.HeadFetcher,
|
||||
BeaconDB: s.cfg.BeaconDB,
|
||||
Server: s.grpcServer,
|
||||
SyncChecker: s.cfg.SyncService,
|
||||
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
|
||||
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
|
||||
PeersFetcher: s.cfg.PeersFetcher,
|
||||
PeerManager: s.cfg.PeerManager,
|
||||
MetadataProvider: s.cfg.MetadataProvider,
|
||||
HeadFetcher: s.cfg.HeadFetcher,
|
||||
}
|
||||
|
||||
beaconChainServer := &beaconv1alpha1.Server{
|
||||
|
||||
@@ -19,12 +19,13 @@ var (
// trie of the particular field.
type FieldTrie struct {
    *sync.RWMutex
    reference   *stateutil.Reference
    fieldLayers [][]*[32]byte
    field       types.BeaconStateField
    dataType    types.DataType
    length      uint64
    numOfElems  int
    reference     *stateutil.Reference
    fieldLayers   [][]*[32]byte
    field         types.BeaconStateField
    dataType      types.DataType
    length        uint64
    numOfElems    int
    isTransferred bool
}

// NewFieldTrie is the constructor for the field trie data structure. It creates the corresponding
@@ -191,6 +192,43 @@ func (f *FieldTrie) CopyTrie() *FieldTrie {
    }
}

// Length returns the length of the whole field trie.
func (f *FieldTrie) Length() uint64 {
    return f.length
}

// TransferTrie starts the process of transferring all the
// trie-related data to a new trie. This is done if we
// know that other states which hold references to this
// trie are unlikely to need it for recomputation. This helps
// us save on a copy. Any caller of this method will need
// to take care that this isn't called on an empty trie.
func (f *FieldTrie) TransferTrie() *FieldTrie {
    if f.fieldLayers == nil {
        return &FieldTrie{
            field:      f.field,
            dataType:   f.dataType,
            reference:  stateutil.NewRef(1),
            RWMutex:    new(sync.RWMutex),
            length:     f.length,
            numOfElems: f.numOfElems,
        }
    }
    f.isTransferred = true
    nTrie := &FieldTrie{
        fieldLayers: f.fieldLayers,
        field:       f.field,
        dataType:    f.dataType,
        reference:   stateutil.NewRef(1),
        RWMutex:     new(sync.RWMutex),
        length:      f.length,
        numOfElems:  f.numOfElems,
    }
    // Zero out field layers here.
    f.fieldLayers = nil
    return nTrie
}

// TrieRoot returns the corresponding root of the trie.
func (f *FieldTrie) TrieRoot() ([32]byte, error) {
    if f.Empty() {
@@ -222,7 +260,7 @@ func (f *FieldTrie) FieldReference() *stateutil.Reference {
// Empty checks whether the underlying field trie is
// empty or not.
func (f *FieldTrie) Empty() bool {
    return f == nil || len(f.fieldLayers) == 0
    return f == nil || len(f.fieldLayers) == 0 || f.isTransferred
}

// InsertFieldLayer manually inserts a field layer. This method

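A short sketch of the transfer semantics added above, assuming the fieldtrie package is imported under its directory name; the helper below is illustrative, not repo code. After TransferTrie the source trie reports Empty() because isTransferred is set, while the returned trie owns the field layers.

package fieldtriesketch

import (
    "fmt"

    "github.com/prysmaticlabs/prysm/beacon-chain/state/fieldtrie"
)

// handOff moves the field layers out of src instead of copying them, which is
// the saving TransferTrie is after. Assuming src was non-empty (as the doc
// comment above requires), src must be rebuilt or reset before it can serve
// another root computation, since it now reports Empty().
func handOff(src *fieldtrie.FieldTrie) *fieldtrie.FieldTrie {
    moved := src.TransferTrie()
    fmt.Println(src.Empty())   // true: the layers were given away
    fmt.Println(moved.Empty()) // false: the new trie owns them
    return moved
}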
@@ -1,5 +1,4 @@
|
||||
//go:build !minimal
|
||||
// +build !minimal
|
||||
|
||||
package state_native
|
||||
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
//go:build !minimal
|
||||
// +build !minimal
|
||||
|
||||
package state_native
|
||||
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
//go:build minimal
|
||||
// +build minimal
|
||||
|
||||
package state_native
|
||||
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
//go:build minimal
|
||||
// +build minimal
|
||||
|
||||
package state_native
|
||||
|
||||
|
||||
@@ -740,17 +740,31 @@ func (b *BeaconState) rootSelector(ctx context.Context, field nativetypes.FieldI

func (b *BeaconState) recomputeFieldTrie(index nativetypes.FieldIndex, elements interface{}) ([32]byte, error) {
    fTrie := b.stateFieldLeaves[index]
    fTrieMutex := fTrie.RWMutex
    // We can't lock the trie directly because the trie's variable gets reassigned,
    // and therefore we would call Unlock() on a different object.
    fTrieMutex := fTrie.RWMutex
    if fTrie.FieldReference().Refs() > 1 {
        fTrieMutex.Lock()
    fTrieMutex.Lock()

    if fTrie.Empty() {
        err := b.resetFieldTrie(index, elements, fTrie.Length())
        if err != nil {
            fTrieMutex.Unlock()
            return [32]byte{}, err
        }
        // Reduce reference count as we are instantiating a new trie.
        fTrie.FieldReference().MinusRef()
        newTrie := fTrie.CopyTrie()
        fTrieMutex.Unlock()
        return b.stateFieldLeaves[index].TrieRoot()
    }

    if fTrie.FieldReference().Refs() > 1 {
        fTrie.FieldReference().MinusRef()
        newTrie := fTrie.TransferTrie()
        b.stateFieldLeaves[index] = newTrie
        fTrie = newTrie
        fTrieMutex.Unlock()
    }
    fTrieMutex.Unlock()

    // remove duplicate indexes
    b.dirtyIndices[index] = slice.SetUint64(b.dirtyIndices[index])
    // sort indexes again

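The comment in the hunk above points at a classic Go pitfall: if the mutex is reached through a variable that is later pointed at a different trie, Lock and Unlock can end up operating on different mutexes. A stripped-down, stand-alone illustration in plain Go; nothing here is repo code.

package main

import (
    "fmt"
    "sync"
)

type trie struct{ *sync.RWMutex }

func main() {
    current := &trie{RWMutex: new(sync.RWMutex)}

    // Capture the mutex pointer before any reassignment, mirroring the
    // fTrieMutex := fTrie.RWMutex line above.
    mu := current.RWMutex
    mu.Lock()
    current = &trie{RWMutex: new(sync.RWMutex)} // the variable now holds a trie with a fresh mutex
    mu.Unlock()                                 // still pairs with the Lock above

    // Calling current.Unlock() here instead would hit the new, never-locked
    // mutex and crash with "sync: Unlock of unlocked RWMutex".
    fmt.Println(current != nil)
}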
@@ -383,3 +383,204 @@ func TestBeaconState_AppendValidator_DoesntMutateCopy(t *testing.T) {
|
||||
_, ok := st1.ValidatorIndexByPubkey(bytesutil.ToBytes48(val.PublicKey))
|
||||
assert.Equal(t, false, ok, "Expected no validator index to be present in st1 for the newly inserted pubkey")
|
||||
}
|
||||
|
||||
func TestBeaconState_ValidatorMutation_Phase0(t *testing.T) {
|
||||
testState, _ := util.DeterministicGenesisState(t, 400)
|
||||
pbState, err := statenative.ProtobufBeaconStatePhase0(testState.InnerStateUnsafe())
|
||||
require.NoError(t, err)
|
||||
testState, err = statenative.InitializeFromProtoPhase0(pbState)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = testState.HashTreeRoot(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
// Reset tries
|
||||
require.NoError(t, testState.UpdateValidatorAtIndex(200, new(ethpb.Validator)))
|
||||
_, err = testState.HashTreeRoot(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
newState1 := testState.Copy()
|
||||
_ = testState.Copy()
|
||||
|
||||
require.NoError(t, testState.UpdateValidatorAtIndex(15, &ethpb.Validator{
|
||||
PublicKey: make([]byte, 48),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
EffectiveBalance: 1111,
|
||||
Slashed: false,
|
||||
ActivationEligibilityEpoch: 1112,
|
||||
ActivationEpoch: 1114,
|
||||
ExitEpoch: 1116,
|
||||
WithdrawableEpoch: 1117,
|
||||
}))
|
||||
|
||||
rt, err := testState.HashTreeRoot(context.Background())
|
||||
require.NoError(t, err)
|
||||
pbState, err = statenative.ProtobufBeaconStatePhase0(testState.InnerStateUnsafe())
|
||||
require.NoError(t, err)
|
||||
|
||||
copiedTestState, err := statenative.InitializeFromProtoPhase0(pbState)
|
||||
require.NoError(t, err)
|
||||
|
||||
rt2, err := copiedTestState.HashTreeRoot(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, rt, rt2)
|
||||
|
||||
require.NoError(t, newState1.UpdateValidatorAtIndex(150, &ethpb.Validator{
|
||||
PublicKey: make([]byte, 48),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
EffectiveBalance: 2111,
|
||||
Slashed: false,
|
||||
ActivationEligibilityEpoch: 2112,
|
||||
ActivationEpoch: 2114,
|
||||
ExitEpoch: 2116,
|
||||
WithdrawableEpoch: 2117,
|
||||
}))
|
||||
|
||||
rt, err = newState1.HashTreeRoot(context.Background())
|
||||
require.NoError(t, err)
|
||||
pbState, err = statenative.ProtobufBeaconStatePhase0(newState1.InnerStateUnsafe())
|
||||
require.NoError(t, err)
|
||||
|
||||
copiedTestState, err = statenative.InitializeFromProtoPhase0(pbState)
|
||||
require.NoError(t, err)
|
||||
|
||||
rt2, err = copiedTestState.HashTreeRoot(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, rt, rt2)
|
||||
}
|
||||
|
||||
func TestBeaconState_ValidatorMutation_Altair(t *testing.T) {
|
||||
testState, _ := util.DeterministicGenesisStateAltair(t, 400)
|
||||
pbState, err := statenative.ProtobufBeaconStateAltair(testState.InnerStateUnsafe())
|
||||
require.NoError(t, err)
|
||||
testState, err = statenative.InitializeFromProtoAltair(pbState)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = testState.HashTreeRoot(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
// Reset tries
|
||||
require.NoError(t, testState.UpdateValidatorAtIndex(200, new(ethpb.Validator)))
|
||||
_, err = testState.HashTreeRoot(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
newState1 := testState.Copy()
|
||||
_ = testState.Copy()
|
||||
|
||||
require.NoError(t, testState.UpdateValidatorAtIndex(15, &ethpb.Validator{
|
||||
PublicKey: make([]byte, 48),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
EffectiveBalance: 1111,
|
||||
Slashed: false,
|
||||
ActivationEligibilityEpoch: 1112,
|
||||
ActivationEpoch: 1114,
|
||||
ExitEpoch: 1116,
|
||||
WithdrawableEpoch: 1117,
|
||||
}))
|
||||
|
||||
rt, err := testState.HashTreeRoot(context.Background())
|
||||
require.NoError(t, err)
|
||||
pbState, err = statenative.ProtobufBeaconStateAltair(testState.InnerStateUnsafe())
|
||||
require.NoError(t, err)
|
||||
|
||||
copiedTestState, err := statenative.InitializeFromProtoAltair(pbState)
|
||||
require.NoError(t, err)
|
||||
|
||||
rt2, err := copiedTestState.HashTreeRoot(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, rt, rt2)
|
||||
|
||||
require.NoError(t, newState1.UpdateValidatorAtIndex(150, &ethpb.Validator{
|
||||
PublicKey: make([]byte, 48),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
EffectiveBalance: 2111,
|
||||
Slashed: false,
|
||||
ActivationEligibilityEpoch: 2112,
|
||||
ActivationEpoch: 2114,
|
||||
ExitEpoch: 2116,
|
||||
WithdrawableEpoch: 2117,
|
||||
}))
|
||||
|
||||
rt, err = newState1.HashTreeRoot(context.Background())
|
||||
require.NoError(t, err)
|
||||
pbState, err = statenative.ProtobufBeaconStateAltair(newState1.InnerStateUnsafe())
|
||||
require.NoError(t, err)
|
||||
|
||||
copiedTestState, err = statenative.InitializeFromProtoAltair(pbState)
|
||||
require.NoError(t, err)
|
||||
|
||||
rt2, err = copiedTestState.HashTreeRoot(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, rt, rt2)
|
||||
}
|
||||
|
||||
func TestBeaconState_ValidatorMutation_Bellatrix(t *testing.T) {
|
||||
testState, _ := util.DeterministicGenesisStateBellatrix(t, 400)
|
||||
pbState, err := statenative.ProtobufBeaconStateBellatrix(testState.InnerStateUnsafe())
|
||||
require.NoError(t, err)
|
||||
testState, err = statenative.InitializeFromProtoBellatrix(pbState)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = testState.HashTreeRoot(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
// Reset tries
|
||||
require.NoError(t, testState.UpdateValidatorAtIndex(200, new(ethpb.Validator)))
|
||||
_, err = testState.HashTreeRoot(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
newState1 := testState.Copy()
|
||||
_ = testState.Copy()
|
||||
|
||||
require.NoError(t, testState.UpdateValidatorAtIndex(15, &ethpb.Validator{
|
||||
PublicKey: make([]byte, 48),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
EffectiveBalance: 1111,
|
||||
Slashed: false,
|
||||
ActivationEligibilityEpoch: 1112,
|
||||
ActivationEpoch: 1114,
|
||||
ExitEpoch: 1116,
|
||||
WithdrawableEpoch: 1117,
|
||||
}))
|
||||
|
||||
rt, err := testState.HashTreeRoot(context.Background())
|
||||
require.NoError(t, err)
|
||||
pbState, err = statenative.ProtobufBeaconStateBellatrix(testState.InnerStateUnsafe())
|
||||
require.NoError(t, err)
|
||||
|
||||
copiedTestState, err := statenative.InitializeFromProtoBellatrix(pbState)
|
||||
require.NoError(t, err)
|
||||
|
||||
rt2, err := copiedTestState.HashTreeRoot(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, rt, rt2)
|
||||
|
||||
require.NoError(t, newState1.UpdateValidatorAtIndex(150, &ethpb.Validator{
|
||||
PublicKey: make([]byte, 48),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
EffectiveBalance: 2111,
|
||||
Slashed: false,
|
||||
ActivationEligibilityEpoch: 2112,
|
||||
ActivationEpoch: 2114,
|
||||
ExitEpoch: 2116,
|
||||
WithdrawableEpoch: 2117,
|
||||
}))
|
||||
|
||||
rt, err = newState1.HashTreeRoot(context.Background())
|
||||
require.NoError(t, err)
|
||||
pbState, err = statenative.ProtobufBeaconStateBellatrix(newState1.InnerStateUnsafe())
|
||||
require.NoError(t, err)
|
||||
|
||||
copiedTestState, err = statenative.InitializeFromProtoBellatrix(pbState)
|
||||
require.NoError(t, err)
|
||||
|
||||
rt2, err = copiedTestState.HashTreeRoot(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, rt, rt2)
|
||||
}
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
//go:build go1.18
|
||||
// +build go1.18
|
||||
|
||||
package v1_test
|
||||
|
||||
|
||||
@@ -390,17 +390,31 @@ func (b *BeaconState) rootSelector(ctx context.Context, field types.FieldIndex)
|
||||
|
||||
func (b *BeaconState) recomputeFieldTrie(index types.FieldIndex, elements interface{}) ([32]byte, error) {
|
||||
fTrie := b.stateFieldLeaves[index]
|
||||
fTrieMutex := fTrie.RWMutex
|
||||
// We can't lock the trie directly because the trie's variable gets reassigned,
|
||||
// and therefore we would call Unlock() on a different object.
|
||||
fTrieMutex := fTrie.RWMutex
|
||||
if fTrie.FieldReference().Refs() > 1 {
|
||||
fTrieMutex.Lock()
|
||||
fTrieMutex.Lock()
|
||||
|
||||
if fTrie.Empty() {
|
||||
err := b.resetFieldTrie(index, elements, fTrie.Length())
|
||||
if err != nil {
|
||||
fTrieMutex.Unlock()
|
||||
return [32]byte{}, err
|
||||
}
|
||||
// Reduce reference count as we are instantiating a new trie.
|
||||
fTrie.FieldReference().MinusRef()
|
||||
newTrie := fTrie.CopyTrie()
|
||||
fTrieMutex.Unlock()
|
||||
return b.stateFieldLeaves[index].TrieRoot()
|
||||
}
|
||||
|
||||
if fTrie.FieldReference().Refs() > 1 {
|
||||
fTrie.FieldReference().MinusRef()
|
||||
newTrie := fTrie.TransferTrie()
|
||||
b.stateFieldLeaves[index] = newTrie
|
||||
fTrie = newTrie
|
||||
fTrieMutex.Unlock()
|
||||
}
|
||||
fTrieMutex.Unlock()
|
||||
|
||||
// remove duplicate indexes
|
||||
b.dirtyIndices[index] = slice.SetUint64(b.dirtyIndices[index])
|
||||
// sort indexes again
|
||||
|
||||
@@ -269,3 +269,70 @@ func TestBeaconState_AppendValidator_DoesntMutateCopy(t *testing.T) {
|
||||
_, ok := st1.ValidatorIndexByPubkey(bytesutil.ToBytes48(val.PublicKey))
|
||||
assert.Equal(t, false, ok, "Expected no validator index to be present in st1 for the newly inserted pubkey")
|
||||
}
|
||||
|
||||
func TestBeaconState_ValidatorMutation_Phase0(t *testing.T) {
|
||||
testState, _ := util.DeterministicGenesisState(t, 400)
|
||||
pbState, err := v1.ProtobufBeaconState(testState.InnerStateUnsafe())
|
||||
require.NoError(t, err)
|
||||
testState, err = v1.InitializeFromProto(pbState)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = testState.HashTreeRoot(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
// Reset tries
|
||||
require.NoError(t, testState.UpdateValidatorAtIndex(200, new(ethpb.Validator)))
|
||||
_, err = testState.HashTreeRoot(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
newState1 := testState.Copy()
|
||||
_ = testState.Copy()
|
||||
|
||||
require.NoError(t, testState.UpdateValidatorAtIndex(15, &ethpb.Validator{
|
||||
PublicKey: make([]byte, 48),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
EffectiveBalance: 1111,
|
||||
Slashed: false,
|
||||
ActivationEligibilityEpoch: 1112,
|
||||
ActivationEpoch: 1114,
|
||||
ExitEpoch: 1116,
|
||||
WithdrawableEpoch: 1117,
|
||||
}))
|
||||
|
||||
rt, err := testState.HashTreeRoot(context.Background())
|
||||
require.NoError(t, err)
|
||||
pbState, err = v1.ProtobufBeaconState(testState.InnerStateUnsafe())
|
||||
require.NoError(t, err)
|
||||
|
||||
copiedTestState, err := v1.InitializeFromProto(pbState)
|
||||
require.NoError(t, err)
|
||||
|
||||
rt2, err := copiedTestState.HashTreeRoot(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, rt, rt2)
|
||||
|
||||
require.NoError(t, newState1.UpdateValidatorAtIndex(150, &ethpb.Validator{
|
||||
PublicKey: make([]byte, 48),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
EffectiveBalance: 2111,
|
||||
Slashed: false,
|
||||
ActivationEligibilityEpoch: 2112,
|
||||
ActivationEpoch: 2114,
|
||||
ExitEpoch: 2116,
|
||||
WithdrawableEpoch: 2117,
|
||||
}))
|
||||
|
||||
rt, err = newState1.HashTreeRoot(context.Background())
|
||||
require.NoError(t, err)
|
||||
pbState, err = v1.ProtobufBeaconState(newState1.InnerStateUnsafe())
|
||||
require.NoError(t, err)
|
||||
|
||||
copiedTestState, err = v1.InitializeFromProto(pbState)
|
||||
require.NoError(t, err)
|
||||
|
||||
rt2, err = copiedTestState.HashTreeRoot(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, rt, rt2)
|
||||
}
|
||||
|
||||
@@ -71,6 +71,7 @@ go_test(
|
||||
"references_test.go",
|
||||
"setters_test.go",
|
||||
"state_fuzz_test.go",
|
||||
"state_test.go",
|
||||
"state_trie_test.go",
|
||||
],
|
||||
data = glob(["testdata/**"]),
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
//go:build go1.18
|
||||
// +build go1.18
|
||||
|
||||
package v2_test
|
||||
|
||||
|
||||
beacon-chain/state/v2/state_test.go (new file, 79 lines)
@@ -0,0 +1,79 @@
|
||||
package v2_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
v2 "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/testing/util"
|
||||
)
|
||||
|
||||
func TestBeaconState_ValidatorMutation_Altair(t *testing.T) {
|
||||
testState, _ := util.DeterministicGenesisStateAltair(t, 400)
|
||||
pbState, err := v2.ProtobufBeaconState(testState.InnerStateUnsafe())
|
||||
require.NoError(t, err)
|
||||
testState, err = v2.InitializeFromProto(pbState)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = testState.HashTreeRoot(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
// Reset tries
|
||||
require.NoError(t, testState.UpdateValidatorAtIndex(200, new(ethpb.Validator)))
|
||||
_, err = testState.HashTreeRoot(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
newState1 := testState.Copy()
|
||||
_ = testState.Copy()
|
||||
|
||||
require.NoError(t, testState.UpdateValidatorAtIndex(15, &ethpb.Validator{
|
||||
PublicKey: make([]byte, 48),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
EffectiveBalance: 1111,
|
||||
Slashed: false,
|
||||
ActivationEligibilityEpoch: 1112,
|
||||
ActivationEpoch: 1114,
|
||||
ExitEpoch: 1116,
|
||||
WithdrawableEpoch: 1117,
|
||||
}))
|
||||
|
||||
rt, err := testState.HashTreeRoot(context.Background())
|
||||
require.NoError(t, err)
|
||||
pbState, err = v2.ProtobufBeaconState(testState.InnerStateUnsafe())
|
||||
require.NoError(t, err)
|
||||
|
||||
copiedTestState, err := v2.InitializeFromProto(pbState)
|
||||
require.NoError(t, err)
|
||||
|
||||
rt2, err := copiedTestState.HashTreeRoot(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, rt, rt2)
|
||||
|
||||
require.NoError(t, newState1.UpdateValidatorAtIndex(150, &ethpb.Validator{
|
||||
PublicKey: make([]byte, 48),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
EffectiveBalance: 2111,
|
||||
Slashed: false,
|
||||
ActivationEligibilityEpoch: 2112,
|
||||
ActivationEpoch: 2114,
|
||||
ExitEpoch: 2116,
|
||||
WithdrawableEpoch: 2117,
|
||||
}))
|
||||
|
||||
rt, err = newState1.HashTreeRoot(context.Background())
|
||||
require.NoError(t, err)
|
||||
pbState, err = v2.ProtobufBeaconState(newState1.InnerStateUnsafe())
|
||||
require.NoError(t, err)
|
||||
|
||||
copiedTestState, err = v2.InitializeFromProto(pbState)
|
||||
require.NoError(t, err)
|
||||
|
||||
rt2, err = copiedTestState.HashTreeRoot(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, rt, rt2)
|
||||
}
|
||||
@@ -378,17 +378,31 @@ func (b *BeaconState) rootSelector(ctx context.Context, field types.FieldIndex)
|
||||
|
||||
func (b *BeaconState) recomputeFieldTrie(index types.FieldIndex, elements interface{}) ([32]byte, error) {
|
||||
fTrie := b.stateFieldLeaves[index]
|
||||
fTrieMutex := fTrie.RWMutex
|
||||
// We can't lock the trie directly because the trie's variable gets reassigned,
|
||||
// and therefore we would call Unlock() on a different object.
|
||||
fTrieMutex := fTrie.RWMutex
|
||||
if fTrie.FieldReference().Refs() > 1 {
|
||||
fTrieMutex.Lock()
|
||||
fTrieMutex.Lock()
|
||||
|
||||
if fTrie.Empty() {
|
||||
err := b.resetFieldTrie(index, elements, fTrie.Length())
|
||||
if err != nil {
|
||||
fTrieMutex.Unlock()
|
||||
return [32]byte{}, err
|
||||
}
|
||||
// Reduce reference count as we are instantiating a new trie.
|
||||
fTrie.FieldReference().MinusRef()
|
||||
newTrie := fTrie.CopyTrie()
|
||||
fTrieMutex.Unlock()
|
||||
return b.stateFieldLeaves[index].TrieRoot()
|
||||
}
|
||||
|
||||
if fTrie.FieldReference().Refs() > 1 {
|
||||
fTrie.FieldReference().MinusRef()
|
||||
newTrie := fTrie.TransferTrie()
|
||||
b.stateFieldLeaves[index] = newTrie
|
||||
fTrie = newTrie
|
||||
fTrieMutex.Unlock()
|
||||
}
|
||||
fTrieMutex.Unlock()
|
||||
|
||||
// remove duplicate indexes
|
||||
b.dirtyIndices[index] = slice.SetUint64(b.dirtyIndices[index])
|
||||
// sort indexes again
|
||||
|
||||
@@ -73,6 +73,7 @@ go_test(
|
||||
"references_test.go",
|
||||
"setters_test.go",
|
||||
"state_fuzz_test.go",
|
||||
"state_test.go",
|
||||
"state_trie_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
//go:build go1.18
|
||||
// +build go1.18
|
||||
|
||||
package v3_test
|
||||
|
||||
|
||||
beacon-chain/state/v3/state_test.go (new file, 79 lines)
@@ -0,0 +1,79 @@
|
||||
package v3_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
v3 "github.com/prysmaticlabs/prysm/beacon-chain/state/v3"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/testing/util"
|
||||
)
|
||||
|
||||
func TestBeaconState_ValidatorMutation_Bellatrix(t *testing.T) {
|
||||
testState, _ := util.DeterministicGenesisStateBellatrix(t, 400)
|
||||
pbState, err := v3.ProtobufBeaconState(testState.InnerStateUnsafe())
|
||||
require.NoError(t, err)
|
||||
testState, err = v3.InitializeFromProto(pbState)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = testState.HashTreeRoot(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
// Reset tries
|
||||
require.NoError(t, testState.UpdateValidatorAtIndex(200, new(ethpb.Validator)))
|
||||
_, err = testState.HashTreeRoot(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
newState1 := testState.Copy()
|
||||
_ = testState.Copy()
|
||||
|
||||
require.NoError(t, testState.UpdateValidatorAtIndex(15, &ethpb.Validator{
|
||||
PublicKey: make([]byte, 48),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
EffectiveBalance: 1111,
|
||||
Slashed: false,
|
||||
ActivationEligibilityEpoch: 1112,
|
||||
ActivationEpoch: 1114,
|
||||
ExitEpoch: 1116,
|
||||
WithdrawableEpoch: 1117,
|
||||
}))
|
||||
|
||||
rt, err := testState.HashTreeRoot(context.Background())
|
||||
require.NoError(t, err)
|
||||
pbState, err = v3.ProtobufBeaconState(testState.InnerStateUnsafe())
|
||||
require.NoError(t, err)
|
||||
|
||||
copiedTestState, err := v3.InitializeFromProtoUnsafe(pbState)
|
||||
require.NoError(t, err)
|
||||
|
||||
rt2, err := copiedTestState.HashTreeRoot(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, rt, rt2)
|
||||
|
||||
require.NoError(t, newState1.UpdateValidatorAtIndex(150, &ethpb.Validator{
|
||||
PublicKey: make([]byte, 48),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
EffectiveBalance: 2111,
|
||||
Slashed: false,
|
||||
ActivationEligibilityEpoch: 2112,
|
||||
ActivationEpoch: 2114,
|
||||
ExitEpoch: 2116,
|
||||
WithdrawableEpoch: 2117,
|
||||
}))
|
||||
|
||||
rt, err = newState1.HashTreeRoot(context.Background())
|
||||
require.NoError(t, err)
|
||||
pbState, err = v3.ProtobufBeaconState(newState1.InnerStateUnsafe())
|
||||
require.NoError(t, err)
|
||||
|
||||
copiedTestState, err = v3.InitializeFromProto(pbState)
|
||||
require.NoError(t, err)
|
||||
|
||||
rt2, err = copiedTestState.HashTreeRoot(context.Background())
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, rt, rt2)
|
||||
}
|
||||
@@ -376,14 +376,31 @@ func (b *BeaconState) rootSelector(field types.FieldIndex) ([32]byte, error) {
|
||||
|
||||
func (b *BeaconState) recomputeFieldTrie(index types.FieldIndex, elements interface{}) ([32]byte, error) {
|
||||
fTrie := b.stateFieldLeaves[index]
|
||||
if fTrie.FieldReference().Refs() > 1 {
|
||||
fTrie.Lock()
|
||||
defer fTrie.Unlock()
|
||||
fTrieMutex := fTrie.RWMutex
|
||||
// We can't lock the trie directly because the trie's variable gets reassigned,
|
||||
// and therefore we would call Unlock() on a different object.
|
||||
fTrieMutex.Lock()
|
||||
|
||||
if fTrie.Empty() {
|
||||
err := b.resetFieldTrie(index, elements, fTrie.Length())
|
||||
if err != nil {
|
||||
fTrieMutex.Unlock()
|
||||
return [32]byte{}, err
|
||||
}
|
||||
// Reduce reference count as we are instantiating a new trie.
|
||||
fTrie.FieldReference().MinusRef()
|
||||
newTrie := fTrie.CopyTrie()
|
||||
fTrieMutex.Unlock()
|
||||
return b.stateFieldLeaves[index].TrieRoot()
|
||||
}
|
||||
|
||||
if fTrie.FieldReference().Refs() > 1 {
|
||||
fTrie.FieldReference().MinusRef()
|
||||
newTrie := fTrie.TransferTrie()
|
||||
b.stateFieldLeaves[index] = newTrie
|
||||
fTrie = newTrie
|
||||
}
|
||||
fTrieMutex.Unlock()
|
||||
|
||||
// remove duplicate indexes
|
||||
b.dirtyIndices[index] = slice.SetUint64(b.dirtyIndices[index])
|
||||
// sort indexes again
|
||||
|
||||
@@ -11,7 +11,9 @@ go_library(
|
||||
deps = [
|
||||
"//api/client/beacon:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//io/file:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -6,6 +6,8 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/api/client/beacon"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// APIInitializer manages initializing the beacon node using checkpoint sync, retrieving the checkpoint state and root
|
||||
@@ -27,6 +29,15 @@ func NewAPIInitializer(beaconNodeHost string) (*APIInitializer, error) {
// Initialize downloads origin state and block for checkpoint sync and initializes database records to
// prepare the node to begin syncing from that point.
func (dl *APIInitializer) Initialize(ctx context.Context, d db.Database) error {
    origin, err := d.OriginCheckpointBlockRoot(ctx)
    if err == nil && origin != params.BeaconConfig().ZeroHash {
        log.Warnf("origin checkpoint root %#x found in db, ignoring checkpoint sync flags", origin)
        return nil
    } else {
        if !errors.Is(err, db.ErrNotFound) {
            return errors.Wrap(err, "error while checking database for origin root")
        }
    }
    od, err := beacon.DownloadFinalizedData(ctx, dl.c)
    if err != nil {
        return errors.Wrap(err, "Error retrieving checkpoint origin state and block")

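Both initializers now guard against re-running checkpoint sync when the database already holds an origin checkpoint root. The same guard, pulled out into a standalone helper for clarity; every identifier is taken from the hunks above, but the helper itself is hypothetical.

package checkpointsketch

import (
    "context"

    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/beacon-chain/db"
    "github.com/prysmaticlabs/prysm/config/params"
)

// alreadyInitialized reports whether an origin checkpoint root is already stored.
// db.ErrNotFound means "nothing stored yet, go ahead"; any other lookup error is real.
func alreadyInitialized(ctx context.Context, d db.Database) (bool, error) {
    origin, err := d.OriginCheckpointBlockRoot(ctx)
    if err != nil {
        if errors.Is(err, db.ErrNotFound) {
            return false, nil
        }
        return false, errors.Wrap(err, "error while checking database for origin root")
    }
    return origin != params.BeaconConfig().ZeroHash, nil
}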
@@ -5,10 +5,11 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/io/file"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/io/file"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// Initializer describes a type that is able to obtain the checkpoint sync data (BeaconState and SignedBeaconBlock)
|
||||
@@ -42,6 +43,15 @@ type FileInitializer struct {
|
||||
// Initialize is called in the BeaconNode db startup code if an Initializer is present.
|
||||
// Initialize does what is needed to prepare the beacon node database for syncing from the weak subjectivity checkpoint.
|
||||
func (fi *FileInitializer) Initialize(ctx context.Context, d db.Database) error {
|
||||
origin, err := d.OriginCheckpointBlockRoot(ctx)
|
||||
if err == nil && origin != params.BeaconConfig().ZeroHash {
|
||||
log.Warnf("origin checkpoint root %#x found in db, ignoring checkpoint sync flags", origin)
|
||||
return nil
|
||||
} else {
|
||||
if !errors.Is(err, db.ErrNotFound) {
|
||||
return errors.Wrap(err, "error while checking database for origin root")
|
||||
}
|
||||
}
|
||||
serBlock, err := file.ReadFileAsBytes(fi.blockPath)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error reading block file %s for checkpoint sync init", fi.blockPath)
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
//go:build fuzz
|
||||
// +build fuzz
|
||||
|
||||
package sync
|
||||
|
||||
|
||||
@@ -13,5 +13,6 @@ go_library(
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//io/file:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -3,6 +3,8 @@ package genesis
|
||||
import (
|
||||
"context"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/api/client/beacon"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db"
|
||||
@@ -27,6 +29,18 @@ func NewAPIInitializer(beaconNodeHost string) (*APIInitializer, error) {
|
||||
// Initialize downloads origin state and block for checkpoint sync and initializes database records to
|
||||
// prepare the node to begin syncing from that point.
|
||||
func (dl *APIInitializer) Initialize(ctx context.Context, d db.Database) error {
|
||||
existing, err := d.GenesisState(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if existing != nil && !existing.IsNil() {
|
||||
htr, err := existing.HashTreeRoot(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error while computing hash_tree_root of existing genesis state")
|
||||
}
|
||||
log.Warnf("database contains genesis with htr=%#x, ignoring remote genesis state parameter", htr)
|
||||
return nil
|
||||
}
|
||||
sb, err := dl.c.GetState(ctx, beacon.IdGenesis)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "Error retrieving genesis state from %s", dl.c.NodeURL())
|
||||
|
||||
@@ -187,22 +187,8 @@ func (q *blocksQueue) loop() {
    ticker := time.NewTicker(pollingInterval)
    defer ticker.Stop()
    for {
        // Check highest expected slot when we approach chain's head slot.
        if q.chain.HeadSlot() >= q.highestExpectedSlot {
            // By the time initial sync is complete, highest slot may increase, re-check.
            if q.mode == modeStopOnFinalizedEpoch {
                if q.highestExpectedSlot < q.blocksFetcher.bestFinalizedSlot() {
                    q.highestExpectedSlot = q.blocksFetcher.bestFinalizedSlot()
                    continue
                }
            } else {
                if q.highestExpectedSlot < q.blocksFetcher.bestNonFinalizedSlot() {
                    q.highestExpectedSlot = q.blocksFetcher.bestNonFinalizedSlot()
                    continue
                }
            }
            log.WithField("slot", q.highestExpectedSlot).Debug("Highest expected slot reached")
            q.cancel()
        if waitHighestExpectedSlot(q) {
            continue
        }

        log.WithFields(logrus.Fields{
@@ -277,6 +263,27 @@ func (q *blocksQueue) loop() {
    }
}

func waitHighestExpectedSlot(q *blocksQueue) bool {
    // Check highest expected slot when we approach chain's head slot.
    if q.chain.HeadSlot() >= q.highestExpectedSlot {
        // By the time initial sync is complete, highest slot may increase, re-check.
        if q.mode == modeStopOnFinalizedEpoch {
            if q.highestExpectedSlot < q.blocksFetcher.bestFinalizedSlot() {
                q.highestExpectedSlot = q.blocksFetcher.bestFinalizedSlot()
                return true
            }
        } else {
            if q.highestExpectedSlot < q.blocksFetcher.bestNonFinalizedSlot() {
                q.highestExpectedSlot = q.blocksFetcher.bestNonFinalizedSlot()
                return true
            }
        }
        log.WithField("slot", q.highestExpectedSlot).Debug("Highest expected slot reached")
        q.cancel()
    }
    return false
}

// onScheduleEvent is an event called on newly arrived epochs. Transforms state to scheduled.
func (q *blocksQueue) onScheduleEvent(ctx context.Context) eventHandlerFn {
    return func(m *stateMachine, in interface{}) (stateID, error) {

@@ -6,7 +6,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition/interop"
|
||||
"github.com/prysmaticlabs/prysm/config/features"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"google.golang.org/protobuf/proto"
|
||||
@@ -37,14 +36,6 @@ func (s *Service) beaconBlockSubscriber(ctx context.Context, msg proto.Message)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
if !features.Get().CorrectlyPruneCanonicalAtts {
|
||||
// Delete attestations from the block in the pool to avoid inclusion in future block.
|
||||
if err := s.deleteAttsInPool(block.Body().Attestations()); err != nil {
|
||||
log.Debugf("Could not delete attestations in pool: %v", err)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
//go:build go1.18
|
||||
// +build go1.18
|
||||
|
||||
package sync
|
||||
|
||||
|
||||
@@ -6,6 +6,7 @@ go_library(
|
||||
srcs = [
|
||||
"bazel.go",
|
||||
"data_path.go",
|
||||
"non_bazel.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/build/bazel",
|
||||
visibility = ["//visibility:public"],
|
||||
|
||||
@@ -9,7 +9,6 @@
|
||||
// licenses/APL.txt.
|
||||
|
||||
//go:build bazel
|
||||
// +build bazel
|
||||
|
||||
package bazel
|
||||
|
||||
|
||||
@@ -9,7 +9,6 @@
|
||||
// licenses/APL.txt.
|
||||
|
||||
//go:build !bazel
|
||||
// +build !bazel
|
||||
|
||||
package bazel
|
||||
|
||||
|
||||
@@ -6,6 +6,7 @@ go_library(
|
||||
"accounts.go",
|
||||
"backup.go",
|
||||
"delete.go",
|
||||
"exit.go",
|
||||
"list.go",
|
||||
"wallet_utils.go",
|
||||
],
|
||||
@@ -34,6 +35,7 @@ go_test(
|
||||
srcs = [
|
||||
"backup_test.go",
|
||||
"delete_test.go",
|
||||
"exit_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
@@ -42,7 +44,9 @@ go_test(
|
||||
"//crypto/bls:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//io/file:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/mock:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//time:go_default_library",
|
||||
"//validator/accounts:go_default_library",
|
||||
@@ -52,8 +56,10 @@ go_test(
|
||||
"//validator/keymanager/derived:go_default_library",
|
||||
"//validator/keymanager/local:go_default_library",
|
||||
"//validator/testing:go_default_library",
|
||||
"@com_github_golang_mock//gomock:go_default_library",
|
||||
"@com_github_google_uuid//:go_default_library",
|
||||
"@com_github_urfave_cli_v2//:go_default_library",
|
||||
"@com_github_wealdtech_go_eth2_wallet_encryptor_keystorev4//:go_default_library",
|
||||
"@org_golang_google_protobuf//types/known/timestamppb:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -30,6 +30,7 @@ var Commands = &cli.Command{
|
||||
features.Mainnet,
|
||||
features.PraterTestnet,
|
||||
features.RopstenTestnet,
|
||||
features.SepoliaTestnet,
|
||||
cmd.AcceptTosFlag,
|
||||
}),
|
||||
Before: func(cliCtx *cli.Context) error {
|
||||
@@ -39,10 +40,7 @@ var Commands = &cli.Command{
|
||||
if err := tos.VerifyTosAcceptedOrPrompt(cliCtx); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := features.ConfigureValidator(cliCtx); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
return features.ConfigureValidator(cliCtx)
|
||||
},
|
||||
Action: func(cliCtx *cli.Context) error {
|
||||
if err := accountsDelete(cliCtx); err != nil {
|
||||
@@ -69,6 +67,7 @@ var Commands = &cli.Command{
|
||||
features.Mainnet,
|
||||
features.PraterTestnet,
|
||||
features.RopstenTestnet,
|
||||
features.SepoliaTestnet,
|
||||
cmd.AcceptTosFlag,
|
||||
}),
|
||||
Before: func(cliCtx *cli.Context) error {
|
||||
@@ -78,10 +77,7 @@ var Commands = &cli.Command{
|
||||
if err := tos.VerifyTosAcceptedOrPrompt(cliCtx); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := features.ConfigureValidator(cliCtx); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
return features.ConfigureValidator(cliCtx)
|
||||
},
|
||||
Action: func(cliCtx *cli.Context) error {
|
||||
if err := accountsList(cliCtx); err != nil {
|
||||
@@ -105,6 +101,7 @@ var Commands = &cli.Command{
|
||||
features.Mainnet,
|
||||
features.PraterTestnet,
|
||||
features.RopstenTestnet,
|
||||
features.SepoliaTestnet,
|
||||
cmd.AcceptTosFlag,
|
||||
}),
|
||||
Before: func(cliCtx *cli.Context) error {
|
||||
@@ -114,10 +111,7 @@ var Commands = &cli.Command{
|
||||
if err := tos.VerifyTosAcceptedOrPrompt(cliCtx); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := features.ConfigureValidator(cliCtx); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
return features.ConfigureValidator(cliCtx)
|
||||
},
|
||||
Action: func(cliCtx *cli.Context) error {
|
||||
if err := accountsBackup(cliCtx); err != nil {
|
||||
@@ -138,6 +132,7 @@ var Commands = &cli.Command{
|
||||
features.Mainnet,
|
||||
features.PraterTestnet,
|
||||
features.RopstenTestnet,
|
||||
features.SepoliaTestnet,
|
||||
cmd.AcceptTosFlag,
|
||||
}),
|
||||
Before: func(cliCtx *cli.Context) error {
|
||||
@@ -174,19 +169,20 @@ var Commands = &cli.Command{
|
||||
features.Mainnet,
|
||||
features.PraterTestnet,
|
||||
features.RopstenTestnet,
|
||||
features.SepoliaTestnet,
|
||||
cmd.AcceptTosFlag,
|
||||
}),
|
||||
Before: func(cliCtx *cli.Context) error {
|
||||
if err := cmd.LoadFlagsFromConfig(cliCtx, cliCtx.Command.Flags); err != nil {
|
||||
return err
|
||||
}
|
||||
return tos.VerifyTosAcceptedOrPrompt(cliCtx)
|
||||
},
|
||||
Action: func(cliCtx *cli.Context) error {
|
||||
if err := features.ConfigureValidator(cliCtx); err != nil {
|
||||
if err := tos.VerifyTosAcceptedOrPrompt(cliCtx); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := accounts.ExitAccountsCli(cliCtx, os.Stdin); err != nil {
|
||||
return features.ConfigureValidator(cliCtx)
|
||||
},
|
||||
Action: func(cliCtx *cli.Context) error {
|
||||
if err := accountsExit(cliCtx, os.Stdin); err != nil {
|
||||
log.Fatalf("Could not perform voluntary exit: %v", err)
|
||||
}
|
||||
return nil
|
||||
|
||||
cmd/validator/accounts/exit.go (new file, 57 lines)
@@ -0,0 +1,57 @@
|
||||
package accounts
|
||||
|
||||
import (
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/cmd"
|
||||
"github.com/prysmaticlabs/prysm/cmd/validator/flags"
|
||||
"github.com/prysmaticlabs/prysm/validator/accounts"
|
||||
"github.com/prysmaticlabs/prysm/validator/client"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
func accountsExit(c *cli.Context, r io.Reader) error {
|
||||
w, km, err := walletWithKeymanager(c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dialOpts := client.ConstructDialOptions(
|
||||
c.Int(cmd.GrpcMaxCallRecvMsgSizeFlag.Name),
|
||||
c.String(flags.CertFlag.Name),
|
||||
c.Uint(flags.GrpcRetriesFlag.Name),
|
||||
c.Duration(flags.GrpcRetryDelayFlag.Name),
|
||||
)
|
||||
grpcHeaders := strings.Split(c.String(flags.GrpcHeadersFlag.Name), ",")
|
||||
|
||||
opts := []accounts.Option{
|
||||
accounts.WithWallet(w),
|
||||
accounts.WithKeymanager(km),
|
||||
accounts.WithGRPCDialOpts(dialOpts),
|
||||
accounts.WithBeaconRPCProvider(c.String(flags.BeaconRPCProviderFlag.Name)),
|
||||
accounts.WithGRPCHeaders(grpcHeaders),
|
||||
}
|
||||
|
||||
// Get full set of public keys from the keymanager.
|
||||
validatingPublicKeys, err := km.FetchValidatingPublicKeys(c.Context)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(validatingPublicKeys) == 0 {
|
||||
return errors.New("wallet is empty, no accounts to delete")
|
||||
}
|
||||
// Filter keys either from CLI flag or from interactive session.
|
||||
rawPubKey, formattedPubKeys, err := accounts.FilterExitAccountsFromUserInput(c, r, validatingPublicKeys)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not filter public keys for deletion")
|
||||
}
|
||||
opts = append(opts, accounts.WithRawPubKeys(rawPubKey))
|
||||
opts = append(opts, accounts.WithFormattedPubKeys(formattedPubKeys))
|
||||
|
||||
acc, err := accounts.NewCLIManager(opts...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return acc.Exit(c.Context)
|
||||
}
|
||||
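For orientation, here is a minimal, self-contained sketch of how a helper like accountsExit is typically wired into a urfave/cli command, following the Before/Action pattern visible in the accounts command hunks above (load flags from a config file, verify ToS, configure feature flags, then run the action). The subcommand name "voluntary-exit" and the stubbed accountsExit below are illustrative assumptions, not Prysm's actual command definition.

// Sketch only: mirrors the Before/Action wiring shown in the hunks above.
// The "voluntary-exit" name and the stub accountsExit are assumptions for
// illustration; Prysm's real command lives in cmd/validator/accounts.
package main

import (
	"io"
	"log"
	"os"

	"github.com/urfave/cli/v2"
)

// accountsExit stands in for the helper added in cmd/validator/accounts/exit.go.
func accountsExit(_ *cli.Context, _ io.Reader) error { return nil }

func main() {
	app := &cli.App{
		Commands: []*cli.Command{
			{
				Name:  "voluntary-exit",
				Usage: "Performs a voluntary exit for selected validator accounts",
				Before: func(cliCtx *cli.Context) error {
					// In Prysm this is where cmd.LoadFlagsFromConfig,
					// tos.VerifyTosAcceptedOrPrompt and features.ConfigureValidator run.
					return nil
				},
				Action: func(cliCtx *cli.Context) error {
					if err := accountsExit(cliCtx, os.Stdin); err != nil {
						log.Fatalf("Could not perform voluntary exit: %v", err)
					}
					return nil
				},
			},
		},
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}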
217
cmd/validator/accounts/exit_test.go
Normal file

@@ -0,0 +1,217 @@
package accounts

import (
"bytes"
"os"
"path/filepath"
"sort"
"testing"
"time"

"github.com/golang/mock/gomock"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/testing/assert"
mock2 "github.com/prysmaticlabs/prysm/testing/mock"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/validator/accounts"
"github.com/prysmaticlabs/prysm/validator/accounts/wallet"
"github.com/prysmaticlabs/prysm/validator/keymanager"
"google.golang.org/protobuf/types/known/timestamppb"
)

func TestExitAccountsCli_OK(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
mockValidatorClient := mock2.NewMockBeaconNodeValidatorClient(ctrl)
mockNodeClient := mock2.NewMockNodeClient(ctrl)

mockValidatorClient.EXPECT().
ValidatorIndex(gomock.Any(), gomock.Any()).
Return(&ethpb.ValidatorIndexResponse{Index: 1}, nil)

// Any time in the past will suffice
genesisTime := &timestamppb.Timestamp{
Seconds: time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC).Unix(),
}

mockNodeClient.EXPECT().
GetGenesis(gomock.Any(), gomock.Any()).
Return(&ethpb.Genesis{GenesisTime: genesisTime}, nil)

mockValidatorClient.EXPECT().
DomainData(gomock.Any(), gomock.Any()).
Return(&ethpb.DomainResponse{SignatureDomain: make([]byte, 32)}, nil)

mockValidatorClient.EXPECT().
ProposeExit(gomock.Any(), gomock.AssignableToTypeOf(&ethpb.SignedVoluntaryExit{})).
Return(&ethpb.ProposeExitResponse{}, nil)

walletDir, _, passwordFilePath := setupWalletAndPasswordsDir(t)
// Write a directory where we will import keys from.
keysDir := filepath.Join(t.TempDir(), "keysDir")
require.NoError(t, os.MkdirAll(keysDir, os.ModePerm))

// Create keystore file in the keys directory we can then import from in our wallet.
keystore, _ := createKeystore(t, keysDir)
time.Sleep(time.Second)

// We initialize a wallet with a local keymanager.
cliCtx := setupWalletCtx(t, &testWalletConfig{
// Wallet configuration flags.
walletDir: walletDir,
keymanagerKind: keymanager.Local,
walletPasswordFile: passwordFilePath,
accountPasswordFile: passwordFilePath,
// Flag required for ImportAccounts to work.
keysDir: keysDir,
// Flag required for ExitAccounts to work.
voluntaryExitPublicKeys: keystore.Pubkey,
})
_, err := accounts.CreateWalletWithKeymanager(cliCtx.Context, &accounts.CreateWalletConfig{
WalletCfg: &wallet.Config{
WalletDir: walletDir,
KeymanagerKind: keymanager.Local,
WalletPassword: password,
},
})
require.NoError(t, err)
require.NoError(t, accounts.ImportAccountsCli(cliCtx))

_, keymanager, err := walletWithKeymanager(cliCtx)
require.NoError(t, err)
require.NotNil(t, keymanager)

validatingPublicKeys, err := keymanager.FetchValidatingPublicKeys(cliCtx.Context)
require.NoError(t, err)
require.NotNil(t, validatingPublicKeys)

// Prepare user input for final confirmation step
var stdin bytes.Buffer
stdin.Write([]byte(accounts.ExitPassphrase))
rawPubKeys, formattedPubKeys, err := accounts.FilterExitAccountsFromUserInput(
cliCtx, &stdin, validatingPublicKeys,
)
require.NoError(t, err)
require.NotNil(t, rawPubKeys)
require.NotNil(t, formattedPubKeys)

cfg := accounts.PerformExitCfg{
ValidatorClient: mockValidatorClient,
NodeClient: mockNodeClient,
Keymanager: keymanager,
RawPubKeys: rawPubKeys,
FormattedPubKeys: formattedPubKeys,
}
rawExitedKeys, formattedExitedKeys, err := accounts.PerformVoluntaryExit(cliCtx.Context, cfg)
require.NoError(t, err)
require.Equal(t, 1, len(rawExitedKeys))
assert.DeepEqual(t, rawPubKeys[0], rawExitedKeys[0])
require.Equal(t, 1, len(formattedExitedKeys))
assert.Equal(t, "0x"+keystore.Pubkey[:12], formattedExitedKeys[0])
}

func TestExitAccountsCli_OK_AllPublicKeys(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
mockValidatorClient := mock2.NewMockBeaconNodeValidatorClient(ctrl)
mockNodeClient := mock2.NewMockNodeClient(ctrl)

mockValidatorClient.EXPECT().
ValidatorIndex(gomock.Any(), gomock.Any()).
Return(&ethpb.ValidatorIndexResponse{Index: 0}, nil)

mockValidatorClient.EXPECT().
ValidatorIndex(gomock.Any(), gomock.Any()).
Return(&ethpb.ValidatorIndexResponse{Index: 1}, nil)

// Any time in the past will suffice
genesisTime := &timestamppb.Timestamp{
Seconds: time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC).Unix(),
}

mockNodeClient.EXPECT().
GetGenesis(gomock.Any(), gomock.Any()).
Times(2).
Return(&ethpb.Genesis{GenesisTime: genesisTime}, nil)

mockValidatorClient.EXPECT().
DomainData(gomock.Any(), gomock.Any()).
Times(2).
Return(&ethpb.DomainResponse{SignatureDomain: make([]byte, 32)}, nil)

mockValidatorClient.EXPECT().
ProposeExit(gomock.Any(), gomock.AssignableToTypeOf(&ethpb.SignedVoluntaryExit{})).
Times(2).
Return(&ethpb.ProposeExitResponse{}, nil)

walletDir, _, passwordFilePath := setupWalletAndPasswordsDir(t)
// Write a directory where we will import keys from.
keysDir := filepath.Join(t.TempDir(), "keysDir")
require.NoError(t, os.MkdirAll(keysDir, os.ModePerm))

// Create keystore file in the keys directory we can then import from in our wallet.
keystore1, _ := createKeystore(t, keysDir)
time.Sleep(time.Second)
keystore2, _ := createKeystore(t, keysDir)
time.Sleep(time.Second)

// We initialize a wallet with a local keymanager.
cliCtx := setupWalletCtx(t, &testWalletConfig{
// Wallet configuration flags.
walletDir: walletDir,
keymanagerKind: keymanager.Local,
walletPasswordFile: passwordFilePath,
accountPasswordFile: passwordFilePath,
// Flag required for ImportAccounts to work.
keysDir: keysDir,
// Exit all public keys.
exitAll: true,
})
_, err := accounts.CreateWalletWithKeymanager(cliCtx.Context, &accounts.CreateWalletConfig{
WalletCfg: &wallet.Config{
WalletDir: walletDir,
KeymanagerKind: keymanager.Local,
WalletPassword: password,
},
})
require.NoError(t, err)
require.NoError(t, accounts.ImportAccountsCli(cliCtx))

_, keymanager, err := walletWithKeymanager(cliCtx)
require.NoError(t, err)
require.NotNil(t, keymanager)

validatingPublicKeys, err := keymanager.FetchValidatingPublicKeys(cliCtx.Context)
require.NoError(t, err)
require.NotNil(t, validatingPublicKeys)

// Prepare user input for final confirmation step
var stdin bytes.Buffer
stdin.Write([]byte(accounts.ExitPassphrase))
rawPubKeys, formattedPubKeys, err := accounts.FilterExitAccountsFromUserInput(
cliCtx, &stdin, validatingPublicKeys,
)
require.NoError(t, err)
require.NotNil(t, rawPubKeys)
require.NotNil(t, formattedPubKeys)

cfg := accounts.PerformExitCfg{
ValidatorClient: mockValidatorClient,
NodeClient: mockNodeClient,
Keymanager: keymanager,
RawPubKeys: rawPubKeys,
FormattedPubKeys: formattedPubKeys,
}
rawExitedKeys, formattedExitedKeys, err := accounts.PerformVoluntaryExit(cliCtx.Context, cfg)
require.NoError(t, err)
require.Equal(t, 2, len(rawExitedKeys))
assert.DeepEqual(t, rawPubKeys, rawExitedKeys)
require.Equal(t, 2, len(formattedExitedKeys))
wantedFormatted := []string{
"0x" + keystore1.Pubkey[:12],
"0x" + keystore2.Pubkey[:12],
}
sort.Strings(wantedFormatted)
sort.Strings(formattedExitedKeys)
require.DeepEqual(t, wantedFormatted, formattedExitedKeys)
}
@@ -24,6 +24,7 @@ var Commands = &cli.Command{
features.Mainnet,
features.PraterTestnet,
features.RopstenTestnet,
features.SepoliaTestnet,
cmd.AcceptTosFlag,
}),
Before: func(cliCtx *cli.Context) error {

@@ -51,6 +52,7 @@ var Commands = &cli.Command{
features.Mainnet,
features.PraterTestnet,
features.RopstenTestnet,
features.SepoliaTestnet,
cmd.AcceptTosFlag,
}),
Before: func(cliCtx *cli.Context) error {

@@ -36,6 +36,7 @@ var Commands = &cli.Command{
features.Mainnet,
features.PraterTestnet,
features.RopstenTestnet,
features.SepoliaTestnet,
cmd.AcceptTosFlag,
}),
Before: func(cliCtx *cli.Context) error {

@@ -68,6 +69,7 @@ var Commands = &cli.Command{
features.Mainnet,
features.PraterTestnet,
features.RopstenTestnet,
features.SepoliaTestnet,
cmd.AcceptTosFlag,
}),
Before: func(cliCtx *cli.Context) error {

@@ -99,6 +101,7 @@ var Commands = &cli.Command{
features.Mainnet,
features.PraterTestnet,
features.RopstenTestnet,
features.SepoliaTestnet,
cmd.AcceptTosFlag,
}),
Before: func(cliCtx *cli.Context) error {
@@ -60,9 +60,6 @@ type Flags struct {
// EnableSlashingProtectionPruning for the validator client.
EnableSlashingProtectionPruning bool

// Bug fixes related flags.
CorrectlyPruneCanonicalAtts bool

EnableNativeState bool // EnableNativeState defines whether the beacon state will be represented as a pure Go struct or a Go struct that wraps a proto struct.
EnableVectorizedHTR bool // EnableVectorizedHTR specifies whether the beacon state will use the optimized sha256 routines.
EnableForkChoiceDoublyLinkedTree bool // EnableForkChoiceDoublyLinkedTree specifies whether fork choice store will use a doubly linked tree.

@@ -123,13 +120,15 @@ func configureTestnet(ctx *cli.Context) error {
if err := params.SetActive(params.RopstenConfig().Copy()); err != nil {
return err
}
if err := ctx.Set(enableVecHTR.Names()[0], "true"); err != nil {
log.WithError(err).Debug("error enabling vectorized HTR flag")
}
if err := ctx.Set(enableForkChoiceDoublyLinkedTree.Names()[0], "true"); err != nil {
log.WithError(err).Debug("error enabling doubly linked tree forkchoice flag")
}
applyRopstenFeatureFlags(ctx)
params.UseRopstenNetworkConfig()
} else if ctx.Bool(SepoliaTestnet.Name) {
log.Warn("Running on the Sepolia Beacon Chain Testnet")
if err := params.SetActive(params.SepoliaConfig().Copy()); err != nil {
return err
}
applySepoliaFeatureFlags(ctx)
params.UseSepoliaNetworkConfig()
} else {
if ctx.IsSet(cmd.ChainConfigFileFlag.Name) {
log.Warn("Running on custom Ethereum network specified in a chain configuration yaml file")

@@ -143,6 +142,29 @@ func configureTestnet(ctx *cli.Context) error {
return nil
}

// Insert feature flags within the function to be enabled for Ropsten testnet.
func applyRopstenFeatureFlags(ctx *cli.Context) {
if err := ctx.Set(enableVecHTR.Names()[0], "true"); err != nil {
log.WithError(err).Debug("error enabling vectorized HTR flag")
}
if err := ctx.Set(enableForkChoiceDoublyLinkedTree.Names()[0], "true"); err != nil {
log.WithError(err).Debug("error enabling doubly linked tree forkchoice flag")
}
}

// Insert feature flags within the function to be enabled for Sepolia testnet.
func applySepoliaFeatureFlags(ctx *cli.Context) {
if err := ctx.Set(enableVecHTR.Names()[0], "true"); err != nil {
log.WithError(err).Debug("error enabling vectorized HTR flag")
}
if err := ctx.Set(enableForkChoiceDoublyLinkedTree.Names()[0], "true"); err != nil {
log.WithError(err).Debug("error enabling doubly linked tree forkchoice flag")
}
if err := ctx.Set(enableNativeState.Names()[0], "true"); err != nil {
log.WithError(err).Debug("error enabling native state flag")
}
}

// ConfigureBeaconChain sets the global config based
// on what flags are enabled for the beacon-chain client.
func ConfigureBeaconChain(ctx *cli.Context) error {

@@ -187,11 +209,6 @@ func ConfigureBeaconChain(ctx *cli.Context) error {
log.WithField(enableHistoricalSpaceRepresentation.Name, enableHistoricalSpaceRepresentation.Usage).Warn(enabledFeatureFlag)
cfg.EnableHistoricalSpaceRepresentation = true
}
cfg.CorrectlyPruneCanonicalAtts = true
if ctx.Bool(disableCorrectlyPruneCanonicalAtts.Name) {
logDisabled(disableCorrectlyPruneCanonicalAtts)
cfg.CorrectlyPruneCanonicalAtts = false
}
cfg.EnableNativeState = false
if ctx.Bool(enableNativeState.Name) {
logEnabled(enableNativeState)
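As a quick sanity check on the new helpers, here is a sketch of how applySepoliaFeatureFlags could be exercised in isolation. The scaffolding below is an assumption for illustration, not part of this diff: it presumes in-package access (the flags and helpers appear to live in the same features package) and builds a minimal flag set by hand.

// Sketch only: assumes in-package access to enableNativeState and
// applySepoliaFeatureFlags; the flag.FlagSet wiring is illustrative.
package features

import (
	"flag"
	"testing"

	"github.com/urfave/cli/v2"
)

func TestApplySepoliaFeatureFlags_Sketch(t *testing.T) {
	set := flag.NewFlagSet("test", 0)
	// Register only the flag we assert on; setting unregistered flags inside
	// the helper fails quietly and is only logged at debug level.
	set.Bool(enableNativeState.Name, false, "")
	ctx := cli.NewContext(&cli.App{}, set, nil)

	applySepoliaFeatureFlags(ctx)

	if !ctx.Bool(enableNativeState.Name) {
		t.Fatal("expected enable-native-state to be set for the Sepolia testnet")
	}
}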
@@ -105,6 +105,11 @@ var (
Usage: deprecatedUsage,
Hidden: true,
}
deprecatedDisableCorrectlyPruneCanonicalAtts = &cli.BoolFlag{
Name: "disable-correctly-prune-canonical-atts",
Usage: deprecatedUsage,
Hidden: true,
}
deprecatedDisableCorrectlyInsertOrphanedAtts = &cli.BoolFlag{
Name: "disable-correctly-insert-orphaned-atts",
Usage: deprecatedUsage,

@@ -132,4 +137,5 @@ var deprecatedFlags = []cli.Flag{
deprecatedDisableBalanceTrieComputation,
deprecatedDisableBatchGossipVerification,
deprecatedDisableCorrectlyInsertOrphanedAtts,
deprecatedDisableCorrectlyPruneCanonicalAtts,
}
@@ -17,6 +17,11 @@ var (
Name: "ropsten",
Usage: "Run Prysm configured for the Ropsten beacon chain test network",
}
// SepoliaTestnet flag for the multiclient Ethereum consensus testnet.
SepoliaTestnet = &cli.BoolFlag{
Name: "sepolia",
Usage: "Run Prysm configured for the Sepolia beacon chain test network",
}
// Mainnet flag for easier tooling, no-op
Mainnet = &cli.BoolFlag{
Value: true,

@@ -96,11 +101,6 @@ var (
" (Warning): Once enabled, this feature migrates your database in to a new schema and " +
"there is no going back. At worst, your entire database might get corrupted.",
}
disableCorrectlyPruneCanonicalAtts = &cli.BoolFlag{
Name: "disable-correctly-prune-canonical-atts",
Usage: "Disable the fix for bug where any block attestations can get incorrectly pruned, which improves validator profitability and overall network health," +
"see issue #9443 for further detail",
}
enableNativeState = &cli.BoolFlag{
Name: "enable-native-state",
Usage: "Enables representing the beacon state as a pure Go struct.",

@@ -134,6 +134,7 @@ var ValidatorFlags = append(deprecatedFlags, []cli.Flag{
disableAttestingHistoryDBCache,
PraterTestnet,
RopstenTestnet,
SepoliaTestnet,
Mainnet,
dynamicKeyReloadDebounceInterval,
attestTimely,

@@ -153,6 +154,7 @@ var BeaconChainFlags = append(deprecatedFlags, []cli.Flag{
disableGRPCConnectionLogging,
PraterTestnet,
RopstenTestnet,
SepoliaTestnet,
Mainnet,
enablePeerScorer,
enableLargerGossipHistory,

@@ -160,7 +162,6 @@ var BeaconChainFlags = append(deprecatedFlags, []cli.Flag{
disableBroadcastSlashingFlag,
enableSlasherFlag,
enableHistoricalSpaceRepresentation,
disableCorrectlyPruneCanonicalAtts,
enableNativeState,
enableVecHTR,
enableForkChoiceDoublyLinkedTree,
@@ -15,6 +15,7 @@ go_test(
srcs = [
"common_test.go",
"mainnet_test.go",
"minimal_test.go",
],
deps = [
":go_default_library",
@@ -1,5 +1,4 @@
//go:build !minimal
// +build !minimal

package field_params

@@ -1,5 +1,4 @@
//go:build !minimal
// +build !minimal

package field_params_test

@@ -1,5 +1,4 @@
//go:build minimal
// +build minimal

package field_params

@@ -1,5 +1,4 @@
//go:build minimal
// +build minimal

package field_params_test

@@ -17,6 +17,7 @@ go_library(
"testnet_e2e_config.go",
"testnet_prater_config.go",
"testnet_ropsten_config.go",
"testnet_sepolia_config.go",
"testutils.go",
"values.go",
],

@@ -1,5 +1,4 @@
//go:build !develop
// +build !develop

package params_test

@@ -1,5 +1,4 @@
//go:build develop
// +build develop

package params
Some files were not shown because too many files have changed in this diff