Compare commits

...

32 Commits

Author SHA1 Message Date
Raul Jordan
3043d4722f Attestation Dynamic Committee Subnets (#5123)
* initiate cache
* imports fix
* add in feature config flag
* utilize a dynamic set of subnets
* Merge branch 'master' into att-subnets
* add in feature config flag
* Merge branch 'att-subnets' of github.com:prysmaticlabs/prysm into att-subnets
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into att-subnets
* shift
* more changes
* gaz
* Update beacon-chain/rpc/validator/assignments.go
* Update beacon-chain/rpc/validator/assignments.go
* add flag
* Merge branch 'att-subnets' of https://github.com/prysmaticlabs/geth-sharding into att-subnets
* Merge branch 'master' into att-subnets
* Merge refs/heads/master into att-subnets
* no double flag
* Merge branch 'att-subnets' of github.com:prysmaticlabs/prysm into att-subnets
* amend committee ids to better name
* gaz
2020-03-18 23:13:37 +00:00
Ivan Martinez
c96c8b4aa3 Minor slasher fixes (#5129)
* Minor fixes

* Change log
2020-03-18 14:49:20 -05:00
Nishant Das
9f46000dba change to batch size (#5128)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-03-18 17:57:20 +08:00
Nishant Das
5450b3155e Integrate Field Tries into Current State (#5082)
* add new methods
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into improvedHTRArrays
* new field trie
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into improvedHTRArrays
* perform better copying
* fix bug
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into improvedHTRArrays
* add support for variable length arrays
* get it running
* save all new progress
* more fixes
* more fixes
* more cleanup
* some more clean up
* new memory pool
* remove lock
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into improvedHTRArrays
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into improvedHTRArrays
* Merge branch 'master' of https://github.com/prysmaticlabs/geth-sharding into improvedHTRArrays
* use wrapper
* remove redundant methods
* cleanup
* cleanup
* remove unused method
* change field
* Update beacon-chain/state/types.go

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>
* Update beacon-chain/state/types.go

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>
* Apply suggestions from code review

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>
2020-03-18 04:52:08 +00:00
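For readers unfamiliar with the idea behind the field-trie commits (#5082 above and #5118 below): the goal is to avoid re-hashing an entire state field when only a few elements change, by caching the field's Merkle leaves, tracking which ones are dirty, and reusing the cached root otherwise. The sketch below is a generic illustration of that caching idea only; it is not Prysm's actual field trie, and the naive hashAll stands in for a real per-branch Merkle recompute.

package main

import (
    "crypto/sha256"
    "fmt"
)

// fieldTrie is a toy stand-in for a field trie: it caches leaf hashes of one
// state field, tracks dirty indices, and reuses the cached root when nothing
// changed. A real implementation would re-hash only the dirty branches.
type fieldTrie struct {
    leaves [][32]byte
    dirty  map[int]bool
    root   [32]byte
}

func newFieldTrie(n int) *fieldTrie {
    t := &fieldTrie{leaves: make([][32]byte, n), dirty: map[int]bool{}}
    t.root = t.hashAll()
    return t
}

// hashAll computes a naive root over all leaves (illustrative only).
func (t *fieldTrie) hashAll() [32]byte {
    h := sha256.New()
    for _, l := range t.leaves {
        h.Write(l[:])
    }
    var r [32]byte
    copy(r[:], h.Sum(nil))
    return r
}

// setLeaf marks an index dirty only when the value actually changes.
func (t *fieldTrie) setLeaf(i int, leaf [32]byte) {
    if t.leaves[i] == leaf {
        return
    }
    t.leaves[i] = leaf
    t.dirty[i] = true
}

// rootHash returns the cached root when no leaf changed since the last call.
func (t *fieldTrie) rootHash() [32]byte {
    if len(t.dirty) == 0 {
        return t.root
    }
    t.root = t.hashAll()
    t.dirty = map[int]bool{}
    return t.root
}

func main() {
    t := newFieldTrie(4)
    t.setLeaf(2, sha256.Sum256([]byte("validator 2 updated")))
    fmt.Printf("root: %x\n", t.rootHash())
}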
Nishant Das
1bb12c3568 Add Field Trie to State (#5118)
* add new helpers

* make zerohash public

* remove unused method

* add more tests

* cleanup

* add in new tests

* fix all tests

* Apply suggestions from code review

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-03-18 10:09:31 +08:00
Ivan Martinez
1be8b3aa5e Slasher lag fix (#5124)
* Slasher fixes

* fix
2020-03-17 16:53:08 -05:00
Nishant Das
431762164e Add New State Utils (#5117)
* add new helpers

* make zerohash public

* remove unused method

* add more tests

* cleanup

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-03-17 14:25:17 -05:00
Victor Farazdagi
3ec2a0f9e0 Refactoring of initial sync (#5096)
* implements blocks queue

* refactors updateCounter method

* fixes deadlock on stop w/o start

* refactors updateSchedulerState

* more tests on scheduler

* parseFetchResponse tests

* wraps up tests for blocks queue

* eod commit

* fixes data race in round robin

* revamps fetcher

* fixes race conditions + livelocks + deadlocks

* less verbose output

* fixes data race, by isolating critical sections

* minor refactoring: resolves blocking calls

* implements init-sync queue

* update fetch/send buffers in blocks fetcher

* blockState enum-like type alias

* refactors common code into releaseTicket()

* better gc

* linter

* minor fix to round robin

* moves original round robin into its own package

* adds enableInitSyncQueue flag

* fixes issue with init-sync service selection

* Update beacon-chain/sync/initial-sync/round_robin.go

Co-Authored-By: terence tsao <terence@prysmaticlabs.com>

* initsyncv1 -> initsyncold

* adds span

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2020-03-17 12:27:18 -05:00
Victor Farazdagi
e96b45b29c asserts non-nil state (#5115) 2020-03-17 07:58:16 -07:00
terence tsao
e529f5b1d6 Part 1 of integrating new state mgmt to run time (#5108) 2020-03-16 12:07:07 -07:00
Victor Farazdagi
f18bada8c9 Init sync blocks queue (#5064)
* fixes data race, by isolating critical sections

* minor refactoring: resolves blocking calls

* implements init-sync queue

* update fetch/send buffers in blocks fetcher

* blockState enum-like type alias

* refactors common code into releaseTicket()

* better gc

* linter

* Update beacon-chain/sync/initial-sync/blocks_queue.go

Co-Authored-By: shayzluf <thezluf@gmail.com>

* Update beacon-chain/sync/initial-sync/blocks_queue_test.go

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: shayzluf <thezluf@gmail.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-03-16 18:21:36 +03:00
terence tsao
5657535c52 Fixed saveNewValidators error log (#5109) 2020-03-15 16:21:56 -07:00
Preston Van Loon
9da9fbdfba Fix reward and penalty zero epoch bug (#5107)
* Fix reward and penalty bug https://github.com/prysmaticlabs/prysm/issues/5105
* Merge branch 'master' into fuzz-fix-attestationDelta
2020-03-15 19:14:52 +00:00
Ivan Martinez
de2ec8e575 Update README for Slasher (#5106)
* Add readme
2020-03-15 18:46:21 +00:00
terence tsao
3660732f44 Resume new state mgmt (#5102) 2020-03-15 09:47:49 -07:00
Jim McDonald
8e6c16d416 Tweak validator logging (#5103)
* Tidy up logging
2020-03-15 15:46:22 +00:00
Ivan Martinez
8143cc36bc Add Slasher to E2E (#5061)
* Start adding "inject slashing into pool"

* Attempt at slashing

* Remove unneeded

* Fix

* Begin adding slasher client to e2e

* Start slasher in e2e

* Get slashing detection working

* Get slashing evaluators working

* Progress on e2e

* Cleanup e2e

* Fix slasher e2e!

* lint

* Comment

* Fixes

* Improve accuracy of balance check

* REmove extra

* Remove extra

* Make more accurate
2020-03-15 01:09:23 -04:00
terence tsao
eeffa4fb30 New state getter (#5101)
* getter.go
* getter_test.go
* fixed a cold bug
* fmt gaz
* All tests pass
* Merge branch 'master' into new-state-getter
* Merge refs/heads/master into new-state-getter
2020-03-14 18:39:23 +00:00
Victor Farazdagi
1137403e4b Init sync pre queue (#5098)
* fixes data race, by isolating critical sections

* minor refactoring: resolves blocking calls

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-03-14 13:21:07 -05:00
terence tsao
f17818b1c0 New state setter (#5100)
* setter.go
* tests
* fmt
* Merge branch 'master' of https://github.com/prysmaticlabs/prysm into new-state-setter
* Merge refs/heads/master into new-state-setter
2020-03-14 16:31:21 +00:00
Nishant Das
691f0bba70 Minor Improvements (#5099)
* add fixes
2020-03-14 16:12:22 +00:00
terence tsao
b024191887 Get cold intermediate state with slot (#5097)
* loadColdIntermediateStateWithSlot

* Starting test

* Tests
2020-03-14 10:34:37 -05:00
Raul Jordan
1f87cb11fc Use Current Time Slot for Fetching Committees in RPC (#5094)
* use genesis time fetcher
* Merge branch 'master' into use-time-fetcher
* fix breaking
* Merge branch 'use-time-fetcher' of github.com:prysmaticlabs/prysm into use-time-fetcher
* list beacon committees tests fixed
* Merge branch 'master' into use-time-fetcher
* Merge branch 'master' into use-time-fetcher
* Merge refs/heads/master into use-time-fetcher
* Update beacon-chain/rpc/beacon/committees_test.go
2020-03-14 03:32:51 +00:00
Preston Van Loon
a0b142a26c Update to go 1.14 (#4947)
* Update to go 1.14
* Update with fix from https://github.com/bazelbuild/rules_go/pull/2388
* Merge branch 'master' into go-1.14
* Merge refs/heads/master into go-1.14
* Merge branch 'master' of github.com:prysmaticlabs/prysm into go-1.14
* Update gRPC
* Merge branch 'go-1.14' of github.com:prysmaticlabs/prysm into go-1.14
* Update golang.org/x/crypto
* Merge refs/heads/master into go-1.14
* Committing gc_goopts for issue repro
* Fix race and msan builds
* Merge branch 'master' of github.com:prysmaticlabs/prysm into go-1.14
* Merge refs/heads/master into go-1.14
* switch to LRU
* Merge branch 'go-1.14' of github.com:prysmaticlabs/prysm into go-1.14
* Fixed, but don't feel good about this
* Switch append ordering
2020-03-14 00:12:52 +00:00
shayzluf
035eaffd9d handle slashing from p2p (#5047)
* handle slashing from p2p

* gaz

* remove unneeded check

* add tests

* gaz  goimports

* text update

* Apply suggestions from code review

* add proto.equal

* fix test

* add context to call

* fix state bug found by terence

* fix tests add error type handling

* nil checks

* nil head state check

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-03-13 16:47:27 -05:00
Ivan Martinez
c41244ad34 Make spanner tests more thorough, fixes (#5093)
* Fix tests for spanner

* Start change to indexed atts

* Improve tests for spanner

* Fix tests

* Remove extra
2020-03-13 14:04:22 -04:00
terence tsao
c20d9ccbb3 Better attestation pool with map instead of expiration cache (#5087)
* update aggregated

* update block att

* update forkchoice att

* update unaggregated att

* gazelle

* Use copy

* Locks

* Genesis time

* Fixed tests

* Comments

* Fixed tests
2020-03-13 12:35:28 -05:00
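The commit above, "Better attestation pool with map instead of expiration cache (#5087)", replaces a TTL-style expiration cache with a plain map guarded by locks, with callers copying entries out and pruning explicitly (the bullets mention locks, copying, and genesis time). The following is a minimal sketch of that data-structure choice with illustrative types; it is not Prysm's actual pool API.

package pool

import "sync"

// Attestation is a stand-in for ethpb.Attestation; illustrative only.
type Attestation struct {
    Slot uint64
    Data []byte
}

// Pool keeps aggregated attestations in a plain map keyed by a 32-byte root,
// guarded by a RWMutex, instead of relying on an expiration cache to evict
// entries. Callers prune explicitly (for example by slot) when they choose to.
type Pool struct {
    lock       sync.RWMutex
    aggregated map[[32]byte]*Attestation
}

func NewPool() *Pool {
    return &Pool{aggregated: make(map[[32]byte]*Attestation)}
}

// Save stores (or overwrites) an attestation under its root.
func (p *Pool) Save(root [32]byte, att *Attestation) {
    p.lock.Lock()
    defer p.lock.Unlock()
    p.aggregated[root] = att
}

// Aggregated returns shallow copies of the stored attestation structs so
// callers cannot mutate the pool's contents in place.
func (p *Pool) Aggregated() []*Attestation {
    p.lock.RLock()
    defer p.lock.RUnlock()
    out := make([]*Attestation, 0, len(p.aggregated))
    for _, a := range p.aggregated {
        c := *a
        out = append(out, &c)
    }
    return out
}

// PruneBefore drops attestations older than the given slot.
func (p *Pool) PruneBefore(slot uint64) {
    p.lock.Lock()
    defer p.lock.Unlock()
    for r, a := range p.aggregated {
        if a.Slot < slot {
            delete(p.aggregated, r)
        }
    }
}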
Ivan Martinez
3380d14475 Include ejected indices in ActiveSetChanges endpoint (#5066)
* Fix ActiveSetChanges

* Include ejected indices in ActiveSetChanges RPC

* Fix test fails

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-03-13 12:23:19 -04:00
shayzluf
4f031d1988 fix slasher rpc disconnect on error (#5092) 2020-03-13 10:59:14 -05:00
Jim McDonald
02afb53ea4 Remove spurious error messages in wallet keymanager (#5090)
* Handle multiple passphrases

* Add tests
2020-03-13 05:26:10 -07:00
terence tsao
0974c02a00 Load cold state by root (#5086) 2020-03-12 15:27:55 -07:00
Raul Jordan
c6acf0a28c Use Target Epoch to Determine Indexed Attestations for Slasher (#5085)
* no more head fetcher

* no need for head fetcher

* nil checks
2020-03-12 17:02:12 -05:00
151 changed files with 7346 additions and 1572 deletions

View File

@@ -52,10 +52,10 @@ http_archive(
http_archive(
name = "bazel_gazelle",
sha256 = "86c6d481b3f7aedc1d60c1c211c6f76da282ae197c3b3160f54bd3a8f847896f",
sha256 = "d8c45ee70ec39a57e7a05e5027c32b1576cc7f16d9dd37135b0eddde45cf1b10",
urls = [
"https://storage.googleapis.com/bazel-mirror/github.com/bazelbuild/bazel-gazelle/releases/download/v0.19.1/bazel-gazelle-v0.19.1.tar.gz",
"https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.19.1/bazel-gazelle-v0.19.1.tar.gz",
"https://storage.googleapis.com/bazel-mirror/github.com/bazelbuild/bazel-gazelle/releases/download/v0.20.0/bazel-gazelle-v0.20.0.tar.gz",
"https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.20.0/bazel-gazelle-v0.20.0.tar.gz",
],
)
@@ -75,10 +75,10 @@ http_archive(
http_archive(
name = "io_bazel_rules_go",
sha256 = "e88471aea3a3a4f19ec1310a55ba94772d087e9ce46e41ae38ecebe17935de7b",
sha256 = "e6a6c016b0663e06fa5fccf1cd8152eab8aa8180c583ec20c872f4f9953a7ac5",
urls = [
"https://storage.googleapis.com/bazel-mirror/github.com/bazelbuild/rules_go/releases/download/v0.20.3/rules_go-v0.20.3.tar.gz",
"https://github.com/bazelbuild/rules_go/releases/download/v0.20.3/rules_go-v0.20.3.tar.gz",
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.22.1/rules_go-v0.22.1.tar.gz",
"https://github.com/bazelbuild/rules_go/releases/download/v0.22.1/rules_go-v0.22.1.tar.gz",
],
)
@@ -628,8 +628,9 @@ go_repository(
go_repository(
name = "org_golang_x_crypto",
commit = "4def268fd1a49955bfb3dda92fe3db4f924f2285",
importpath = "golang.org/x/crypto",
sum = "h1:1ZiEyfaQIg3Qh0EoqpwAakHVhecoE5wlSg5GjnafJGw=",
version = "v0.0.0-20200221231518-2aa609cf4a9d",
)
go_repository(
@@ -755,10 +756,12 @@ go_repository(
importpath = "github.com/matttproud/golang_protobuf_extensions",
)
go_repository(
name = "com_github_boltdb_bolt",
commit = "2f1ce7a837dcb8da3ec595b1dac9d0632f0f99e8", # v1.3.1
importpath = "github.com/boltdb/bolt",
http_archive(
name = "com_github_boltdb_bolt", # v1.3.1
build_file = "//third_party:boltdb/bolt.BUILD",
sha256 = "95dc5842dab55f7519b7002bbec648321277b5d6f0ad59aab509ee59313b6386",
strip_prefix = "bolt-2f1ce7a837dcb8da3ec595b1dac9d0632f0f99e8",
urls = ["https://github.com/boltdb/bolt/archive/2f1ce7a837dcb8da3ec595b1dac9d0632f0f99e8.tar.gz"],
)
go_repository(
@@ -1200,8 +1203,9 @@ go_ssz_dependencies()
go_repository(
name = "org_golang_google_grpc",
build_file_proto_mode = "disable",
commit = "1d89a3c832915b2314551c1d2a506874d62e53f7", # v1.22.0
importpath = "google.golang.org/grpc",
sum = "h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk=",
version = "v1.27.1",
)
go_repository(

View File

@@ -115,6 +115,13 @@ go_test(
"service_norace_test.go",
],
embed = [":go_default_library"],
gc_goopts = [
# Go 1.14 enables checkptr by default when building with -race or -msan. There is a pointer
# issue in boltdb, so must disable checkptr at compile time. This flag can be removed once
# the project is migrated to etcd's version of boltdb and the issue has been fixed.
# See: https://github.com/etcd-io/bbolt/issues/187.
"-d=checkptr=0",
],
race = "on",
tags = ["race_on"],
deps = [
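The gc_goopts comment above refers to Go 1.14's checkptr instrumentation, which is enabled automatically under -race and -msan and rejects certain unsafe.Pointer conversions at run time. Below is a minimal sketch of the kind of boltdb-style cast that trips it, using only the standard library (not the actual boltdb code).

package main

import (
    "fmt"
    "unsafe"
)

// reinterpret casts a small byte slice to a pointer to a very large array
// type, a pattern boltdb uses for mmap'd pages. With checkptr enabled this
// conversion is rejected at run time because the array type extends past the
// end of the original allocation; -d=checkptr=0 silences the check.
func reinterpret(buf []byte) *[1 << 30]byte {
    return (*[1 << 30]byte)(unsafe.Pointer(&buf[0]))
}

func main() {
    buf := make([]byte, 8)
    arr := reinterpret(buf) // fails here under -race on Go 1.14 unless checkptr is disabled
    fmt.Println(arr[0])
}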

View File

@@ -186,7 +186,7 @@ func (s *Service) CurrentFork() *pb.Fork {
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
}
}
return s.headState().Fork()
return s.head.state.Fork()
}
// Participation returns the participation stats of a given epoch.

View File

@@ -8,6 +8,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/state"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/params"
"go.opencensus.io/trace"
)
@@ -58,9 +59,15 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
// If the head state is not available, just return nil.
// There's nothing to cache
_, cached := s.initSyncState[headRoot]
if !cached && !s.beaconDB.HasState(ctx, headRoot) {
return nil
if featureconfig.Get().NewStateMgmt {
if !s.stateGen.StateSummaryExists(ctx, headRoot) {
return nil
}
} else {
_, cached := s.initSyncState[headRoot]
if !cached && !s.beaconDB.HasState(ctx, headRoot) {
return nil
}
}
// Get the new head block from DB.
@@ -74,15 +81,19 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
// Get the new head state from cached state or DB.
var newHeadState *state.BeaconState
var exists bool
newHeadState, exists = s.initSyncState[headRoot]
if !exists {
newHeadState, err = s.beaconDB.State(ctx, headRoot)
if featureconfig.Get().NewStateMgmt {
newHeadState, err = s.stateGen.StateByRoot(ctx, headRoot)
if err != nil {
return errors.Wrap(err, "could not retrieve head state in DB")
}
if newHeadState == nil {
return errors.New("cannot save nil head state")
} else {
var exists bool
newHeadState, exists = s.initSyncState[headRoot]
if !exists {
newHeadState, err = s.beaconDB.State(ctx, headRoot)
if err != nil {
return errors.Wrap(err, "could not retrieve head state in DB")
}
}
}
if newHeadState == nil {
@@ -108,19 +119,27 @@ func (s *Service) saveHeadNoDB(ctx context.Context, b *ethpb.SignedBeaconBlock,
return errors.New("cannot save nil head block")
}
headState, err := s.beaconDB.State(ctx, r)
if err != nil {
return errors.Wrap(err, "could not retrieve head state in DB")
}
if headState == nil {
s.initSyncStateLock.RLock()
cachedHeadState, ok := s.initSyncState[r]
if ok {
headState = cachedHeadState
var headState *state.BeaconState
var err error
if featureconfig.Get().NewStateMgmt {
headState, err = s.stateGen.StateByRoot(ctx, r)
if err != nil {
return errors.Wrap(err, "could not retrieve head state in DB")
}
} else {
headState, err = s.beaconDB.State(ctx, r)
if err != nil {
return errors.Wrap(err, "could not retrieve head state in DB")
}
if headState == nil {
s.initSyncStateLock.RLock()
cachedHeadState, ok := s.initSyncState[r]
if ok {
headState = cachedHeadState
}
s.initSyncStateLock.RUnlock()
}
s.initSyncStateLock.RUnlock()
}
if headState == nil {
return errors.New("nil head state")
}

View File

@@ -11,11 +11,12 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
"gopkg.in/d4l3k/messagediff.v1"
messagediff "gopkg.in/d4l3k/messagediff.v1"
)
func TestFilterBoundaryCandidates_FilterCorrect(t *testing.T) {
@@ -209,7 +210,7 @@ func TestPruneNonBoundary_CanPrune(t *testing.T) {
func TestGenerateState_CorrectlyGenerated(t *testing.T) {
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
cfg := &Config{BeaconDB: db}
cfg := &Config{BeaconDB: db, StateGen: stategen.New(db)}
service, err := NewService(context.Background(), cfg)
if err != nil {
t.Fatal(err)

View File

@@ -30,30 +30,40 @@ func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (*sta
if cachedState != nil {
return cachedState, nil
}
if featureconfig.Get().CheckHeadState {
headRoot, err := s.HeadRoot(ctx)
var baseState *stateTrie.BeaconState
if featureconfig.Get().NewStateMgmt {
baseState, err = s.stateGen.StateByRoot(ctx, bytesutil.ToBytes32(c.Root))
if err != nil {
return nil, errors.Wrapf(err, "could not get head root")
return nil, errors.Wrapf(err, "could not get pre state for slot %d", helpers.StartSlot(c.Epoch))
}
if bytes.Equal(headRoot, c.Root) {
st, err := s.HeadState(ctx)
} else {
if featureconfig.Get().CheckHeadState {
headRoot, err := s.HeadRoot(ctx)
if err != nil {
return nil, errors.Wrapf(err, "could not get head state")
return nil, errors.Wrapf(err, "could not get head root")
}
if err := s.checkpointState.AddCheckpointState(&cache.CheckpointState{
Checkpoint: c,
State: st.Copy(),
}); err != nil {
return nil, errors.Wrap(err, "could not saved checkpoint state to cache")
if bytes.Equal(headRoot, c.Root) {
st, err := s.HeadState(ctx)
if err != nil {
return nil, errors.Wrapf(err, "could not get head state")
}
if err := s.checkpointState.AddCheckpointState(&cache.CheckpointState{
Checkpoint: c,
State: st.Copy(),
}); err != nil {
return nil, errors.Wrap(err, "could not saved checkpoint state to cache")
}
return st, nil
}
return st, nil
}
baseState, err = s.beaconDB.State(ctx, bytesutil.ToBytes32(c.Root))
if err != nil {
return nil, errors.Wrapf(err, "could not get pre state for slot %d", helpers.StartSlot(c.Epoch))
}
}
baseState, err := s.beaconDB.State(ctx, bytesutil.ToBytes32(c.Root))
if err != nil {
return nil, errors.Wrapf(err, "could not get pre state for slot %d", helpers.StartSlot(c.Epoch))
}
if baseState == nil {
return nil, fmt.Errorf("pre state of target block %d does not exist", helpers.StartSlot(c.Epoch))
}
@@ -120,10 +130,18 @@ func (s *Service) verifyAttestation(ctx context.Context, baseState *stateTrie.Be
if err == blocks.ErrSigFailedToVerify {
// When sig fails to verify, check if there's a differences in committees due to
// different seeds.
aState, err := s.beaconDB.State(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot))
var aState *stateTrie.BeaconState
var err error
if featureconfig.Get().NewStateMgmt {
aState, err = s.stateGen.StateByRoot(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot))
return nil, err
}
aState, err = s.beaconDB.State(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot))
if err != nil {
return nil, err
}
epoch := helpers.SlotToEpoch(a.Data.Slot)
origSeed, err := helpers.Seed(baseState, epoch, params.BeaconConfig().DomainBeaconAttester)
if err != nil {

View File

@@ -14,6 +14,7 @@ import (
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/attestationutil"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -88,8 +89,14 @@ func (s *Service) onBlock(ctx context.Context, signed *ethpb.SignedBeaconBlock)
return nil, errors.Wrapf(err, "could not insert block %d to fork choice store", b.Slot)
}
if err := s.beaconDB.SaveState(ctx, postState, root); err != nil {
return nil, errors.Wrap(err, "could not save state")
if featureconfig.Get().NewStateMgmt {
if err := s.stateGen.SaveState(ctx, root, postState); err != nil {
return nil, errors.Wrap(err, "could not save state")
}
} else {
if err := s.beaconDB.SaveState(ctx, postState, root); err != nil {
return nil, errors.Wrap(err, "could not save state")
}
}
// Update justified check point.
@@ -105,18 +112,21 @@ func (s *Service) onBlock(ctx context.Context, signed *ethpb.SignedBeaconBlock)
return nil, errors.Wrap(err, "could not save finalized checkpoint")
}
startSlot := helpers.StartSlot(s.prevFinalizedCheckpt.Epoch)
endSlot := helpers.StartSlot(s.finalizedCheckpt.Epoch)
if endSlot > startSlot {
if err := s.rmStatesOlderThanLastFinalized(ctx, startSlot, endSlot); err != nil {
return nil, errors.Wrapf(err, "could not delete states prior to finalized check point, range: %d, %d",
startSlot, endSlot)
if !featureconfig.Get().NewStateMgmt {
startSlot := helpers.StartSlot(s.prevFinalizedCheckpt.Epoch)
endSlot := helpers.StartSlot(s.finalizedCheckpt.Epoch)
if endSlot > startSlot {
if err := s.rmStatesOlderThanLastFinalized(ctx, startSlot, endSlot); err != nil {
return nil, errors.Wrapf(err, "could not delete states prior to finalized check point, range: %d, %d",
startSlot, endSlot)
}
}
}
fRoot := bytesutil.ToBytes32(postState.FinalizedCheckpoint().Root)
// Prune proto array fork choice nodes, all nodes before finalized check point will
// be pruned.
s.forkChoiceStore.Prune(ctx, bytesutil.ToBytes32(postState.FinalizedCheckpoint().Root))
s.forkChoiceStore.Prune(ctx, fRoot)
s.prevFinalizedCheckpt = s.finalizedCheckpt
s.finalizedCheckpt = postState.FinalizedCheckpoint()
@@ -124,11 +134,21 @@ func (s *Service) onBlock(ctx context.Context, signed *ethpb.SignedBeaconBlock)
if err := s.finalizedImpliesNewJustified(ctx, postState); err != nil {
return nil, errors.Wrap(err, "could not save new justified")
}
if featureconfig.Get().NewStateMgmt {
finalizedState, err := s.stateGen.StateByRoot(ctx, fRoot)
if err != nil {
return nil, err
}
if err := s.stateGen.MigrateToCold(ctx, finalizedState, fRoot); err != nil {
return nil, err
}
}
}
// Update validator indices in database as needed.
if err := s.saveNewValidators(ctx, preStateValidatorCount, postState); err != nil {
return nil, errors.Wrap(err, "could not save finalized checkpoint")
return nil, errors.Wrap(err, "could not save new validators")
}
// Epoch boundary bookkeeping such as logging epoch summaries.
@@ -173,16 +193,18 @@ func (s *Service) onBlockInitialSyncStateTransition(ctx context.Context, signed
b := signed.Block
s.initSyncStateLock.Lock()
defer s.initSyncStateLock.Unlock()
// Retrieve incoming block's pre state.
preState, err := s.verifyBlkPreState(ctx, b)
if err != nil {
return err
}
preStateValidatorCount := preState.NumValidators()
// Exit early if the pre state slot is higher than incoming block's slot.
if preState.Slot() >= signed.Block.Slot {
return nil
}
preStateValidatorCount := preState.NumValidators()
postState, err := state.ExecuteStateTransitionNoVerifyAttSigs(ctx, preState, signed)
if err != nil {
return errors.Wrap(err, "could not execute state transition")
@@ -200,8 +222,16 @@ func (s *Service) onBlockInitialSyncStateTransition(ctx context.Context, signed
return errors.Wrapf(err, "could not insert block %d to fork choice store", b.Slot)
}
s.initSyncState[root] = postState.Copy()
s.filterBoundaryCandidates(ctx, root, postState)
if featureconfig.Get().NewStateMgmt {
if err := s.stateGen.SaveState(ctx, root, postState); err != nil {
return errors.Wrap(err, "could not save state")
}
} else {
s.initSyncStateLock.Lock()
defer s.initSyncStateLock.Unlock()
s.initSyncState[root] = postState.Copy()
s.filterBoundaryCandidates(ctx, root, postState)
}
if flags.Get().EnableArchive {
atts := signed.Block.Body.Attestations
@@ -219,17 +249,19 @@ func (s *Service) onBlockInitialSyncStateTransition(ctx context.Context, signed
// Update finalized check point. Prune the block cache and helper caches on every new finalized epoch.
if postState.FinalizedCheckpointEpoch() > s.finalizedCheckpt.Epoch {
startSlot := helpers.StartSlot(s.prevFinalizedCheckpt.Epoch)
endSlot := helpers.StartSlot(s.finalizedCheckpt.Epoch)
if endSlot > startSlot {
if err := s.rmStatesOlderThanLastFinalized(ctx, startSlot, endSlot); err != nil {
return errors.Wrapf(err, "could not delete states prior to finalized check point, range: %d, %d",
startSlot, endSlot)
if !featureconfig.Get().NewStateMgmt {
startSlot := helpers.StartSlot(s.prevFinalizedCheckpt.Epoch)
endSlot := helpers.StartSlot(s.finalizedCheckpt.Epoch)
if endSlot > startSlot {
if err := s.rmStatesOlderThanLastFinalized(ctx, startSlot, endSlot); err != nil {
return errors.Wrapf(err, "could not delete states prior to finalized check point, range: %d, %d",
startSlot, endSlot)
}
}
}
if err := s.saveInitState(ctx, postState); err != nil {
return errors.Wrap(err, "could not save init sync finalized state")
if err := s.saveInitState(ctx, postState); err != nil {
return errors.Wrap(err, "could not save init sync finalized state")
}
}
if err := s.beaconDB.SaveFinalizedCheckpoint(ctx, postState.FinalizedCheckpoint()); err != nil {
@@ -242,21 +274,35 @@ func (s *Service) onBlockInitialSyncStateTransition(ctx context.Context, signed
if err := s.finalizedImpliesNewJustified(ctx, postState); err != nil {
return errors.Wrap(err, "could not save new justified")
}
if featureconfig.Get().NewStateMgmt {
fRoot := bytesutil.ToBytes32(postState.FinalizedCheckpoint().Root)
finalizedState, err := s.stateGen.StateByRoot(ctx, fRoot)
if err != nil {
return errors.Wrap(err, "could not get state by root for migration")
}
if err := s.stateGen.MigrateToCold(ctx, finalizedState, fRoot); err != nil {
return errors.Wrap(err, "could not migrate with new finalized root")
}
}
}
// Update validator indices in database as needed.
if err := s.saveNewValidators(ctx, preStateValidatorCount, postState); err != nil {
return errors.Wrap(err, "could not save finalized checkpoint")
return errors.Wrap(err, "could not save new validators")
}
numOfStates := len(s.boundaryRoots)
if numOfStates > initialSyncCacheSize {
if err = s.persistCachedStates(ctx, numOfStates); err != nil {
return err
if !featureconfig.Get().NewStateMgmt {
numOfStates := len(s.boundaryRoots)
if numOfStates > initialSyncCacheSize {
if err = s.persistCachedStates(ctx, numOfStates); err != nil {
return err
}
}
if len(s.initSyncState) > maxCacheSize {
s.pruneOldNonFinalizedStates()
}
}
if len(s.initSyncState) > maxCacheSize {
s.pruneOldNonFinalizedStates()
}
// Epoch boundary bookkeeping such as logging epoch summaries.
@@ -272,7 +318,7 @@ func (s *Service) onBlockInitialSyncStateTransition(ctx context.Context, signed
return err
}
if helpers.IsEpochStart(postState.Slot()) {
if !featureconfig.Get().NewStateMgmt && helpers.IsEpochStart(postState.Slot()) {
if err := s.beaconDB.SaveState(ctx, postState, root); err != nil {
return errors.Wrap(err, "could not save state")
}

View File

@@ -58,6 +58,20 @@ func (s *Service) getBlockPreState(ctx context.Context, b *ethpb.BeaconBlock) (*
// verifyBlkPreState validates input block has a valid pre-state.
func (s *Service) verifyBlkPreState(ctx context.Context, b *ethpb.BeaconBlock) (*stateTrie.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "chainService.verifyBlkPreState")
defer span.End()
if featureconfig.Get().NewStateMgmt {
preState, err := s.stateGen.StateByRoot(ctx, bytesutil.ToBytes32(b.ParentRoot))
if err != nil {
return nil, errors.Wrapf(err, "could not get pre state for slot %d", b.Slot)
}
if preState == nil {
return nil, errors.Wrapf(err, "nil pre state for slot %d", b.Slot)
}
return preState, nil // No copy needed from newly hydrated state gen object.
}
preState := s.initSyncState[bytesutil.ToBytes32(b.ParentRoot)]
var err error
if preState == nil {
@@ -258,24 +272,26 @@ func (s *Service) updateJustified(ctx context.Context, state *stateTrie.BeaconSt
s.justifiedCheckpt = cpt
}
justifiedRoot := bytesutil.ToBytes32(cpt.Root)
if !featureconfig.Get().NewStateMgmt {
justifiedRoot := bytesutil.ToBytes32(cpt.Root)
justifiedState := s.initSyncState[justifiedRoot]
// If justified state is nil, resume back to normal syncing process and save
// justified check point.
if justifiedState == nil {
if s.beaconDB.HasState(ctx, justifiedRoot) {
return s.beaconDB.SaveJustifiedCheckpoint(ctx, cpt)
justifiedState := s.initSyncState[justifiedRoot]
// If justified state is nil, resume back to normal syncing process and save
// justified check point.
if justifiedState == nil {
if s.beaconDB.HasState(ctx, justifiedRoot) {
return s.beaconDB.SaveJustifiedCheckpoint(ctx, cpt)
}
justifiedState, err = s.generateState(ctx, bytesutil.ToBytes32(s.finalizedCheckpt.Root), justifiedRoot)
if err != nil {
log.Error(err)
return s.beaconDB.SaveJustifiedCheckpoint(ctx, cpt)
}
}
justifiedState, err = s.generateState(ctx, bytesutil.ToBytes32(s.finalizedCheckpt.Root), justifiedRoot)
if err != nil {
log.Error(err)
return s.beaconDB.SaveJustifiedCheckpoint(ctx, cpt)
if err := s.beaconDB.SaveState(ctx, justifiedState, justifiedRoot); err != nil {
return errors.Wrap(err, "could not save justified state")
}
}
if err := s.beaconDB.SaveState(ctx, justifiedState, justifiedRoot); err != nil {
return errors.Wrap(err, "could not save justified state")
}
return s.beaconDB.SaveJustifiedCheckpoint(ctx, cpt)
}

View File

@@ -90,7 +90,13 @@ func (s *Service) processAttestation(subscribedToStateEvents chan struct{}) {
ctx := context.Background()
atts := s.attPool.ForkchoiceAttestations()
for _, a := range atts {
hasState := s.beaconDB.HasState(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot)) && s.beaconDB.HasState(ctx, bytesutil.ToBytes32(a.Data.Target.Root))
var hasState bool
if featureconfig.Get().NewStateMgmt {
hasState = s.stateGen.StateSummaryExists(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot))
} else {
hasState = s.beaconDB.HasState(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot)) && s.beaconDB.HasState(ctx, bytesutil.ToBytes32(a.Data.Target.Root))
}
hasBlock := s.hasBlock(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot))
if !(hasState && hasBlock) {
continue

View File

@@ -33,6 +33,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/params"
@@ -73,6 +74,7 @@ type Service struct {
checkpointState *cache.CheckpointStateCache
checkpointStateLock sync.Mutex
stateGen *stategen.State
opsService *attestations.Service
}
// Config options for the service.
@@ -88,6 +90,8 @@ type Config struct {
MaxRoutines int64
StateNotifier statefeed.Notifier
ForkChoiceStore f.ForkChoicer
OpsService *attestations.Service
StateGen *stategen.State
}
// NewService instantiates a new block service instance that will
@@ -111,7 +115,8 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {
initSyncState: make(map[[32]byte]*stateTrie.BeaconState),
boundaryRoots: [][32]byte{},
checkpointState: cache.NewCheckpointStateCache(),
stateGen: stategen.New(cfg.BeaconDB),
opsService: cfg.OpsService,
stateGen: cfg.StateGen,
}, nil
}
@@ -144,6 +149,7 @@ func (s *Service) Start() {
if beaconState != nil {
log.Info("Blockchain data already exists in DB, initializing...")
s.genesisTime = time.Unix(int64(beaconState.GenesisTime()), 0)
s.opsService.SetGenesisTime(beaconState.GenesisTime())
if err := s.initializeChainInfo(ctx); err != nil {
log.Fatalf("Could not set up chain info: %v", err)
}
@@ -260,6 +266,8 @@ func (s *Service) initializeBeaconChain(
return err
}
s.opsService.SetGenesisTime(genesisState.GenesisTime())
return nil
}
@@ -311,8 +319,21 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState *stateTrie.B
if err := s.beaconDB.SaveBlock(ctx, genesisBlk); err != nil {
return errors.Wrap(err, "could not save genesis block")
}
if err := s.beaconDB.SaveState(ctx, genesisState, genesisBlkRoot); err != nil {
return errors.Wrap(err, "could not save genesis state")
if featureconfig.Get().NewStateMgmt {
if err := s.stateGen.SaveState(ctx, genesisBlkRoot, genesisState); err != nil {
return errors.Wrap(err, "could not save genesis state")
}
if err := s.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{
Slot: 0,
Root: genesisBlkRoot[:],
BoundaryRoot: genesisBlkRoot[:],
}); err != nil {
return err
}
} else {
if err := s.beaconDB.SaveState(ctx, genesisState, genesisBlkRoot); err != nil {
return errors.Wrap(err, "could not save genesis state")
}
}
if err := s.beaconDB.SaveHeadBlockRoot(ctx, genesisBlkRoot); err != nil {
return errors.Wrap(err, "could not save head block root")
@@ -388,11 +409,25 @@ func (s *Service) initializeChainInfo(ctx context.Context) error {
// would be the genesis state and block.
return errors.New("no finalized epoch in the database")
}
finalizedState, err := s.beaconDB.State(ctx, bytesutil.ToBytes32(finalized.Root))
if err != nil {
return errors.Wrap(err, "could not get finalized state from db")
finalizedRoot := bytesutil.ToBytes32(finalized.Root)
var finalizedState *stateTrie.BeaconState
if featureconfig.Get().NewStateMgmt {
finalizedRoot = s.beaconDB.LastArchivedIndexRoot(ctx)
finalizedState, err = s.stateGen.Resume(ctx, finalizedRoot)
if err != nil {
return errors.Wrap(err, "could not get finalized state from db")
}
if finalizedRoot == params.BeaconConfig().ZeroHash {
finalizedRoot = bytesutil.ToBytes32(finalized.Root)
}
} else {
finalizedState, err = s.beaconDB.State(ctx, bytesutil.ToBytes32(finalized.Root))
if err != nil {
return errors.Wrap(err, "could not get finalized state from db")
}
}
finalizedBlock, err := s.beaconDB.Block(ctx, bytesutil.ToBytes32(finalized.Root))
finalizedBlock, err := s.beaconDB.Block(ctx, finalizedRoot)
if err != nil {
return errors.Wrap(err, "could not get finalized block from db")
}
@@ -401,7 +436,7 @@ func (s *Service) initializeChainInfo(ctx context.Context) error {
return errors.New("finalized state and block can't be nil")
}
s.setHead(bytesutil.ToBytes32(finalized.Root), finalizedBlock, finalizedState)
s.setHead(finalizedRoot, finalizedBlock, finalizedState)
return nil
}

View File

@@ -133,6 +133,10 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
t.Fatalf("unable to set up web3 service: %v", err)
}
opsService, err := attestations.NewService(ctx, &attestations.Config{Pool: attestations.NewPool()})
if err != nil {
t.Fatal(err)
}
cfg := &Config{
BeaconBlockBuf: 0,
BeaconDB: beaconDB,
@@ -142,10 +146,12 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
StateNotifier: &mockBeaconNode{},
AttPool: attestations.NewPool(),
ForkChoiceStore: protoarray.New(0, 0, params.BeaconConfig().ZeroHash),
OpsService: opsService,
}
if err != nil {
t.Fatalf("could not register blockchain service: %v", err)
}
chainService, err := NewService(ctx, cfg)
if err != nil {
t.Fatalf("unable to setup chain service: %v", err)

View File

@@ -219,7 +219,7 @@ func (ms *ChainService) GenesisTime() time.Time {
// CurrentSlot mocks the same method in the chain service.
func (ms *ChainService) CurrentSlot() uint64 {
return 0
return ms.HeadSlot()
}
// Participation mocks the same method in the chain service.

View File

@@ -6,6 +6,7 @@ go_library(
"attestation_data.go",
"checkpoint_state.go",
"committee.go",
"committee_ids.go",
"common.go",
"eth1_data.go",
"hot_state_cache.go",

beacon-chain/cache/committee_ids.go (new file, 44 lines)
View File

@@ -0,0 +1,44 @@
package cache
import (
"sync"
lru "github.com/hashicorp/golang-lru"
"github.com/prysmaticlabs/prysm/shared/sliceutil"
)
type committeeIDs struct {
cache *lru.Cache
lock sync.RWMutex
}
// CommitteeIDs for attestations.
var CommitteeIDs = newCommitteeIDs()
func newCommitteeIDs() *committeeIDs {
cache, err := lru.New(8)
if err != nil {
panic(err)
}
return &committeeIDs{cache: cache}
}
// AddIDs to the cache for attestation committees by epoch.
func (t *committeeIDs) AddIDs(indices []uint64, epoch uint64) {
t.lock.Lock()
defer t.lock.Unlock()
val, exists := t.cache.Get(epoch)
if exists {
indices = sliceutil.UnionUint64(append(indices, val.([]uint64)...))
}
t.cache.Add(epoch, indices)
}
// GetIDs from the cache for attestation committees by epoch.
func (t *committeeIDs) GetIDs(epoch uint64) []uint64 {
val, exists := t.cache.Get(epoch)
if !exists {
return []uint64{}
}
return val.([]uint64)
}
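A short usage sketch for the committee ID cache added above, assuming the import path beacon-chain/cache used elsewhere in this changeset; the epoch and index values are made up.

package main

import (
    "fmt"

    "github.com/prysmaticlabs/prysm/beacon-chain/cache"
)

func main() {
    // Record committee indices for epoch 10; repeated calls for the same
    // epoch are unioned together by AddIDs.
    cache.CommitteeIDs.AddIDs([]uint64{1, 5}, 10)
    cache.CommitteeIDs.AddIDs([]uint64{5, 7}, 10)

    // Prints the union of everything added for epoch 10 (order unspecified);
    // an epoch that was never added yields an empty slice.
    fmt.Println(cache.CommitteeIDs.GetIDs(10))
    fmt.Println(cache.CommitteeIDs.GetIDs(99))
}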

View File

@@ -170,7 +170,7 @@ func ProcessSlashings(state *stateTrie.BeaconState) (*stateTrie.BeaconState, err
// a callback is used here to apply the following actions to all validators
// below equally.
err = state.ApplyToEveryValidator(func(idx int, val *ethpb.Validator) error {
err = state.ApplyToEveryValidator(func(idx int, val *ethpb.Validator) (bool, error) {
correctEpoch := (currentEpoch + exitLength/2) == val.WithdrawableEpoch
if val.Slashed && correctEpoch {
minSlashing := mathutil.Min(totalSlashing*3, totalBalance)
@@ -178,10 +178,11 @@ func ProcessSlashings(state *stateTrie.BeaconState) (*stateTrie.BeaconState, err
penaltyNumerator := val.EffectiveBalance / increment * minSlashing
penalty := penaltyNumerator / totalBalance * increment
if err := helpers.DecreaseBalance(state, uint64(idx), penalty); err != nil {
return err
return false, err
}
return true, nil
}
return nil
return false, nil
})
return state, err
}
@@ -235,12 +236,12 @@ func ProcessFinalUpdates(state *stateTrie.BeaconState) (*stateTrie.BeaconState,
bals := state.Balances()
// Update effective balances with hysteresis.
validatorFunc := func(idx int, val *ethpb.Validator) error {
validatorFunc := func(idx int, val *ethpb.Validator) (bool, error) {
if val == nil {
return fmt.Errorf("validator %d is nil in state", idx)
return false, fmt.Errorf("validator %d is nil in state", idx)
}
if idx >= len(bals) {
return fmt.Errorf("validator index exceeds validator length in state %d >= %d", idx, len(state.Balances()))
return false, fmt.Errorf("validator index exceeds validator length in state %d >= %d", idx, len(state.Balances()))
}
balance := bals[idx]
halfInc := params.BeaconConfig().EffectiveBalanceIncrement / 2
@@ -249,8 +250,9 @@ func ProcessFinalUpdates(state *stateTrie.BeaconState) (*stateTrie.BeaconState,
if val.EffectiveBalance > balance-balance%params.BeaconConfig().EffectiveBalanceIncrement {
val.EffectiveBalance = balance - balance%params.BeaconConfig().EffectiveBalanceIncrement
}
return true, nil
}
return nil
return false, nil
}
if err := state.ApplyToEveryValidator(validatorFunc); err != nil {
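The epoch-processing diff above changes the ApplyToEveryValidator callback signature from error to (bool, error); judging by the call sites, the boolean appears to report whether the callback actually mutated the validator, presumably so the state's dirty tracking can skip untouched entries. The following is a self-contained sketch of that contract with stand-in types, not Prysm's real API.

package main

import "fmt"

// Validator is a stand-in for ethpb.Validator.
type Validator struct {
    Slashed          bool
    EffectiveBalance uint64
}

// applyToEveryValidator mimics the new contract: the callback returns
// (changed, err), and only validators reported as changed would need to be
// written back or re-hashed by the caller.
func applyToEveryValidator(vals []*Validator, f func(idx int, val *Validator) (bool, error)) error {
    for i, v := range vals {
        changed, err := f(i, v)
        if err != nil {
            return err
        }
        if changed {
            // A real state object would mark index i dirty here.
            _ = i
        }
    }
    return nil
}

func main() {
    vals := []*Validator{
        {Slashed: true, EffectiveBalance: 32},
        {Slashed: false, EffectiveBalance: 32},
    }
    err := applyToEveryValidator(vals, func(idx int, val *Validator) (bool, error) {
        if !val.Slashed {
            return false, nil // untouched
        }
        val.EffectiveBalance-- // apply a penalty
        return true, nil       // mutated
    })
    fmt.Println(err, vals[0].EffectiveBalance, vals[1].EffectiveBalance) // <nil> 31 32
}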

View File

@@ -72,7 +72,7 @@ func attestationDeltas(state *stateTrie.BeaconState, bp *Balance, vp []*Validato
func attestationDelta(state *stateTrie.BeaconState, bp *Balance, v *Validator) (uint64, uint64) {
eligible := v.IsActivePrevEpoch || (v.IsSlashed && !v.IsWithdrawableCurrentEpoch)
if !eligible {
if !eligible || bp.CurrentEpoch == 0 {
return 0, 0
}
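The one-line guard above (#5107) returns early when bp.CurrentEpoch, the total active balance that the reward math divides by, is zero; the new test in the next file notes that a zero value could otherwise cause a divide-by-zero panic. Below is a tiny illustrative sketch of the guarded division; names and formula are placeholders, not Prysm's attestationDelta.

package epoch

// rewardDelta sketches the #5107 guard: a zero total balance would make the
// division below panic, so return zero instead.
func rewardDelta(baseReward, attestingBalance, totalBalance uint64) uint64 {
    if totalBalance == 0 {
        return 0
    }
    return baseReward * attestingBalance / totalBalance
}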

View File

@@ -146,6 +146,47 @@ func TestAttestationDeltaPrecompute(t *testing.T) {
}
}
func TestAttestationDeltas_ZeroEpoch(t *testing.T) {
e := params.BeaconConfig().SlotsPerEpoch
validatorCount := uint64(2048)
base := buildState(e+2, validatorCount)
atts := make([]*pb.PendingAttestation, 3)
var emptyRoot [32]byte
for i := 0; i < len(atts); i++ {
atts[i] = &pb.PendingAttestation{
Data: &ethpb.AttestationData{
Target: &ethpb.Checkpoint{
Root: emptyRoot[:],
},
Source: &ethpb.Checkpoint{
Root: emptyRoot[:],
},
BeaconBlockRoot: emptyRoot[:],
},
AggregationBits: bitfield.Bitlist{0xC0, 0xC0, 0xC0, 0xC0, 0x01},
InclusionDelay: 1,
}
}
base.PreviousEpochAttestations = atts
state, err := state.InitializeFromProto(base)
if err != nil {
t.Fatal(err)
}
vp, bp := New(context.Background(), state)
vp, bp, err = ProcessAttestations(context.Background(), state, vp, bp)
if err != nil {
t.Fatal(err)
}
bp.CurrentEpoch = 0 // Could cause a divide by zero panic.
_, _, err = attestationDeltas(state, bp, vp)
if err != nil {
t.Fatal(err)
}
}
func buildState(slot uint64, validatorCount uint64) *pb.BeaconState {
validators := make([]*ethpb.Validator, validatorCount)
for i := 0; i < len(validators); i++ {

View File

@@ -21,7 +21,7 @@ func ProcessSlashingsPrecompute(state *stateTrie.BeaconState, p *Balance) error
totalSlashing += slashing
}
validatorFunc := func(idx int, val *ethpb.Validator) error {
validatorFunc := func(idx int, val *ethpb.Validator) (bool, error) {
correctEpoch := (currentEpoch + exitLength/2) == val.WithdrawableEpoch
if val.Slashed && correctEpoch {
minSlashing := mathutil.Min(totalSlashing*3, p.CurrentEpoch)
@@ -29,10 +29,11 @@ func ProcessSlashingsPrecompute(state *stateTrie.BeaconState, p *Balance) error
penaltyNumerator := val.EffectiveBalance / increment * minSlashing
penalty := penaltyNumerator / p.CurrentEpoch * increment
if err := helpers.DecreaseBalance(state, uint64(idx), penalty); err != nil {
return err
return false, err
}
return true, nil
}
return nil
return false, nil
}
return state.ApplyToEveryValidator(validatorFunc)

View File

@@ -166,7 +166,7 @@ func SlashValidator(state *stateTrie.BeaconState, slashedIdx uint64, whistleBlow
return state, nil
}
// ActivatedValidatorIndices determines the indices activated during the current epoch.
// ActivatedValidatorIndices determines the indices activated during the given epoch.
func ActivatedValidatorIndices(epoch uint64, validators []*ethpb.Validator) []uint64 {
activations := make([]uint64, 0)
delayedActivationEpoch := helpers.ActivationExitEpoch(epoch)
@@ -179,7 +179,7 @@ func ActivatedValidatorIndices(epoch uint64, validators []*ethpb.Validator) []ui
return activations
}
// SlashedValidatorIndices determines the indices slashed during the current epoch.
// SlashedValidatorIndices determines the indices slashed during the given epoch.
func SlashedValidatorIndices(epoch uint64, validators []*ethpb.Validator) []uint64 {
slashed := make([]uint64, 0)
for i := 0; i < len(validators); i++ {
@@ -225,9 +225,51 @@ func ExitedValidatorIndices(epoch uint64, validators []*ethpb.Validator, activeV
}
withdrawableEpoch := exitQueueEpoch + params.BeaconConfig().MinValidatorWithdrawabilityDelay
for i, val := range validators {
if val.ExitEpoch == epoch && val.WithdrawableEpoch == withdrawableEpoch {
if val.ExitEpoch == epoch && val.WithdrawableEpoch == withdrawableEpoch &&
val.EffectiveBalance > params.BeaconConfig().EjectionBalance {
exited = append(exited, uint64(i))
}
}
return exited, nil
}
// EjectedValidatorIndices determines the indices ejected during the given epoch.
func EjectedValidatorIndices(epoch uint64, validators []*ethpb.Validator, activeValidatorCount uint64) ([]uint64, error) {
ejected := make([]uint64, 0)
exitEpochs := make([]uint64, 0)
for i := 0; i < len(validators); i++ {
val := validators[i]
if val.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
exitEpochs = append(exitEpochs, val.ExitEpoch)
}
}
exitQueueEpoch := uint64(0)
for _, i := range exitEpochs {
if exitQueueEpoch < i {
exitQueueEpoch = i
}
}
// We use the exit queue churn to determine if we have passed a churn limit.
exitQueueChurn := 0
for _, val := range validators {
if val.ExitEpoch == exitQueueEpoch {
exitQueueChurn++
}
}
churn, err := helpers.ValidatorChurnLimit(activeValidatorCount)
if err != nil {
return nil, errors.Wrap(err, "could not get churn limit")
}
if churn < uint64(exitQueueChurn) {
exitQueueEpoch++
}
withdrawableEpoch := exitQueueEpoch + params.BeaconConfig().MinValidatorWithdrawabilityDelay
for i, val := range validators {
if val.ExitEpoch == epoch && val.WithdrawableEpoch == withdrawableEpoch &&
val.EffectiveBalance <= params.BeaconConfig().EjectionBalance {
ejected = append(ejected, uint64(i))
}
}
return ejected, nil
}
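A hedged usage sketch for the new EjectedValidatorIndices helper, written as an in-package test in the style of TestExitedValidatorIndices in the next file (the package path is not shown in this diff, so the same-package, import-free form is an assumption).

func TestEjectedValidatorIndices_Sketch(t *testing.T) {
    // A validator that exited at epoch 0 with a balance at or below the
    // ejection threshold should be reported as ejected for epoch 0.
    validators := []*ethpb.Validator{
        {
            EffectiveBalance:  params.BeaconConfig().EjectionBalance,
            ExitEpoch:         0,
            WithdrawableEpoch: params.BeaconConfig().MinValidatorWithdrawabilityDelay,
        },
    }
    ejected, err := EjectedValidatorIndices(0, validators, uint64(len(validators)))
    if err != nil {
        t.Fatal(err)
    }
    if len(ejected) != 1 || ejected[0] != 0 {
        t.Errorf("expected validator 0 to be ejected, got %v", ejected)
    }
}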

View File

@@ -344,14 +344,17 @@ func TestExitedValidatorIndices(t *testing.T) {
Slot: helpers.SlotToEpoch(1),
Validators: []*ethpb.Validator{
{
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
ExitEpoch: 0,
WithdrawableEpoch: params.BeaconConfig().MinValidatorWithdrawabilityDelay,
},
{
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
ExitEpoch: 0,
WithdrawableEpoch: 10,
},
{
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
ExitEpoch: 0,
WithdrawableEpoch: params.BeaconConfig().MinValidatorWithdrawabilityDelay,
},
@@ -364,6 +367,7 @@ func TestExitedValidatorIndices(t *testing.T) {
Slot: helpers.SlotToEpoch(1),
Validators: []*ethpb.Validator{
{
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
WithdrawableEpoch: params.BeaconConfig().MinValidatorWithdrawabilityDelay,
},
@@ -376,6 +380,7 @@ func TestExitedValidatorIndices(t *testing.T) {
Slot: helpers.SlotToEpoch(1),
Validators: []*ethpb.Validator{
{
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
ExitEpoch: 0,
WithdrawableEpoch: params.BeaconConfig().MinValidatorWithdrawabilityDelay,
},

View File

@@ -55,6 +55,8 @@ type ReadOnlyDatabase interface {
ArchivedPointState(ctx context.Context, index uint64) (*state.BeaconState, error)
ArchivedPointRoot(ctx context.Context, index uint64) [32]byte
HasArchivedPoint(ctx context.Context, index uint64) bool
LastArchivedIndexRoot(ctx context.Context) [32]byte
LastArchivedIndexState(ctx context.Context) (*state.BeaconState, error)
// Deposit contract related handlers.
DepositContractAddress(ctx context.Context) ([]byte, error)
// Powchain operations.
@@ -104,6 +106,7 @@ type NoHeadAccessDatabase interface {
SaveArchivedValidatorParticipation(ctx context.Context, epoch uint64, part *eth.ValidatorParticipation) error
SaveArchivedPointState(ctx context.Context, state *state.BeaconState, index uint64) error
SaveArchivedPointRoot(ctx context.Context, blockRoot [32]byte, index uint64) error
SaveLastArchivedIndex(ctx context.Context, index uint64) error
// Deposit contract related handlers.
SaveDepositContractAddress(ctx context.Context, addr common.Address) error
// Powchain operations.

View File

@@ -352,3 +352,18 @@ func (e Exporter) ArchivedPointRoot(ctx context.Context, index uint64) [32]byte
func (e Exporter) HasArchivedPoint(ctx context.Context, index uint64) bool {
return e.db.HasArchivedPoint(ctx, index)
}
// LastArchivedIndexRoot -- passthrough
func (e Exporter) LastArchivedIndexRoot(ctx context.Context) [32]byte {
return e.db.LastArchivedIndexRoot(ctx)
}
// LastArchivedIndexState -- passthrough
func (e Exporter) LastArchivedIndexState(ctx context.Context) (*state.BeaconState, error) {
return e.db.LastArchivedIndexState(ctx)
}
// SaveLastArchivedIndex -- passthrough
func (e Exporter) SaveLastArchivedIndex(ctx context.Context, index uint64) error {
return e.db.SaveLastArchivedIndex(ctx, index)
}

View File

@@ -32,6 +32,7 @@ go_library(
"//proto/beacon/db:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//shared/bytesutil:go_default_library",
"//shared/featureconfig:go_default_library",
"//shared/params:go_default_library",
"//shared/sliceutil:go_default_library",
"//shared/traceutil:go_default_library",

View File

@@ -41,6 +41,67 @@ func (k *Store) SaveArchivedPointRoot(ctx context.Context, blockRoot [32]byte, i
})
}
// SaveLastArchivedIndex to the db.
func (k *Store) SaveLastArchivedIndex(ctx context.Context, index uint64) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveHeadBlockRoot")
defer span.End()
return k.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(archivedIndexRootBucket)
return bucket.Put(lastArchivedIndexKey, uint64ToBytes(index))
})
}
// LastArchivedIndexRoot from the db.
func (k *Store) LastArchivedIndexRoot(ctx context.Context) [32]byte {
ctx, span := trace.StartSpan(ctx, "BeaconDB.LastArchivedIndexRoot")
defer span.End()
var blockRoot []byte
// #nosec G104. Always returns nil.
k.db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket(archivedIndexRootBucket)
lastArchivedIndex := bucket.Get(lastArchivedIndexKey)
if lastArchivedIndex == nil {
return nil
}
blockRoot = bucket.Get(lastArchivedIndex)
return nil
})
return bytesutil.ToBytes32(blockRoot)
}
// LastArchivedIndexState from the db.
func (k *Store) LastArchivedIndexState(ctx context.Context) (*state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.LastArchivedIndexState")
defer span.End()
var s *pb.BeaconState
err := k.db.View(func(tx *bolt.Tx) error {
indexRootBucket := tx.Bucket(archivedIndexRootBucket)
lastArchivedIndex := indexRootBucket.Get(lastArchivedIndexKey)
if lastArchivedIndex == nil {
return nil
}
indexStateBucket := tx.Bucket(archivedIndexStateBucket)
enc := indexStateBucket.Get(lastArchivedIndex)
if enc == nil {
return nil
}
var err error
s, err = createState(enc)
return err
})
if err != nil {
return nil, err
}
if s == nil {
return nil, nil
}
return state.InitializeFromProtoUnsafe(s)
}
// ArchivedPointState returns the state of an archived point from the DB.
// This is essential for cold state management and to restore a cold state.
func (k *Store) ArchivedPointState(ctx context.Context, index uint64) (*state.BeaconState, error) {

View File

@@ -5,6 +5,7 @@ import (
"reflect"
"testing"
"github.com/gogo/protobuf/proto"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
)
@@ -90,3 +91,62 @@ func TestArchivedPointIndexHas_CanRetrieve(t *testing.T) {
t.Fatal("Should have an archived point")
}
}
func TestLastArchivedPoint_CanRetrieve(t *testing.T) {
db := setupDB(t)
defer teardownDB(t, db)
ctx := context.Background()
slot1 := uint64(100)
s1 := &pb.BeaconState{Slot: slot1}
st1, err := state.InitializeFromProto(s1)
if err != nil {
t.Fatal(err)
}
if err := db.SaveArchivedPointState(ctx, st1, 1); err != nil {
t.Fatal(err)
}
if err := db.SaveArchivedPointRoot(ctx, [32]byte{'A'}, 1); err != nil {
t.Fatal(err)
}
slot2 := uint64(200)
s2 := &pb.BeaconState{Slot: slot2}
st2, err := state.InitializeFromProto(s2)
if err != nil {
t.Fatal(err)
}
if err := db.SaveArchivedPointState(ctx, st2, 3); err != nil {
t.Fatal(err)
}
if err := db.SaveArchivedPointRoot(ctx, [32]byte{'B'}, 3); err != nil {
t.Fatal(err)
}
if err := db.SaveLastArchivedIndex(ctx, 1); err != nil {
t.Fatal(err)
}
lastSaved, err := db.LastArchivedIndexState(ctx)
if err != nil {
t.Fatal(err)
}
if !proto.Equal(lastSaved.InnerStateUnsafe(), st1.InnerStateUnsafe()) {
t.Error("Did not get wanted saved state")
}
if db.LastArchivedIndexRoot(ctx) != [32]byte{'A'} {
t.Error("Did not get wanted root")
}
if err := db.SaveLastArchivedIndex(ctx, 3); err != nil {
t.Fatal(err)
}
lastSaved, err = db.LastArchivedIndexState(ctx)
if err != nil {
t.Fatal(err)
}
if !proto.Equal(lastSaved.InnerStateUnsafe(), st2.InnerStateUnsafe()) {
t.Error("Did not get wanted saved state")
}
if db.LastArchivedIndexRoot(ctx) != [32]byte{'B'} {
t.Error("Did not get wanted root")
}
}

View File

@@ -13,6 +13,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/db/filters"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/sliceutil"
log "github.com/sirupsen/logrus"
@@ -320,9 +321,16 @@ func (k *Store) SaveHeadBlockRoot(ctx context.Context, blockRoot [32]byte) error
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveHeadBlockRoot")
defer span.End()
return k.db.Update(func(tx *bolt.Tx) error {
if tx.Bucket(stateBucket).Get(blockRoot[:]) == nil {
return errors.New("no state found with head block root")
if featureconfig.Get().NewStateMgmt {
if tx.Bucket(stateSummaryBucket).Get(blockRoot[:]) == nil {
return errors.New("no state summary found with head block root")
}
} else {
if tx.Bucket(stateBucket).Get(blockRoot[:]) == nil {
return errors.New("no state found with head block root")
}
}
bucket := tx.Bucket(blocksBucket)
return bucket.Put(headBlockRootKey, blockRoot[:])
})

View File

@@ -6,6 +6,7 @@ import (
"github.com/boltdb/bolt"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/traceutil"
"go.opencensus.io/trace"
)
@@ -63,12 +64,18 @@ func (k *Store) SaveJustifiedCheckpoint(ctx context.Context, checkpoint *ethpb.C
}
return k.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(checkpointBucket)
// The corresponding state must exist or there is a risk that the beacondb enters a state
// where the justified beaconState is missing. This may be a fatal condition requiring
// a new sync from genesis.
if tx.Bucket(stateBucket).Get(checkpoint.Root) == nil {
traceutil.AnnotateError(span, errMissingStateForCheckpoint)
return errMissingStateForCheckpoint
if featureconfig.Get().NewStateMgmt {
if tx.Bucket(stateSummaryBucket).Get(checkpoint.Root) == nil {
return errors.New("missing state summary for finalized root")
}
} else {
// The corresponding state must exist or there is a risk that the beacondb enters a state
// where the justified beaconState is missing. This may be a fatal condition requiring
// a new sync from genesis.
if tx.Bucket(stateBucket).Get(checkpoint.Root) == nil {
traceutil.AnnotateError(span, errMissingStateForCheckpoint)
return errMissingStateForCheckpoint
}
}
return bucket.Put(justifiedCheckpointKey, enc)
})
@@ -85,12 +92,18 @@ func (k *Store) SaveFinalizedCheckpoint(ctx context.Context, checkpoint *ethpb.C
}
return k.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(checkpointBucket)
// The corresponding state must exist or there is a risk that the beacondb enters a state
// where the finalized beaconState is missing. This would be a fatal condition requiring
// a new sync from genesis.
if tx.Bucket(stateBucket).Get(checkpoint.Root) == nil {
traceutil.AnnotateError(span, errMissingStateForCheckpoint)
return errMissingStateForCheckpoint
if featureconfig.Get().NewStateMgmt {
if tx.Bucket(stateSummaryBucket).Get(checkpoint.Root) == nil {
return errors.New("missing state summary for finalized root")
}
} else {
// The corresponding state must exist or there is a risk that the beacondb enters a state
// where the finalized beaconState is missing. This would be a fatal condition requiring
// a new sync from genesis.
if tx.Bucket(stateBucket).Get(checkpoint.Root) == nil {
traceutil.AnnotateError(span, errMissingStateForCheckpoint)
return errMissingStateForCheckpoint
}
}
if err := bucket.Put(finalizedCheckpointKey, enc); err != nil {


@@ -42,6 +42,7 @@ var (
justifiedCheckpointKey = []byte("justified-checkpoint")
finalizedCheckpointKey = []byte("finalized-checkpoint")
powchainDataKey = []byte("powchain-data")
lastArchivedIndexKey = []byte("last-archived")
// Migration bucket.
migrationBucket = []byte("migrations")


@@ -10,6 +10,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"go.opencensus.io/trace"
)
@@ -184,9 +185,15 @@ func (k *Store) DeleteState(ctx context.Context, blockRoot [32]byte) error {
bkt = tx.Bucket(blocksBucket)
headBlkRoot := bkt.Get(headBlockRootKey)
// Safe guard against deleting genesis, finalized, head state.
if bytes.Equal(blockRoot[:], checkpoint.Root) || bytes.Equal(blockRoot[:], genesisBlockRoot) || bytes.Equal(blockRoot[:], headBlkRoot) {
return errors.New("cannot delete genesis, finalized, or head state")
if featureconfig.Get().NewStateMgmt {
if tx.Bucket(stateSummaryBucket).Get(blockRoot[:]) == nil {
return errors.New("cannot delete state without state summary")
}
} else {
// Safe guard against deleting genesis, finalized, head state.
if bytes.Equal(blockRoot[:], checkpoint.Root) || bytes.Equal(blockRoot[:], genesisBlockRoot) || bytes.Equal(blockRoot[:], headBlkRoot) {
return errors.New("cannot delete genesis, finalized, or head state")
}
}
bkt = tx.Bucket(stateBucket)
@@ -229,9 +236,15 @@ func (k *Store) DeleteStates(ctx context.Context, blockRoots [][32]byte) error {
for blockRoot, _ := c.First(); blockRoot != nil; blockRoot, _ = c.Next() {
if rootMap[bytesutil.ToBytes32(blockRoot)] {
// Safe guard against deleting genesis, finalized, or head state.
if bytes.Equal(blockRoot[:], checkpoint.Root) || bytes.Equal(blockRoot[:], genesisBlockRoot) || bytes.Equal(blockRoot[:], headBlkRoot) {
return errors.New("could not delete genesis, finalized, or head state")
if featureconfig.Get().NewStateMgmt {
if tx.Bucket(stateSummaryBucket).Get(blockRoot[:]) == nil {
return errors.New("cannot delete state without state summary")
}
} else {
// Safe guard against deleting genesis, finalized, head state.
if bytes.Equal(blockRoot[:], checkpoint.Root) || bytes.Equal(blockRoot[:], genesisBlockRoot) || bytes.Equal(blockRoot[:], headBlkRoot) {
return errors.New("cannot delete genesis, finalized, or head state")
}
}
if err := c.Delete(); err != nil {
return err


@@ -21,8 +21,10 @@ go_library(
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/powchain:go_default_library",
"//beacon-chain/rpc:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/sync:go_default_library",
"//beacon-chain/sync/initial-sync:go_default_library",
"//beacon-chain/sync/initial-sync-old:go_default_library",
"//shared:go_default_library",
"//shared/cmd:go_default_library",
"//shared/debug:go_default_library",


@@ -31,8 +31,10 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
"github.com/prysmaticlabs/prysm/beacon-chain/rpc"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
prysmsync "github.com/prysmaticlabs/prysm/beacon-chain/sync"
initialsync "github.com/prysmaticlabs/prysm/beacon-chain/sync/initial-sync"
initialsyncold "github.com/prysmaticlabs/prysm/beacon-chain/sync/initial-sync-old"
"github.com/prysmaticlabs/prysm/shared"
"github.com/prysmaticlabs/prysm/shared/cmd"
"github.com/prysmaticlabs/prysm/shared/debug"
@@ -69,6 +71,7 @@ type BeaconNode struct {
blockFeed *event.Feed
opFeed *event.Feed
forkChoiceStore forkchoice.ForkChoicer
stateGen *stategen.State
}
// NewBeaconNode creates a new node instance, sets up configuration options, and registers
@@ -104,6 +107,8 @@ func NewBeaconNode(ctx *cli.Context) (*BeaconNode, error) {
return nil, err
}
beacon.startStateGen()
if err := beacon.registerP2P(ctx); err != nil {
return nil, err
}
@@ -112,7 +117,7 @@ func NewBeaconNode(ctx *cli.Context) (*BeaconNode, error) {
return nil, err
}
if err := beacon.registerAttestationPool(ctx); err != nil {
if err := beacon.registerAttestationPool(); err != nil {
return nil, err
}
@@ -258,6 +263,10 @@ func (b *BeaconNode) startDB(ctx *cli.Context) error {
return nil
}
func (b *BeaconNode) startStateGen() {
b.stateGen = stategen.New(b.db)
}
func (b *BeaconNode) registerP2P(ctx *cli.Context) error {
// Bootnode ENR may be a filepath to an ENR file.
bootnodeAddrs := strings.Split(ctx.GlobalString(cmd.BootstrapNode.Name), ",")
@@ -302,12 +311,27 @@ func (b *BeaconNode) fetchP2P(ctx *cli.Context) p2p.P2P {
return p
}
func (b *BeaconNode) registerAttestationPool() error {
s, err := attestations.NewService(context.Background(), &attestations.Config{
Pool: b.attestationPool,
})
if err != nil {
return errors.Wrap(err, "could not register atts pool service")
}
return b.services.RegisterService(s)
}
func (b *BeaconNode) registerBlockchainService(ctx *cli.Context) error {
var web3Service *powchain.Service
if err := b.services.FetchService(&web3Service); err != nil {
return err
}
var opsService *attestations.Service
if err := b.services.FetchService(&opsService); err != nil {
return err
}
maxRoutines := ctx.GlobalInt64(cmd.MaxGoroutines.Name)
blockchainService, err := blockchain.NewService(context.Background(), &blockchain.Config{
BeaconDB: b.db,
@@ -320,6 +344,8 @@ func (b *BeaconNode) registerBlockchainService(ctx *cli.Context) error {
MaxRoutines: maxRoutines,
StateNotifier: b,
ForkChoiceStore: b.forkChoiceStore,
OpsService: opsService,
StateGen: b.stateGen,
})
if err != nil {
return errors.Wrap(err, "could not register blockchain service")
@@ -327,16 +353,6 @@ func (b *BeaconNode) registerBlockchainService(ctx *cli.Context) error {
return b.services.RegisterService(blockchainService)
}
func (b *BeaconNode) registerAttestationPool(ctx *cli.Context) error {
attPoolService, err := attestations.NewService(context.Background(), &attestations.Config{
Pool: b.attestationPool,
})
if err != nil {
return err
}
return b.services.RegisterService(attPoolService)
}
func (b *BeaconNode) registerPOWChainService(cliCtx *cli.Context) error {
if cliCtx.GlobalBool(testSkipPowFlag) {
return b.services.RegisterService(&powchain.Service{})
@@ -389,9 +405,19 @@ func (b *BeaconNode) registerSyncService(ctx *cli.Context) error {
return err
}
var initSync *initialsync.Service
if err := b.services.FetchService(&initSync); err != nil {
return err
var initSync prysmsync.Checker
if cfg := featureconfig.Get(); cfg.EnableInitSyncQueue {
var initSyncTmp *initialsync.Service
if err := b.services.FetchService(&initSyncTmp); err != nil {
return err
}
initSync = initSyncTmp
} else {
var initSyncTmp *initialsyncold.Service
if err := b.services.FetchService(&initSyncTmp); err != nil {
return err
}
initSync = initSyncTmp
}
rs := prysmsync.NewRegularSync(&prysmsync.Config{
@@ -404,6 +430,7 @@ func (b *BeaconNode) registerSyncService(ctx *cli.Context) error {
AttestationNotifier: b,
AttPool: b.attestationPool,
ExitPool: b.exitPool,
SlashingPool: b.slashingsPool,
})
return b.services.RegisterService(rs)
@@ -415,16 +442,25 @@ func (b *BeaconNode) registerInitialSyncService(ctx *cli.Context) error {
return err
}
is := initialsync.NewInitialSync(&initialsync.Config{
if cfg := featureconfig.Get(); cfg.EnableInitSyncQueue {
is := initialsync.NewInitialSync(&initialsync.Config{
DB: b.db,
Chain: chainService,
P2P: b.fetchP2P(ctx),
StateNotifier: b,
BlockNotifier: b,
})
return b.services.RegisterService(is)
}
is := initialsyncold.NewInitialSync(&initialsyncold.Config{
DB: b.db,
Chain: chainService,
P2P: b.fetchP2P(ctx),
StateNotifier: b,
BlockNotifier: b,
})
return b.services.RegisterService(is)
}
func (b *BeaconNode) registerRPCService(ctx *cli.Context) error {
@@ -438,9 +474,19 @@ func (b *BeaconNode) registerRPCService(ctx *cli.Context) error {
return err
}
var syncService *initialsync.Service
if err := b.services.FetchService(&syncService); err != nil {
return err
var syncService prysmsync.Checker
if cfg := featureconfig.Get(); cfg.EnableInitSyncQueue {
var initSyncTmp *initialsync.Service
if err := b.services.FetchService(&initSyncTmp); err != nil {
return err
}
syncService = initSyncTmp
} else {
var initSyncTmp *initialsyncold.Service
if err := b.services.FetchService(&initSyncTmp); err != nil {
return err
}
syncService = initSyncTmp
}
genesisValidators := ctx.GlobalUint64(flags.InteropNumValidatorsFlag.Name)
@@ -496,6 +542,7 @@ func (b *BeaconNode) registerRPCService(ctx *cli.Context) error {
OperationNotifier: b,
SlasherCert: slasherCert,
SlasherProvider: slasherProvider,
StateGen: b.stateGen,
})
return b.services.RegisterService(rpcService)
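The node wiring above switches between the new queue-based initial sync service and the old one behind a shared checker interface, selected by the EnableInitSyncQueue flag, so the rest of the node never depends on a concrete implementation. A simplified, self-contained sketch of that selection pattern (the interface and type names below are invented for illustration; the real instances come out of the shared service registry as prysmsync.Checker):

package main

import "fmt"

// syncChecker stands in for the shared sync status interface both
// implementations satisfy in the hunks above.
type syncChecker interface {
	Syncing() bool
}

type queueInitialSync struct{} // stands in for initial-sync

func (queueInitialSync) Syncing() bool { return true }

type legacyInitialSync struct{} // stands in for initial-sync-old

func (legacyInitialSync) Syncing() bool { return false }

// pickInitialSync mirrors the flag-gated selection in registerSyncService and
// registerRPCService: callers only ever see the interface.
func pickInitialSync(enableInitSyncQueue bool) syncChecker {
	if enableInitSyncQueue {
		return queueInitialSync{}
	}
	return legacyInitialSync{}
}

func main() {
	fmt.Println("syncing:", pickInitialSync(true).Syncing())
}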


@@ -8,6 +8,7 @@ go_library(
"metrics.go",
"pool.go",
"prepare_forkchoice.go",
"prune_expired.go",
"service.go",
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations",
@@ -18,7 +19,8 @@ go_library(
"//beacon-chain/state:go_default_library",
"//shared/hashutil:go_default_library",
"//shared/params:go_default_library",
"@com_github_dgraph_io_ristretto//:go_default_library",
"//shared/roughtime:go_default_library",
"@com_github_hashicorp_golang_lru//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
@@ -35,6 +37,7 @@ go_test(
"aggregate_test.go",
"pool_test.go",
"prepare_forkchoice_test.go",
"prune_expired_test.go",
"service_test.go",
],
embed = [":go_default_library"],
@@ -42,6 +45,8 @@ go_test(
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/operations/attestations/kv:go_default_library",
"//shared/bls:go_default_library",
"//shared/params:go_default_library",
"//shared/roughtime:go_default_library",
"@com_github_gogo_protobuf//proto:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",


@@ -13,8 +13,7 @@ go_library(
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//beacon-chain/core/helpers:go_default_library",
"//shared/params:go_default_library",
"@com_github_patrickmn_go_cache//:go_default_library",
"//beacon-chain/state:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
@@ -31,10 +30,7 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//shared/params:go_default_library",
"@com_github_gogo_protobuf//proto:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
],
)


@@ -1,13 +1,11 @@
package kv
import (
"time"
"github.com/patrickmn/go-cache"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
)
// SaveAggregatedAttestation saves an aggregated attestation in cache.
@@ -20,33 +18,21 @@ func (p *AttCaches) SaveAggregatedAttestation(att *ethpb.Attestation) error {
return errors.Wrap(err, "could not tree hash attestation")
}
d, expTime, ok := p.aggregatedAtt.GetWithExpiration(string(r[:]))
// If we have not seen the attestation data before, store it in the cache with
// the default expiration timeout.
copiedAtt := stateTrie.CopyAttestation(att)
p.aggregatedAttLock.Lock()
defer p.aggregatedAttLock.Unlock()
atts, ok := p.aggregatedAtt[r]
if !ok {
atts := []*ethpb.Attestation{att}
p.aggregatedAtt.Set(string(r[:]), atts, cache.DefaultExpiration)
atts := []*ethpb.Attestation{copiedAtt}
p.aggregatedAtt[r] = atts
return nil
}
atts, ok := d.([]*ethpb.Attestation)
if !ok {
return errors.New("cached value is not of type []*ethpb.Attestation")
}
atts, err = helpers.AggregateAttestations(append(atts, att))
atts, err = helpers.AggregateAttestations(append(atts, copiedAtt))
if err != nil {
return err
}
// Delete attestation if the current time has passed the expiration time.
if time.Now().Unix() >= expTime.Unix() {
p.aggregatedAtt.Delete(string(r[:]))
return nil
}
// Reset expiration time given how much time has passed.
expDuration := time.Duration(expTime.Unix() - time.Now().Unix())
p.aggregatedAtt.Set(string(r[:]), atts, expDuration*time.Second)
p.aggregatedAtt[r] = atts
return nil
}
@@ -63,17 +49,11 @@ func (p *AttCaches) SaveAggregatedAttestations(atts []*ethpb.Attestation) error
// AggregatedAttestations returns the aggregated attestations in cache.
func (p *AttCaches) AggregatedAttestations() []*ethpb.Attestation {
// Delete all expired aggregated attestations before returning them.
p.aggregatedAtt.DeleteExpired()
atts := make([]*ethpb.Attestation, 0)
atts := make([]*ethpb.Attestation, 0, p.aggregatedAtt.ItemCount())
for s, i := range p.aggregatedAtt.Items() {
// Type assertion for the worst case. This shouldn't happen.
a, ok := i.Object.([]*ethpb.Attestation)
if !ok {
p.aggregatedAtt.Delete(s)
continue
}
p.aggregatedAttLock.RLock()
defer p.aggregatedAttLock.RUnlock()
for _, a := range p.aggregatedAtt {
atts = append(atts, a...)
}
@@ -83,19 +63,11 @@ func (p *AttCaches) AggregatedAttestations() []*ethpb.Attestation {
// AggregatedAttestationsBySlotIndex returns the aggregated attestations in cache,
// filtered by committee index and slot.
func (p *AttCaches) AggregatedAttestationsBySlotIndex(slot uint64, committeeIndex uint64) []*ethpb.Attestation {
// Delete all expired aggregated attestations before returning them.
p.aggregatedAtt.DeleteExpired()
atts := make([]*ethpb.Attestation, 0, p.aggregatedAtt.ItemCount())
for s, i := range p.aggregatedAtt.Items() {
// Type assertion for the worst case. This shouldn't happen.
a, ok := i.Object.([]*ethpb.Attestation)
if !ok {
p.aggregatedAtt.Delete(s)
continue
}
atts := make([]*ethpb.Attestation, 0)
p.aggregatedAttLock.RLock()
defer p.aggregatedAttLock.RUnlock()
for _, a := range p.aggregatedAtt {
if slot == a[0].Data.Slot && committeeIndex == a[0].Data.CommitteeIndex {
atts = append(atts, a...)
}
@@ -113,31 +85,24 @@ func (p *AttCaches) DeleteAggregatedAttestation(att *ethpb.Attestation) error {
if err != nil {
return errors.Wrap(err, "could not tree hash attestation data")
}
a, expTime, ok := p.aggregatedAtt.GetWithExpiration(string(r[:]))
p.aggregatedAttLock.Lock()
defer p.aggregatedAttLock.Unlock()
attList, ok := p.aggregatedAtt[r]
if !ok {
return nil
}
atts, ok := a.([]*ethpb.Attestation)
if !ok {
return errors.New("cached value is not of type []*ethpb.Attestation")
}
filtered := make([]*ethpb.Attestation, 0)
for _, a := range atts {
for _, a := range attList {
if !att.AggregationBits.Contains(a.AggregationBits) {
filtered = append(filtered, a)
}
}
if len(filtered) == 0 {
p.aggregatedAtt.Delete(string(r[:]))
delete(p.aggregatedAtt, r)
} else {
// Delete attestation if the current time has passed the expiration time.
if time.Now().Unix() >= expTime.Unix() {
p.aggregatedAtt.Delete(string(r[:]))
return nil
}
// Reset expiration time given how much time has passed.
expDuration := time.Duration(expTime.Unix() - time.Now().Unix())
p.aggregatedAtt.Set(string(r[:]), filtered, expDuration*time.Second)
p.aggregatedAtt[r] = filtered
}
return nil
@@ -150,16 +115,20 @@ func (p *AttCaches) HasAggregatedAttestation(att *ethpb.Attestation) (bool, erro
return false, errors.Wrap(err, "could not tree hash attestation")
}
if atts, ok := p.aggregatedAtt.Get(string(r[:])); ok {
for _, a := range atts.([]*ethpb.Attestation) {
p.aggregatedAttLock.RLock()
defer p.aggregatedAttLock.RUnlock()
if atts, ok := p.aggregatedAtt[r]; ok {
for _, a := range atts {
if a.AggregationBits.Contains(att.AggregationBits) {
return true, nil
}
}
}
if atts, ok := p.blockAtt.Get(string(r[:])); ok {
for _, a := range atts.([]*ethpb.Attestation) {
p.blockAttLock.RLock()
defer p.blockAttLock.RUnlock()
if atts, ok := p.blockAtt[r]; ok {
for _, a := range atts {
if a.AggregationBits.Contains(att.AggregationBits) {
return true, nil
}
@@ -171,5 +140,7 @@ func (p *AttCaches) HasAggregatedAttestation(att *ethpb.Attestation) (bool, erro
// AggregatedAttestationCount returns the number of aggregated attestation keys in the pool.
func (p *AttCaches) AggregatedAttestationCount() int {
return p.aggregatedAtt.ItemCount()
p.aggregatedAttLock.RLock()
defer p.aggregatedAttLock.RUnlock()
return len(p.aggregatedAtt)
}


@@ -1,18 +1,13 @@
package kv
import (
"math"
"reflect"
"sort"
"strings"
"testing"
"time"
"github.com/gogo/protobuf/proto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/shared/params"
)
func TestKV_Aggregated_NotAggregated(t *testing.T) {
@@ -51,64 +46,6 @@ func TestKV_Aggregated_CanSaveRetrieve(t *testing.T) {
}
}
func TestKV_Aggregated_SaveAndVerifyExpireTime(t *testing.T) {
cache := NewAttCaches()
d := &ethpb.AttestationData{Slot: 1}
att1 := &ethpb.Attestation{Data: d, AggregationBits: bitfield.Bitlist{0b11100}}
att2 := &ethpb.Attestation{Data: d, AggregationBits: bitfield.Bitlist{0b10110}}
att3 := &ethpb.Attestation{Data: d, AggregationBits: bitfield.Bitlist{0b11011}}
r, err := ssz.HashTreeRoot(d)
if err != nil {
t.Fatal(err)
}
if err := cache.SaveAggregatedAttestation(att1); err != nil {
t.Fatal(err)
}
a, expTime, ok := cache.aggregatedAtt.GetWithExpiration(string(r[:]))
if !ok {
t.Fatal("Did not save attestations")
}
if len(a.([]*ethpb.Attestation)) != 1 {
t.Fatal("Did not save attestations")
}
// Let time pass by one second to test expiration time.
time.Sleep(1 * time.Second)
// Save attestation 2 too the pool, the expiration time should not change.
if err := cache.SaveAggregatedAttestation(att2); err != nil {
t.Fatal(err)
}
newA, newExpTime, ok := cache.aggregatedAtt.GetWithExpiration(string(r[:]))
if !ok {
t.Fatal("Did not save attestations")
}
if len(newA.([]*ethpb.Attestation)) != 2 {
t.Fatal("Did not delete attestations")
}
if expTime.Unix() != newExpTime.Unix() {
t.Error("Expiration time should not change")
}
// Let time pass by another second to test expiration time.
time.Sleep(1 * time.Second)
// Save attestation 3 too the pool, the expiration time should not change.
if err := cache.SaveAggregatedAttestation(att3); err != nil {
t.Fatal(err)
}
newA, newExpTime, _ = cache.aggregatedAtt.GetWithExpiration(string(r[:]))
if len(newA.([]*ethpb.Attestation)) != 3 {
t.Fatal("Did not delete attestations")
}
if expTime.Unix() != newExpTime.Unix() {
t.Error("Expiration time should not change")
}
}
func TestKV_Aggregated_CanDelete(t *testing.T) {
cache := NewAttCaches()
@@ -138,90 +75,6 @@ func TestKV_Aggregated_CanDelete(t *testing.T) {
}
}
func TestKV_Aggregated_DeleteAndVerifyExpireTime(t *testing.T) {
cache := NewAttCaches()
d := &ethpb.AttestationData{Slot: 1}
att1 := &ethpb.Attestation{Data: d, AggregationBits: bitfield.Bitlist{0b11100}}
att2 := &ethpb.Attestation{Data: d, AggregationBits: bitfield.Bitlist{0b10110}}
att3 := &ethpb.Attestation{Data: d, AggregationBits: bitfield.Bitlist{0b11011}}
atts := []*ethpb.Attestation{att1, att2, att3}
for _, att := range atts {
if err := cache.SaveAggregatedAttestation(att); err != nil {
t.Fatal(err)
}
}
r, err := ssz.HashTreeRoot(d)
if err != nil {
t.Fatal(err)
}
a, expTime, ok := cache.aggregatedAtt.GetWithExpiration(string(r[:]))
if !ok {
t.Fatal("Did not save attestations")
}
if len(a.([]*ethpb.Attestation)) != 3 {
t.Fatal("Did not save attestations")
}
// Let time pass by one second to test expiration time.
time.Sleep(1 * time.Second)
// Delete attestation 1 from the pool, the expiration time should not change.
if err := cache.DeleteAggregatedAttestation(att1); err != nil {
t.Fatal(err)
}
newA, newExpTime, _ := cache.aggregatedAtt.GetWithExpiration(string(r[:]))
if len(newA.([]*ethpb.Attestation)) != 2 {
t.Fatal("Did not delete attestations")
}
if expTime.Unix() != newExpTime.Unix() {
t.Error("Expiration time should not change")
}
// Let time pass by another second to test expiration time.
time.Sleep(1 * time.Second)
// Delete attestation 1 from the pool, the expiration time should not change.
if err := cache.DeleteAggregatedAttestation(att2); err != nil {
t.Fatal(err)
}
newA, newExpTime, _ = cache.aggregatedAtt.GetWithExpiration(string(r[:]))
if len(newA.([]*ethpb.Attestation)) != 1 {
t.Fatal("Did not delete attestations")
}
if expTime.Unix() != newExpTime.Unix() {
t.Error("Expiration time should not change")
}
}
func TestKV_Aggregated_CheckExpTime(t *testing.T) {
cache := NewAttCaches()
att := &ethpb.Attestation{AggregationBits: bitfield.Bitlist{0b111}}
r, _ := ssz.HashTreeRoot(att.Data)
if err := cache.SaveAggregatedAttestation(att); err != nil {
t.Fatal(err)
}
item, exp, exists := cache.aggregatedAtt.GetWithExpiration(string(r[:]))
if !exists {
t.Error("Saved att does not exist")
}
receivedAtt := item.([]*ethpb.Attestation)[0]
if !proto.Equal(att, receivedAtt) {
t.Error("Did not receive correct aggregated att")
}
wanted := float64(params.BeaconConfig().SlotsPerEpoch * params.BeaconConfig().SecondsPerSlot)
if math.RoundToEven(exp.Sub(time.Now()).Seconds()) != wanted {
t.Errorf("Did not receive correct exp time. Wanted: %f, got: %f", wanted,
math.RoundToEven(exp.Sub(time.Now()).Seconds()))
}
}
func TestKV_HasAggregatedAttestation(t *testing.T) {
tests := []struct {
name string


@@ -1,10 +1,10 @@
package kv
import (
"github.com/patrickmn/go-cache"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
)
// SaveBlockAttestation saves a block attestation in the cache.
@@ -14,15 +14,11 @@ func (p *AttCaches) SaveBlockAttestation(att *ethpb.Attestation) error {
return errors.Wrap(err, "could not tree hash attestation")
}
var atts []*ethpb.Attestation
d, ok := p.blockAtt.Get(string(r[:]))
p.blockAttLock.Lock()
defer p.blockAttLock.Unlock()
atts, ok := p.blockAtt[r]
if !ok {
atts = make([]*ethpb.Attestation, 0)
} else {
atts, ok = d.([]*ethpb.Attestation)
if !ok {
return errors.New("cached value is not of type []*ethpb.Attestation")
}
}
// Ensure that this attestation is not already fully contained in an existing attestation.
@@ -31,11 +27,8 @@ func (p *AttCaches) SaveBlockAttestation(att *ethpb.Attestation) error {
return nil
}
}
atts = append(atts, att)
// DefaultExpiration is set to what was given to New(). In this case
// it's one epoch.
p.blockAtt.Set(string(r[:]), atts, cache.DefaultExpiration)
p.blockAtt[r] = append(atts, stateTrie.CopyAttestation(att))
return nil
}
@@ -53,14 +46,11 @@ func (p *AttCaches) SaveBlockAttestations(atts []*ethpb.Attestation) error {
// BlockAttestations returns the block attestations in cache.
func (p *AttCaches) BlockAttestations() []*ethpb.Attestation {
atts := make([]*ethpb.Attestation, 0, p.blockAtt.ItemCount())
for s, i := range p.blockAtt.Items() {
// Type assertion for the worst case. This shouldn't happen.
att, ok := i.Object.([]*ethpb.Attestation)
if !ok {
p.blockAtt.Delete(s)
continue
}
atts := make([]*ethpb.Attestation, 0)
p.blockAttLock.RLock()
defer p.blockAttLock.RUnlock()
for _, att := range p.blockAtt {
atts = append(atts, att...)
}
@@ -74,7 +64,9 @@ func (p *AttCaches) DeleteBlockAttestation(att *ethpb.Attestation) error {
return errors.Wrap(err, "could not tree hash attestation")
}
p.blockAtt.Delete(string(r[:]))
p.blockAttLock.Lock()
defer p.blockAttLock.Unlock()
delete(p.blockAtt, r)
return nil
}


@@ -1,17 +1,12 @@
package kv
import (
"math"
"reflect"
"sort"
"testing"
"time"
"github.com/gogo/protobuf/proto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/shared/params"
)
func TestKV_BlockAttestation_CanSaveRetrieve(t *testing.T) {
@@ -67,30 +62,3 @@ func TestKV_BlockAttestation_CanDelete(t *testing.T) {
t.Error("Did not receive correct aggregated atts")
}
}
func TestKV_BlockAttestation_CheckExpTime(t *testing.T) {
cache := NewAttCaches()
att := &ethpb.Attestation{AggregationBits: bitfield.Bitlist{0b111}}
r, _ := ssz.HashTreeRoot(att.Data)
if err := cache.SaveBlockAttestation(att); err != nil {
t.Fatal(err)
}
item, exp, exists := cache.blockAtt.GetWithExpiration(string(r[:]))
if !exists {
t.Error("Saved att does not exist")
}
receivedAtt := item.([]*ethpb.Attestation)[0]
if !proto.Equal(att, receivedAtt) {
t.Error("Did not receive correct aggregated att")
}
wanted := float64(params.BeaconConfig().SlotsPerEpoch * params.BeaconConfig().SecondsPerSlot)
if math.RoundToEven(exp.Sub(time.Now()).Seconds()) != wanted {
t.Errorf("Did not receive correct exp time. Wanted: %f, got: %f", wanted,
math.RoundToEven(exp.Sub(time.Now()).Seconds()))
}
}


@@ -1,10 +1,10 @@
package kv
import (
"github.com/patrickmn/go-cache"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
)
// SaveForkchoiceAttestation saves a forkchoice attestation in the cache.
@@ -14,9 +14,9 @@ func (p *AttCaches) SaveForkchoiceAttestation(att *ethpb.Attestation) error {
return errors.Wrap(err, "could not tree hash attestation")
}
// DefaultExpiration is set to what was given to New(). In this case
// it's one epoch.
p.forkchoiceAtt.Set(string(r[:]), att, cache.DefaultExpiration)
p.forkchoiceAttLock.Lock()
defer p.forkchoiceAttLock.Unlock()
p.forkchoiceAtt[r] = stateTrie.CopyAttestation(att) // Copied.
return nil
}
@@ -34,15 +34,12 @@ func (p *AttCaches) SaveForkchoiceAttestations(atts []*ethpb.Attestation) error
// ForkchoiceAttestations returns the forkchoice attestations in cache.
func (p *AttCaches) ForkchoiceAttestations() []*ethpb.Attestation {
atts := make([]*ethpb.Attestation, 0, p.forkchoiceAtt.ItemCount())
for s, i := range p.forkchoiceAtt.Items() {
// Type assertion for the worst case. This shouldn't happen.
att, ok := i.Object.(*ethpb.Attestation)
if !ok {
p.forkchoiceAtt.Delete(s)
continue
}
atts = append(atts, att)
atts := make([]*ethpb.Attestation, 0)
p.forkchoiceAttLock.RLock()
defer p.forkchoiceAttLock.RUnlock()
for _, att := range p.forkchoiceAtt {
atts = append(atts, stateTrie.CopyAttestation(att) /* Copied */)
}
return atts
@@ -55,7 +52,9 @@ func (p *AttCaches) DeleteForkchoiceAttestation(att *ethpb.Attestation) error {
return errors.Wrap(err, "could not tree hash attestation")
}
p.forkchoiceAtt.Delete(string(r[:]))
p.forkchoiceAttLock.Lock()
defer p.forkchoiceAttLock.Unlock()
delete(p.forkchoiceAtt, r)
return nil
}


@@ -1,17 +1,12 @@
package kv
import (
"math"
"reflect"
"sort"
"testing"
"time"
"github.com/gogo/protobuf/proto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/shared/params"
)
func TestKV_Forkchoice_CanSaveRetrieve(t *testing.T) {
@@ -67,30 +62,3 @@ func TestKV_Forkchoice_CanDelete(t *testing.T) {
t.Error("Did not receive correct aggregated atts")
}
}
func TestKV_Forkchoice_CheckExpTime(t *testing.T) {
cache := NewAttCaches()
att := &ethpb.Attestation{AggregationBits: bitfield.Bitlist{0b111}}
r, _ := ssz.HashTreeRoot(att)
if err := cache.SaveForkchoiceAttestation(att); err != nil {
t.Fatal(err)
}
item, exp, exists := cache.forkchoiceAtt.GetWithExpiration(string(r[:]))
if !exists {
t.Error("Saved att does not exist")
}
receivedAtt := item.(*ethpb.Attestation)
if !proto.Equal(att, receivedAtt) {
t.Error("Did not receive correct aggregated att")
}
wanted := float64(params.BeaconConfig().SlotsPerEpoch * params.BeaconConfig().SecondsPerSlot)
if math.RoundToEven(exp.Sub(time.Now()).Seconds()) != wanted {
t.Errorf("Did not receive correct exp time. Wanted: %f, got: %f", wanted,
math.RoundToEven(exp.Sub(time.Now()).Seconds()))
}
}


@@ -1,34 +1,33 @@
package kv
import (
"time"
"sync"
"github.com/patrickmn/go-cache"
"github.com/prysmaticlabs/prysm/shared/params"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
)
// AttCaches defines the caches used to satisfy the attestation pool interface.
// These caches are KV stores for various attestations,
// such as unaggregated, aggregated, or attestations within a block.
type AttCaches struct {
aggregatedAtt *cache.Cache
unAggregatedAtt *cache.Cache
forkchoiceAtt *cache.Cache
blockAtt *cache.Cache
aggregatedAttLock sync.RWMutex
aggregatedAtt map[[32]byte][]*ethpb.Attestation
unAggregateAttLock sync.RWMutex
unAggregatedAtt map[[32]byte]*ethpb.Attestation
forkchoiceAttLock sync.RWMutex
forkchoiceAtt map[[32]byte]*ethpb.Attestation
blockAttLock sync.RWMutex
blockAtt map[[32]byte][]*ethpb.Attestation
}
// NewAttCaches initializes a new attestation pool consisting of multiple KV stores for
// various kinds of attestations.
func NewAttCaches() *AttCaches {
secsInEpoch := time.Duration(params.BeaconConfig().SlotsPerEpoch * params.BeaconConfig().SecondsPerSlot)
// Create caches with default expiration time of one epoch and which
// purges expired items every epoch.
pool := &AttCaches{
unAggregatedAtt: cache.New(secsInEpoch*time.Second, secsInEpoch*time.Second),
aggregatedAtt: cache.New(secsInEpoch*time.Second, secsInEpoch*time.Second),
forkchoiceAtt: cache.New(secsInEpoch*time.Second, secsInEpoch*time.Second),
blockAtt: cache.New(secsInEpoch*time.Second, secsInEpoch*time.Second),
unAggregatedAtt: make(map[[32]byte]*ethpb.Attestation),
aggregatedAtt: make(map[[32]byte][]*ethpb.Attestation),
forkchoiceAtt: make(map[[32]byte]*ethpb.Attestation),
blockAtt: make(map[[32]byte][]*ethpb.Attestation),
}
return pool
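The pool now keeps attestations in plain Go maps keyed by the 32-byte data root, each guarded by its own sync.RWMutex, and copies values on insert; expiry moves out of the cache layer and into the prune routine added later in this changeset. A small standalone sketch of that locking-and-copying pattern (names are illustrative, not the real pool API):

package poolsketch

import (
	"sync"

	ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
	stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
)

// exampleCache shows the shape of one of the four caches above: a map keyed by
// the attestation data root with a dedicated read/write lock.
type exampleCache struct {
	lock  sync.RWMutex
	items map[[32]byte]*ethpb.Attestation
}

func (c *exampleCache) save(root [32]byte, att *ethpb.Attestation) {
	c.lock.Lock()
	defer c.lock.Unlock()
	// Copy on insert so callers cannot mutate pooled attestations afterwards.
	c.items[root] = stateTrie.CopyAttestation(att)
}

func (c *exampleCache) count() int {
	c.lock.RLock()
	defer c.lock.RUnlock()
	return len(c.items)
}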


@@ -1,11 +1,11 @@
package kv
import (
"github.com/patrickmn/go-cache"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
)
// SaveUnaggregatedAttestation saves an unaggregated attestation in cache.
@@ -19,9 +19,9 @@ func (p *AttCaches) SaveUnaggregatedAttestation(att *ethpb.Attestation) error {
return errors.Wrap(err, "could not tree hash attestation")
}
// DefaultExpiration is set to what was given to New(). In this case
// it's one epoch.
p.unAggregatedAtt.Set(string(r[:]), att, cache.DefaultExpiration)
p.unAggregateAttLock.Lock()
defer p.unAggregateAttLock.Unlock()
p.unAggregatedAtt[r] = stateTrie.CopyAttestation(att) // Copied.
return nil
}
@@ -39,17 +39,12 @@ func (p *AttCaches) SaveUnaggregatedAttestations(atts []*ethpb.Attestation) erro
// UnaggregatedAttestations returns all the unaggregated attestations in cache.
func (p *AttCaches) UnaggregatedAttestations() []*ethpb.Attestation {
atts := make([]*ethpb.Attestation, 0, p.unAggregatedAtt.ItemCount())
for s, i := range p.unAggregatedAtt.Items() {
atts := make([]*ethpb.Attestation, 0)
// Type assertion for the worst case. This shouldn't happen.
att, ok := i.Object.(*ethpb.Attestation)
if !ok {
p.unAggregatedAtt.Delete(s)
continue
}
atts = append(atts, att)
p.unAggregateAttLock.RLock()
defer p.unAggregateAttLock.RUnlock()
for _, att := range p.unAggregatedAtt {
atts = append(atts, stateTrie.CopyAttestation(att) /* Copied */)
}
return atts
@@ -66,12 +61,16 @@ func (p *AttCaches) DeleteUnaggregatedAttestation(att *ethpb.Attestation) error
return errors.Wrap(err, "could not tree hash attestation")
}
p.unAggregatedAtt.Delete(string(r[:]))
p.unAggregateAttLock.Lock()
defer p.unAggregateAttLock.Unlock()
delete(p.unAggregatedAtt, r)
return nil
}
// UnaggregatedAttestationCount returns the number of unaggregated attestation keys in the pool.
func (p *AttCaches) UnaggregatedAttestationCount() int {
return p.unAggregatedAtt.ItemCount()
p.unAggregateAttLock.RLock()
defer p.unAggregateAttLock.RUnlock()
return len(p.unAggregatedAtt)
}


@@ -1,17 +1,12 @@
package kv
import (
"math"
"reflect"
"strings"
"testing"
"time"
"github.com/gogo/protobuf/proto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/go-ssz"
"github.com/prysmaticlabs/prysm/shared/params"
)
func TestKV_Unaggregated_AlreadyAggregated(t *testing.T) {
@@ -55,30 +50,3 @@ func TestKV_Unaggregated_CanDelete(t *testing.T) {
t.Error("Did not receive correct aggregated atts")
}
}
func TestKV_Unaggregated_CheckExpTime(t *testing.T) {
cache := NewAttCaches()
att := &ethpb.Attestation{AggregationBits: bitfield.Bitlist{0b11}}
r, _ := ssz.HashTreeRoot(att)
if err := cache.SaveUnaggregatedAttestation(att); err != nil {
t.Fatal(err)
}
item, exp, exists := cache.unAggregatedAtt.GetWithExpiration(string(r[:]))
if !exists {
t.Error("Saved att does not exist")
}
receivedAtt := item.(*ethpb.Attestation)
if !proto.Equal(att, receivedAtt) {
t.Error("Did not receive correct unaggregated att")
}
wanted := float64(params.BeaconConfig().SlotsPerEpoch * params.BeaconConfig().SecondsPerSlot)
if math.RoundToEven(exp.Sub(time.Now()).Seconds()) != wanted {
t.Errorf("Did not receive correct exp time. Wanted: %f, got: %f", wanted,
math.RoundToEven(exp.Sub(time.Now()).Seconds()))
}
}


@@ -8,16 +8,28 @@ import (
var (
aggregatedAttsCount = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "aggregated_attestations_in_pool_count",
Name: "aggregated_attestations_in_pool_total",
Help: "The number of aggregated attestations in the pool.",
},
)
unaggregatedAttsCount = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "unaggregated_attestations_in_pool_count",
Name: "unaggregated_attestations_in_pool_total",
Help: "The number of unaggregated attestations in the pool.",
},
)
expiredAggregatedAtts = promauto.NewCounter(prometheus.CounterOpts{
Name: "expired_aggregated_atts_total",
Help: "The number of expired and deleted aggregated attestations in the pool.",
})
expiredUnaggregatedAtts = promauto.NewCounter(prometheus.CounterOpts{
Name: "expired_unaggregated_atts_total",
Help: "The number of expired and deleted unaggregated attestations in the pool.",
})
expiredBlockAtts = promauto.NewCounter(prometheus.CounterOpts{
Name: "expired_block_atts_total",
Help: "The number of expired and deleted block attestations in the pool.",
})
)
func (s *Service) updateMetrics() {


@@ -1,6 +1,7 @@
package attestations
import (
"bytes"
"context"
"errors"
"time"
@@ -108,7 +109,7 @@ func (s *Service) seen(att *ethpb.Attestation) (bool, error) {
return false, err
}
incomingBits := att.AggregationBits
savedBits, ok := s.forkChoiceProcessedRoots.Get(string(attRoot[:]))
savedBits, ok := s.forkChoiceProcessedRoots.Get(attRoot)
if ok {
savedBitlist, ok := savedBits.(bitfield.Bitlist)
if !ok {
@@ -116,7 +117,7 @@ func (s *Service) seen(att *ethpb.Attestation) (bool, error) {
}
if savedBitlist.Len() == incomingBits.Len() {
// Returns true if the node has seen all the bits in the new bit field of the incoming attestation.
if savedBitlist.Contains(incomingBits) {
if bytes.Equal(savedBitlist, incomingBits) || savedBitlist.Contains(incomingBits) {
return true, nil
}
// Update the bit fields by Or'ing them with the new ones.
@@ -124,6 +125,6 @@ func (s *Service) seen(att *ethpb.Attestation) (bool, error) {
}
}
s.forkChoiceProcessedRoots.Set(string(attRoot[:]), incomingBits, 1 /*cost*/)
s.forkChoiceProcessedRoots.Add(attRoot, incomingBits)
return false, nil
}


@@ -5,7 +5,6 @@ import (
"reflect"
"sort"
"testing"
"time"
"github.com/gogo/protobuf/proto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
@@ -172,7 +171,7 @@ func TestBatchAttestations_Single(t *testing.T) {
t.Fatal(err)
}
wanted, err := helpers.AggregateAttestations(append(unaggregatedAtts, aggregatedAtts...))
wanted, err := helpers.AggregateAttestations(append(aggregatedAtts, unaggregatedAtts...))
if err != nil {
t.Fatal(err)
}
@@ -182,7 +181,8 @@ func TestBatchAttestations_Single(t *testing.T) {
t.Fatal(err)
}
if !reflect.DeepEqual(wanted, s.pool.ForkchoiceAttestations()) {
got := s.pool.ForkchoiceAttestations()
if !reflect.DeepEqual(wanted, got) {
t.Error("Did not aggregate and save for batch")
}
}
@@ -296,8 +296,6 @@ func TestSeenAttestations_PresentInCache(t *testing.T) {
t.Error("Wanted false, got true")
}
time.Sleep(100 * time.Millisecond)
att2 := &ethpb.Attestation{Data: &ethpb.AttestationData{}, Signature: []byte{'A'}, AggregationBits: bitfield.Bitlist{0x17} /* 0b00010111 */}
got, err = s.seen(att2)
if err != nil {
@@ -307,8 +305,6 @@ func TestSeenAttestations_PresentInCache(t *testing.T) {
t.Error("Wanted false, got true")
}
time.Sleep(100 * time.Millisecond)
att3 := &ethpb.Attestation{Data: &ethpb.AttestationData{}, Signature: []byte{'A'}, AggregationBits: bitfield.Bitlist{0x17} /* 0b00010111 */}
got, err = s.seen(att3)
if err != nil {
@@ -382,6 +378,5 @@ func TestService_seen(t *testing.T) {
if got != tt.want {
t.Errorf("Test %d failed. Got=%v want=%v", i, got, tt.want)
}
time.Sleep(10) // Sleep briefly for the cache routine to buffer.
}
}


@@ -0,0 +1,71 @@
package attestations
import (
"time"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/roughtime"
)
// Prune expired attestations from the pool every slot interval.
var pruneExpiredAttsPeriod = time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
// This prunes the attestation pool by running pruneExpiredAtts
// at every pruneExpiredAttsPeriod.
func (s *Service) pruneAttsPool() {
ticker := time.NewTicker(pruneExpiredAttsPeriod)
for {
select {
case <-ticker.C:
s.pruneExpiredAtts()
case <-s.ctx.Done():
log.Debug("Context closed, exiting routine")
return
}
}
}
// This prunes expired attestations from the pool.
func (s *Service) pruneExpiredAtts() {
aggregatedAtts := s.pool.AggregatedAttestations()
for _, att := range aggregatedAtts {
if s.expired(att.Data.Slot) {
if err := s.pool.DeleteAggregatedAttestation(att); err != nil {
log.WithError(err).Error("Could not delete expired aggregated attestation")
}
expiredAggregatedAtts.Inc()
}
}
unAggregatedAtts := s.pool.UnaggregatedAttestations()
for _, att := range unAggregatedAtts {
if s.expired(att.Data.Slot) {
if err := s.pool.DeleteUnaggregatedAttestation(att); err != nil {
log.WithError(err).Error("Could not delete expired unaggregated attestation")
}
expiredUnaggregatedAtts.Inc()
}
}
blockAtts := s.pool.BlockAttestations()
for _, att := range blockAtts {
if s.expired(att.Data.Slot) {
if err := s.pool.DeleteBlockAttestation(att); err != nil {
log.WithError(err).Error("Could not delete expired block attestation")
}
expiredBlockAtts.Inc()
}
}
}
// Returns true if the input slot has expired.
// A slot is considered expired when it is at least one epoch behind the current time.
func (s *Service) expired(slot uint64) bool {
expirationSlot := slot + params.BeaconConfig().SlotsPerEpoch
expirationTime := s.genesisTime + expirationSlot*params.BeaconConfig().SecondsPerSlot
currentTime := uint64(roughtime.Now().Unix())
if currentTime >= expirationTime {
return true
}
return false
}
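The expiry rule above reduces to simple slot arithmetic: an attestation for slot s is pruned once the wall clock passes genesisTime + (s + SlotsPerEpoch) * SecondsPerSlot. A runnable sketch with mainnet-style parameters (32 slots per epoch, 12-second slots) assumed for illustration:

package main

import (
	"fmt"
	"time"
)

const (
	slotsPerEpoch  = 32 // assumed mainnet value
	secondsPerSlot = 12 // assumed mainnet value
)

// expired mirrors Service.expired above with the config constants inlined.
func expired(genesisTime uint64, slot uint64, now time.Time) bool {
	expirationSlot := slot + slotsPerEpoch
	expirationTime := genesisTime + expirationSlot*secondsPerSlot
	return uint64(now.Unix()) >= expirationTime
}

func main() {
	// Pretend genesis happened exactly one epoch ago, as the tests below do.
	genesis := uint64(time.Now().Unix()) - slotsPerEpoch*secondsPerSlot
	fmt.Println(expired(genesis, 0, time.Now())) // true: slot 0 is a full epoch old
	fmt.Println(expired(genesis, 1, time.Now())) // false: slot 1 expires 12 seconds from now
}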


@@ -0,0 +1,62 @@
package attestations
import (
"context"
"testing"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/roughtime"
)
func TestPruneExpiredAtts_CanPrune(t *testing.T) {
s, err := NewService(context.Background(), &Config{Pool: NewPool()})
if err != nil {
t.Fatal(err)
}
att1 := &ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 0}, AggregationBits: bitfield.Bitlist{0b1101}}
att2 := &ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 0}, AggregationBits: bitfield.Bitlist{0b1111}}
att3 := &ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b1101}}
att4 := &ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b1110}}
atts := []*ethpb.Attestation{att1, att2, att3, att4}
if err := s.pool.SaveAggregatedAttestations(atts); err != nil {
t.Fatal(err)
}
if err := s.pool.SaveBlockAttestations(atts); err != nil {
t.Fatal(err)
}
// Rewind back one epoch worth of time.
s.genesisTime = uint64(roughtime.Now().Unix()) - params.BeaconConfig().SlotsPerEpoch*params.BeaconConfig().SecondsPerSlot
s.pruneExpiredAtts()
// All the attestations on slot 0 should be pruned.
for _, attestation := range s.pool.AggregatedAttestations() {
if attestation.Data.Slot == 0 {
t.Error("Should be pruned")
}
}
for _, attestation := range s.pool.BlockAttestations() {
if attestation.Data.Slot == 0 {
t.Error("Should be pruned")
}
}
}
func TestExpired_AttsCanExpire(t *testing.T) {
s, err := NewService(context.Background(), &Config{Pool: NewPool()})
if err != nil {
t.Fatal(err)
}
// Rewind back one epoch worth of time.
s.genesisTime = uint64(roughtime.Now().Unix()) - params.BeaconConfig().SlotsPerEpoch*params.BeaconConfig().SecondsPerSlot
if !s.expired(0) {
t.Error("Should expired")
}
if s.expired(1) {
t.Error("Should not expired")
}
}


@@ -3,10 +3,10 @@ package attestations
import (
"context"
"github.com/dgraph-io/ristretto"
lru "github.com/hashicorp/golang-lru"
)
var forkChoiceProcessedRootsSize = int64(1 << 16)
var forkChoiceProcessedRootsSize = 1 << 16
// Service of attestation pool operations.
type Service struct {
@@ -14,7 +14,8 @@ type Service struct {
cancel context.CancelFunc
pool Pool
err error
forkChoiceProcessedRoots *ristretto.Cache
forkChoiceProcessedRoots *lru.Cache
genesisTime uint64
}
// Config options for the service.
@@ -25,11 +26,7 @@ type Config struct {
// NewService instantiates a new attestation pool service instance that will
// be registered into a running beacon node.
func NewService(ctx context.Context, cfg *Config) (*Service, error) {
cache, err := ristretto.NewCache(&ristretto.Config{
NumCounters: forkChoiceProcessedRootsSize,
MaxCost: forkChoiceProcessedRootsSize,
BufferItems: 64,
})
cache, err := lru.New(forkChoiceProcessedRootsSize)
if err != nil {
return nil, err
}
@@ -47,6 +44,7 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {
func (s *Service) Start() {
go s.prepareForkChoiceAtts()
go s.aggregateRoutine()
go s.pruneAttsPool()
}
// Stop the beacon block attestation pool service's main event loop
@@ -63,3 +61,8 @@ func (s *Service) Status() error {
}
return nil
}
// SetGenesisTime sets genesis time for operation service to use.
func (s *Service) SetGenesisTime(t uint64) {
s.genesisTime = t
}
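The service swaps the ristretto cache for a fixed-size hashicorp LRU and keys it directly by the [32]byte attestation-data root rather than a string conversion. A small usage sketch, assuming only the hashicorp/golang-lru and prysmaticlabs/go-bitfield packages imported above:

package main

import (
	"bytes"
	"fmt"

	lru "github.com/hashicorp/golang-lru"
	"github.com/prysmaticlabs/go-bitfield"
)

func main() {
	// Same size the service uses: 1 << 16 entries, evicting least recently used.
	cache, err := lru.New(1 << 16)
	if err != nil {
		panic(err)
	}

	root := [32]byte{'a'} // stand-in for an attestation data root
	cache.Add(root, bitfield.Bitlist{0b10111})

	incoming := bitfield.Bitlist{0b10111}
	if v, ok := cache.Get(root); ok && bytes.Equal(v.(bitfield.Bitlist), incoming) {
		fmt.Println("identical aggregation bits already cached")
	}
}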


@@ -22,6 +22,7 @@ go_library(
"//beacon-chain/rpc/beacon:go_default_library",
"//beacon-chain/rpc/node:go_default_library",
"//beacon-chain/rpc/validator:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/sync:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//proto/beacon/rpc/v1:go_default_library",


@@ -34,6 +34,7 @@ go_library(
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/powchain:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//shared/attestationutil:go_default_library",
"//shared/bytesutil:go_default_library",


@@ -277,7 +277,14 @@ func (bs *Server) StreamIndexedAttestations(
err,
)
}
epoch := helpers.SlotToEpoch(bs.HeadFetcher.HeadSlot())
if len(aggAtts) == 0 {
continue
}
// All attestations we receive have the same target epoch given they
// have the same data root, so we just use the target epoch from
// the first one to determine committees for converting into indexed
// form.
epoch := aggAtts[0].Data.Target.Epoch
committeesBySlot, _, err := bs.retrieveCommitteesForEpoch(stream.Context(), epoch)
if err != nil {
return status.Errorf(


@@ -22,7 +22,7 @@ func (bs *Server) ListBeaconCommittees(
var requestingGenesis bool
var startSlot uint64
headSlot := bs.HeadFetcher.HeadSlot()
headSlot := bs.GenesisTimeFetcher.CurrentSlot()
switch q := req.QueryFilter.(type) {
case *ethpb.ListCommitteesRequest_Epoch:
startSlot = helpers.StartSlot(q.Epoch)
@@ -58,8 +58,8 @@ func (bs *Server) retrieveCommitteesForEpoch(
var activeIndices []uint64
var err error
startSlot := helpers.StartSlot(epoch)
headEpoch := helpers.SlotToEpoch(bs.HeadFetcher.HeadSlot())
if helpers.SlotToEpoch(startSlot)+1 < headEpoch {
currentEpoch := helpers.SlotToEpoch(bs.GenesisTimeFetcher.CurrentSlot())
if helpers.SlotToEpoch(startSlot)+1 < currentEpoch {
activeIndices, err = bs.HeadFetcher.HeadValidatorsIndices(helpers.SlotToEpoch(startSlot))
if err != nil {
return nil, nil, status.Errorf(
@@ -86,7 +86,7 @@ func (bs *Server) retrieveCommitteesForEpoch(
)
}
attesterSeed = bytesutil.ToBytes32(archivedCommitteeInfo.AttesterSeed)
} else if helpers.SlotToEpoch(startSlot)+1 == headEpoch || helpers.SlotToEpoch(startSlot) == headEpoch {
} else if helpers.SlotToEpoch(startSlot)+1 == currentEpoch || helpers.SlotToEpoch(startSlot) == currentEpoch {
// Otherwise, we use current beacon state to calculate the committees.
requestedEpoch := helpers.SlotToEpoch(startSlot)
activeIndices, err = bs.HeadFetcher.HeadValidatorsIndices(requestedEpoch)
@@ -112,7 +112,7 @@ func (bs *Server) retrieveCommitteesForEpoch(
return nil, nil, status.Errorf(
codes.InvalidArgument,
"Cannot retrieve information about an epoch in the future, current epoch %d, requesting %d",
headEpoch,
currentEpoch,
helpers.SlotToEpoch(startSlot),
)
}
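Switching from HeadFetcher.HeadSlot to GenesisTimeFetcher.CurrentSlot makes the epoch comparison follow the wall clock rather than the possibly lagging head block. The clock-derived slot is just elapsed time since genesis divided by the slot duration; a sketch with assumed mainnet-style parameters:

package main

import (
	"fmt"
	"time"
)

const (
	secondsPerSlot = 12 // assumed mainnet value
	slotsPerEpoch  = 32 // assumed mainnet value
)

// currentSlot is an illustrative stand-in for GenesisTimeFetcher.CurrentSlot.
func currentSlot(genesis, now time.Time) uint64 {
	if now.Before(genesis) {
		return 0
	}
	return uint64(now.Sub(genesis) / (secondsPerSlot * time.Second))
}

func main() {
	genesis := time.Now().Add(-100 * secondsPerSlot * time.Second)
	slot := currentSlot(genesis, time.Now())
	fmt.Printf("slot %d, epoch %d\n", slot, slot/slotsPerEpoch) // slot 100, epoch 3
}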


@@ -34,10 +34,12 @@ func TestServer_ListBeaconCommittees_CurrentEpoch(t *testing.T) {
t.Fatal(err)
}
m := &mock.ChainService{
State: headState,
}
bs := &Server{
HeadFetcher: &mock.ChainService{
State: headState,
},
HeadFetcher: m,
GenesisTimeFetcher: m,
}
activeIndices, err := helpers.ActiveValidatorIndices(headState, 0)
@@ -84,10 +86,12 @@ func TestServer_ListBeaconCommittees_PreviousEpoch(t *testing.T) {
headState.SetRandaoMixes(mixes)
headState.SetSlot(params.BeaconConfig().SlotsPerEpoch * 2)
m := &mock.ChainService{
State: headState,
}
bs := &Server{
HeadFetcher: &mock.ChainService{
State: headState,
},
HeadFetcher: m,
GenesisTimeFetcher: m,
}
activeIndices, err := helpers.ActiveValidatorIndices(headState, 1)
@@ -183,11 +187,13 @@ func TestServer_ListBeaconCommittees_FromArchive(t *testing.T) {
t.Fatal(err)
}
m := &mock.ChainService{
State: headState,
}
bs := &Server{
BeaconDB: db,
HeadFetcher: &mock.ChainService{
State: headState,
},
BeaconDB: db,
HeadFetcher: m,
GenesisTimeFetcher: m,
}
activeIndices, err := helpers.ActiveValidatorIndices(headState, 0)


@@ -15,6 +15,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/operations/slashings"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
pbp2p "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
)
@@ -41,4 +42,5 @@ type Server struct {
ChainStartChan chan time.Time
ReceivedAttestationsBuffer chan *ethpb.Attestation
CollectedAttestationsBuffer chan []*ethpb.Attestation
StateGen *stategen.State
}


@@ -337,8 +337,9 @@ func (bs *Server) GetValidatorActiveSetChanges(
}
activatedIndices := make([]uint64, 0)
slashedIndices := make([]uint64, 0)
exitedIndices := make([]uint64, 0)
slashedIndices := make([]uint64, 0)
ejectedIndices := make([]uint64, 0)
if requestingGenesis || requestedEpoch < currentEpoch {
archivedChanges, err := bs.BeaconDB.ArchivedActiveValidatorChanges(ctx, requestedEpoch)
if err != nil {
@@ -352,8 +353,9 @@ func (bs *Server) GetValidatorActiveSetChanges(
)
}
activatedIndices = archivedChanges.Activated
slashedIndices = archivedChanges.Slashed
exitedIndices = archivedChanges.Exited
slashedIndices = archivedChanges.Slashed
ejectedIndices = archivedChanges.Ejected
} else if requestedEpoch == currentEpoch {
activeValidatorCount, err := helpers.ActiveValidatorCount(headState, helpers.PrevEpoch(headState))
if err != nil {
@@ -361,11 +363,15 @@ func (bs *Server) GetValidatorActiveSetChanges(
}
vals := headState.Validators()
activatedIndices = validators.ActivatedValidatorIndices(helpers.PrevEpoch(headState), vals)
slashedIndices = validators.SlashedValidatorIndices(helpers.PrevEpoch(headState), vals)
exitedIndices, err = validators.ExitedValidatorIndices(helpers.PrevEpoch(headState), vals, activeValidatorCount)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not determine exited validator indices: %v", err)
}
slashedIndices = validators.SlashedValidatorIndices(helpers.PrevEpoch(headState), vals)
ejectedIndices, err = validators.EjectedValidatorIndices(helpers.PrevEpoch(headState), vals, activeValidatorCount)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not determine ejected validator indices: %v", err)
}
} else {
// We are requesting data from the future and we return an error.
return nil, status.Errorf(
@@ -378,25 +384,35 @@ func (bs *Server) GetValidatorActiveSetChanges(
// We retrieve the public keys for the indices.
activatedKeys := make([][]byte, len(activatedIndices))
slashedKeys := make([][]byte, len(slashedIndices))
exitedKeys := make([][]byte, len(exitedIndices))
slashedKeys := make([][]byte, len(slashedIndices))
ejectedKeys := make([][]byte, len(ejectedIndices))
for i, idx := range activatedIndices {
pubkey := headState.PubkeyAtIndex(idx)
activatedKeys[i] = pubkey[:]
}
for i, idx := range slashedIndices {
pubkey := headState.PubkeyAtIndex(idx)
slashedKeys[i] = pubkey[:]
}
for i, idx := range exitedIndices {
pubkey := headState.PubkeyAtIndex(idx)
exitedKeys[i] = pubkey[:]
}
for i, idx := range slashedIndices {
pubkey := headState.PubkeyAtIndex(idx)
slashedKeys[i] = pubkey[:]
}
for i, idx := range ejectedIndices {
pubkey := headState.PubkeyAtIndex(idx)
ejectedKeys[i] = pubkey[:]
}
return &ethpb.ActiveSetChanges{
Epoch: requestedEpoch,
ActivatedPublicKeys: activatedKeys,
ActivatedIndices: activatedIndices,
ExitedPublicKeys: exitedKeys,
ExitedIndices: exitedIndices,
SlashedPublicKeys: slashedKeys,
SlashedIndices: slashedIndices,
EjectedPublicKeys: ejectedKeys,
EjectedIndices: ejectedIndices,
}, nil
}


@@ -1106,7 +1106,7 @@ func TestServer_GetValidatorActiveSetChanges_CannotRequestFutureEpoch(t *testing
func TestServer_GetValidatorActiveSetChanges(t *testing.T) {
ctx := context.Background()
validators := make([]*ethpb.Validator, 6)
validators := make([]*ethpb.Validator, 8)
headState, err := stateTrie.InitializeFromProto(&pbp2p.BeaconState{
Slot: 0,
Validators: validators,
@@ -1119,6 +1119,7 @@ func TestServer_GetValidatorActiveSetChanges(t *testing.T) {
withdrawableEpoch := params.BeaconConfig().FarFutureEpoch
exitEpoch := params.BeaconConfig().FarFutureEpoch
slashed := false
balance := params.BeaconConfig().MaxEffectiveBalance
// Mark indices divisible by two as activated.
if i%2 == 0 {
activationEpoch = helpers.ActivationExitEpoch(0)
@@ -1130,10 +1131,16 @@ func TestServer_GetValidatorActiveSetChanges(t *testing.T) {
// Mark indices divisible by 5 as exited.
exitEpoch = 0
withdrawableEpoch = params.BeaconConfig().MinValidatorWithdrawabilityDelay
} else if i%7 == 0 {
// Mark indices divisible by 7 as ejected.
exitEpoch = 0
withdrawableEpoch = params.BeaconConfig().MinValidatorWithdrawabilityDelay
balance = params.BeaconConfig().EjectionBalance
}
if err := headState.UpdateValidatorAtIndex(uint64(i), &ethpb.Validator{
ActivationEpoch: activationEpoch,
PublicKey: pubKey(uint64(i)),
EffectiveBalance: balance,
WithdrawalCredentials: make([]byte, 32),
WithdrawableEpoch: withdrawableEpoch,
Slashed: slashed,
@@ -1158,21 +1165,34 @@ func TestServer_GetValidatorActiveSetChanges(t *testing.T) {
pubKey(0),
pubKey(2),
pubKey(4),
pubKey(6),
}
wantedSlashed := [][]byte{
pubKey(3),
}
wantedActiveIndices := []uint64{0, 2, 4, 6}
wantedExited := [][]byte{
pubKey(5),
}
wantedExitedIndices := []uint64{5}
wantedSlashed := [][]byte{
pubKey(3),
}
wantedSlashedIndices := []uint64{3}
wantedEjected := [][]byte{
pubKey(7),
}
wantedEjectedIndices := []uint64{7}
wanted := &ethpb.ActiveSetChanges{
Epoch: 0,
ActivatedPublicKeys: wantedActive,
ActivatedIndices: wantedActiveIndices,
ExitedPublicKeys: wantedExited,
ExitedIndices: wantedExitedIndices,
SlashedPublicKeys: wantedSlashed,
SlashedIndices: wantedSlashedIndices,
EjectedPublicKeys: wantedEjected,
EjectedIndices: wantedEjectedIndices,
}
if !proto.Equal(wanted, res) {
t.Errorf("Wanted %v, received %v", wanted, res)
t.Errorf("Wanted \n%v, received \n%v", wanted, res)
}
}
@@ -1180,7 +1200,7 @@ func TestServer_GetValidatorActiveSetChanges_FromArchive(t *testing.T) {
db := dbTest.SetupDB(t)
defer dbTest.TeardownDB(t, db)
ctx := context.Background()
validators := make([]*ethpb.Validator, 6)
validators := make([]*ethpb.Validator, 8)
headState, err := stateTrie.InitializeFromProto(&pbp2p.BeaconState{
Slot: helpers.StartSlot(100),
Validators: validators,
@@ -1189,8 +1209,9 @@ func TestServer_GetValidatorActiveSetChanges_FromArchive(t *testing.T) {
t.Fatal(err)
}
activatedIndices := make([]uint64, 0)
slashedIndices := make([]uint64, 0)
exitedIndices := make([]uint64, 0)
slashedIndices := make([]uint64, 0)
ejectedIndices := make([]uint64, 0)
for i := 0; i < len(validators); i++ {
// Mark indices divisible by two as activated.
if i%2 == 0 {
@@ -1201,6 +1222,9 @@ func TestServer_GetValidatorActiveSetChanges_FromArchive(t *testing.T) {
} else if i%5 == 0 {
// Mark indices divisible by 5 as exited.
exitedIndices = append(exitedIndices, uint64(i))
} else if i%7 == 0 {
// Mark indices divisible by 7 as ejected.
ejectedIndices = append(ejectedIndices, uint64(i))
}
key := make([]byte, 48)
copy(key, strconv.Itoa(i))
@@ -1214,6 +1238,7 @@ func TestServer_GetValidatorActiveSetChanges_FromArchive(t *testing.T) {
Activated: activatedIndices,
Exited: exitedIndices,
Slashed: slashedIndices,
Ejected: ejectedIndices,
}
// We store the changes during the genesis epoch.
if err := db.SaveArchivedActiveValidatorChanges(ctx, 0, archivedChanges); err != nil {
@@ -1235,7 +1260,7 @@ func TestServer_GetValidatorActiveSetChanges_FromArchive(t *testing.T) {
if err != nil {
t.Fatal(err)
}
wantedKeys := make([][]byte, 6)
wantedKeys := make([][]byte, 8)
for i := 0; i < len(wantedKeys); i++ {
k := make([]byte, 48)
copy(k, strconv.Itoa(i))
@@ -1245,21 +1270,34 @@ func TestServer_GetValidatorActiveSetChanges_FromArchive(t *testing.T) {
wantedKeys[0],
wantedKeys[2],
wantedKeys[4],
wantedKeys[6],
}
wantedSlashed := [][]byte{
wantedKeys[3],
}
wantedActiveIndices := []uint64{0, 2, 4, 6}
wantedExited := [][]byte{
wantedKeys[5],
}
wantedExitedIndices := []uint64{5}
wantedSlashed := [][]byte{
wantedKeys[3],
}
wantedSlashedIndices := []uint64{3}
wantedEjected := [][]byte{
wantedKeys[7],
}
wantedEjectedIndices := []uint64{7}
wanted := &ethpb.ActiveSetChanges{
Epoch: 0,
ActivatedPublicKeys: wantedActive,
ActivatedIndices: wantedActiveIndices,
ExitedPublicKeys: wantedExited,
ExitedIndices: wantedExitedIndices,
SlashedPublicKeys: wantedSlashed,
SlashedIndices: wantedSlashedIndices,
EjectedPublicKeys: wantedEjected,
EjectedIndices: wantedEjectedIndices,
}
if !proto.Equal(wanted, res) {
t.Errorf("Wanted %v, received %v", wanted, res)
t.Errorf("Wanted \n%v, received \n%v", wanted, res)
}
res, err = bs.GetValidatorActiveSetChanges(ctx, &ethpb.GetValidatorActiveSetChangesRequest{
QueryFilter: &ethpb.GetValidatorActiveSetChangesRequest_Epoch{Epoch: 5},
@@ -1269,7 +1307,7 @@ func TestServer_GetValidatorActiveSetChanges_FromArchive(t *testing.T) {
}
wanted.Epoch = 5
if !proto.Equal(wanted, res) {
t.Errorf("Wanted %v, received %v", wanted, res)
t.Errorf("Wanted \n%v, received \n%v", wanted, res)
}
}


@@ -29,6 +29,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/rpc/beacon"
"github.com/prysmaticlabs/prysm/beacon-chain/rpc/node"
"github.com/prysmaticlabs/prysm/beacon-chain/rpc/validator"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/beacon-chain/sync"
pbp2p "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
pb "github.com/prysmaticlabs/prysm/proto/beacon/rpc/v1"
@@ -90,6 +91,7 @@ type Service struct {
slasherCert string
slasherCredentialError error
slasherClient slashpb.SlasherClient
stateGen *stategen.State
}
// Config options for the beacon node RPC server.
@@ -122,6 +124,7 @@ type Config struct {
StateNotifier statefeed.Notifier
BlockNotifier blockfeed.Notifier
OperationNotifier opfeed.Notifier
StateGen *stategen.State
}
// NewService instantiates a new RPC service instance that will
@@ -161,6 +164,7 @@ func NewService(ctx context.Context, cfg *Config) *Service {
operationNotifier: cfg.OperationNotifier,
slasherProvider: cfg.SlasherProvider,
slasherCert: cfg.SlasherCert,
stateGen: cfg.StateGen,
}
}
@@ -233,6 +237,7 @@ func (s *Service) Start() {
PendingDepositsFetcher: s.pendingDepositFetcher,
GenesisTime: genesisTime,
SlashingsPool: s.slashingsPool,
StateGen: s.stateGen,
}
nodeServer := &node.Server{
BeaconDB: s.beaconDB,


@@ -32,12 +32,14 @@ go_library(
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/powchain:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/sync:go_default_library",
"//proto/beacon/db:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//proto/beacon/rpc/v1:go_default_library",
"//shared/bls:go_default_library",
"//shared/bytesutil:go_default_library",
"//shared/featureconfig:go_default_library",
"//shared/hashutil:go_default_library",
"//shared/params:go_default_library",
"//shared/roughtime:go_default_library",


@@ -4,8 +4,10 @@ import (
"context"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
@@ -33,12 +35,18 @@ func (vs *Server) GetDuties(ctx context.Context, req *ethpb.DutiesRequest) (*eth
return nil, status.Errorf(codes.Internal, "Could not process slots up to %d: %v", epochStartSlot, err)
}
}
committeeAssignments, proposerIndexToSlot, err := helpers.CommitteeAssignments(s, req.Epoch)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not compute committee assignments: %v", err)
}
// Query the next epoch assignments for committee subnet subscriptions.
nextCommitteeAssignments, _, err := helpers.CommitteeAssignments(s, req.Epoch+1)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not compute committee assignments: %v", err)
}
var committeeIDs []uint64
var nextCommitteeIDs []uint64
var validatorAssignments []*ethpb.DutiesResponse_Duty
for _, pubKey := range req.PublicKeys {
if ctx.Err() != nil {
@@ -63,12 +71,25 @@ func (vs *Server) GetDuties(ctx context.Context, req *ethpb.DutiesRequest) (*eth
assignment.AttesterSlot = ca.AttesterSlot
assignment.ProposerSlot = proposerIndexToSlot[idx]
assignment.CommitteeIndex = ca.CommitteeIndex
committeeIDs = append(committeeIDs, ca.CommitteeIndex)
}
// Save the next epoch assignments.
ca, ok = nextCommitteeAssignments[idx]
if ok {
nextCommitteeIDs = append(nextCommitteeIDs, ca.CommitteeIndex)
}
} else {
vs := vs.validatorStatus(ctx, pubKey, s)
assignment.Status = vs.Status
}
validatorAssignments = append(validatorAssignments, assignment)
}
if featureconfig.Get().EnableDynamicCommitteeSubnets {
cache.CommitteeIDs.AddIDs(committeeIDs, req.Epoch)
cache.CommitteeIDs.AddIDs(nextCommitteeIDs, req.Epoch+1)
}
return &ethpb.DutiesResponse{

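The duties handler above gathers the committee indices a validator is assigned to for the requested epoch and for the following epoch, then records them in a shared cache when the dynamic committee subnets flag is enabled. A minimal sketch of such an epoch-keyed committee-ID cache follows; the committeeIDCache type and its methods are illustrative stand-ins, not the cache.CommitteeIDs implementation.

package main

import (
    "fmt"
    "sync"
)

// committeeIDCache is a hypothetical epoch-keyed set of committee indices,
// mirroring the AddIDs(ids, epoch) usage shown in GetDuties above.
type committeeIDCache struct {
    mu  sync.RWMutex
    ids map[uint64]map[uint64]bool // epoch -> set of committee indices
}

func newCommitteeIDCache() *committeeIDCache {
    return &committeeIDCache{ids: make(map[uint64]map[uint64]bool)}
}

// AddIDs records the given committee indices under an epoch.
func (c *committeeIDCache) AddIDs(indices []uint64, epoch uint64) {
    c.mu.Lock()
    defer c.mu.Unlock()
    if c.ids[epoch] == nil {
        c.ids[epoch] = make(map[uint64]bool)
    }
    for _, id := range indices {
        c.ids[epoch][id] = true
    }
}

// GetIDs returns the recorded committee indices for an epoch.
func (c *committeeIDCache) GetIDs(epoch uint64) []uint64 {
    c.mu.RLock()
    defer c.mu.RUnlock()
    out := make([]uint64, 0, len(c.ids[epoch]))
    for id := range c.ids[epoch] {
        out = append(out, id)
    }
    return out
}

func main() {
    cache := newCommitteeIDCache()
    cache.AddIDs([]uint64{3, 7}, 10) // current epoch assignments
    cache.AddIDs([]uint64{5}, 11)    // next epoch, queried for early subnet subscription
    fmt.Println(cache.GetIDs(10), cache.GetIDs(11))
}

Caching the next epoch's indices as well is what lets the node prepare attestation subnet subscriptions ahead of time, which is why GetDuties queries assignments for req.Epoch+1.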

@@ -15,6 +15,7 @@ import (
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/bls"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/roughtime"
"github.com/prysmaticlabs/prysm/shared/slotutil"
@@ -33,6 +34,12 @@ func (vs *Server) GetAttestationData(ctx context.Context, req *ethpb.Attestation
trace.Int64Attribute("committeeIndex", int64(req.CommitteeIndex)),
)
// If attestation committee subnets are enabled, we track the committee
// index in a cache.
if featureconfig.Get().EnableDynamicCommitteeSubnets {
cache.CommitteeIDs.AddIDs([]uint64{req.CommitteeIndex}, helpers.SlotToEpoch(req.Slot))
}
if vs.SyncChecker.Syncing() {
return nil, status.Errorf(codes.Unavailable, "Syncing to latest head, not ready to respond")
}
@@ -123,6 +130,12 @@ func (vs *Server) ProposeAttestation(ctx context.Context, att *ethpb.Attestation
return nil, status.Error(codes.InvalidArgument, "Incorrect attestation signature")
}
// If attestation committee subnets are enabled, we track the committee
// index in a cache.
if featureconfig.Get().EnableDynamicCommitteeSubnets {
cache.CommitteeIDs.AddIDs([]uint64{att.Data.CommitteeIndex}, helpers.SlotToEpoch(att.Data.Slot))
}
root, err := ssz.HashTreeRoot(att.Data)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not tree hash attestation: %v", err)


@@ -18,6 +18,7 @@ import (
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
dbpb "github.com/prysmaticlabs/prysm/proto/beacon/db"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/hashutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/trieutil"
@@ -211,9 +212,18 @@ func (vs *Server) randomETH1DataVote(ctx context.Context) (*ethpb.Eth1Data, erro
// computeStateRoot computes the state root after a block has been processed through a state transition and
// returns it to the validator client.
func (vs *Server) computeStateRoot(ctx context.Context, block *ethpb.SignedBeaconBlock) ([]byte, error) {
beaconState, err := vs.BeaconDB.State(ctx, bytesutil.ToBytes32(block.Block.ParentRoot))
if err != nil {
return nil, errors.Wrap(err, "could not retrieve beacon state")
var beaconState *stateTrie.BeaconState
var err error
if featureconfig.Get().NewStateMgmt {
beaconState, err = vs.StateGen.StateByRoot(ctx, bytesutil.ToBytes32(block.Block.ParentRoot))
if err != nil {
return nil, errors.Wrap(err, "could not retrieve beacon state")
}
} else {
beaconState, err = vs.BeaconDB.State(ctx, bytesutil.ToBytes32(block.Block.ParentRoot))
if err != nil {
return nil, errors.Wrap(err, "could not retrieve beacon state")
}
}
root, err := state.CalculateStateRoot(

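computeStateRoot now resolves the parent state either through the new state manager (StateGen) or through the beacon DB, depending on the NewStateMgmt feature flag. The sketch below mirrors that gating with hypothetical fetch functions; it is not the Prysm API, only the branch structure.

package main

import (
    "context"
    "fmt"
)

// fetchFn stands in for either StateGen.StateByRoot or BeaconDB.State;
// both resolve a parent state by block root.
type fetchFn func(ctx context.Context, root [32]byte) (string, error)

// parentState mirrors the flag-gated branch in computeStateRoot: when the
// new state management feature is on, the state comes from the state
// generator; otherwise it falls back to the database.
func parentState(ctx context.Context, newStateMgmt bool, fromStateGen, fromDB fetchFn, root [32]byte) (string, error) {
    if newStateMgmt {
        return fromStateGen(ctx, root)
    }
    return fromDB(ctx, root)
}

func main() {
    gen := func(ctx context.Context, root [32]byte) (string, error) { return "state via stategen", nil }
    db := func(ctx context.Context, root [32]byte) (string, error) { return "state via beacon DB", nil }
    st, _ := parentState(context.Background(), true, gen, db, [32]byte{'p'})
    fmt.Println(st)
}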

@@ -4,6 +4,7 @@ import (
"context"
"time"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
ptypes "github.com/gogo/protobuf/types"
@@ -67,6 +68,7 @@ type Server struct {
PendingDepositsFetcher depositcache.PendingDepositsFetcher
OperationNotifier opfeed.Notifier
GenesisTime time.Time
StateGen *stategen.State
}
// WaitForActivation checks if a validator public key exists in the active validator registry of the current


@@ -4,8 +4,10 @@ go_library(
name = "go_default_library",
srcs = [
"cloners.go",
"field_trie.go",
"getters.go",
"setters.go",
"state_trie.go",
"types.go",
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/state",
@@ -20,9 +22,11 @@ go_library(
"//beacon-chain/state/stateutil:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//shared/bytesutil:go_default_library",
"//shared/featureconfig:go_default_library",
"//shared/hashutil:go_default_library",
"//shared/memorypool:go_default_library",
"//shared/params:go_default_library",
"//shared/sliceutil:go_default_library",
"@com_github_gogo_protobuf//proto:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_protolambda_zssz//merkle:go_default_library",
@@ -34,6 +38,7 @@ go_library(
go_test(
name = "go_default_test",
srcs = [
"field_trie_test.go",
"getters_test.go",
"references_test.go",
"types_test.go",
@@ -45,6 +50,7 @@ go_test(
"//shared/bytesutil:go_default_library",
"//shared/interop:go_default_library",
"//shared/params:go_default_library",
"//shared/testutil:go_default_library",
"@com_github_gogo_protobuf//proto:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_prysmaticlabs_go_ssz//:go_default_library",


@@ -0,0 +1,289 @@
package state
import (
"reflect"
"sync"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/memorypool"
)
// FieldTrie is the representation of the Merkle trie
// of a particular beacon state field.
type FieldTrie struct {
*sync.Mutex
*reference
fieldLayers [][]*[32]byte
field fieldIndex
}
// NewFieldTrie is the constructor for the field trie data structure. It creates the corresponding
// trie according to the given parameters. Depending on whether the field is a basic or composite array
// of fixed or variable length, it builds the appropriate trie.
func NewFieldTrie(field fieldIndex, elements interface{}, length uint64) (*FieldTrie, error) {
if elements == nil {
return &FieldTrie{
field: field,
reference: &reference{1},
Mutex: new(sync.Mutex),
}, nil
}
datType, ok := fieldMap[field]
if !ok {
return nil, errors.Errorf("unrecognized field in trie")
}
fieldRoots, err := fieldConverters(field, []uint64{}, elements, true)
if err != nil {
return nil, err
}
switch datType {
case basicArray:
return &FieldTrie{
fieldLayers: stateutil.ReturnTrieLayer(fieldRoots, length),
field: field,
reference: &reference{1},
Mutex: new(sync.Mutex),
}, nil
case compositeArray:
return &FieldTrie{
fieldLayers: stateutil.ReturnTrieLayerVariable(fieldRoots, length),
field: field,
reference: &reference{1},
Mutex: new(sync.Mutex),
}, nil
default:
return nil, errors.Errorf("unrecognized data type in field map: %v", reflect.TypeOf(datType).Name())
}
}
// RecomputeTrie rebuilds the affected branches in the trie according to the provided
// changed indices and elements. This recomputes the trie according to the particular
// field the trie is based on.
func (f *FieldTrie) RecomputeTrie(indices []uint64, elements interface{}) ([32]byte, error) {
f.Lock()
defer f.Unlock()
var fieldRoot [32]byte
datType, ok := fieldMap[f.field]
if !ok {
return [32]byte{}, errors.Errorf("unrecognized field in trie")
}
fieldRoots, err := fieldConverters(f.field, indices, elements, false)
if err != nil {
return [32]byte{}, err
}
switch datType {
case basicArray:
fieldRoot, f.fieldLayers, err = stateutil.RecomputeFromLayer(fieldRoots, indices, f.fieldLayers)
if err != nil {
return [32]byte{}, err
}
return fieldRoot, nil
case compositeArray:
fieldRoot, f.fieldLayers, err = stateutil.RecomputeFromLayerVariable(fieldRoots, indices, f.fieldLayers)
if err != nil {
return [32]byte{}, err
}
return stateutil.AddInMixin(fieldRoot, uint64(len(f.fieldLayers[0])))
default:
return [32]byte{}, errors.Errorf("unrecognized data type in field map: %v", reflect.TypeOf(datType).Name())
}
}
// CopyTrie copies the references to the elements the trie
// is built on.
func (f *FieldTrie) CopyTrie() *FieldTrie {
if f.fieldLayers == nil {
return &FieldTrie{
field: f.field,
reference: &reference{1},
Mutex: new(sync.Mutex),
}
}
dstFieldTrie := [][]*[32]byte{}
switch f.field {
case randaoMixes:
dstFieldTrie = memorypool.GetRandaoMixesTrie(len(f.fieldLayers))
case blockRoots:
dstFieldTrie = memorypool.GetBlockRootsTrie(len(f.fieldLayers))
case stateRoots:
dstFieldTrie = memorypool.GetStateRootsTrie(len(f.fieldLayers))
default:
dstFieldTrie = make([][]*[32]byte, len(f.fieldLayers))
}
for i, layer := range f.fieldLayers {
if len(dstFieldTrie[i]) < len(layer) {
diffSlice := make([]*[32]byte, len(layer)-len(dstFieldTrie[i]))
dstFieldTrie[i] = append(dstFieldTrie[i], diffSlice...)
}
dstFieldTrie[i] = dstFieldTrie[i][:len(layer)]
copy(dstFieldTrie[i], layer)
}
return &FieldTrie{
fieldLayers: dstFieldTrie,
field: f.field,
reference: &reference{1},
Mutex: new(sync.Mutex),
}
}
// TrieRoot returns the corresponding root of the trie.
func (f *FieldTrie) TrieRoot() ([32]byte, error) {
datType, ok := fieldMap[f.field]
if !ok {
return [32]byte{}, errors.Errorf("unrecognized field in trie")
}
switch datType {
case basicArray:
return *f.fieldLayers[len(f.fieldLayers)-1][0], nil
case compositeArray:
trieRoot := *f.fieldLayers[len(f.fieldLayers)-1][0]
return stateutil.AddInMixin(trieRoot, uint64(len(f.fieldLayers[0])))
default:
return [32]byte{}, errors.Errorf("unrecognized data type in field map: %v", reflect.TypeOf(datType).Name())
}
}
// fieldConverters converts the given field and the provided elements to the appropriate roots.
func fieldConverters(field fieldIndex, indices []uint64, elements interface{}, convertAll bool) ([][32]byte, error) {
switch field {
case blockRoots, stateRoots, randaoMixes:
val, ok := elements.([][]byte)
if !ok {
return nil, errors.Errorf("Wanted type of %v but got %v",
reflect.TypeOf([][]byte{}).Name(), reflect.TypeOf(elements).Name())
}
return handleByteArrays(val, indices, convertAll)
case eth1DataVotes:
val, ok := elements.([]*ethpb.Eth1Data)
if !ok {
return nil, errors.Errorf("Wanted type of %v but got %v",
reflect.TypeOf([]*ethpb.Eth1Data{}).Name(), reflect.TypeOf(elements).Name())
}
return handleEth1DataSlice(val, indices, convertAll)
case validators:
val, ok := elements.([]*ethpb.Validator)
if !ok {
return nil, errors.Errorf("Wanted type of %v but got %v",
reflect.TypeOf([]*ethpb.Validator{}).Name(), reflect.TypeOf(elements).Name())
}
return handleValidatorSlice(val, indices, convertAll)
case previousEpochAttestations, currentEpochAttestations:
val, ok := elements.([]*pb.PendingAttestation)
if !ok {
return nil, errors.Errorf("Wanted type of %v but got %v",
reflect.TypeOf([]*pb.PendingAttestation{}).Name(), reflect.TypeOf(elements).Name())
}
return handlePendingAttestation(val, indices, convertAll)
default:
return [][32]byte{}, errors.Errorf("got unsupported type of %v", reflect.TypeOf(elements).Name())
}
}
func handleByteArrays(val [][]byte, indices []uint64, convertAll bool) ([][32]byte, error) {
roots := [][32]byte{}
rootCreater := func(input []byte) {
newRoot := bytesutil.ToBytes32(input)
roots = append(roots, newRoot)
}
if convertAll {
for i := range val {
rootCreater(val[i])
}
return roots, nil
}
for _, idx := range indices {
rootCreater(val[idx])
}
return roots, nil
}
func handleEth1DataSlice(val []*ethpb.Eth1Data, indices []uint64, convertAll bool) ([][32]byte, error) {
roots := [][32]byte{}
rootCreater := func(input *ethpb.Eth1Data) error {
newRoot, err := stateutil.Eth1Root(input)
if err != nil {
return err
}
roots = append(roots, newRoot)
return nil
}
if convertAll {
for i := range val {
err := rootCreater(val[i])
if err != nil {
return nil, err
}
}
return roots, nil
}
for _, idx := range indices {
err := rootCreater(val[idx])
if err != nil {
return nil, err
}
}
return roots, nil
}
func handleValidatorSlice(val []*ethpb.Validator, indices []uint64, convertAll bool) ([][32]byte, error) {
roots := [][32]byte{}
rootCreater := func(input *ethpb.Validator) error {
newRoot, err := stateutil.ValidatorRoot(input)
if err != nil {
return err
}
roots = append(roots, newRoot)
return nil
}
if convertAll {
for i := range val {
err := rootCreater(val[i])
if err != nil {
return nil, err
}
}
return roots, nil
}
for _, idx := range indices {
err := rootCreater(val[idx])
if err != nil {
return nil, err
}
}
return roots, nil
}
func handlePendingAttestation(val []*pb.PendingAttestation, indices []uint64, convertAll bool) ([][32]byte, error) {
roots := [][32]byte{}
rootCreator := func(input *pb.PendingAttestation) error {
newRoot, err := stateutil.PendingAttestationRoot(input)
if err != nil {
return err
}
roots = append(roots, newRoot)
return nil
}
if convertAll {
for i := range val {
err := rootCreator(val[i])
if err != nil {
return nil, err
}
}
return roots, nil
}
for _, idx := range indices {
err := rootCreator(val[idx])
if err != nil {
return nil, err
}
}
return roots, nil
}

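field_trie.go keeps every layer of a field's Merkle trie so that when only a handful of indices change, only the branches above those leaves are rehashed. The standalone sketch below illustrates that branch-recompute idea with plain SHA-256 over a fixed-size leaf set; it is not the stateutil implementation, which additionally handles variable-length fields and length mixins.

package main

import (
    "crypto/sha256"
    "fmt"
)

// buildLayers builds a full Merkle trie (leaf layer first) for a
// power-of-two number of 32-byte leaves.
func buildLayers(leaves [][32]byte) [][][32]byte {
    layers := [][][32]byte{leaves}
    current := leaves
    for len(current) > 1 {
        next := make([][32]byte, len(current)/2)
        for i := 0; i < len(current); i += 2 {
            next[i/2] = sha256.Sum256(append(current[i][:], current[i+1][:]...))
        }
        layers = append(layers, next)
        current = next
    }
    return layers
}

// recomputeBranch updates one leaf and rehashes only the nodes on the
// path from that leaf to the root, mirroring what RecomputeTrie does
// for dirty indices instead of rebuilding the whole trie.
func recomputeBranch(layers [][][32]byte, idx int, leaf [32]byte) [32]byte {
    layers[0][idx] = leaf
    for depth := 0; depth < len(layers)-1; depth++ {
        parent := idx / 2
        left, right := layers[depth][parent*2], layers[depth][parent*2+1]
        layers[depth+1][parent] = sha256.Sum256(append(left[:], right[:]...))
        idx = parent
    }
    return layers[len(layers)-1][0]
}

func main() {
    leaves := make([][32]byte, 8)
    layers := buildLayers(leaves)
    root := recomputeBranch(layers, 3, [32]byte{'x'})
    fmt.Printf("new root: %x\n", root[:8])
}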

@@ -0,0 +1,98 @@
package state_test
import (
"testing"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
)
func TestFieldTrie_NewTrie(t *testing.T) {
newState, _ := testutil.DeterministicGenesisState(t, 40)
// 5 represents the enum value of state roots
trie, err := state.NewFieldTrie(5, newState.StateRoots(), params.BeaconConfig().SlotsPerHistoricalRoot)
if err != nil {
t.Fatal(err)
}
root, err := stateutil.RootsArrayHashTreeRoot(newState.StateRoots(), params.BeaconConfig().SlotsPerHistoricalRoot, "StateRoots")
if err != nil {
t.Fatal(err)
}
newRoot, err := trie.TrieRoot()
if newRoot != root {
t.Errorf("Wanted root of %#x but got %#x", root, newRoot)
}
}
func TestFieldTrie_RecomputeTrie(t *testing.T) {
newState, _ := testutil.DeterministicGenesisState(t, 32)
// 10 represents the enum value of validators
trie, err := state.NewFieldTrie(10, newState.Validators(), params.BeaconConfig().ValidatorRegistryLimit)
if err != nil {
t.Fatal(err)
}
changedIdx := []uint64{2, 29}
val1, err := newState.ValidatorAtIndex(10)
if err != nil {
t.Fatal(err)
}
val2, err := newState.ValidatorAtIndex(11)
if err != nil {
t.Fatal(err)
}
val1.Slashed = true
val1.ExitEpoch = 20
val2.Slashed = true
val2.ExitEpoch = 40
changedVals := []*ethpb.Validator{val1, val2}
newState.UpdateValidatorAtIndex(changedIdx[0], changedVals[0])
newState.UpdateValidatorAtIndex(changedIdx[1], changedVals[1])
expectedRoot, err := stateutil.ValidatorRegistryRoot(newState.Validators())
if err != nil {
t.Fatal(err)
}
root, err := trie.RecomputeTrie(changedIdx, newState.Validators())
if err != nil {
t.Fatal(err)
}
if root != expectedRoot {
t.Errorf("Wanted root of %#x but got %#x", expectedRoot, root)
}
}
func TestFieldTrie_CopyTrieImmutable(t *testing.T) {
newState, _ := testutil.DeterministicGenesisState(t, 32)
// 12 represents the enum value of randao mixes.
trie, err := state.NewFieldTrie(12, newState.RandaoMixes(), params.BeaconConfig().EpochsPerHistoricalVector)
if err != nil {
t.Fatal(err)
}
newTrie := trie.CopyTrie()
changedIdx := []uint64{2, 29}
changedVals := [][32]byte{{'A', 'B'}, {'C', 'D'}}
newState.UpdateRandaoMixesAtIndex(changedVals[0][:], changedIdx[0])
newState.UpdateRandaoMixesAtIndex(changedVals[1][:], changedIdx[1])
root, err := trie.RecomputeTrie(changedIdx, newState.RandaoMixes())
if err != nil {
t.Fatal(err)
}
newRoot, err := newTrie.TrieRoot()
if err != nil {
t.Fatal(err)
}
if root == newRoot {
t.Errorf("Wanted roots to be different, but they are the same: %#x", root)
}
}


@@ -11,36 +11,6 @@ import (
"github.com/prysmaticlabs/prysm/shared/hashutil"
)
type fieldIndex int
// Below we define a set of useful enum values for the field
// indices of the beacon state. For example, genesisTime is the
// 0th field of the beacon state. This is helpful when we are
// updating the Merkle branches up the trie representation
// of the beacon state.
const (
genesisTime fieldIndex = iota
slot
fork
latestBlockHeader
blockRoots
stateRoots
historicalRoots
eth1Data
eth1DataVotes
eth1DepositIndex
validators
balances
randaoMixes
slashings
previousEpochAttestations
currentEpochAttestations
justificationBits
previousJustifiedCheckpoint
currentJustifiedCheckpoint
finalizedCheckpoint
)
// SetGenesisTime for the beacon state.
func (b *BeaconState) SetGenesisTime(val uint64) error {
b.lock.Lock()
@@ -123,7 +93,7 @@ func (b *BeaconState) UpdateBlockRootAtIndex(idx uint64, blockRoot [32]byte) err
// Copy on write since this is a shared array.
r = b.BlockRoots()
ref.refs--
ref.MinusRef()
b.sharedFieldReferences[blockRoots] = &reference{refs: 1}
}
b.lock.RUnlock()
@@ -136,6 +106,7 @@ func (b *BeaconState) UpdateBlockRootAtIndex(idx uint64, blockRoot [32]byte) err
b.state.BlockRoots = r
b.markFieldAsDirty(blockRoots)
b.AddDirtyIndices(blockRoots, []uint64{idx})
return nil
}
@@ -153,6 +124,7 @@ func (b *BeaconState) SetStateRoots(val [][]byte) error {
b.state.StateRoots = val
b.markFieldAsDirty(stateRoots)
b.rebuildTrie[stateRoots] = true
return nil
}
@@ -173,7 +145,7 @@ func (b *BeaconState) UpdateStateRootAtIndex(idx uint64, stateRoot [32]byte) err
// Perform a copy since this is a shared reference and we don't want to mutate others.
r = b.StateRoots()
ref.refs--
ref.MinusRef()
b.sharedFieldReferences[stateRoots] = &reference{refs: 1}
}
b.lock.RUnlock()
@@ -186,6 +158,7 @@ func (b *BeaconState) UpdateStateRootAtIndex(idx uint64, stateRoot [32]byte) err
b.state.StateRoots = r
b.markFieldAsDirty(stateRoots)
b.AddDirtyIndices(stateRoots, []uint64{idx})
return nil
}
@@ -233,6 +206,7 @@ func (b *BeaconState) SetEth1DataVotes(val []*ethpb.Eth1Data) error {
b.state.Eth1DataVotes = val
b.markFieldAsDirty(eth1DataVotes)
b.rebuildTrie[eth1DataVotes] = true
return nil
}
@@ -246,7 +220,7 @@ func (b *BeaconState) AppendEth1DataVotes(val *ethpb.Eth1Data) error {
votes := b.state.Eth1DataVotes
if b.sharedFieldReferences[eth1DataVotes].refs > 1 {
votes = b.Eth1DataVotes()
b.sharedFieldReferences[eth1DataVotes].refs--
b.sharedFieldReferences[eth1DataVotes].MinusRef()
b.sharedFieldReferences[eth1DataVotes] = &reference{refs: 1}
}
b.lock.RUnlock()
@@ -256,6 +230,7 @@ func (b *BeaconState) AppendEth1DataVotes(val *ethpb.Eth1Data) error {
b.state.Eth1DataVotes = append(votes, val)
b.markFieldAsDirty(eth1DataVotes)
b.AddDirtyIndices(eth1DataVotes, []uint64{uint64(len(b.state.Eth1DataVotes) - 1)})
return nil
}
@@ -285,12 +260,13 @@ func (b *BeaconState) SetValidators(val []*ethpb.Validator) error {
b.sharedFieldReferences[validators].refs--
b.sharedFieldReferences[validators] = &reference{refs: 1}
b.markFieldAsDirty(validators)
b.rebuildTrie[validators] = true
return nil
}
// ApplyToEveryValidator applies the provided callback function to each validator in the
// validator registry.
func (b *BeaconState) ApplyToEveryValidator(f func(idx int, val *ethpb.Validator) error) error {
func (b *BeaconState) ApplyToEveryValidator(f func(idx int, val *ethpb.Validator) (bool, error)) error {
if !b.HasInnerState() {
return ErrNilInnerState
}
@@ -300,16 +276,19 @@ func (b *BeaconState) ApplyToEveryValidator(f func(idx int, val *ethpb.Validator
// Perform a copy since this is a shared reference and we don't want to mutate others.
v = b.Validators()
ref.refs--
ref.MinusRef()
b.sharedFieldReferences[validators] = &reference{refs: 1}
}
b.lock.RUnlock()
changedVals := []uint64{}
for i, val := range v {
err := f(i, val)
changed, err := f(i, val)
if err != nil {
return err
}
if changed {
changedVals = append(changedVals, uint64(i))
}
}
b.lock.Lock()
@@ -317,6 +296,8 @@ func (b *BeaconState) ApplyToEveryValidator(f func(idx int, val *ethpb.Validator
b.state.Validators = v
b.markFieldAsDirty(validators)
b.AddDirtyIndices(validators, changedVals)
return nil
}
@@ -336,7 +317,7 @@ func (b *BeaconState) UpdateValidatorAtIndex(idx uint64, val *ethpb.Validator) e
// Perform a copy since this is a shared reference and we don't want to mutate others.
v = b.Validators()
ref.refs--
ref.MinusRef()
b.sharedFieldReferences[validators] = &reference{refs: 1}
}
b.lock.RUnlock()
@@ -347,6 +328,8 @@ func (b *BeaconState) UpdateValidatorAtIndex(idx uint64, val *ethpb.Validator) e
v[idx] = val
b.state.Validators = v
b.markFieldAsDirty(validators)
b.AddDirtyIndices(validators, []uint64{idx})
return nil
}
@@ -394,7 +377,7 @@ func (b *BeaconState) UpdateBalancesAtIndex(idx uint64, val uint64) error {
bals := b.state.Balances
if b.sharedFieldReferences[balances].refs > 1 {
bals = b.Balances()
b.sharedFieldReferences[balances].refs--
b.sharedFieldReferences[balances].MinusRef()
b.sharedFieldReferences[balances] = &reference{refs: 1}
}
b.lock.RUnlock()
@@ -422,6 +405,7 @@ func (b *BeaconState) SetRandaoMixes(val [][]byte) error {
b.state.RandaoMixes = val
b.markFieldAsDirty(randaoMixes)
b.rebuildTrie[randaoMixes] = true
return nil
}
@@ -439,7 +423,7 @@ func (b *BeaconState) UpdateRandaoMixesAtIndex(val []byte, idx uint64) error {
mixes := b.state.RandaoMixes
if refs := b.sharedFieldReferences[randaoMixes].refs; refs > 1 {
mixes = b.RandaoMixes()
b.sharedFieldReferences[randaoMixes].refs--
b.sharedFieldReferences[randaoMixes].MinusRef()
b.sharedFieldReferences[randaoMixes] = &reference{refs: 1}
}
b.lock.RUnlock()
@@ -450,6 +434,8 @@ func (b *BeaconState) UpdateRandaoMixesAtIndex(val []byte, idx uint64) error {
mixes[idx] = val
b.state.RandaoMixes = mixes
b.markFieldAsDirty(randaoMixes)
b.AddDirtyIndices(randaoMixes, []uint64{idx})
return nil
}
@@ -484,7 +470,7 @@ func (b *BeaconState) UpdateSlashingsAtIndex(idx uint64, val uint64) error {
if b.sharedFieldReferences[slashings].refs > 1 {
s = b.Slashings()
b.sharedFieldReferences[slashings].refs--
b.sharedFieldReferences[slashings].MinusRef()
b.sharedFieldReferences[slashings] = &reference{refs: 1}
}
b.lock.RUnlock()
@@ -514,6 +500,7 @@ func (b *BeaconState) SetPreviousEpochAttestations(val []*pbp2p.PendingAttestati
b.state.PreviousEpochAttestations = val
b.markFieldAsDirty(previousEpochAttestations)
b.rebuildTrie[previousEpochAttestations] = true
return nil
}
@@ -531,6 +518,7 @@ func (b *BeaconState) SetCurrentEpochAttestations(val []*pbp2p.PendingAttestatio
b.state.CurrentEpochAttestations = val
b.markFieldAsDirty(currentEpochAttestations)
b.rebuildTrie[currentEpochAttestations] = true
return nil
}
@@ -544,7 +532,7 @@ func (b *BeaconState) AppendHistoricalRoots(root [32]byte) error {
roots := b.state.HistoricalRoots
if b.sharedFieldReferences[historicalRoots].refs > 1 {
roots = b.HistoricalRoots()
b.sharedFieldReferences[historicalRoots].refs--
b.sharedFieldReferences[historicalRoots].MinusRef()
b.sharedFieldReferences[historicalRoots] = &reference{refs: 1}
}
b.lock.RUnlock()
@@ -568,7 +556,7 @@ func (b *BeaconState) AppendCurrentEpochAttestations(val *pbp2p.PendingAttestati
atts := b.state.CurrentEpochAttestations
if b.sharedFieldReferences[currentEpochAttestations].refs > 1 {
atts = b.CurrentEpochAttestations()
b.sharedFieldReferences[currentEpochAttestations].refs--
b.sharedFieldReferences[currentEpochAttestations].MinusRef()
b.sharedFieldReferences[currentEpochAttestations] = &reference{refs: 1}
}
b.lock.RUnlock()
@@ -578,6 +566,7 @@ func (b *BeaconState) AppendCurrentEpochAttestations(val *pbp2p.PendingAttestati
b.state.CurrentEpochAttestations = append(atts, val)
b.markFieldAsDirty(currentEpochAttestations)
b.dirtyIndices[currentEpochAttestations] = append(b.dirtyIndices[currentEpochAttestations], uint64(len(b.state.CurrentEpochAttestations)-1))
return nil
}
@@ -591,7 +580,7 @@ func (b *BeaconState) AppendPreviousEpochAttestations(val *pbp2p.PendingAttestat
atts := b.state.PreviousEpochAttestations
if b.sharedFieldReferences[previousEpochAttestations].refs > 1 {
atts = b.PreviousEpochAttestations()
b.sharedFieldReferences[previousEpochAttestations].refs--
b.sharedFieldReferences[previousEpochAttestations].MinusRef()
b.sharedFieldReferences[previousEpochAttestations] = &reference{refs: 1}
}
b.lock.RUnlock()
@@ -601,6 +590,8 @@ func (b *BeaconState) AppendPreviousEpochAttestations(val *pbp2p.PendingAttestat
b.state.PreviousEpochAttestations = append(atts, val)
b.markFieldAsDirty(previousEpochAttestations)
b.AddDirtyIndices(previousEpochAttestations, []uint64{uint64(len(b.state.PreviousEpochAttestations) - 1)})
return nil
}
@@ -614,7 +605,7 @@ func (b *BeaconState) AppendValidator(val *ethpb.Validator) error {
vals := b.state.Validators
if b.sharedFieldReferences[validators].refs > 1 {
vals = b.Validators()
b.sharedFieldReferences[validators].refs--
b.sharedFieldReferences[validators].MinusRef()
b.sharedFieldReferences[validators] = &reference{refs: 1}
}
b.lock.RUnlock()
@@ -624,6 +615,7 @@ func (b *BeaconState) AppendValidator(val *ethpb.Validator) error {
b.state.Validators = append(vals, val)
b.markFieldAsDirty(validators)
b.AddDirtyIndices(validators, []uint64{uint64(len(b.state.Validators) - 1)})
return nil
}
@@ -638,7 +630,7 @@ func (b *BeaconState) AppendBalance(bal uint64) error {
bals := b.state.Balances
if b.sharedFieldReferences[balances].refs > 1 {
bals = b.Balances()
b.sharedFieldReferences[balances].refs--
b.sharedFieldReferences[balances].MinusRef()
b.sharedFieldReferences[balances] = &reference{refs: 1}
}
b.lock.RUnlock()
@@ -745,3 +737,9 @@ func (b *BeaconState) markFieldAsDirty(field fieldIndex) {
}
// do nothing if field already exists
}
// AddDirtyIndices adds the relevant dirty field indices, so that they
// can be recomputed.
func (b *BeaconState) AddDirtyIndices(index fieldIndex, indices []uint64) {
b.dirtyIndices[index] = append(b.dirtyIndices[index], indices...)
}

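Each setter above follows the same copy-on-write discipline: if the underlying slice is shared (refs > 1), take a private copy, release the old reference with MinusRef, install a fresh reference, and then record the touched index via AddDirtyIndices so only that branch is rehashed later. A condensed sketch of the pattern, with hypothetical field names:

package main

import "fmt"

// reference counts how many state copies share a slice, loosely following
// the refs/MinusRef helpers used in the setters above.
type reference struct{ refs int }

func (r *reference) MinusRef() { r.refs-- }

// miniState is a toy state holding one shared field plus dirty bookkeeping.
type miniState struct {
    roots        [][]byte
    sharedRef    *reference
    dirtyIndices []uint64
}

// UpdateRootAtIndex applies copy-on-write before mutating the shared slice,
// then marks the index dirty so an incremental hasher can recompute it.
func (s *miniState) UpdateRootAtIndex(idx uint64, root []byte) {
    r := s.roots
    if s.sharedRef.refs > 1 {
        // Shared with another state copy: duplicate the outer slice before mutating.
        r = make([][]byte, len(s.roots))
        copy(r, s.roots)
        s.sharedRef.MinusRef()
        s.sharedRef = &reference{refs: 1}
    }
    r[idx] = root
    s.roots = r
    s.dirtyIndices = append(s.dirtyIndices, idx)
}

func main() {
    shared := &reference{refs: 2} // pretend two state copies share roots
    s := &miniState{roots: [][]byte{{0}, {0}}, sharedRef: shared}
    s.UpdateRootAtIndex(1, []byte{0xaa})
    fmt.Println(s.dirtyIndices, shared.refs) // [1] 1
}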

@@ -0,0 +1,403 @@
package state
import (
"runtime"
"sort"
"sync"
"github.com/prysmaticlabs/prysm/shared/sliceutil"
"github.com/gogo/protobuf/proto"
"github.com/pkg/errors"
"github.com/protolambda/zssz/merkle"
coreutils "github.com/prysmaticlabs/prysm/beacon-chain/core/state/stateutils"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
pbp2p "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/hashutil"
"github.com/prysmaticlabs/prysm/shared/memorypool"
"github.com/prysmaticlabs/prysm/shared/params"
)
// InitializeFromProto the beacon state from a protobuf representation.
func InitializeFromProto(st *pbp2p.BeaconState) (*BeaconState, error) {
return InitializeFromProtoUnsafe(proto.Clone(st).(*pbp2p.BeaconState))
}
// InitializeFromProtoUnsafe directly uses the beacon state protobuf pointer
// and sets it as the inner state of the BeaconState type.
func InitializeFromProtoUnsafe(st *pbp2p.BeaconState) (*BeaconState, error) {
b := &BeaconState{
state: st,
dirtyFields: make(map[fieldIndex]interface{}, 20),
dirtyIndices: make(map[fieldIndex][]uint64, 20),
stateFieldLeaves: make(map[fieldIndex]*FieldTrie, 20),
sharedFieldReferences: make(map[fieldIndex]*reference, 10),
rebuildTrie: make(map[fieldIndex]bool, 20),
valIdxMap: coreutils.ValidatorIndexMap(st.Validators),
}
for i := 0; i < 20; i++ {
b.dirtyFields[fieldIndex(i)] = true
b.rebuildTrie[fieldIndex(i)] = true
b.dirtyIndices[fieldIndex(i)] = []uint64{}
b.stateFieldLeaves[fieldIndex(i)] = &FieldTrie{
field: fieldIndex(i),
reference: &reference{1},
Mutex: new(sync.Mutex),
}
}
// Initialize field reference tracking for shared data.
b.sharedFieldReferences[randaoMixes] = &reference{refs: 1}
b.sharedFieldReferences[stateRoots] = &reference{refs: 1}
b.sharedFieldReferences[blockRoots] = &reference{refs: 1}
b.sharedFieldReferences[previousEpochAttestations] = &reference{refs: 1}
b.sharedFieldReferences[currentEpochAttestations] = &reference{refs: 1}
b.sharedFieldReferences[slashings] = &reference{refs: 1}
b.sharedFieldReferences[eth1DataVotes] = &reference{refs: 1}
b.sharedFieldReferences[validators] = &reference{refs: 1}
b.sharedFieldReferences[balances] = &reference{refs: 1}
b.sharedFieldReferences[historicalRoots] = &reference{refs: 1}
return b, nil
}
// Copy returns a deep copy of the beacon state.
func (b *BeaconState) Copy() *BeaconState {
if !b.HasInnerState() {
return nil
}
b.lock.RLock()
defer b.lock.RUnlock()
dst := &BeaconState{
state: &pbp2p.BeaconState{
// Primitive types, safe to copy.
GenesisTime: b.state.GenesisTime,
Slot: b.state.Slot,
Eth1DepositIndex: b.state.Eth1DepositIndex,
// Large arrays, infrequently changed, constant size.
RandaoMixes: b.state.RandaoMixes,
StateRoots: b.state.StateRoots,
BlockRoots: b.state.BlockRoots,
PreviousEpochAttestations: b.state.PreviousEpochAttestations,
CurrentEpochAttestations: b.state.CurrentEpochAttestations,
Slashings: b.state.Slashings,
Eth1DataVotes: b.state.Eth1DataVotes,
// Large arrays, increases over time.
Validators: b.state.Validators,
Balances: b.state.Balances,
HistoricalRoots: b.state.HistoricalRoots,
// Everything else, too small to be concerned about, constant size.
Fork: b.Fork(),
LatestBlockHeader: b.LatestBlockHeader(),
Eth1Data: b.Eth1Data(),
JustificationBits: b.JustificationBits(),
PreviousJustifiedCheckpoint: b.PreviousJustifiedCheckpoint(),
CurrentJustifiedCheckpoint: b.CurrentJustifiedCheckpoint(),
FinalizedCheckpoint: b.FinalizedCheckpoint(),
},
dirtyFields: make(map[fieldIndex]interface{}, 20),
dirtyIndices: make(map[fieldIndex][]uint64, 20),
rebuildTrie: make(map[fieldIndex]bool, 20),
sharedFieldReferences: make(map[fieldIndex]*reference, 10),
stateFieldLeaves: make(map[fieldIndex]*FieldTrie, 20),
// Copy on write validator index map.
valIdxMap: b.valIdxMap,
}
for field, ref := range b.sharedFieldReferences {
ref.AddRef()
dst.sharedFieldReferences[field] = ref
}
for i := range b.dirtyFields {
dst.dirtyFields[i] = true
}
for i := range b.dirtyIndices {
indices := make([]uint64, len(b.dirtyIndices[i]))
copy(indices, b.dirtyIndices[i])
dst.dirtyIndices[i] = indices
}
for i := range b.rebuildTrie {
dst.rebuildTrie[i] = true
}
for fldIdx, fieldTrie := range b.stateFieldLeaves {
dst.stateFieldLeaves[fldIdx] = fieldTrie
if fieldTrie.reference != nil {
fieldTrie.Lock()
fieldTrie.AddRef()
fieldTrie.Unlock()
}
}
if b.merkleLayers != nil {
dst.merkleLayers = make([][][]byte, len(b.merkleLayers))
for i, layer := range b.merkleLayers {
dst.merkleLayers[i] = make([][]byte, len(layer))
for j, content := range layer {
dst.merkleLayers[i][j] = make([]byte, len(content))
copy(dst.merkleLayers[i][j], content)
}
}
}
// Finalizer runs when dst is being destroyed in garbage collection.
runtime.SetFinalizer(dst, func(b *BeaconState) {
for field, v := range b.sharedFieldReferences {
v.refs--
if b.stateFieldLeaves[field].reference != nil {
b.stateFieldLeaves[field].MinusRef()
}
if field == randaoMixes && v.refs == 0 {
memorypool.PutDoubleByteSlice(b.state.RandaoMixes)
if b.stateFieldLeaves[field].refs == 0 {
memorypool.PutRandaoMixesTrie(b.stateFieldLeaves[randaoMixes].fieldLayers)
}
}
if field == blockRoots && v.refs == 0 && b.stateFieldLeaves[field].refs == 0 {
memorypool.PutBlockRootsTrie(b.stateFieldLeaves[blockRoots].fieldLayers)
}
if field == stateRoots && v.refs == 0 && b.stateFieldLeaves[field].refs == 0 {
memorypool.PutStateRootsTrie(b.stateFieldLeaves[stateRoots].fieldLayers)
}
}
})
return dst
}
// HashTreeRoot of the beacon state retrieves the Merkle root of the trie
// representation of the beacon state based on the eth2 Simple Serialize specification.
func (b *BeaconState) HashTreeRoot() ([32]byte, error) {
b.lock.Lock()
defer b.lock.Unlock()
if b.merkleLayers == nil || len(b.merkleLayers) == 0 {
fieldRoots, err := stateutil.ComputeFieldRoots(b.state)
if err != nil {
return [32]byte{}, err
}
layers := merkleize(fieldRoots)
b.merkleLayers = layers
b.dirtyFields = make(map[fieldIndex]interface{})
}
for field := range b.dirtyFields {
root, err := b.rootSelector(field)
if err != nil {
return [32]byte{}, err
}
b.merkleLayers[0][field] = root[:]
b.recomputeRoot(int(field))
delete(b.dirtyFields, field)
}
return bytesutil.ToBytes32(b.merkleLayers[len(b.merkleLayers)-1][0]), nil
}
// merkleize hashes 32-byte leaves into a Merkle trie of the appropriate depth, returning
// all of the resulting layers of the trie. This function first
// pads the leaves to a power-of-two length.
func merkleize(leaves [][]byte) [][][]byte {
hashFunc := hashutil.CustomSHA256Hasher()
layers := make([][][]byte, merkle.GetDepth(uint64(len(leaves)))+1)
for len(leaves) != 32 {
leaves = append(leaves, make([]byte, 32))
}
currentLayer := leaves
layers[0] = currentLayer
// We keep track of the hash layers of a Merkle trie until we reach
// the top layer of length 1, which contains the single root element.
// [Root] -> Top layer has length 1.
// [E] [F] -> This layer has length 2.
// [A] [B] [C] [D] -> The bottom layer has length 4 (needs to be a power of two).
i := 1
for len(currentLayer) > 1 && i < len(layers) {
layer := make([][]byte, 0)
for i := 0; i < len(currentLayer); i += 2 {
hashedChunk := hashFunc(append(currentLayer[i], currentLayer[i+1]...))
layer = append(layer, hashedChunk[:])
}
currentLayer = layer
layers[i] = currentLayer
i++
}
return layers
}
func (b *BeaconState) rootSelector(field fieldIndex) ([32]byte, error) {
switch field {
case genesisTime:
return stateutil.Uint64Root(b.state.GenesisTime), nil
case slot:
return stateutil.Uint64Root(b.state.Slot), nil
case eth1DepositIndex:
return stateutil.Uint64Root(b.state.Eth1DepositIndex), nil
case fork:
return stateutil.ForkRoot(b.state.Fork)
case latestBlockHeader:
return stateutil.BlockHeaderRoot(b.state.LatestBlockHeader)
case blockRoots:
if featureconfig.Get().EnableFieldTrie {
if b.rebuildTrie[field] {
err := b.resetFieldTrie(field, b.state.BlockRoots, params.BeaconConfig().SlotsPerHistoricalRoot)
if err != nil {
return [32]byte{}, err
}
b.dirtyIndices[field] = []uint64{}
delete(b.rebuildTrie, field)
return b.stateFieldLeaves[field].TrieRoot()
}
return b.recomputeFieldTrie(blockRoots, b.state.BlockRoots)
}
return stateutil.RootsArrayHashTreeRoot(b.state.BlockRoots, params.BeaconConfig().SlotsPerHistoricalRoot, "BlockRoots")
case stateRoots:
if featureconfig.Get().EnableFieldTrie {
if b.rebuildTrie[field] {
err := b.resetFieldTrie(field, b.state.StateRoots, params.BeaconConfig().SlotsPerHistoricalRoot)
if err != nil {
return [32]byte{}, err
}
b.dirtyIndices[field] = []uint64{}
delete(b.rebuildTrie, field)
return b.stateFieldLeaves[field].TrieRoot()
}
return b.recomputeFieldTrie(stateRoots, b.state.StateRoots)
}
return stateutil.RootsArrayHashTreeRoot(b.state.StateRoots, params.BeaconConfig().SlotsPerHistoricalRoot, "StateRoots")
case historicalRoots:
return stateutil.HistoricalRootsRoot(b.state.HistoricalRoots)
case eth1Data:
return stateutil.Eth1Root(b.state.Eth1Data)
case eth1DataVotes:
if featureconfig.Get().EnableFieldTrie {
if b.rebuildTrie[field] {
err := b.resetFieldTrie(field, b.state.Eth1DataVotes, params.BeaconConfig().SlotsPerEth1VotingPeriod)
if err != nil {
return [32]byte{}, err
}
b.dirtyIndices[field] = []uint64{}
delete(b.rebuildTrie, field)
return b.stateFieldLeaves[field].TrieRoot()
}
return b.recomputeFieldTrie(field, b.state.Eth1DataVotes)
}
return stateutil.Eth1DataVotesRoot(b.state.Eth1DataVotes)
case validators:
if featureconfig.Get().EnableFieldTrie {
if b.rebuildTrie[field] {
err := b.resetFieldTrie(field, b.state.Validators, params.BeaconConfig().ValidatorRegistryLimit)
if err != nil {
return [32]byte{}, err
}
b.dirtyIndices[validators] = []uint64{}
delete(b.rebuildTrie, validators)
return b.stateFieldLeaves[field].TrieRoot()
}
return b.recomputeFieldTrie(validators, b.state.Validators)
}
return stateutil.ValidatorRegistryRoot(b.state.Validators)
case balances:
return stateutil.ValidatorBalancesRoot(b.state.Balances)
case randaoMixes:
if featureconfig.Get().EnableFieldTrie {
if b.rebuildTrie[field] {
err := b.resetFieldTrie(field, b.state.RandaoMixes, params.BeaconConfig().EpochsPerHistoricalVector)
if err != nil {
return [32]byte{}, err
}
b.dirtyIndices[field] = []uint64{}
delete(b.rebuildTrie, field)
return b.stateFieldLeaves[field].TrieRoot()
}
return b.recomputeFieldTrie(randaoMixes, b.state.RandaoMixes)
}
return stateutil.RootsArrayHashTreeRoot(b.state.RandaoMixes, params.BeaconConfig().EpochsPerHistoricalVector, "RandaoMixes")
case slashings:
return stateutil.SlashingsRoot(b.state.Slashings)
case previousEpochAttestations:
if featureconfig.Get().EnableFieldTrie {
if b.rebuildTrie[field] {
err := b.resetFieldTrie(field, b.state.PreviousEpochAttestations, params.BeaconConfig().MaxAttestations*params.BeaconConfig().SlotsPerEpoch)
if err != nil {
return [32]byte{}, err
}
b.dirtyIndices[field] = []uint64{}
delete(b.rebuildTrie, field)
return b.stateFieldLeaves[field].TrieRoot()
}
return b.recomputeFieldTrie(field, b.state.PreviousEpochAttestations)
}
return stateutil.EpochAttestationsRoot(b.state.PreviousEpochAttestations)
case currentEpochAttestations:
if featureconfig.Get().EnableFieldTrie {
if b.rebuildTrie[field] {
err := b.resetFieldTrie(field, b.state.CurrentEpochAttestations, params.BeaconConfig().MaxAttestations*params.BeaconConfig().SlotsPerEpoch)
if err != nil {
return [32]byte{}, err
}
b.dirtyIndices[field] = []uint64{}
delete(b.rebuildTrie, field)
return b.stateFieldLeaves[field].TrieRoot()
}
return b.recomputeFieldTrie(field, b.state.CurrentEpochAttestations)
}
return stateutil.EpochAttestationsRoot(b.state.CurrentEpochAttestations)
case justificationBits:
return bytesutil.ToBytes32(b.state.JustificationBits), nil
case previousJustifiedCheckpoint:
return stateutil.CheckpointRoot(b.state.PreviousJustifiedCheckpoint)
case currentJustifiedCheckpoint:
return stateutil.CheckpointRoot(b.state.CurrentJustifiedCheckpoint)
case finalizedCheckpoint:
return stateutil.CheckpointRoot(b.state.FinalizedCheckpoint)
}
return [32]byte{}, errors.New("invalid field index provided")
}
func (b *BeaconState) recomputeFieldTrie(index fieldIndex, elements interface{}) ([32]byte, error) {
fTrie := b.stateFieldLeaves[index]
if fTrie.refs > 1 {
fTrie.Lock()
defer fTrie.Unlock()
fTrie.MinusRef()
newTrie := fTrie.CopyTrie()
b.stateFieldLeaves[index] = newTrie
fTrie = newTrie
}
// remove duplicate indexes
b.dirtyIndices[index] = sliceutil.UnionUint64(b.dirtyIndices[index], []uint64{})
// sort indexes again
sort.Slice(b.dirtyIndices[index], func(i int, j int) bool {
return b.dirtyIndices[index][i] < b.dirtyIndices[index][j]
})
root, err := fTrie.RecomputeTrie(b.dirtyIndices[index], elements)
if err != nil {
return [32]byte{}, err
}
b.dirtyIndices[index] = []uint64{}
return root, nil
}
func (b *BeaconState) resetFieldTrie(index fieldIndex, elements interface{}, length uint64) error {
fTrie := b.stateFieldLeaves[index]
var err error
fTrie, err = NewFieldTrie(index, elements, length)
if err != nil {
return err
}
b.stateFieldLeaves[index] = fTrie
b.dirtyIndices[index] = []uint64{}
return nil
}

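merkleize pads the beacon state's twenty top-level field roots (the enum removed from setters.go above lists them) out to 32 leaves and hashes pairwise until a single root remains, so the trie has depth 5 and six stored layers. A small sketch of that layer-count arithmetic, using an illustrative helper rather than the zssz merkle package:

package main

import (
    "fmt"
    "math/bits"
)

// depthFor returns the Merkle depth needed to hold n leaves, i.e. the
// smallest d with 2^d >= n. This mirrors what merkle.GetDepth is used
// for in merkleize above (illustrative helper, not the zssz function).
func depthFor(n uint64) uint64 {
    if n <= 1 {
        return 0
    }
    return uint64(bits.Len64(n - 1))
}

func main() {
    const beaconStateFields = 20 // number of top-level fields in the enum
    depth := depthFor(beaconStateFields)
    fmt.Printf("fields=%d padded to %d leaves, depth=%d, stored layers=%d\n",
        beaconStateFields, uint64(1)<<depth, depth, depth+1)
    // Output: fields=20 padded to 32 leaves, depth=5, stored layers=6
}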

@@ -6,11 +6,13 @@ go_library(
"cold.go",
"epoch_boundary_root.go",
"errors.go",
"getter.go",
"hot.go",
"log.go",
"migrate.go",
"replay.go",
"service.go",
"setter.go",
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/state/stategen",
visibility = ["//beacon-chain:__subpackages__"],
@@ -38,10 +40,12 @@ go_test(
srcs = [
"cold_test.go",
"epoch_boundary_root_test.go",
"getter_test.go",
"hot_test.go",
"migrate_test.go",
"replay_test.go",
"service_test.go",
"setter_test.go",
],
embed = [":go_default_library"],
deps = [


@@ -37,6 +37,112 @@ func (s *State) saveColdState(ctx context.Context, blockRoot [32]byte, state *st
return nil
}
// This loads the cold state by block root. It decides whether to load from an archived point (faster) or
// from somewhere between archived points (slower), which requires replaying blocks.
// This method is more efficient than loading the cold state by slot.
func (s *State) loadColdStateByRoot(ctx context.Context, blockRoot [32]byte) (*state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "stateGen.loadColdStateByRoot")
defer span.End()
summary, err := s.beaconDB.StateSummary(ctx, blockRoot)
if err != nil {
return nil, err
}
if summary == nil {
return nil, errUnknownStateSummary
}
// Use the archived point state if the summary slot lies on top of the archived point.
if summary.Slot%s.slotsPerArchivedPoint == 0 {
archivedPoint := summary.Slot / s.slotsPerArchivedPoint
s, err := s.loadColdStateByArchivedPoint(ctx, archivedPoint)
if err != nil {
return nil, errors.Wrap(err, "could not get cold state using archived index")
}
if s == nil {
return nil, errUnknownArchivedState
}
return s, nil
}
return s.loadColdIntermediateStateByRoot(ctx, summary.Slot, blockRoot)
}
// This loads the cold state for the input archived point.
func (s *State) loadColdStateByArchivedPoint(ctx context.Context, archivedPoint uint64) (*state.BeaconState, error) {
return s.beaconDB.ArchivedPointState(ctx, archivedPoint)
}
// This loads a cold state using a slot and block root combination.
// This is faster than loading by slot alone given that the input block root is provided.
func (s *State) loadColdIntermediateStateByRoot(ctx context.Context, slot uint64, blockRoot [32]byte) (*state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "stateGen.loadColdIntermediateStateByRoot")
defer span.End()
// Load the archive point for lower side of the intermediate state.
lowArchivedPointIdx := slot / s.slotsPerArchivedPoint
lowArchivedPointState, err := s.archivedPointByIndex(ctx, lowArchivedPointIdx)
if err != nil {
return nil, errors.Wrap(err, "could not get lower archived state using index")
}
if lowArchivedPointState == nil {
return nil, errUnknownArchivedState
}
replayBlks, err := s.LoadBlocks(ctx, lowArchivedPointState.Slot()+1, slot, blockRoot)
if err != nil {
return nil, errors.Wrap(err, "could not get load blocks for cold state using slot")
}
return s.ReplayBlocks(ctx, lowArchivedPointState, replayBlks, slot)
}
// This loads a cold state by slot where the slot lies between archived points.
// This is a slower implementation given that there's no root and the slot is the only argument. It requires fetching
// all the blocks between the archived points.
func (s *State) loadColdIntermediateStateBySlot(ctx context.Context, slot uint64) (*state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "stateGen.loadColdIntermediateStateBySlot")
defer span.End()
// Load the archive point for lower and high side of the intermediate state.
lowArchivedPointIdx := slot / s.slotsPerArchivedPoint
highArchivedPointIdx := lowArchivedPointIdx + 1
lowArchivedPointState, err := s.archivedPointByIndex(ctx, lowArchivedPointIdx)
if err != nil {
return nil, errors.Wrap(err, "could not get lower bound archived state using index")
}
if lowArchivedPointState == nil {
return nil, errUnknownArchivedState
}
// If the slot of the high archived point is at or beyond the split slot, use the split slot and root
// for the upper archived point.
var highArchivedPointRoot [32]byte
highArchivedPointSlot := highArchivedPointIdx * s.slotsPerArchivedPoint
if highArchivedPointSlot >= s.splitInfo.slot {
highArchivedPointRoot = s.splitInfo.root
highArchivedPointSlot = s.splitInfo.slot
} else {
if _, err := s.archivedPointByIndex(ctx, highArchivedPointIdx); err != nil {
return nil, errors.Wrap(err, "could not get upper bound archived state using index")
}
highArchivedPointRoot = s.beaconDB.ArchivedPointRoot(ctx, highArchivedPointIdx)
slot, err := s.blockRootSlot(ctx, highArchivedPointRoot)
if err != nil {
return nil, errors.Wrap(err, "could not get high archived point slot")
}
highArchivedPointSlot = slot
}
replayBlks, err := s.LoadBlocks(ctx, lowArchivedPointState.Slot()+1, highArchivedPointSlot, highArchivedPointRoot)
if err != nil {
return nil, errors.Wrap(err, "could not load block for cold state using slot")
}
return s.ReplayBlocks(ctx, lowArchivedPointState, replayBlks, slot)
}
// Given the archive index, this returns the archived cold state in the DB.
// If the archived state does not exist in the DB, it'll compute it and save it.
func (s *State) archivedPointByIndex(ctx context.Context, archiveIndex uint64) (*state.BeaconState, error) {

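Cold-state loading comes down to slot arithmetic: a slot that falls exactly on an archived point is served from that point's saved state, while anything in between loads the lower archived point and replays blocks up to the target slot. A sketch of that bucketing, with a hypothetical slotsPerArchivedPoint value:

package main

import "fmt"

// archivedLookup classifies a slot the way loadColdStateByRoot and
// loadColdIntermediateStateBySlot do: exact archived points can be read
// directly, anything in between requires replay from the lower point.
func archivedLookup(slot, slotsPerArchivedPoint uint64) (lowIdx uint64, onPoint bool) {
    lowIdx = slot / slotsPerArchivedPoint
    onPoint = slot%slotsPerArchivedPoint == 0
    return lowIdx, onPoint
}

func main() {
    const slotsPerArchivedPoint = 64 // illustrative value, not a Prysm constant
    for _, slot := range []uint64{64, 70, 128} {
        low, onPoint := archivedLookup(slot, slotsPerArchivedPoint)
        if onPoint {
            fmt.Printf("slot %d: load archived point %d directly\n", slot, low)
        } else {
            fmt.Printf("slot %d: load archived point %d, replay blocks %d..%d\n",
                slot, low, low*slotsPerArchivedPoint+1, slot)
        }
    }
}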

@@ -2,6 +2,7 @@ package stategen
import (
"context"
"strings"
"testing"
"github.com/gogo/protobuf/proto"
@@ -9,6 +10,7 @@ import (
"github.com/prysmaticlabs/go-ssz"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
)
@@ -59,6 +61,150 @@ func TestSaveColdState_CanSave(t *testing.T) {
}
}
func TestLoadColdStateByRoot_NoStateSummary(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
service := New(db)
if _, err := service.loadColdStateByRoot(ctx, [32]byte{'a'}); err != errUnknownStateSummary {
t.Fatal("Did not get correct error")
}
}
func TestLoadColdStateByRoot_ByArchivedPoint(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
service := New(db)
service.slotsPerArchivedPoint = 1
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
if err := service.beaconDB.SaveArchivedPointState(ctx, beaconState, 1); err != nil {
t.Fatal(err)
}
r := [32]byte{'a'}
if err := service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{
Root: r[:],
Slot: 1,
}); err != nil {
t.Fatal(err)
}
loadedState, err := service.loadColdStateByRoot(ctx, r)
if err != nil {
t.Fatal(err)
}
if !proto.Equal(loadedState.InnerStateUnsafe(), beaconState.InnerStateUnsafe()) {
t.Error("Did not correctly save state")
}
}
func TestLoadColdStateByRoot_IntermediatePlayback(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
service := New(db)
service.slotsPerArchivedPoint = 2
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
if err := service.beaconDB.SaveArchivedPointState(ctx, beaconState, 1); err != nil {
t.Fatal(err)
}
if err := service.beaconDB.SaveArchivedPointRoot(ctx, [32]byte{}, 1); err != nil {
t.Fatal(err)
}
r := [32]byte{'a'}
slot := uint64(3)
if err := service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{
Root: r[:],
Slot: slot,
}); err != nil {
t.Fatal(err)
}
loadedState, err := service.loadColdStateByRoot(ctx, r)
if err != nil {
t.Fatal(err)
}
if loadedState.Slot() != slot {
t.Error("Did not correctly save state")
}
}
func TestLoadColdStateBySlotIntermediatePlayback_BeforeCutoff(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
service := New(db)
service.slotsPerArchivedPoint = params.BeaconConfig().SlotsPerEpoch * 2
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
if err := service.beaconDB.SaveArchivedPointState(ctx, beaconState, 0); err != nil {
t.Fatal(err)
}
if err := service.beaconDB.SaveArchivedPointRoot(ctx, [32]byte{}, 0); err != nil {
t.Fatal(err)
}
if err := service.beaconDB.SaveArchivedPointState(ctx, beaconState, 1); err != nil {
t.Fatal(err)
}
if err := service.beaconDB.SaveArchivedPointRoot(ctx, [32]byte{}, 1); err != nil {
t.Fatal(err)
}
slot := uint64(20)
loadedState, err := service.loadColdIntermediateStateBySlot(ctx, slot)
if err != nil {
t.Fatal(err)
}
if loadedState.Slot() != slot {
t.Error("Did not correctly save state")
}
}
func TestLoadColdStateBySlotIntermediatePlayback_AfterCutoff(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
service := New(db)
service.slotsPerArchivedPoint = params.BeaconConfig().SlotsPerEpoch
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
if err := service.beaconDB.SaveArchivedPointState(ctx, beaconState, 0); err != nil {
t.Fatal(err)
}
if err := service.beaconDB.SaveArchivedPointRoot(ctx, [32]byte{}, 0); err != nil {
t.Fatal(err)
}
slot := uint64(10)
loadedState, err := service.loadColdIntermediateStateBySlot(ctx, slot)
if err != nil {
t.Fatal(err)
}
if loadedState.Slot() != slot {
t.Error("Did not correctly save state")
}
}
func TestLoadColdStateByRoot_UnknownArchivedState(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
service := New(db)
service.slotsPerArchivedPoint = 1
if _, err := service.loadColdIntermediateStateBySlot(ctx, 0); !strings.Contains(err.Error(), errUnknownArchivedState.Error()) {
t.Log(err)
t.Error("Did not get wanted error")
}
}
func TestArchivedPointByIndex_HasPoint(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)


@@ -0,0 +1,50 @@
package stategen
import (
"context"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"go.opencensus.io/trace"
)
// StateByRoot retrieves the state from the DB using the input block root.
// It retrieves the state from the cold section if the state summary slot
// is below the split point cutoff.
func (s *State) StateByRoot(ctx context.Context, blockRoot [32]byte) (*state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "stateGen.StateByRoot")
defer span.End()
slot, err := s.blockRootSlot(ctx, blockRoot)
if err != nil {
return nil, errors.Wrap(err, "could not get state summary")
}
if slot < s.splitInfo.slot {
return s.loadColdStateByRoot(ctx, blockRoot)
}
return s.loadHotStateByRoot(ctx, blockRoot)
}
// StateBySlot retrieves the state from the DB using the input slot.
// It retrieves the state from the cold section if the input slot
// is below the split point cutoff.
// Note: `StateByRoot` is preferred over this, as retrieving a state
// by root is more performant than retrieving it by slot.
func (s *State) StateBySlot(ctx context.Context, slot uint64) (*state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "stateGen.StateBySlot")
defer span.End()
if slot < s.splitInfo.slot {
return s.loadColdIntermediateStateBySlot(ctx, slot)
}
return s.loadHotStateBySlot(ctx, slot)
}
// StateSummaryExists returns true if the corresponding state of the input block either
// exists in the DB or it can be generated by state gen.
func (s *State) StateSummaryExists(ctx context.Context, blockRoot [32]byte) bool {
return s.beaconDB.HasStateSummary(ctx, blockRoot)
}

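The new getter routes every request through a single comparison against the split slot: slots below it live in the cold section, everything at or above it is hot. A toy routing sketch with a placeholder split value:

package main

import "fmt"

// stateSource routes a request the way StateBySlot does: slots below the
// split point come from the cold section, the rest from the hot section.
func stateSource(slot, splitSlot uint64) string {
    if slot < splitSlot {
        return "cold (archived points + replay)"
    }
    return "hot (boundary states + replay)"
}

func main() {
    const splitSlot = 1024 // hypothetical split point
    for _, s := range []uint64{100, 1023, 1024, 2048} {
        fmt.Printf("slot %4d -> %s\n", s, stateSource(s, splitSlot))
    }
}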

@@ -0,0 +1,185 @@
package stategen
import (
"context"
"testing"
"github.com/gogo/protobuf/proto"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
)
func TestStateByRoot_ColdState(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
service := New(db)
service.splitInfo.slot = 2
service.slotsPerArchivedPoint = 1
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
if err := service.beaconDB.SaveArchivedPointState(ctx, beaconState, 1); err != nil {
t.Fatal(err)
}
r := [32]byte{'a'}
if err := service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{
Root: r[:],
Slot: 1,
}); err != nil {
t.Fatal(err)
}
loadedState, err := service.StateByRoot(ctx, r)
if err != nil {
t.Fatal(err)
}
if !proto.Equal(loadedState.InnerStateUnsafe(), beaconState.InnerStateUnsafe()) {
t.Error("Did not correctly save state")
}
}
func TestStateByRoot_HotStateDB(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
service := New(db)
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
boundaryRoot := [32]byte{'A'}
blkRoot := [32]byte{'B'}
if err := service.beaconDB.SaveState(ctx, beaconState, boundaryRoot); err != nil {
t.Fatal(err)
}
targetSlot := uint64(10)
if err := service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{
Slot: targetSlot,
Root: blkRoot[:],
BoundaryRoot: boundaryRoot[:],
}); err != nil {
t.Fatal(err)
}
loadedState, err := service.StateByRoot(ctx, blkRoot)
if err != nil {
t.Fatal(err)
}
if loadedState.Slot() != targetSlot {
t.Error("Did not correctly load state")
}
}
func TestStateByRoot_HotStateCached(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
service := New(db)
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
r := [32]byte{'A'}
if err := service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{
Root: r[:],
BoundaryRoot: r[:],
}); err != nil {
t.Fatal(err)
}
service.hotStateCache.Put(r, beaconState)
loadedState, err := service.StateByRoot(ctx, r)
if err != nil {
t.Fatal(err)
}
if !proto.Equal(loadedState.InnerStateUnsafe(), beaconState.InnerStateUnsafe()) {
t.Error("Did not correctly cache state")
}
}
func TestStateBySlot_ColdState(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
service := New(db)
service.slotsPerArchivedPoint = params.BeaconConfig().SlotsPerEpoch * 2
service.splitInfo.slot = service.slotsPerArchivedPoint + 1
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
r := [32]byte{}
if err := service.beaconDB.SaveArchivedPointState(ctx, beaconState, 0); err != nil {
t.Fatal(err)
}
if err := service.beaconDB.SaveArchivedPointRoot(ctx, r, 0); err != nil {
t.Fatal(err)
}
if err := service.beaconDB.SaveArchivedPointState(ctx, beaconState, 1); err != nil {
t.Fatal(err)
}
if err := service.beaconDB.SaveArchivedPointRoot(ctx, r, 1); err != nil {
t.Fatal(err)
}
if err := service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{
Slot: service.slotsPerArchivedPoint,
Root: r[:],
}); err != nil {
t.Fatal(err)
}
slot := uint64(20)
loadedState, err := service.StateBySlot(ctx, slot)
if err != nil {
t.Fatal(err)
}
if loadedState.Slot() != slot {
t.Error("Did not correctly save state")
}
}
func TestStateBySlot_HotStateCached(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
service := New(db)
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
r := [32]byte{'A'}
service.hotStateCache.Put(r, beaconState)
service.setEpochBoundaryRoot(0, r)
slot := uint64(10)
loadedState, err := service.StateBySlot(ctx, slot)
if err != nil {
t.Fatal(err)
}
if loadedState.Slot() != slot {
t.Error("Did not correctly load state")
}
}
func TestStateBySlot_HotStateDB(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
service := New(db)
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
r := [32]byte{'A'}
service.setEpochBoundaryRoot(0, r)
if err := service.beaconDB.SaveState(ctx, beaconState, r); err != nil {
t.Fatal(err)
}
slot := uint64(10)
loadedState, err := service.StateBySlot(ctx, slot)
if err != nil {
t.Fatal(err)
}
if loadedState.Slot() != slot {
t.Error("Did not correctly load state")
}
}


@@ -75,12 +75,25 @@ func (s *State) loadHotStateByRoot(ctx context.Context, blockRoot [32]byte) (*st
if summary == nil {
return nil, errUnknownStateSummary
}
boundaryState, err := s.beaconDB.State(ctx, bytesutil.ToBytes32(summary.BoundaryRoot))
if err != nil {
return nil, err
}
if boundaryState == nil {
return nil, errUnknownBoundaryState
// Boundary state not available, get the last available state and start from there.
// This could happen if users toggle feature flags in between sync.
r, err := s.lastSavedState(ctx, helpers.StartSlot(summary.Slot))
if err != nil {
return nil, err
}
boundaryState, err = s.beaconDB.State(ctx, r)
if err != nil {
return nil, err
}
if boundaryState == nil {
return nil, errUnknownBoundaryState
}
}
// Don't need to replay the blocks if we're already on an epoch boundary,


@@ -297,7 +297,7 @@ func TestLoadEpochBoundaryRoot_LastSavedBlock(t *testing.T) {
defer testDB.TeardownDB(t, db)
service := New(db)
b1 := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: service.lastArchivedSlot + 5}}
b1 := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: service.splitInfo.slot + 5}}
if err := service.beaconDB.SaveBlock(ctx, b1); err != nil {
t.Fatal(err)
}


@@ -45,14 +45,15 @@ func (s *State) MigrateToCold(ctx context.Context, finalizedState *state.BeaconS
continue
}
if stateSummary.Slot%s.slotsPerArchivedPoint == 0 {
archivePointIndex := stateSummary.Slot / s.slotsPerArchivedPoint
archivedPointIndex := stateSummary.Slot / s.slotsPerArchivedPoint
alreadyArchived := s.beaconDB.HasArchivedPoint(ctx, archivedPointIndex)
if stateSummary.Slot%s.slotsPerArchivedPoint == 0 && !alreadyArchived {
if s.beaconDB.HasState(ctx, r) {
hotState, err := s.beaconDB.State(ctx, r)
if err != nil {
return err
}
if err := s.beaconDB.SaveArchivedPointState(ctx, hotState.Copy(), archivePointIndex); err != nil {
if err := s.beaconDB.SaveArchivedPointState(ctx, hotState.Copy(), archivedPointIndex); err != nil {
return err
}
} else {
@@ -60,17 +61,19 @@ func (s *State) MigrateToCold(ctx context.Context, finalizedState *state.BeaconS
if err != nil {
return err
}
if err := s.beaconDB.SaveArchivedPointState(ctx, hotState.Copy(), archivePointIndex); err != nil {
if err := s.beaconDB.SaveArchivedPointState(ctx, hotState.Copy(), archivedPointIndex); err != nil {
return err
}
}
if err := s.beaconDB.SaveArchivedPointRoot(ctx, r, archivePointIndex); err != nil {
if err := s.beaconDB.SaveArchivedPointRoot(ctx, r, archivedPointIndex); err != nil {
return err
}
if err := s.beaconDB.SaveLastArchivedIndex(ctx, archivedPointIndex); err != nil {
return err
}
log.WithFields(logrus.Fields{
"slot": stateSummary.Slot,
"archiveIndex": archivePointIndex,
"archiveIndex": archivedPointIndex,
"root": hex.EncodeToString(bytesutil.Trunc(r[:])),
}).Info("Saved archived point during state migration")
}
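To make the archive-index arithmetic above concrete, a small standalone sketch (the slot numbers and the 256-slot interval are illustrative assumptions): only slots that land exactly on the interval are archived, at index slot/interval.

package main

import "fmt"

// archivedPointFor mirrors the index arithmetic in MigrateToCold above: only slots
// that fall exactly on the archive interval are archived, at index slot/interval.
func archivedPointFor(summarySlot, slotsPerArchivedPoint uint64) (uint64, bool) {
	if summarySlot%slotsPerArchivedPoint != 0 {
		return 0, false // Not on an archive boundary, nothing to archive.
	}
	return summarySlot / slotsPerArchivedPoint, true
}

func main() {
	idx, ok := archivedPointFor(2048, 256)
	fmt.Println(idx, ok) // 8 true
	_, ok = archivedPointFor(2049, 256)
	fmt.Println(ok) // false
}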


@@ -66,10 +66,17 @@ func (s *State) ComputeStateUpToSlot(ctx context.Context, targetSlot uint64) (*s
// ReplayBlocks replays the input blocks on the input state until the target slot is reached.
func (s *State) ReplayBlocks(ctx context.Context, state *state.BeaconState, signed []*ethpb.SignedBeaconBlock, targetSlot uint64) (*state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "stateGen.ReplayBlocks")
defer span.End()
var err error
// The input block list is sorted in decreasing slot order.
if len(signed) > 0 {
for i := len(signed) - 1; i >= 0; i-- {
if state.Slot() >= targetSlot {
break
}
if featureconfig.Get().EnableStateGenSigVerify {
state, err = transition.ExecuteStateTransition(ctx, state, signed[i])
if err != nil {
@@ -85,15 +92,17 @@ func (s *State) ReplayBlocks(ctx context.Context, state *state.BeaconState, sign
}
// If there are skipped slots at the end.
if featureconfig.Get().EnableStateGenSigVerify {
state, err = transition.ProcessSlots(ctx, state, targetSlot)
if err != nil {
return nil, err
}
} else {
state, err = processSlotsStateGen(ctx, state, targetSlot)
if err != nil {
return nil, err
if targetSlot > state.Slot() {
if featureconfig.Get().EnableStateGenSigVerify {
state, err = transition.ProcessSlots(ctx, state, targetSlot)
if err != nil {
return nil, err
}
} else {
state, err = processSlotsStateGen(ctx, state, targetSlot)
if err != nil {
return nil, err
}
}
}
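The control flow introduced above, reduced to a hedged sketch: blocks arrive sorted by decreasing slot, so they are applied from the end of the slice, and trailing skip slots are only processed when the target slot is still ahead of the state. Here applyBlock and advanceSlots are placeholders, not the real transition helpers.

package main

import "fmt"

// replaySketch is an illustrative outline of the control flow in ReplayBlocks above;
// applyBlock and advanceSlots stand in for the real state-transition functions.
func replaySketch(stateSlot, targetSlot uint64, blockSlots []uint64,
	applyBlock func(blockSlot uint64) uint64, advanceSlots func(from, to uint64) uint64) uint64 {
	// The input block list is sorted in decreasing slot order, so walk it backwards.
	for i := len(blockSlots) - 1; i >= 0; i-- {
		if stateSlot >= targetSlot {
			break
		}
		stateSlot = applyBlock(blockSlots[i])
	}
	// Trailing skip slots are only processed when the target is still ahead of the state.
	if targetSlot > stateSlot {
		stateSlot = advanceSlots(stateSlot, targetSlot)
	}
	return stateSlot
}

func main() {
	apply := func(blockSlot uint64) uint64 { return blockSlot } // Pretend the state lands on the block's slot.
	advance := func(from, to uint64) uint64 { return to }       // Pretend skip-slot processing reaches the target.
	fmt.Println(replaySketch(0, 10, []uint64{8, 5, 3}, apply, advance)) // 10
}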
@@ -242,7 +251,7 @@ func (s *State) lastSavedBlock(ctx context.Context, slot uint64) ([32]byte, uint
// Lower bound set as last archived slot is a reasonable assumption given
// block is saved at an archived point.
filter := filters.NewFilter().SetStartSlot(s.lastArchivedSlot).SetEndSlot(slot)
filter := filters.NewFilter().SetStartSlot(s.splitInfo.slot).SetEndSlot(slot)
rs, err := s.beaconDB.BlockRoots(ctx, filter)
if err != nil {
return [32]byte{}, 0, err
@@ -282,7 +291,7 @@ func (s *State) lastSavedState(ctx context.Context, slot uint64) ([32]byte, erro
// Lower bound set as last archived slot is a reasonable assumption given
// state is saved at an archived point.
filter := filters.NewFilter().SetStartSlot(s.lastArchivedSlot).SetEndSlot(slot)
filter := filters.NewFilter().SetStartSlot(s.splitInfo.slot).SetEndSlot(slot)
rs, err := s.beaconDB.BlockRoots(ctx, filter)
if err != nil {
return [32]byte{}, err


@@ -352,8 +352,8 @@ func TestLastSavedBlock_Genesis(t *testing.T) {
defer testDB.TeardownDB(t, db)
ctx := context.Background()
s := &State{
beaconDB: db,
lastArchivedSlot: 128,
beaconDB: db,
splitInfo: &splitSlotAndRoot{slot: 128},
}
gBlk := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{}}
@@ -385,28 +385,28 @@ func TestLastSavedBlock_CanGet(t *testing.T) {
defer testDB.TeardownDB(t, db)
ctx := context.Background()
s := &State{
beaconDB: db,
lastArchivedSlot: 128,
beaconDB: db,
splitInfo: &splitSlotAndRoot{slot: 128},
}
b1 := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: s.lastArchivedSlot + 5}}
b1 := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: s.splitInfo.slot + 5}}
if err := s.beaconDB.SaveBlock(ctx, b1); err != nil {
t.Fatal(err)
}
b2 := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: s.lastArchivedSlot + 10}}
b2 := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: s.splitInfo.slot + 10}}
if err := s.beaconDB.SaveBlock(ctx, b2); err != nil {
t.Fatal(err)
}
b3 := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: s.lastArchivedSlot + 20}}
b3 := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: s.splitInfo.slot + 20}}
if err := s.beaconDB.SaveBlock(ctx, b3); err != nil {
t.Fatal(err)
}
savedRoot, savedSlot, err := s.lastSavedBlock(ctx, s.lastArchivedSlot+100)
savedRoot, savedSlot, err := s.lastSavedBlock(ctx, s.splitInfo.slot+100)
if err != nil {
t.Fatal(err)
}
if savedSlot != s.lastArchivedSlot+20 {
if savedSlot != s.splitInfo.slot+20 {
t.Error("Did not save correct slot")
}
wantedRoot, _ := ssz.HashTreeRoot(b3.Block)
@@ -420,8 +420,8 @@ func TestLastSavedBlock_NoSavedBlock(t *testing.T) {
defer testDB.TeardownDB(t, db)
ctx := context.Background()
s := &State{
beaconDB: db,
lastArchivedSlot: 128,
beaconDB: db,
splitInfo: &splitSlotAndRoot{slot: 128},
}
b1 := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 127}}
@@ -429,7 +429,7 @@ func TestLastSavedBlock_NoSavedBlock(t *testing.T) {
t.Fatal(err)
}
r, slot, err := s.lastSavedBlock(ctx, s.lastArchivedSlot+1)
r, slot, err := s.lastSavedBlock(ctx, s.splitInfo.slot+1)
if err != nil {
t.Fatal(err)
}
@@ -443,8 +443,8 @@ func TestLastSavedState_Genesis(t *testing.T) {
defer testDB.TeardownDB(t, db)
ctx := context.Background()
s := &State{
beaconDB: db,
lastArchivedSlot: 128,
beaconDB: db,
splitInfo: &splitSlotAndRoot{slot: 128},
}
gBlk := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{}}
@@ -473,32 +473,32 @@ func TestLastSavedState_CanGet(t *testing.T) {
defer testDB.TeardownDB(t, db)
ctx := context.Background()
s := &State{
beaconDB: db,
lastArchivedSlot: 128,
beaconDB: db,
splitInfo: &splitSlotAndRoot{slot: 128},
}
b1 := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: s.lastArchivedSlot + 5}}
b1 := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: s.splitInfo.slot + 5}}
if err := s.beaconDB.SaveBlock(ctx, b1); err != nil {
t.Fatal(err)
}
b2 := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: s.lastArchivedSlot + 10}}
b2 := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: s.splitInfo.slot + 10}}
if err := s.beaconDB.SaveBlock(ctx, b2); err != nil {
t.Fatal(err)
}
b2Root, _ := ssz.HashTreeRoot(b2.Block)
st, err := stateTrie.InitializeFromProtoUnsafe(&pb.BeaconState{Slot: s.lastArchivedSlot + 10})
st, err := stateTrie.InitializeFromProtoUnsafe(&pb.BeaconState{Slot: s.splitInfo.slot + 10})
if err != nil {
t.Fatal(err)
}
if err := s.beaconDB.SaveState(ctx, st, b2Root); err != nil {
t.Fatal(err)
}
b3 := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: s.lastArchivedSlot + 20}}
b3 := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: s.splitInfo.slot + 20}}
if err := s.beaconDB.SaveBlock(ctx, b3); err != nil {
t.Fatal(err)
}
savedRoot, err := s.lastSavedState(ctx, s.lastArchivedSlot+100)
savedRoot, err := s.lastSavedState(ctx, s.splitInfo.slot+100)
if err != nil {
t.Fatal(err)
}
@@ -512,8 +512,8 @@ func TestLastSavedState_NoSavedBlockState(t *testing.T) {
defer testDB.TeardownDB(t, db)
ctx := context.Background()
s := &State{
beaconDB: db,
lastArchivedSlot: 128,
beaconDB: db,
splitInfo: &splitSlotAndRoot{slot: 128},
}
b1 := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 127}}
@@ -521,7 +521,7 @@ func TestLastSavedState_NoSavedBlockState(t *testing.T) {
t.Fatal(err)
}
r, err := s.lastSavedState(ctx, s.lastArchivedSlot+1)
r, err := s.lastSavedState(ctx, s.splitInfo.slot+1)
if err != nil {
t.Fatal(err)
}


@@ -1,18 +1,24 @@
package stategen
import (
"context"
"sync"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/params"
"go.opencensus.io/trace"
)
const archivedInterval = 256
// State represents a management object that handles the internal
// logic of maintaining both hot and cold states in DB.
type State struct {
beaconDB db.NoHeadAccessDatabase
lastArchivedSlot uint64
slotsPerArchivedPoint uint64
epochBoundarySlotToRoot map[uint64][32]byte
epochBoundaryLock sync.RWMutex
@@ -34,9 +40,41 @@ func New(db db.NoHeadAccessDatabase) *State {
epochBoundarySlotToRoot: make(map[uint64][32]byte),
hotStateCache: cache.NewHotStateCache(),
splitInfo: &splitSlotAndRoot{slot: 0, root: params.BeaconConfig().ZeroHash},
slotsPerArchivedPoint: archivedInterval,
}
}
// Resume resumes a state management object from the previously saved finalized checkpoint in the DB.
func (s *State) Resume(ctx context.Context, lastArchivedRoot [32]byte) (*state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "stateGen.Resume")
defer span.End()
lastArchivedState, err := s.beaconDB.LastArchivedIndexState(ctx)
if err != nil {
return nil, err
}
// Resume as genesis state if there's no last archived state.
if lastArchivedState == nil {
return s.beaconDB.GenesisState(ctx)
}
s.splitInfo = &splitSlotAndRoot{slot: lastArchivedState.Slot(), root: lastArchivedRoot}
if err := s.beaconDB.SaveStateSummary(ctx,
&pb.StateSummary{Slot: lastArchivedState.Slot(), Root: lastArchivedRoot[:], BoundaryRoot: lastArchivedRoot[:]}); err != nil {
return nil, err
}
// In case the finalized state slot was skipped.
slot := lastArchivedState.Slot()
if !helpers.IsEpochStart(slot) {
slot = helpers.StartSlot(helpers.SlotToEpoch(slot) + 1)
}
s.setEpochBoundaryRoot(slot, lastArchivedRoot)
return lastArchivedState, nil
}
// This verifies the archive point frequency is valid. It checks the interval
// is a divisor of the number of slots per epoch. This ensures we have at least one
// archive point within range of our state root history when iterating
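A worked example of the boundary rounding used in Resume above, under the assumption of 32 slots per epoch: a finalized state at slot 45 is not an epoch start, so the cached boundary root is keyed at the next epoch's start slot, 64, while a state already at slot 64 keeps its own slot.

package main

import "fmt"

// nextBoundarySlot mirrors the rounding used in Resume above; slotsPerEpoch is a
// parameter only so the sketch stays self-contained (mainnet uses 32).
func nextBoundarySlot(slot, slotsPerEpoch uint64) uint64 {
	if slot%slotsPerEpoch == 0 { // Already an epoch start, keep it.
		return slot
	}
	epoch := slot / slotsPerEpoch
	return (epoch + 1) * slotsPerEpoch
}

func main() {
	fmt.Println(nextBoundarySlot(45, 32)) // 64: slot 45 rounds up to the next boundary.
	fmt.Println(nextBoundarySlot(64, 32)) // 64: already on a boundary.
}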


@@ -1,12 +1,45 @@
package stategen
import (
"context"
"testing"
"github.com/gogo/protobuf/proto"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
)
func Test_verifySlotsPerArchivePoint(t *testing.T) {
func TestResume(t *testing.T) {
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
service := New(db)
root := [32]byte{'A'}
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
beaconState.SetSlot(params.BeaconConfig().SlotsPerEpoch - 2)
service.beaconDB.SaveArchivedPointState(ctx, beaconState, 1)
service.beaconDB.SaveLastArchivedIndex(ctx, 1)
resumeState, err := service.Resume(ctx, root)
if err != nil {
t.Fatal(err)
}
if !proto.Equal(beaconState.InnerStateUnsafe(), resumeState.InnerStateUnsafe()) {
t.Error("Diff saved state")
}
if !service.beaconDB.HasStateSummary(ctx, root) {
t.Error("Did not save state summary")
}
if cachedRoot, _ := service.epochBoundaryRoot(params.BeaconConfig().SlotsPerEpoch); cachedRoot != root {
t.Error("Did not save boundary root")
}
}
func TestVerifySlotsPerArchivePoint(t *testing.T) {
type tc struct {
input uint64
result bool


@@ -0,0 +1,22 @@
package stategen
import (
"context"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"go.opencensus.io/trace"
)
// SaveState saves the state in the DB.
// It determines whether the input state belongs to the cold or the hot section.
func (s *State) SaveState(ctx context.Context, root [32]byte, state *state.BeaconState) error {
ctx, span := trace.StartSpan(ctx, "stateGen.SaveState")
defer span.End()
// The state belongs to the cold section if it's below the split slot threshold.
if state.Slot() < s.splitInfo.slot {
return s.saveColdState(ctx, root, state)
}
return s.saveHotState(ctx, root, state)
}


@@ -0,0 +1,106 @@
package stategen
import (
"context"
"testing"
"github.com/gogo/protobuf/proto"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
logTest "github.com/sirupsen/logrus/hooks/test"
)
func TestSaveState_ColdStateCanBeSaved(t *testing.T) {
hook := logTest.NewGlobal()
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
service := New(db)
service.slotsPerArchivedPoint = 1
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
// This goes to cold section.
slot := uint64(1)
beaconState.SetSlot(slot)
service.splitInfo.slot = slot + 1
r := [32]byte{'a'}
if err := service.SaveState(ctx, r, beaconState); err != nil {
t.Fatal(err)
}
if !service.beaconDB.HasArchivedPoint(ctx, 1) {
t.Error("Did not save cold state")
}
if service.beaconDB.ArchivedPointRoot(ctx, 1) != r {
t.Error("Did not get wanted root")
}
receivedState, err := service.beaconDB.ArchivedPointState(ctx, 1)
if err != nil {
t.Fatal(err)
}
if !proto.Equal(receivedState.InnerStateUnsafe(), beaconState.InnerStateUnsafe()) {
t.Error("Did not get wanted state")
}
testutil.AssertLogsContain(t, hook, "Saved full state on archived point")
}
func TestSaveState_HotStateCanBeSaved(t *testing.T) {
hook := logTest.NewGlobal()
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
service := New(db)
service.slotsPerArchivedPoint = 1
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
// This goes to hot section, verify it can save on epoch boundary.
beaconState.SetSlot(params.BeaconConfig().SlotsPerEpoch)
r := [32]byte{'a'}
if err := service.SaveState(ctx, r, beaconState); err != nil {
t.Fatal(err)
}
// Should save both state and state summary.
if !service.beaconDB.HasState(ctx, r) {
t.Error("Should have saved the state")
}
if !service.beaconDB.HasStateSummary(ctx, r) {
t.Error("Should have saved the state summary")
}
testutil.AssertLogsContain(t, hook, "Saved full state on epoch boundary")
}
func TestSaveState_HotStateCached(t *testing.T) {
hook := logTest.NewGlobal()
ctx := context.Background()
db := testDB.SetupDB(t)
defer testDB.TeardownDB(t, db)
service := New(db)
service.slotsPerArchivedPoint = 1
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
beaconState.SetSlot(params.BeaconConfig().SlotsPerEpoch)
// Cache the state prior.
r := [32]byte{'a'}
service.hotStateCache.Put(r, beaconState)
if err := service.SaveState(ctx, r, beaconState); err != nil {
t.Fatal(err)
}
// Should not save the state and state summary.
if service.beaconDB.HasState(ctx, r) {
t.Error("Should not have saved the state")
}
if service.beaconDB.HasStateSummary(ctx, r) {
t.Error("Should have saved the state summary")
}
testutil.AssertLogsDoNotContain(t, hook, "Saved full state on epoch boundary")
}


@@ -9,6 +9,7 @@ go_library(
"hash_function.go",
"helpers.go",
"state_root.go",
"trie_helpers.go",
"validators.go",
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil",
@@ -22,6 +23,7 @@ go_library(
"//shared/featureconfig:go_default_library",
"//shared/hashutil:go_default_library",
"//shared/params:go_default_library",
"//shared/trieutil:go_default_library",
"@com_github_dgraph_io_ristretto//:go_default_library",
"@com_github_minio_sha256_simd//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
@@ -36,13 +38,16 @@ go_test(
srcs = [
"state_root_cache_fuzz_test.go",
"state_root_test.go",
"trie_helpers_test.go",
],
embed = [":go_default_library"],
deps = [
"//proto/beacon/p2p/v1:go_default_library",
"//shared/bytesutil:go_default_library",
"//shared/featureconfig:go_default_library",
"//shared/interop:go_default_library",
"//shared/params:go_default_library",
"//shared/testutil:go_default_library",
"@com_github_google_gofuzz//:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_prysmaticlabs_go_ssz//:go_default_library",


@@ -108,7 +108,17 @@ func (h *stateRootHasher) merkleizeWithCache(leaves [][32]byte, length uint64,
}
}
layers[0] = hashLayer
layers, hashLayer = merkleizeTrieLeaves(layers, hashLayer, hasher)
var root [32]byte
root = hashLayer[0]
if h.rootsCache != nil {
layersCache[fieldName] = layers
}
return root
}
func merkleizeTrieLeaves(layers [][][32]byte, hashLayer [][32]byte,
hasher func([]byte) [32]byte) ([][][32]byte, [][32]byte) {
// We keep track of the hash layers of a Merkle trie until we reach
// the top layer of length 1, which contains the single root element.
// [Root] -> Top layer has length 1.
@@ -130,12 +140,7 @@ func (h *stateRootHasher) merkleizeWithCache(leaves [][32]byte, length uint64,
layers[i] = hashLayer
i++
}
var root [32]byte
root = hashLayer[0]
if h.rootsCache != nil {
layersCache[fieldName] = layers
}
return root
return layers, hashLayer
}
func recomputeRoot(idx int, chunks [][32]byte, length uint64,


@@ -22,6 +22,36 @@ func EpochAttestationsRoot(atts []*pb.PendingAttestation) ([32]byte, error) {
return nocachedHasher.epochAttestationsRoot(atts)
}
// PendingAttestationRoot computes the hash tree root of a pending attestation.
func PendingAttestationRoot(att *pb.PendingAttestation) ([32]byte, error) {
fieldRoots := [][32]byte{}
if att != nil {
// Bitfield.
aggregationRoot, err := bitlistRoot(att.AggregationBits, 2048)
if err != nil {
return [32]byte{}, err
}
// Attestation data.
attDataRoot, err := attestationDataRoot(att.Data)
if err != nil {
return [32]byte{}, err
}
inclusionBuf := make([]byte, 8)
binary.LittleEndian.PutUint64(inclusionBuf, att.InclusionDelay)
// Inclusion delay.
inclusionRoot := bytesutil.ToBytes32(inclusionBuf)
proposerBuf := make([]byte, 8)
binary.LittleEndian.PutUint64(proposerBuf, att.ProposerIndex)
// Proposer index.
proposerRoot := bytesutil.ToBytes32(proposerBuf)
fieldRoots = [][32]byte{aggregationRoot, attDataRoot, inclusionRoot, proposerRoot}
}
return bitwiseMerkleizeArrays(fieldRoots, uint64(len(fieldRoots)), uint64(len(fieldRoots)))
}
func marshalAttestationData(data *ethpb.AttestationData) []byte {
enc := make([]byte, 128)


@@ -114,3 +114,16 @@ func Eth1DataVotesRoot(eth1DataVotes []*ethpb.Eth1Data) ([32]byte, error) {
}
return root, nil
}
// AddInMixin mixes the provided length into the given root.
func AddInMixin(root [32]byte, length uint64) ([32]byte, error) {
rootBuf := new(bytes.Buffer)
if err := binary.Write(rootBuf, binary.LittleEndian, length); err != nil {
return [32]byte{}, errors.Wrap(err, "could not marshal length")
}
// We need to mix in the length of the slice.
rootBufRoot := make([]byte, 32)
copy(rootBufRoot, rootBuf.Bytes())
return mixInLength(root, rootBufRoot), nil
}
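For context, the length mix-in above follows the usual SSZ pattern of hashing the list root together with its length encoded as a 32-byte little-endian value. The sketch below reproduces that idea with the standard library only; it is an illustration, not the package's internal mixInLength helper.

package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

// mixInLengthSketch hashes root || length, where length is serialized
// little-endian into the first 8 bytes of a zeroed 32-byte chunk.
func mixInLengthSketch(root [32]byte, length uint64) [32]byte {
	var lenChunk [32]byte
	binary.LittleEndian.PutUint64(lenChunk[:8], length)
	return sha256.Sum256(append(root[:], lenChunk[:]...))
}

func main() {
	root := [32]byte{1}
	fmt.Printf("%#x\n", mixInLengthSketch(root, 3))
}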


@@ -0,0 +1,215 @@
package stateutil
import (
"bytes"
"github.com/protolambda/zssz/merkle"
"github.com/prysmaticlabs/prysm/shared/hashutil"
"github.com/prysmaticlabs/prysm/shared/trieutil"
)
// ReturnTrieLayer returns the layered representation of a merkle trie when
// provided with the elements of a fixed-sized trie and its corresponding depth.
func ReturnTrieLayer(elements [][32]byte, length uint64) [][]*[32]byte {
hasher := hashutil.CustomSHA256Hasher()
leaves := elements
if len(leaves) == 1 {
return [][]*[32]byte{{&leaves[0]}}
}
hashLayer := leaves
layers := make([][][32]byte, merkle.GetDepth(length)+1)
layers[0] = hashLayer
layers, _ = merkleizeTrieLeaves(layers, hashLayer, hasher)
refLayers := make([][]*[32]byte, len(layers))
for i, val := range layers {
refLayers[i] = make([]*[32]byte, len(val))
for j, innerVal := range val {
newVal := innerVal
refLayers[i][j] = &newVal
}
}
return refLayers
}
// ReturnTrieLayerVariable returns the layered representation of a merkle trie when
// provided with the elements of a variable-sized trie and its corresponding depth.
func ReturnTrieLayerVariable(elements [][32]byte, length uint64) [][]*[32]byte {
hasher := hashutil.CustomSHA256Hasher()
depth := merkle.GetDepth(length)
layers := make([][]*[32]byte, depth+1)
// Return zerohash at depth
if len(elements) == 0 {
zerohash := trieutil.ZeroHashes[depth]
layers[len(layers)-1] = []*[32]byte{&zerohash}
return layers
}
transformedLeaves := make([]*[32]byte, len(elements))
for i := range elements {
arr := elements[i]
transformedLeaves[i] = &arr
}
layers[0] = transformedLeaves
buffer := bytes.NewBuffer([]byte{})
buffer.Grow(64)
for i := 0; i < int(depth); i++ {
oddNodeLength := len(layers[i])%2 == 1
if oddNodeLength {
zerohash := trieutil.ZeroHashes[i]
layers[i] = append(layers[i], &zerohash)
}
updatedValues := make([]*[32]byte, 0, len(layers[i])/2)
for j := 0; j < len(layers[i]); j += 2 {
buffer.Write(layers[i][j][:])
buffer.Write(layers[i][j+1][:])
concat := hasher(buffer.Bytes())
updatedValues = append(updatedValues, &concat)
buffer.Reset()
}
// remove zerohash node from tree
if oddNodeLength {
layers[i] = layers[i][:len(layers[i])-1]
}
layers[i+1] = updatedValues
}
return layers
}
// RecomputeFromLayer recomputes specific branches of a fixed sized trie depending on the provided changed indexes.
func RecomputeFromLayer(changedLeaves [][32]byte, changedIdx []uint64, layer [][]*[32]byte) ([32]byte, [][]*[32]byte, error) {
hasher := hashutil.CustomSHA256Hasher()
for i, idx := range changedIdx {
layer[0][idx] = &changedLeaves[i]
}
if len(changedIdx) == 0 {
return *layer[0][0], layer, nil
}
leaves := layer[0]
// We need to ensure we recompute indices of the Merkle tree which
// changed in-between calls to this function. This check adds an offset
// to the recomputed indices to ensure we do so evenly.
maxChangedIndex := changedIdx[len(changedIdx)-1]
if int(maxChangedIndex+2) == len(leaves) && maxChangedIndex%2 != 0 {
changedIdx = append(changedIdx, maxChangedIndex+1)
}
root := *layer[0][0]
var err error
for _, idx := range changedIdx {
root, layer, err = recomputeRootFromLayer(int(idx), layer, leaves, hasher)
if err != nil {
return [32]byte{}, nil, err
}
}
return root, layer, nil
}
// RecomputeFromLayerVariable recomputes specific branches of a variable sized trie depending on the provided changed indexes.
func RecomputeFromLayerVariable(changedLeaves [][32]byte, changedIdx []uint64, layer [][]*[32]byte) ([32]byte, [][]*[32]byte, error) {
hasher := hashutil.CustomSHA256Hasher()
if len(changedIdx) == 0 {
return *layer[0][0], layer, nil
}
root := *layer[len(layer)-1][0]
var err error
for i, idx := range changedIdx {
root, layer, err = recomputeRootFromLayerVariable(int(idx), changedLeaves[i], layer, hasher)
if err != nil {
return [32]byte{}, nil, err
}
}
return root, layer, nil
}
// this method assumes that the provided trie already has all its elements included
// in the base depth.
func recomputeRootFromLayer(idx int, layers [][]*[32]byte, chunks []*[32]byte,
hasher func([]byte) [32]byte) ([32]byte, [][]*[32]byte, error) {
root := *chunks[idx]
layers[0] = chunks
// The merkle tree structure looks as follows:
// [[r1, r2, r3, r4], [parent1, parent2], [root]]
// Using information about the index which changed, idx, we recompute
// only its branch up the tree.
currentIndex := idx
for i := 0; i < len(layers)-1; i++ {
isLeft := currentIndex%2 == 0
neighborIdx := currentIndex ^ 1
neighbor := [32]byte{}
if layers[i] != nil && len(layers[i]) != 0 && neighborIdx < len(layers[i]) {
neighbor = *layers[i][neighborIdx]
}
if isLeft {
parentHash := hasher(append(root[:], neighbor[:]...))
root = parentHash
} else {
parentHash := hasher(append(neighbor[:], root[:]...))
root = parentHash
}
parentIdx := currentIndex / 2
// Update the cached layers at the parent index.
rootVal := root
if len(layers[i+1]) == 0 {
layers[i+1] = append(layers[i+1], &rootVal)
} else {
layers[i+1][parentIdx] = &rootVal
}
currentIndex = parentIdx
}
// If there is only a single leaf, we return it (the identity element).
if len(layers[0]) == 1 {
return *layers[0][0], layers, nil
}
return root, layers, nil
}
// this method assumes that the base branch does not consist of all leaves of the
// trie. Instead missing leaves are assumed to be zerohashes, following the structure
// of a sparse merkle trie.
func recomputeRootFromLayerVariable(idx int, item [32]byte, layers [][]*[32]byte,
hasher func([]byte) [32]byte) ([32]byte, [][]*[32]byte, error) {
for idx >= len(layers[0]) {
zerohash := trieutil.ZeroHashes[0]
layers[0] = append(layers[0], &zerohash)
}
layers[0][idx] = &item
currentIndex := idx
root := item
for i := 0; i < len(layers)-1; i++ {
isLeft := currentIndex%2 == 0
neighborIdx := currentIndex ^ 1
neighbor := [32]byte{}
if neighborIdx >= len(layers[i]) {
neighbor = trieutil.ZeroHashes[i]
} else {
neighbor = *layers[i][neighborIdx]
}
if isLeft {
parentHash := hasher(append(root[:], neighbor[:]...))
root = parentHash
} else {
parentHash := hasher(append(neighbor[:], root[:]...))
root = parentHash
}
parentIdx := currentIndex / 2
if len(layers[i+1]) == 0 || parentIdx >= len(layers[i+1]) {
newItem := root
layers[i+1] = append(layers[i+1], &newItem)
} else {
newItem := root
layers[i+1][parentIdx] = &newItem
}
currentIndex = parentIdx
}
return root, layers, nil
}
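The branch-recompute logic above condenses to a small self-contained sketch on a 4-leaf trie (a simplified illustration of the same idea, not the package code): after one leaf changes, only the sibling at index^1 on each layer is needed to rebuild the root.

package main

import (
	"crypto/sha256"
	"fmt"
)

func hashPair(a, b [32]byte) [32]byte {
	return sha256.Sum256(append(a[:], b[:]...))
}

// rootFromLeaves fully rebuilds a 4-leaf Merkle root (the reference answer).
func rootFromLeaves(l [4][32]byte) [32]byte {
	return hashPair(hashPair(l[0], l[1]), hashPair(l[2], l[3]))
}

// recomputeBranch rebuilds the root after leaves[idx] changed, walking one branch
// and hashing against the sibling (idx ^ 1) at every layer, like recomputeRootFromLayer.
func recomputeBranch(leaves [4][32]byte, idx int) [32]byte {
	root := leaves[idx]
	layer := leaves[:]
	for len(layer) > 1 {
		sibling := layer[idx^1]
		if idx%2 == 0 {
			root = hashPair(root, sibling)
		} else {
			root = hashPair(sibling, root)
		}
		// Build the parent layer so the next sibling lookup works.
		next := make([][32]byte, 0, len(layer)/2)
		for i := 0; i < len(layer); i += 2 {
			next = append(next, hashPair(layer[i], layer[i+1]))
		}
		layer, idx = next, idx/2
	}
	return root
}

func main() {
	leaves := [4][32]byte{{1}, {2}, {3}, {4}}
	leaves[2] = [32]byte{9} // Mutate one leaf.
	fmt.Println(recomputeBranch(leaves, 2) == rootFromLeaves(leaves)) // true
}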


@@ -0,0 +1,139 @@
package stateutil_test
import (
"testing"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
)
func TestReturnTrieLayer_OK(t *testing.T) {
newState, _ := testutil.DeterministicGenesisState(t, 32)
root, err := stateutil.RootsArrayHashTreeRoot(newState.BlockRoots(), params.BeaconConfig().SlotsPerHistoricalRoot, "BlockRoots")
if err != nil {
t.Fatal(err)
}
blockRts := newState.BlockRoots()
roots := make([][32]byte, 0, len(blockRts))
for _, rt := range blockRts {
roots = append(roots, bytesutil.ToBytes32(rt))
}
layers := stateutil.ReturnTrieLayer(roots, uint64(len(roots)))
newRoot := *layers[len(layers)-1][0]
if newRoot != root {
t.Errorf("Wanted root of %#x but got %#x", root, newRoot)
}
}
func TestReturnTrieLayerVariable_OK(t *testing.T) {
newState, _ := testutil.DeterministicGenesisState(t, 32)
root, err := stateutil.ValidatorRegistryRoot(newState.Validators())
if err != nil {
t.Fatal(err)
}
validators := newState.Validators()
roots := make([][32]byte, 0, len(validators))
for _, val := range validators {
rt, err := stateutil.ValidatorRoot(val)
if err != nil {
t.Fatal(err)
}
roots = append(roots, rt)
}
layers := stateutil.ReturnTrieLayerVariable(roots, params.BeaconConfig().ValidatorRegistryLimit)
newRoot := *layers[len(layers)-1][0]
newRoot, err = stateutil.AddInMixin(newRoot, uint64(len(validators)))
if err != nil {
t.Fatal(err)
}
if newRoot != root {
t.Errorf("Wanted root of %#x but got %#x", root, newRoot)
}
}
func TestRecomputeFromLayer_FixedSizedArray(t *testing.T) {
newState, _ := testutil.DeterministicGenesisState(t, 32)
blockRts := newState.BlockRoots()
roots := make([][32]byte, 0, len(blockRts))
for _, rt := range blockRts {
roots = append(roots, bytesutil.ToBytes32(rt))
}
layers := stateutil.ReturnTrieLayer(roots, uint64(len(roots)))
changedIdx := []uint64{24, 41}
changedRoots := [][32]byte{{'A', 'B', 'C'}, {'D', 'E', 'F'}}
newState.UpdateBlockRootAtIndex(changedIdx[0], changedRoots[0])
newState.UpdateBlockRootAtIndex(changedIdx[1], changedRoots[1])
expectedRoot, err := stateutil.RootsArrayHashTreeRoot(newState.BlockRoots(), params.BeaconConfig().SlotsPerHistoricalRoot, "BlockRoots")
if err != nil {
t.Fatal(err)
}
root, _, err := stateutil.RecomputeFromLayer(changedRoots, changedIdx, layers)
if err != nil {
t.Fatal(err)
}
if root != expectedRoot {
t.Errorf("Wanted root of %#x but got %#x", expectedRoot, root)
}
}
func TestRecomputeFromLayer_VariableSizedArray(t *testing.T) {
newState, _ := testutil.DeterministicGenesisState(t, 32)
validators := newState.Validators()
roots := make([][32]byte, 0, len(validators))
for _, val := range validators {
rt, err := stateutil.ValidatorRoot(val)
if err != nil {
t.Fatal(err)
}
roots = append(roots, rt)
}
layers := stateutil.ReturnTrieLayerVariable(roots, params.BeaconConfig().ValidatorRegistryLimit)
changedIdx := []uint64{2, 29}
val1, err := newState.ValidatorAtIndex(10)
if err != nil {
t.Fatal(err)
}
val2, err := newState.ValidatorAtIndex(11)
if err != nil {
t.Fatal(err)
}
val1.Slashed = true
val1.ExitEpoch = 20
val2.Slashed = true
val2.ExitEpoch = 40
changedVals := []*ethpb.Validator{val1, val2}
newState.UpdateValidatorAtIndex(changedIdx[0], changedVals[0])
newState.UpdateValidatorAtIndex(changedIdx[1], changedVals[1])
expectedRoot, err := stateutil.ValidatorRegistryRoot(newState.Validators())
if err != nil {
t.Fatal(err)
}
roots = make([][32]byte, 0, len(changedVals))
for _, val := range changedVals {
rt, err := stateutil.ValidatorRoot(val)
if err != nil {
t.Fatal(err)
}
roots = append(roots, rt)
}
root, _, err := stateutil.RecomputeFromLayerVariable(roots, changedIdx, layers)
if err != nil {
t.Fatal(err)
}
root, err = stateutil.AddInMixin(root, uint64(len(validators)))
if err != nil {
t.Fatal(err)
}
if root != expectedRoot {
t.Errorf("Wanted root of %#x but got %#x", expectedRoot, root)
}
}


@@ -59,6 +59,49 @@ func ValidatorBalancesRoot(balances []uint64) ([32]byte, error) {
return mixInLength(balancesRootsRoot, balancesRootsBufRoot), nil
}
// ValidatorRoot computes the hash tree root of a validator.
func ValidatorRoot(validator *ethpb.Validator) ([32]byte, error) {
fieldRoots := [][32]byte{}
if validator != nil {
pubkey := bytesutil.ToBytes48(validator.PublicKey)
withdrawCreds := bytesutil.ToBytes32(validator.WithdrawalCredentials)
effectiveBalanceBuf := [32]byte{}
binary.LittleEndian.PutUint64(effectiveBalanceBuf[:8], validator.EffectiveBalance)
// Slashed.
slashBuf := [32]byte{}
if validator.Slashed {
slashBuf[0] = uint8(1)
} else {
slashBuf[0] = uint8(0)
}
activationEligibilityBuf := [32]byte{}
binary.LittleEndian.PutUint64(activationEligibilityBuf[:8], validator.ActivationEligibilityEpoch)
activationBuf := [32]byte{}
binary.LittleEndian.PutUint64(activationBuf[:8], validator.ActivationEpoch)
exitBuf := [32]byte{}
binary.LittleEndian.PutUint64(exitBuf[:8], validator.ExitEpoch)
withdrawalBuf := [32]byte{}
binary.LittleEndian.PutUint64(withdrawalBuf[:8], validator.WithdrawableEpoch)
// Public key.
pubKeyChunks, err := pack([][]byte{pubkey[:]})
if err != nil {
return [32]byte{}, err
}
pubKeyRoot, err := bitwiseMerkleize(pubKeyChunks, uint64(len(pubKeyChunks)), uint64(len(pubKeyChunks)))
if err != nil {
return [32]byte{}, err
}
fieldRoots = [][32]byte{pubKeyRoot, withdrawCreds, effectiveBalanceBuf, slashBuf, activationEligibilityBuf,
activationBuf, exitBuf, withdrawalBuf}
}
return bitwiseMerkleizeArrays(fieldRoots, uint64(len(fieldRoots)), uint64(len(fieldRoots)))
}
func (h *stateRootHasher) validatorRegistryRoot(validators []*ethpb.Validator) ([32]byte, error) {
hashKeyElements := make([]byte, len(validators)*32)
roots := make([][32]byte, len(validators))
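Each uint64 field above is packed into its own 32-byte chunk, little-endian in the first 8 bytes with the rest left zero; a minimal standalone illustration (not the package code):

package main

import (
	"encoding/binary"
	"fmt"
)

// uint64Chunk packs a uint64 into a 32-byte SSZ chunk: little-endian in the
// first 8 bytes, the remaining 24 bytes zero.
func uint64Chunk(v uint64) [32]byte {
	var chunk [32]byte
	binary.LittleEndian.PutUint64(chunk[:8], v)
	return chunk
}

func main() {
	fmt.Printf("%#x\n", uint64Chunk(20)) // 0x14 (= 20) in the first byte, zeros after.
}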


@@ -1,22 +1,71 @@
package state
import (
"runtime"
"sync"
"github.com/gogo/protobuf/proto"
"github.com/pkg/errors"
"github.com/protolambda/zssz/merkle"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
coreutils "github.com/prysmaticlabs/prysm/beacon-chain/core/state/stateutils"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
pbp2p "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/hashutil"
"github.com/prysmaticlabs/prysm/shared/memorypool"
"github.com/prysmaticlabs/prysm/shared/params"
)
func init() {
fieldMap = make(map[fieldIndex]dataType)
// Initialize the fixed sized arrays.
fieldMap[blockRoots] = basicArray
fieldMap[stateRoots] = basicArray
fieldMap[randaoMixes] = basicArray
// Initialize the composite arrays.
fieldMap[eth1DataVotes] = compositeArray
fieldMap[validators] = compositeArray
fieldMap[previousEpochAttestations] = compositeArray
fieldMap[currentEpochAttestations] = compositeArray
}
type fieldIndex int
// dataType signifies the data type of the field.
type dataType int
// Below we define a set of useful enum values for the field
// indices of the beacon state. For example, genesisTime is the
// 0th field of the beacon state. This is helpful when we are
// updating the Merkle branches up the trie representation
// of the beacon state.
const (
genesisTime fieldIndex = iota
slot
fork
latestBlockHeader
blockRoots
stateRoots
historicalRoots
eth1Data
eth1DataVotes
eth1DepositIndex
validators
balances
randaoMixes
slashings
previousEpochAttestations
currentEpochAttestations
justificationBits
previousJustifiedCheckpoint
currentJustifiedCheckpoint
finalizedCheckpoint
)
// List of current data types the state supports.
const (
basicArray dataType = iota
compositeArray
)
// fieldMap keeps track of each field
// to its corresponding data type.
var fieldMap map[fieldIndex]dataType
// Reference structs are shared across BeaconState copies to understand when the state must use
// copy-on-write for shared fields or may modify a field in place when it holds the only reference
// to the field value. References are tracked in a map of fieldIndex -> *reference. Whenever a state
@@ -33,12 +82,14 @@ var ErrNilInnerState = errors.New("nil inner state")
// BeaconState defines a struct containing utilities for the eth2 chain state, defining
// getters and setters for its respective values and helpful functions such as HashTreeRoot().
type BeaconState struct {
state *pbp2p.BeaconState
lock sync.RWMutex
dirtyFields map[fieldIndex]interface{}
valIdxMap map[[48]byte]uint64
merkleLayers [][][]byte
state *pbp2p.BeaconState
lock sync.RWMutex
dirtyFields map[fieldIndex]interface{}
dirtyIndices map[fieldIndex][]uint64
stateFieldLeaves map[fieldIndex]*FieldTrie
rebuildTrie map[fieldIndex]bool
valIdxMap map[[48]byte]uint64
merkleLayers [][][]byte
sharedFieldReferences map[fieldIndex]*reference
}
@@ -48,219 +99,15 @@ type ReadOnlyValidator struct {
validator *ethpb.Validator
}
// InitializeFromProto the beacon state from a protobuf representation.
func InitializeFromProto(st *pbp2p.BeaconState) (*BeaconState, error) {
return InitializeFromProtoUnsafe(proto.Clone(st).(*pbp2p.BeaconState))
func (r *reference) AddRef() {
r.refs++
}
// InitializeFromProtoUnsafe directly uses the beacon state protobuf pointer
// and sets it as the inner state of the BeaconState type.
func InitializeFromProtoUnsafe(st *pbp2p.BeaconState) (*BeaconState, error) {
b := &BeaconState{
state: st,
dirtyFields: make(map[fieldIndex]interface{}, 20),
sharedFieldReferences: make(map[fieldIndex]*reference, 10),
valIdxMap: coreutils.ValidatorIndexMap(st.Validators),
func (r *reference) MinusRef() {
// Do not reduce further if object
// already has 0 reference to prevent overflow.
if r.refs == 0 {
return
}
for i := 0; i < 20; i++ {
b.dirtyFields[fieldIndex(i)] = true
}
// Initialize field reference tracking for shared data.
b.sharedFieldReferences[randaoMixes] = &reference{refs: 1}
b.sharedFieldReferences[stateRoots] = &reference{refs: 1}
b.sharedFieldReferences[blockRoots] = &reference{refs: 1}
b.sharedFieldReferences[previousEpochAttestations] = &reference{refs: 1}
b.sharedFieldReferences[currentEpochAttestations] = &reference{refs: 1}
b.sharedFieldReferences[slashings] = &reference{refs: 1}
b.sharedFieldReferences[eth1DataVotes] = &reference{refs: 1}
b.sharedFieldReferences[validators] = &reference{refs: 1}
b.sharedFieldReferences[balances] = &reference{refs: 1}
b.sharedFieldReferences[historicalRoots] = &reference{refs: 1}
return b, nil
}
// Copy returns a deep copy of the beacon state.
func (b *BeaconState) Copy() *BeaconState {
if !b.HasInnerState() {
return nil
}
b.lock.RLock()
defer b.lock.RUnlock()
dst := &BeaconState{
state: &pbp2p.BeaconState{
// Primitive types, safe to copy.
GenesisTime: b.state.GenesisTime,
Slot: b.state.Slot,
Eth1DepositIndex: b.state.Eth1DepositIndex,
// Large arrays, infrequently changed, constant size.
RandaoMixes: b.state.RandaoMixes,
StateRoots: b.state.StateRoots,
BlockRoots: b.state.BlockRoots,
PreviousEpochAttestations: b.state.PreviousEpochAttestations,
CurrentEpochAttestations: b.state.CurrentEpochAttestations,
Slashings: b.state.Slashings,
Eth1DataVotes: b.state.Eth1DataVotes,
// Large arrays, increases over time.
Validators: b.state.Validators,
Balances: b.state.Balances,
HistoricalRoots: b.state.HistoricalRoots,
// Everything else, too small to be concerned about, constant size.
Fork: b.Fork(),
LatestBlockHeader: b.LatestBlockHeader(),
Eth1Data: b.Eth1Data(),
JustificationBits: b.JustificationBits(),
PreviousJustifiedCheckpoint: b.PreviousJustifiedCheckpoint(),
CurrentJustifiedCheckpoint: b.CurrentJustifiedCheckpoint(),
FinalizedCheckpoint: b.FinalizedCheckpoint(),
},
dirtyFields: make(map[fieldIndex]interface{}, 20),
sharedFieldReferences: make(map[fieldIndex]*reference, 10),
// Copy on write validator index map.
valIdxMap: b.valIdxMap,
}
for field, ref := range b.sharedFieldReferences {
ref.refs++
dst.sharedFieldReferences[field] = ref
}
for i := range b.dirtyFields {
dst.dirtyFields[i] = true
}
if b.merkleLayers != nil {
dst.merkleLayers = make([][][]byte, len(b.merkleLayers))
for i, layer := range b.merkleLayers {
dst.merkleLayers[i] = make([][]byte, len(layer))
for j, content := range layer {
dst.merkleLayers[i][j] = make([]byte, len(content))
copy(dst.merkleLayers[i][j], content)
}
}
}
// Finalizer runs when dst is being destroyed in garbage collection.
runtime.SetFinalizer(dst, func(b *BeaconState) {
for field, v := range b.sharedFieldReferences {
v.refs--
if field == randaoMixes && v.refs == 0 {
memorypool.PutDoubleByteSlice(b.state.RandaoMixes)
}
}
})
return dst
}
// HashTreeRoot of the beacon state retrieves the Merkle root of the trie
// representation of the beacon state based on the eth2 Simple Serialize specification.
func (b *BeaconState) HashTreeRoot() ([32]byte, error) {
b.lock.Lock()
defer b.lock.Unlock()
if b.merkleLayers == nil || len(b.merkleLayers) == 0 {
fieldRoots, err := stateutil.ComputeFieldRoots(b.state)
if err != nil {
return [32]byte{}, err
}
layers := merkleize(fieldRoots)
b.merkleLayers = layers
b.dirtyFields = make(map[fieldIndex]interface{})
}
for field := range b.dirtyFields {
root, err := b.rootSelector(field)
if err != nil {
return [32]byte{}, err
}
b.merkleLayers[0][field] = root[:]
b.recomputeRoot(int(field))
delete(b.dirtyFields, field)
}
return bytesutil.ToBytes32(b.merkleLayers[len(b.merkleLayers)-1][0]), nil
}
// Merkleize 32-byte leaves into a Merkle trie for its adequate depth, returning
// the resulting layers of the trie based on the appropriate depth. This function
// pads the leaves to a power-of-two length.
func merkleize(leaves [][]byte) [][][]byte {
hashFunc := hashutil.CustomSHA256Hasher()
layers := make([][][]byte, merkle.GetDepth(uint64(len(leaves)))+1)
for len(leaves) != 32 {
leaves = append(leaves, make([]byte, 32))
}
currentLayer := leaves
layers[0] = currentLayer
// We keep track of the hash layers of a Merkle trie until we reach
// the top layer of length 1, which contains the single root element.
// [Root] -> Top layer has length 1.
// [E] [F] -> This layer has length 2.
// [A] [B] [C] [D] -> The bottom layer has length 4 (needs to be a power of two).
i := 1
for len(currentLayer) > 1 && i < len(layers) {
layer := make([][]byte, 0)
for i := 0; i < len(currentLayer); i += 2 {
hashedChunk := hashFunc(append(currentLayer[i], currentLayer[i+1]...))
layer = append(layer, hashedChunk[:])
}
currentLayer = layer
layers[i] = currentLayer
i++
}
return layers
}
func (b *BeaconState) rootSelector(field fieldIndex) ([32]byte, error) {
switch field {
case genesisTime:
return stateutil.Uint64Root(b.state.GenesisTime), nil
case slot:
return stateutil.Uint64Root(b.state.Slot), nil
case eth1DepositIndex:
return stateutil.Uint64Root(b.state.Eth1DepositIndex), nil
case fork:
return stateutil.ForkRoot(b.state.Fork)
case latestBlockHeader:
return stateutil.BlockHeaderRoot(b.state.LatestBlockHeader)
case blockRoots:
return stateutil.RootsArrayHashTreeRoot(b.state.BlockRoots, params.BeaconConfig().SlotsPerHistoricalRoot, "BlockRoots")
case stateRoots:
return stateutil.RootsArrayHashTreeRoot(b.state.StateRoots, params.BeaconConfig().SlotsPerHistoricalRoot, "StateRoots")
case historicalRoots:
return stateutil.HistoricalRootsRoot(b.state.HistoricalRoots)
case eth1Data:
return stateutil.Eth1Root(b.state.Eth1Data)
case eth1DataVotes:
return stateutil.Eth1DataVotesRoot(b.state.Eth1DataVotes)
case validators:
return stateutil.ValidatorRegistryRoot(b.state.Validators)
case balances:
return stateutil.ValidatorBalancesRoot(b.state.Balances)
case randaoMixes:
return stateutil.RootsArrayHashTreeRoot(b.state.RandaoMixes, params.BeaconConfig().EpochsPerHistoricalVector, "RandaoMixes")
case slashings:
return stateutil.SlashingsRoot(b.state.Slashings)
case previousEpochAttestations:
return stateutil.EpochAttestationsRoot(b.state.PreviousEpochAttestations)
case currentEpochAttestations:
return stateutil.EpochAttestationsRoot(b.state.CurrentEpochAttestations)
case justificationBits:
return bytesutil.ToBytes32(b.state.JustificationBits), nil
case previousJustifiedCheckpoint:
return stateutil.CheckpointRoot(b.state.PreviousJustifiedCheckpoint)
case currentJustifiedCheckpoint:
return stateutil.CheckpointRoot(b.state.CurrentJustifiedCheckpoint)
case finalizedCheckpoint:
return stateutil.CheckpointRoot(b.state.FinalizedCheckpoint)
}
return [32]byte{}, errors.New("invalid field index provided")
r.refs--
}
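The reference counter added above underpins copy-on-write sharing between BeaconState copies; a hedged, self-contained sketch of the intended usage pattern (simplified, single-threaded, with placeholder names):

package main

import "fmt"

// refSketch mirrors the minimal reference type: a counter of how many state
// copies currently share a field value.
type refSketch struct{ refs uint }

func (r *refSketch) AddRef() { r.refs++ }

func (r *refSketch) MinusRef() {
	if r.refs == 0 { // Guard against wrapping below zero.
		return
	}
	r.refs--
}

func main() {
	shared := &refSketch{refs: 1} // Owned by the original state.
	shared.AddRef()               // A Copy() now shares the same field.
	// Before mutating the field, a writer checks the count: more than one
	// reference means the data must be cloned first (copy-on-write).
	fmt.Println(shared.refs > 1) // true: clone before writing.
	shared.MinusRef()            // The copy is finalized / garbage collected.
	fmt.Println(shared.refs > 1) // false: safe to mutate in place now.
}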


@@ -34,6 +34,7 @@ go_library(
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/block:go_default_library",
@@ -45,6 +46,7 @@ go_library(
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/filters:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/p2p/encoder:go_default_library",
@@ -59,6 +61,7 @@ go_library(
"//shared/params:go_default_library",
"//shared/roughtime:go_default_library",
"//shared/runutil:go_default_library",
"//shared/sliceutil:go_default_library",
"//shared/slotutil:go_default_library",
"//shared/traceutil:go_default_library",
"@com_github_gogo_protobuf//proto:go_default_library",
@@ -111,6 +114,7 @@ go_test(
"//beacon-chain/core/state:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/p2p/encoder:go_default_library",
"//beacon-chain/p2p/peers:go_default_library",


@@ -0,0 +1,63 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"log.go",
"round_robin.go",
"service.go",
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/sync/initial-sync-old",
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/block:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/flags:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/sync:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//shared:go_default_library",
"//shared/bytesutil:go_default_library",
"//shared/featureconfig:go_default_library",
"//shared/mathutil:go_default_library",
"//shared/params:go_default_library",
"//shared/roughtime:go_default_library",
"@com_github_kevinms_leakybucket_go//:go_default_library",
"@com_github_libp2p_go_libp2p_core//peer:go_default_library",
"@com_github_paulbellamy_ratecounter//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["round_robin_test.go"],
embed = [":go_default_library"],
race = "on",
tags = ["race_on"],
deps = [
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/p2p/peers:go_default_library",
"//beacon-chain/p2p/testing:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/sync:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//shared/hashutil:go_default_library",
"//shared/params:go_default_library",
"//shared/roughtime:go_default_library",
"//shared/sliceutil:go_default_library",
"@com_github_kevinms_leakybucket_go//:go_default_library",
"@com_github_libp2p_go_libp2p_core//network:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)


@@ -0,0 +1,7 @@
package initialsyncold
import (
"github.com/sirupsen/logrus"
)
var log = logrus.WithField("prefix", "initial-sync")


@@ -0,0 +1,360 @@
package initialsyncold
import (
"context"
"fmt"
"io"
"math/rand"
"sort"
"sync/atomic"
"time"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/paulbellamy/ratecounter"
"github.com/pkg/errors"
eth "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
blockfeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/block"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/flags"
prysmsync "github.com/prysmaticlabs/prysm/beacon-chain/sync"
p2ppb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/mathutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/sirupsen/logrus"
)
const blockBatchSize = 64
const counterSeconds = 20
const refreshTime = 6 * time.Second
// Round Robin sync looks at the latest peer statuses and syncs with the highest
// finalized peer.
//
// Step 1 - Sync to finalized epoch.
// Sync with peers of lowest finalized root with epoch greater than head state.
//
// Step 2 - Sync to head from finalized epoch.
// Using the finalized root as the head_block_root and the epoch start slot
// after the finalized epoch, request blocks to head from some subset of peers
// where step = 1.
func (s *Service) roundRobinSync(genesis time.Time) error {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
defer s.chain.ClearCachedStates()
if cfg := featureconfig.Get(); cfg.EnableSkipSlotsCache {
cfg.EnableSkipSlotsCache = false
featureconfig.Init(cfg)
defer func() {
cfg := featureconfig.Get()
cfg.EnableSkipSlotsCache = true
featureconfig.Init(cfg)
}()
}
counter := ratecounter.NewRateCounter(counterSeconds * time.Second)
randGenerator := rand.New(rand.NewSource(time.Now().Unix()))
var lastEmptyRequests int
highestFinalizedSlot := helpers.StartSlot(s.highestFinalizedEpoch() + 1)
// Step 1 - Sync to end of finalized epoch.
for s.chain.HeadSlot() < highestFinalizedSlot {
root, finalizedEpoch, peers := s.p2p.Peers().BestFinalized(params.BeaconConfig().MaxPeersToSync, helpers.SlotToEpoch(s.chain.HeadSlot()))
if len(peers) == 0 {
log.Warn("No peers; waiting for reconnect")
time.Sleep(refreshTime)
continue
}
if len(peers) >= flags.Get().MinimumSyncPeers {
highestFinalizedSlot = helpers.StartSlot(finalizedEpoch + 1)
}
// shuffle peers to prevent a bad peer from
// stalling sync with invalid blocks
randGenerator.Shuffle(len(peers), func(i, j int) {
peers[i], peers[j] = peers[j], peers[i]
})
// Request a range of blocks from multiple peers.
// Example:
// - number of peers = 4
// - range of block slots is 64...128
// Four requests will be spread across the peers, using the step argument to distribute the load,
// i.e. the first peer is asked for block 64, 68, 72... while the second peer is asked for
// 65, 69, 73... and so on for other peers.
var request func(start uint64, step uint64, count uint64, peers []peer.ID, remainder int) ([]*eth.SignedBeaconBlock, error)
request = func(start uint64, step uint64, count uint64, peers []peer.ID, remainder int) ([]*eth.SignedBeaconBlock, error) {
if len(peers) == 0 {
return nil, errors.WithStack(errors.New("no peers left to request blocks"))
}
var p2pRequestCount int32
errChan := make(chan error)
blocksChan := make(chan []*eth.SignedBeaconBlock)
// Handle large block ranges of skipped slots.
start += count * uint64(lastEmptyRequests*len(peers))
if count <= 1 {
step = 1
}
// Short-circuit if start far exceeds the highest finalized slot, to avoid an infinite loop.
if start > highestFinalizedSlot {
return nil, errors.Errorf("attempted to ask for a start slot of %d which is greater than the next highest slot of %d", start, highestFinalizedSlot)
}
atomic.AddInt32(&p2pRequestCount, int32(len(peers)))
for i, pid := range peers {
if ctx.Err() != nil {
return nil, ctx.Err()
}
start := start + uint64(i)*step
step := step * uint64(len(peers))
count := mathutil.Min(count, (helpers.StartSlot(finalizedEpoch+1)-start)/step)
// If the count was divided by an odd number of peers, there will be some blocks
// missing from the first requests so we accommodate that scenario.
if i < remainder {
count++
}
// asking for no blocks may cause the client to hang. This should never happen and
// the peer may return an error anyway, but we'll ask for at least one block.
if count == 0 {
count = 1
}
req := &p2ppb.BeaconBlocksByRangeRequest{
HeadBlockRoot: root,
StartSlot: start,
Count: count,
Step: step,
}
go func(i int, pid peer.ID) {
defer func() {
zeroIfIAmTheLast := atomic.AddInt32(&p2pRequestCount, -1)
if zeroIfIAmTheLast == 0 {
close(blocksChan)
}
}()
resp, err := s.requestBlocks(ctx, req, pid)
if err != nil {
// Fail over to other peers by splitting this request evenly across them.
ps := append(peers[:i], peers[i+1:]...)
log.WithError(err).WithField(
"remaining peers",
len(ps),
).WithField(
"peer",
pid.Pretty(),
).Debug("Request failed, trying to round robin with other peers")
if len(ps) == 0 {
errChan <- errors.WithStack(errors.New("no peers left to request blocks"))
return
}
resp, err = request(start, step, count/uint64(len(ps)) /*count*/, ps, int(count)%len(ps) /*remainder*/)
if err != nil {
errChan <- err
return
}
}
log.WithField("peer", pid).WithField("count", len(resp)).Debug("Received blocks")
blocksChan <- resp
}(i, pid)
}
var unionRespBlocks []*eth.SignedBeaconBlock
for {
select {
case err := <-errChan:
return nil, err
case resp, ok := <-blocksChan:
if ok {
// if this synchronization becomes a bottleneck:
// think about immediately allocating space for all peers in unionRespBlocks,
// and write without synchronization
unionRespBlocks = append(unionRespBlocks, resp...)
} else {
return unionRespBlocks, nil
}
}
}
}
startBlock := s.chain.HeadSlot() + 1
skippedBlocks := blockBatchSize * uint64(lastEmptyRequests*len(peers))
if startBlock+skippedBlocks > helpers.StartSlot(finalizedEpoch+1) {
log.WithField("finalizedEpoch", finalizedEpoch).Debug("Requested block range is greater than the finalized epoch")
break
}
blocks, err := request(
s.chain.HeadSlot()+1, // start
1, // step
blockBatchSize, // count
peers, // peers
0, // remainder
)
if err != nil {
log.WithError(err).Error("Round robing sync request failed")
continue
}
// Since the block responses were appended to the list, we must sort them in order to
// process sequentially. This method doesn't make much wall time compared to block
// processing.
sort.Slice(blocks, func(i, j int) bool {
return blocks[i].Block.Slot < blocks[j].Block.Slot
})
for _, blk := range blocks {
s.logSyncStatus(genesis, blk.Block, peers, counter)
if !s.db.HasBlock(ctx, bytesutil.ToBytes32(blk.Block.ParentRoot)) {
log.Debugf("Beacon node doesn't have a block in db with root %#x", blk.Block.ParentRoot)
continue
}
s.blockNotifier.BlockFeed().Send(&feed.Event{
Type: blockfeed.ReceivedBlock,
Data: &blockfeed.ReceivedBlockData{SignedBlock: blk},
})
if featureconfig.Get().InitSyncNoVerify {
if err := s.chain.ReceiveBlockNoVerify(ctx, blk); err != nil {
return err
}
} else {
if err := s.chain.ReceiveBlockNoPubsubForkchoice(ctx, blk); err != nil {
return err
}
}
}
// If there were no blocks in the last request range, increment the counter so the same
// range isn't requested again on the next loop as the headSlot didn't change.
if len(blocks) == 0 {
lastEmptyRequests++
} else {
lastEmptyRequests = 0
}
}
log.Debug("Synced to finalized epoch - now syncing blocks up to current head")
if s.chain.HeadSlot() == helpers.SlotsSince(genesis) {
return nil
}
// Step 2 - sync to head from any single peer.
// This step might need to be improved for cases where there has been a long period since
// finality. This step is less important than syncing to finality in terms of threat
// mitigation. We are already convinced that we are on the correct finalized chain. Any blocks
// we receive thereafter must build on the finalized chain or be considered invalid during
// fork choice resolution / block processing.
root, _, pids := s.p2p.Peers().BestFinalized(1 /* maxPeers */, s.highestFinalizedEpoch())
for len(pids) == 0 {
log.Info("Waiting for a suitable peer before syncing to the head of the chain")
time.Sleep(refreshTime)
root, _, pids = s.p2p.Peers().BestFinalized(1 /* maxPeers */, s.highestFinalizedEpoch())
}
best := pids[0]
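// Request the remaining slots from the single best peer in batches of at most 256 blocks,
// until the local head catches up or the peer stops returning blocks.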
for head := helpers.SlotsSince(genesis); s.chain.HeadSlot() < head; {
req := &p2ppb.BeaconBlocksByRangeRequest{
HeadBlockRoot: root,
StartSlot: s.chain.HeadSlot() + 1,
Count: mathutil.Min(helpers.SlotsSince(genesis)-s.chain.HeadSlot()+1, 256),
Step: 1,
}
log.WithField("req", req).WithField("peer", best.Pretty()).Debug(
"Sending batch block request",
)
resp, err := s.requestBlocks(ctx, req, best)
if err != nil {
return err
}
for _, blk := range resp {
s.logSyncStatus(genesis, blk.Block, []peer.ID{best}, counter)
if err := s.chain.ReceiveBlockNoPubsubForkchoice(ctx, blk); err != nil {
log.WithError(err).Error("Failed to process block, exiting init sync")
return nil
}
}
if len(resp) == 0 {
break
}
}
return nil
}
// requestBlocks issues a blocks-by-range request to a specific peer.
func (s *Service) requestBlocks(ctx context.Context, req *p2ppb.BeaconBlocksByRangeRequest, pid peer.ID) ([]*eth.SignedBeaconBlock, error) {
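// Respect the per-peer rate limit: wait for the peer's bucket to drain if this request would
// exceed its remaining capacity.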
if s.blocksRateLimiter.Remaining(pid.String()) < int64(req.Count) {
log.WithField("peer", pid).Debug("Slowing down for rate limit")
time.Sleep(s.blocksRateLimiter.TillEmpty(pid.String()))
}
s.blocksRateLimiter.Add(pid.String(), int64(req.Count))
log.WithFields(logrus.Fields{
"peer": pid,
"start": req.StartSlot,
"count": req.Count,
"step": req.Step,
"head": fmt.Sprintf("%#x", req.HeadBlockRoot),
}).Debug("Requesting blocks")
stream, err := s.p2p.Send(ctx, req, pid)
if err != nil {
return nil, errors.Wrap(err, "failed to send request to peer")
}
defer stream.Close()
resp := make([]*eth.SignedBeaconBlock, 0, req.Count)
for {
blk, err := prysmsync.ReadChunkedBlock(stream, s.p2p)
if err == io.EOF {
break
}
if err != nil {
return nil, errors.Wrap(err, "failed to read chunked block")
}
resp = append(resp, blk)
}
return resp, nil
}
// highestFinalizedEpoch returns the absolute highest finalized epoch of all connected peers.
// Note this can be lower than our finalized epoch if we have no peers or peers that are all behind us.
func (s *Service) highestFinalizedEpoch() uint64 {
highest := uint64(0)
for _, pid := range s.p2p.Peers().Connected() {
peerChainState, err := s.p2p.Peers().ChainState(pid)
if err == nil && peerChainState != nil && peerChainState.FinalizedEpoch > highest {
highest = peerChainState.FinalizedEpoch
}
}
return highest
}
// logSyncStatus logs the current sync status and increments the block processing counter.
func (s *Service) logSyncStatus(genesis time.Time, blk *eth.BeaconBlock, syncingPeers []peer.ID, counter *ratecounter.RateCounter) {
counter.Incr(1)
rate := float64(counter.Rate()) / counterSeconds
if rate == 0 {
rate = 1
}
timeRemaining := time.Duration(float64(helpers.SlotsSince(genesis)-blk.Slot)/rate) * time.Second
log.WithField(
"peers",
fmt.Sprintf("%d/%d", len(syncingPeers), len(s.p2p.Peers().Connected())),
).WithField(
"blocksPerSecond",
fmt.Sprintf("%.1f", rate),
).Infof(
"Processing block %d/%d - estimated time remaining %s",
blk.Slot,
helpers.SlotsSince(genesis),
timeRemaining,
)
}


@@ -0,0 +1,449 @@
package initialsyncold
import (
"context"
"fmt"
"reflect"
"sync"
"testing"
"time"
"github.com/kevinms/leakybucket-go"
"github.com/libp2p/go-libp2p-core/network"
eth "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
dbtest "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers"
p2pt "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
beaconsync "github.com/prysmaticlabs/prysm/beacon-chain/sync"
p2ppb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/hashutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/roughtime"
"github.com/prysmaticlabs/prysm/shared/sliceutil"
"github.com/sirupsen/logrus"
)
type testCache struct {
sync.RWMutex
rootCache map[uint64][32]byte
parentSlotCache map[uint64]uint64
}
var cache = &testCache{}
type peerData struct {
blocks []uint64 // slots that peer has blocks
finalizedEpoch uint64
headSlot uint64
failureSlots []uint64 // slots at which the peer will return an error
forkedPeer bool
}
func init() {
logrus.SetLevel(logrus.DebugLevel)
}
func TestConstants(t *testing.T) {
if params.BeaconConfig().MaxPeersToSync*blockBatchSize > 1000 {
t.Fatal("rpc rejects requests over 1000 range slots")
}
}
func TestRoundRobinSync(t *testing.T) {
tests := []struct {
name string
currentSlot uint64
expectedBlockSlots []uint64
peers []*peerData
}{
{
name: "Single peer with all blocks",
currentSlot: 131,
expectedBlockSlots: makeSequence(1, 131),
peers: []*peerData{
{
blocks: makeSequence(1, 131),
finalizedEpoch: 1,
headSlot: 131,
},
},
},
{
name: "Multiple peers with all blocks",
currentSlot: 131,
expectedBlockSlots: makeSequence(1, 131),
peers: []*peerData{
{
blocks: makeSequence(1, 131),
finalizedEpoch: 1,
headSlot: 131,
},
{
blocks: makeSequence(1, 131),
finalizedEpoch: 1,
headSlot: 131,
},
{
blocks: makeSequence(1, 131),
finalizedEpoch: 1,
headSlot: 131,
},
{
blocks: makeSequence(1, 131),
finalizedEpoch: 1,
headSlot: 131,
},
},
},
{
name: "Multiple peers with failures",
currentSlot: 320, // 10 epochs
expectedBlockSlots: makeSequence(1, 320),
peers: []*peerData{
{
blocks: makeSequence(1, 320),
finalizedEpoch: 8,
headSlot: 320,
},
{
blocks: makeSequence(1, 320),
finalizedEpoch: 8,
headSlot: 320,
failureSlots: makeSequence(1, 32), // first epoch
},
{
blocks: makeSequence(1, 320),
finalizedEpoch: 8,
headSlot: 320,
},
{
blocks: makeSequence(1, 320),
finalizedEpoch: 8,
headSlot: 320,
},
},
},
{
name: "Multiple peers with many skipped slots",
currentSlot: 640, // 10 epochs
expectedBlockSlots: append(makeSequence(1, 64), makeSequence(500, 640)...),
peers: []*peerData{
{
blocks: append(makeSequence(1, 64), makeSequence(500, 640)...),
finalizedEpoch: 18,
headSlot: 640,
},
{
blocks: append(makeSequence(1, 64), makeSequence(500, 640)...),
finalizedEpoch: 18,
headSlot: 640,
},
{
blocks: append(makeSequence(1, 64), makeSequence(500, 640)...),
finalizedEpoch: 18,
headSlot: 640,
},
},
},
// TODO(3147): Handle multiple failures.
//{
// name: "Multiple peers with multiple failures",
// currentSlot: 320, // 10 epochs
// expectedBlockSlots: makeSequence(1, 320),
// peers: []*peerData{
// {
// blocks: makeSequence(1, 320),
// finalizedEpoch: 4,
// headSlot: 320,
// },
// {
// blocks: makeSequence(1, 320),
// finalizedEpoch: 4,
// headSlot: 320,
// failureSlots: makeSequence(1, 320),
// },
// {
// blocks: makeSequence(1, 320),
// finalizedEpoch: 4,
// headSlot: 320,
// failureSlots: makeSequence(1, 320),
// },
// {
// blocks: makeSequence(1, 320),
// finalizedEpoch: 4,
// headSlot: 320,
// failureSlots: makeSequence(1, 320),
// },
// },
//},
{
name: "Multiple peers with different finalized epoch",
currentSlot: 320, // 10 epochs
expectedBlockSlots: makeSequence(1, 320),
peers: []*peerData{
{
blocks: makeSequence(1, 320),
finalizedEpoch: 4,
headSlot: 320,
},
{
blocks: makeSequence(1, 256),
finalizedEpoch: 3,
headSlot: 256,
},
{
blocks: makeSequence(1, 256),
finalizedEpoch: 3,
headSlot: 256,
},
{
blocks: makeSequence(1, 192),
finalizedEpoch: 2,
headSlot: 192,
},
},
},
{
name: "Multiple peers with missing parent blocks",
currentSlot: 160, // 5 epochs
expectedBlockSlots: makeSequence(1, 160),
peers: []*peerData{
{
blocks: makeSequence(1, 160),
finalizedEpoch: 4,
headSlot: 160,
},
{
blocks: append(makeSequence(1, 6), makeSequence(161, 165)...),
finalizedEpoch: 4,
headSlot: 160,
forkedPeer: true,
},
{
blocks: makeSequence(1, 160),
finalizedEpoch: 4,
headSlot: 160,
},
{
blocks: makeSequence(1, 160),
finalizedEpoch: 4,
headSlot: 160,
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cache.initializeRootCache(tt.expectedBlockSlots, t)
p := p2pt.NewTestP2P(t)
beaconDB := dbtest.SetupDB(t)
connectPeers(t, p, tt.peers, p.Peers())
cache.RLock()
genesisRoot := cache.rootCache[0]
cache.RUnlock()
err := beaconDB.SaveBlock(context.Background(), &eth.SignedBeaconBlock{
Block: &eth.BeaconBlock{
Slot: 0,
}})
if err != nil {
t.Fatal(err)
}
st, err := stateTrie.InitializeFromProto(&p2ppb.BeaconState{})
if err != nil {
t.Fatal(err)
}
mc := &mock.ChainService{
State: st,
Root: genesisRoot[:],
DB: beaconDB,
} // no-op mock
s := &Service{
chain: mc,
blockNotifier: mc.BlockNotifier(),
p2p: p,
db: beaconDB,
synced: false,
chainStarted: true,
blocksRateLimiter: leakybucket.NewCollector(allowedBlocksPerSecond, allowedBlocksPerSecond, false /* deleteEmptyBuckets */),
}
if err := s.roundRobinSync(makeGenesisTime(tt.currentSlot)); err != nil {
t.Error(err)
}
if s.chain.HeadSlot() != tt.currentSlot {
t.Errorf("Head slot (%d) is not currentSlot (%d)", s.chain.HeadSlot(), tt.currentSlot)
}
if len(mc.BlocksReceived) != len(tt.expectedBlockSlots) {
t.Errorf("Processes wrong number of blocks. Wanted %d got %d", len(tt.expectedBlockSlots), len(mc.BlocksReceived))
}
var receivedBlockSlots []uint64
for _, blk := range mc.BlocksReceived {
receivedBlockSlots = append(receivedBlockSlots, blk.Block.Slot)
}
if missing := sliceutil.NotUint64(sliceutil.IntersectionUint64(tt.expectedBlockSlots, receivedBlockSlots), tt.expectedBlockSlots); len(missing) > 0 {
t.Errorf("Missing blocks at slots %v", missing)
}
dbtest.TeardownDB(t, beaconDB)
})
}
}
// connectPeers connects test peers with the local host. It sets up peer statuses and the
// appropriate stream handlers for each test peer.
func connectPeers(t *testing.T, host *p2pt.TestP2P, data []*peerData, peerStatus *peers.Status) {
const topic = "/eth2/beacon_chain/req/beacon_blocks_by_range/1/ssz"
for _, d := range data {
peer := p2pt.NewTestP2P(t)
// Copy pointer for callback scope.
var datum = d
peer.SetStreamHandler(topic, func(stream network.Stream) {
defer stream.Close()
req := &p2ppb.BeaconBlocksByRangeRequest{}
if err := peer.Encoding().DecodeWithLength(stream, req); err != nil {
t.Error(err)
}
requestedBlocks := makeSequence(req.StartSlot, req.StartSlot+(req.Count*req.Step))
// Expected failure range
if len(sliceutil.IntersectionUint64(datum.failureSlots, requestedBlocks)) > 0 {
if _, err := stream.Write([]byte{0x01}); err != nil {
t.Error(err)
}
if _, err := peer.Encoding().EncodeWithLength(stream, "bad"); err != nil {
t.Error(err)
}
return
}
// Determine the correct subset of blocks to return as dictated by the test scenario.
blocks := sliceutil.IntersectionUint64(datum.blocks, requestedBlocks)
ret := make([]*eth.SignedBeaconBlock, 0)
for _, slot := range blocks {
if (slot-req.StartSlot)%req.Step != 0 {
continue
}
cache.RLock()
parentRoot := cache.rootCache[cache.parentSlotCache[slot]]
cache.RUnlock()
blk := &eth.SignedBeaconBlock{
Block: &eth.BeaconBlock{
Slot: slot,
ParentRoot: parentRoot[:],
},
}
// If forked peer, give a different parent root.
if datum.forkedPeer {
newRoot := hashutil.Hash(parentRoot[:])
blk.Block.ParentRoot = newRoot[:]
}
ret = append(ret, blk)
currRoot, _ := ssz.HashTreeRoot(blk.Block)
logrus.Infof("block with slot %d , signing root %#x and parent root %#x", slot, currRoot, parentRoot)
}
if uint64(len(ret)) > req.Count {
ret = ret[:req.Count]
}
for i := 0; i < len(ret); i++ {
if err := beaconsync.WriteChunk(stream, peer.Encoding(), ret[i]); err != nil {
t.Error(err)
}
}
})
peer.Connect(host)
peerStatus.Add(peer.PeerID(), nil, network.DirOutbound)
peerStatus.SetConnectionState(peer.PeerID(), peers.PeerConnected)
peerStatus.SetChainState(peer.PeerID(), &p2ppb.Status{
HeadForkVersion: params.BeaconConfig().GenesisForkVersion,
FinalizedRoot: []byte(fmt.Sprintf("finalized_root %d", datum.finalizedEpoch)),
FinalizedEpoch: datum.finalizedEpoch,
HeadRoot: []byte("head_root"),
HeadSlot: datum.headSlot,
})
}
}
// makeGenesisTime returns a genesis time such that the current wall-clock time corresponds to currentSlot.
func makeGenesisTime(currentSlot uint64) time.Time {
return roughtime.Now().Add(-1 * time.Second * time.Duration(currentSlot) * time.Duration(params.BeaconConfig().SecondsPerSlot))
}
// sanity test on helper function
func TestMakeGenesisTime(t *testing.T) {
currentSlot := uint64(64)
gt := makeGenesisTime(currentSlot)
if helpers.SlotsSince(gt) != currentSlot {
t.Fatalf("Wanted %d, got %d", currentSlot, helpers.SlotsSince(gt))
}
}
// helper function for sequences of block slots
func makeSequence(start, end uint64) []uint64 {
if end < start {
panic("cannot make sequence where end is before start")
}
seq := make([]uint64, 0, end-start+1)
for i := start; i <= end; i++ {
seq = append(seq, i)
}
return seq
}
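// initializeRootCache precomputes block roots for the given slots, chaining every block to the
// previously generated root so that parent lookups in tests resolve correctly.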
func (c *testCache) initializeRootCache(reqSlots []uint64, t *testing.T) {
c.Lock()
defer c.Unlock()
c.rootCache = make(map[uint64][32]byte)
c.parentSlotCache = make(map[uint64]uint64)
parentSlot := uint64(0)
genesisBlock := &eth.BeaconBlock{
Slot: 0,
}
genesisRoot, err := ssz.HashTreeRoot(genesisBlock)
if err != nil {
t.Fatal(err)
}
c.rootCache[0] = genesisRoot
parentRoot := genesisRoot
for _, slot := range reqSlots {
currentBlock := &eth.BeaconBlock{
Slot: slot,
ParentRoot: parentRoot[:],
}
parentRoot, err = ssz.HashTreeRoot(currentBlock)
if err != nil {
t.Fatal(err)
}
c.rootCache[slot] = parentRoot
c.parentSlotCache[slot] = parentSlot
parentSlot = slot
}
}
// sanity test on helper function
func TestMakeSequence(t *testing.T) {
got := makeSequence(3, 5)
want := []uint64{3, 4, 5}
if !reflect.DeepEqual(got, want) {
t.Fatalf("Wanted %v, got %v", want, got)
}
}


@@ -0,0 +1,191 @@
package initialsyncold
import (
"context"
"time"
"github.com/kevinms/leakybucket-go"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
blockfeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/block"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
"github.com/prysmaticlabs/prysm/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/shared"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/roughtime"
"github.com/sirupsen/logrus"
)
var _ = shared.Service(&Service{})
type blockchainService interface {
blockchain.BlockReceiver
blockchain.HeadFetcher
ClearCachedStates()
blockchain.FinalizationFetcher
}
const (
handshakePollingInterval = 5 * time.Second // Polling interval for checking the number of received handshakes.
allowedBlocksPerSecond = 32.0
)
// Config to set up the initial sync service.
type Config struct {
P2P p2p.P2P
DB db.ReadOnlyDatabase
Chain blockchainService
StateNotifier statefeed.Notifier
BlockNotifier blockfeed.Notifier
}
// Service is the initial sync service.
type Service struct {
ctx context.Context
chain blockchainService
p2p p2p.P2P
db db.ReadOnlyDatabase
synced bool
chainStarted bool
stateNotifier statefeed.Notifier
blockNotifier blockfeed.Notifier
blocksRateLimiter *leakybucket.Collector
}
// NewInitialSync configures the initial sync service responsible for bringing the node up to the
// latest head of the blockchain.
func NewInitialSync(cfg *Config) *Service {
return &Service{
ctx: context.Background(),
chain: cfg.Chain,
p2p: cfg.P2P,
db: cfg.DB,
stateNotifier: cfg.StateNotifier,
blockNotifier: cfg.BlockNotifier,
blocksRateLimiter: leakybucket.NewCollector(allowedBlocksPerSecond, allowedBlocksPerSecond, false /* deleteEmptyBuckets */),
}
}
// Start the initial sync service.
func (s *Service) Start() {
var genesis time.Time
headState, err := s.chain.HeadState(s.ctx)
if headState == nil || err != nil {
// Wait for state to be initialized.
stateChannel := make(chan *feed.Event, 1)
stateSub := s.stateNotifier.StateFeed().Subscribe(stateChannel)
defer stateSub.Unsubscribe()
genesisSet := false
for !genesisSet {
select {
case event := <-stateChannel:
if event.Type == statefeed.Initialized {
data := event.Data.(*statefeed.InitializedData)
log.WithField("starttime", data.StartTime).Debug("Received state initialized event")
genesis = data.StartTime
genesisSet = true
}
case <-s.ctx.Done():
log.Debug("Context closed, exiting goroutine")
return
case err := <-stateSub.Err():
log.WithError(err).Error("Subscription to state notifier failed")
return
}
}
stateSub.Unsubscribe()
} else {
genesis = time.Unix(int64(headState.GenesisTime()), 0)
}
if genesis.After(roughtime.Now()) {
log.WithField(
"genesis time",
genesis,
).Warn("Genesis time is in the future - waiting to start sync...")
time.Sleep(roughtime.Until(genesis))
}
s.chainStarted = true
currentSlot := helpers.SlotsSince(genesis)
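// If genesis happened less than an epoch ago, there is nothing meaningful to sync yet.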
if helpers.SlotToEpoch(currentSlot) == 0 {
log.Info("Chain started within the last epoch - not syncing")
s.synced = true
return
}
log.Info("Starting initial chain sync...")
// Are we already in sync, or close to it?
if helpers.SlotToEpoch(s.chain.HeadSlot()) == helpers.SlotToEpoch(currentSlot) {
log.Info("Already synced to the current chain head")
s.synced = true
return
}
s.waitForMinimumPeers()
if err := s.roundRobinSync(genesis); err != nil {
panic(err)
}
log.Infof("Synced up to slot %d", s.chain.HeadSlot())
s.synced = true
}
// Stop initial sync.
func (s *Service) Stop() error {
return nil
}
// Status of initial sync.
func (s *Service) Status() error {
if !s.synced && s.chainStarted {
return errors.New("syncing")
}
return nil
}
// Syncing returns true if initial sync is still running.
func (s *Service) Syncing() bool {
return !s.synced
}
// Resync allows a node to start syncing again if it has fallen
// behind the current network head.
func (s *Service) Resync() error {
// set it to false since we are syncing again
s.synced = false
defer func() { s.synced = true }() // Reset it at the end of the method.
headState, err := s.chain.HeadState(context.Background())
if err != nil {
return errors.Wrap(err, "could not retrieve head state")
}
genesis := time.Unix(int64(headState.GenesisTime()), 0)
s.waitForMinimumPeers()
err = s.roundRobinSync(genesis)
if err != nil {
log = log.WithError(err)
}
log.WithField("slot", s.chain.HeadSlot()).Info("Resync attempt complete")
return nil
}
func (s *Service) waitForMinimumPeers() {
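// The number of peers required is the configured minimum number of sync peers, capped at MaxPeersToSync.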
required := params.BeaconConfig().MaxPeersToSync
if flags.Get().MinimumSyncPeers < required {
required = flags.Get().MinimumSyncPeers
}
for {
_, _, peers := s.p2p.Peers().BestFinalized(params.BeaconConfig().MaxPeersToSync, s.chain.FinalizedCheckpt().Epoch)
if len(peers) >= required {
break
}
log.WithFields(logrus.Fields{
"suitable": len(peers),
"required": required}).Info("Waiting for enough suitable peers before syncing")
time.Sleep(handshakePollingInterval)
}
}


@@ -4,6 +4,7 @@ go_library(
name = "go_default_library",
srcs = [
"blocks_fetcher.go",
"blocks_queue.go",
"log.go",
"round_robin.go",
"service.go",
@@ -41,6 +42,7 @@ go_test(
name = "go_default_test",
srcs = [
"blocks_fetcher_test.go",
"blocks_queue_test.go",
"round_robin_test.go",
],
embed = [":go_default_library"],
@@ -48,6 +50,8 @@ go_test(
tags = ["race_on"],
deps = [
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/block:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/testing:go_default_library",
@@ -56,6 +60,8 @@ go_test(
"//beacon-chain/state:go_default_library",
"//beacon-chain/sync:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//shared/bytesutil:go_default_library",
"//shared/featureconfig:go_default_library",
"//shared/hashutil:go_default_library",
"//shared/params:go_default_library",
"//shared/roughtime:go_default_library",


@@ -24,8 +24,6 @@ import (
"go.opencensus.io/trace"
)
const fetchRequestsBuffer = 8 // number of pending fetch requests
var (
errNoPeersAvailable = errors.New("no peers available, waiting for reconnect")
errFetcherCtxIsDone = errors.New("fetcher's context is done, reinitialize")
@@ -74,7 +72,7 @@ func newBlocksFetcher(ctx context.Context, cfg *blocksFetcherConfig) *blocksFetc
rateLimiter := leakybucket.NewCollector(
allowedBlocksPerSecond, /* rate */
allowedBlocksPerSecond, /* capacity */
false /* deleteEmptyBuckets */)
false /* deleteEmptyBuckets */)
return &blocksFetcher{
ctx: ctx,
@@ -82,8 +80,8 @@ func newBlocksFetcher(ctx context.Context, cfg *blocksFetcherConfig) *blocksFetc
headFetcher: cfg.headFetcher,
p2p: cfg.p2p,
rateLimiter: rateLimiter,
fetchRequests: make(chan *fetchRequestParams, fetchRequestsBuffer),
fetchResponses: make(chan *fetchRequestResponse),
fetchRequests: make(chan *fetchRequestParams, queueMaxPendingRequests),
fetchResponses: make(chan *fetchRequestResponse, queueMaxPendingRequests),
quit: make(chan struct{}),
}
}
@@ -130,8 +128,10 @@ func (f *blocksFetcher) loop() {
wg.Add(1)
go func() {
defer wg.Done()
f.handleRequest(req.ctx, req.start, req.count)
select {
case <-f.ctx.Done():
case f.fetchResponses <- f.handleRequest(req.ctx, req.start, req.count):
}
}()
}
}
@@ -139,82 +139,65 @@ func (f *blocksFetcher) loop() {
// scheduleRequest adds request to incoming queue.
func (f *blocksFetcher) scheduleRequest(ctx context.Context, start, count uint64) error {
if ctx.Err() != nil {
return ctx.Err()
}
request := &fetchRequestParams{
ctx: ctx,
start: start,
count: count,
}
select {
case <-f.ctx.Done():
return errFetcherCtxIsDone
default:
f.fetchRequests <- &fetchRequestParams{
ctx: ctx,
start: start,
count: count,
}
case f.fetchRequests <- request:
}
return nil
}
// handleRequest parses fetch request and forwards it to response builder.
func (f *blocksFetcher) handleRequest(ctx context.Context, start, count uint64) {
func (f *blocksFetcher) handleRequest(ctx context.Context, start, count uint64) *fetchRequestResponse {
ctx, span := trace.StartSpan(ctx, "initialsync.handleRequest")
defer span.End()
// sendResponse ensures that response is not sent to a closed channel (when context is done).
sendResponse := func(ctx context.Context, response *fetchRequestResponse) {
if ctx.Err() != nil {
log.WithError(ctx.Err()).Debug("Can not send fetch request response")
return
}
f.fetchResponses <- response
response := &fetchRequestResponse{
start: start,
count: count,
blocks: []*eth.SignedBeaconBlock{},
err: nil,
peers: []peer.ID{},
}
if ctx.Err() != nil {
sendResponse(ctx, nil)
return
response.err = ctx.Err()
return response
}
headEpoch := helpers.SlotToEpoch(f.headFetcher.HeadSlot())
root, finalizedEpoch, peers := f.p2p.Peers().BestFinalized(params.BeaconConfig().MaxPeersToSync, headEpoch)
log.WithFields(logrus.Fields{
"start": start,
"count": count,
"finalizedEpoch": finalizedEpoch,
"numPeers": len(peers),
}).Debug("Block fetcher received a request")
if len(peers) == 0 {
log.Error(errNoPeersAvailable)
return
response.err = errNoPeersAvailable
return response
}
// Short circuit requests whose start slot far exceeds the highest finalized slot, to avoid an infinite loop.
highestFinalizedSlot := helpers.StartSlot(finalizedEpoch + 1)
if start > highestFinalizedSlot {
log.WithError(errStartSlotIsTooHigh).Debug("Block fetch request failed")
sendResponse(ctx, &fetchRequestResponse{
start: start,
count: count,
err: errStartSlotIsTooHigh,
})
return
response.err = errStartSlotIsTooHigh
return response
}
resp, err := f.collectPeerResponses(ctx, root, finalizedEpoch, start, 1, count, peers)
blocks, err := f.collectPeerResponses(ctx, root, finalizedEpoch, start, 1, count, peers)
if err != nil {
log.WithError(err).Debug("Block fetch request failed")
sendResponse(ctx, &fetchRequestResponse{
start: start,
count: count,
err: err,
})
return
response.err = err
return response
}
sendResponse(ctx, &fetchRequestResponse{
start: start,
count: count,
blocks: resp,
peers: peers,
})
response.blocks = blocks
response.peers = peers
return response
}
// collectPeerResponses orchestrates block fetching from the available peers.
@@ -268,12 +251,6 @@ func (f *blocksFetcher) collectPeerResponses(
// Spread load evenly among available peers.
perPeerCount := count / uint64(len(peers))
remainder := int(count % uint64(len(peers)))
log.WithFields(logrus.Fields{
"start": start,
"count": count,
"perPeerCount": perPeerCount,
"remainder": remainder,
}).Debug("Distribute request among available peers")
for i, pid := range peers {
start, step := start+uint64(i)*step, step*uint64(len(peers))
@@ -283,10 +260,10 @@ func (f *blocksFetcher) collectPeerResponses(
if i < remainder {
count++
}
// Asking for no blocks may cause the client to hang. This should never happen and
// the peer may return an error anyway, but we'll ask for at least one block.
// Asking for no blocks may cause the client to hang.
if count == 0 {
count++
p2pRequests.Done()
continue
}
go func(ctx context.Context, pid peer.ID) {
@@ -294,16 +271,24 @@ func (f *blocksFetcher) collectPeerResponses(
blocks, err := f.requestBeaconBlocksByRange(ctx, pid, root, start, step, count)
if err != nil {
errChan <- err
return
select {
case <-ctx.Done():
case errChan <- err:
return
}
}
select {
case <-ctx.Done():
case blocksChan <- blocks:
}
blocksChan <- blocks
}(ctx, pid)
}
var unionRespBlocks []*eth.SignedBeaconBlock
for {
select {
case <-ctx.Done():
return nil, ctx.Err()
case err := <-errChan:
return nil, err
case resp, ok := <-blocksChan:
@@ -360,7 +345,6 @@ func (f *blocksFetcher) requestBeaconBlocksByRange(
return f.requestBeaconBlocksByRange(ctx, newPID, root, start, step, count)
}
log.WithField("peer", pid).WithField("count", len(resp)).Debug("Received blocks")
return resp, nil
}
@@ -384,7 +368,7 @@ func (f *blocksFetcher) requestBlocks(
}).Debug("Requesting blocks")
stream, err := f.p2p.Send(ctx, req, pid)
if err != nil {
return nil, errors.Wrap(err, "failed to send request to peer")
return nil, err
}
defer stream.Close()
@@ -395,7 +379,7 @@ func (f *blocksFetcher) requestBlocks(
break
}
if err != nil {
return nil, errors.Wrap(err, "failed to read chunked block")
return nil, err
}
resp = append(resp, blk)
}


@@ -283,13 +283,15 @@ func TestBlocksFetcherRoundRobin(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
initializeRootCache(tt.expectedBlockSlots, t)
cache.initializeRootCache(tt.expectedBlockSlots, t)
beaconDB := dbtest.SetupDB(t)
p := p2pt.NewTestP2P(t)
connectPeers(t, p, tt.peers, p.Peers())
genesisRoot := rootCache[0]
cache.RLock()
genesisRoot := cache.rootCache[0]
cache.RUnlock()
err := beaconDB.SaveBlock(context.Background(), &eth.SignedBeaconBlock{
Block: &eth.BeaconBlock{
@@ -439,12 +441,10 @@ func TestBlocksFetcherHandleRequest(t *testing.T) {
},
}
hook := logTest.NewGlobal()
mc, p2p, beaconDB := initializeTestServices(t, chainConfig.expectedBlockSlots, chainConfig.peers)
defer dbtest.TeardownDB(t, beaconDB)
t.Run("context cancellation", func(t *testing.T) {
hook.Reset()
ctx, cancel := context.WithCancel(context.Background())
fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{
headFetcher: mc,
@@ -452,13 +452,13 @@ func TestBlocksFetcherHandleRequest(t *testing.T) {
})
cancel()
fetcher.handleRequest(ctx, 1, blockBatchSize)
testutil.AssertLogsContain(t, hook, "Can not send fetch request response")
testutil.AssertLogsContain(t, hook, "context canceled")
response := fetcher.handleRequest(ctx, 1, blockBatchSize)
if response.err == nil {
t.Errorf("expected error: %v", errFetcherCtxIsDone)
}
})
t.Run("receive blocks", func(t *testing.T) {
hook.Reset()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{
@@ -467,7 +467,13 @@ func TestBlocksFetcherHandleRequest(t *testing.T) {
})
requestCtx, _ := context.WithTimeout(context.Background(), 2*time.Second)
go fetcher.handleRequest(requestCtx, 1 /* start */, blockBatchSize /* count */)
go func() {
response := fetcher.handleRequest(requestCtx, 1 /* start */, blockBatchSize /* count */)
select {
case <-ctx.Done():
case fetcher.fetchResponses <- response:
}
}()
var blocks []*eth.SignedBeaconBlock
select {
@@ -483,7 +489,6 @@ func TestBlocksFetcherHandleRequest(t *testing.T) {
if len(blocks) != blockBatchSize {
t.Errorf("incorrect number of blocks returned, expected: %v, got: %v", blockBatchSize, len(blocks))
}
testutil.AssertLogsContain(t, hook, "Received blocks")
var receivedBlockSlots []uint64
for _, blk := range blocks {
@@ -647,12 +652,14 @@ func TestBlocksFetcherSelectFailOverPeer(t *testing.T) {
}
func initializeTestServices(t *testing.T, blocks []uint64, peers []*peerData) (*mock.ChainService, *p2pt.TestP2P, db.Database) {
initializeRootCache(blocks, t)
cache.initializeRootCache(blocks, t)
beaconDB := dbtest.SetupDB(t)
p := p2pt.NewTestP2P(t)
connectPeers(t, p, peers, p.Peers())
genesisRoot := rootCache[0]
cache.RLock()
genesisRoot := cache.rootCache[0]
cache.RUnlock()
err := beaconDB.SaveBlock(context.Background(), &eth.SignedBeaconBlock{
Block: &eth.BeaconBlock{


@@ -0,0 +1,417 @@
package initialsync
import (
"context"
"errors"
"sync"
"time"
eth "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/shared/mathutil"
"go.opencensus.io/trace"
)
const (
// queueMaxPendingRequests limits how many concurrent fetch requests the queue can initiate.
queueMaxPendingRequests = 8
// queueFetchRequestTimeout caps the maximum amount of time before a fetch request is cancelled.
queueFetchRequestTimeout = 60 * time.Second
// queueMaxCachedBlocks is a hard limit on how many queue items to cache before a forced dequeue.
queueMaxCachedBlocks = 8 * queueMaxPendingRequests * blockBatchSize
// queueStopCallTimeout is the time allowed for the queue to release resources when quitting.
queueStopCallTimeout = 1 * time.Second
)
var (
errQueueCtxIsDone = errors.New("queue's context is done, reinitialize")
errQueueTakesTooLongToStop = errors.New("queue takes too long to stop")
)
// blocksProvider exposes enough methods for the queue to fetch incoming blocks.
type blocksProvider interface {
requestResponses() <-chan *fetchRequestResponse
scheduleRequest(ctx context.Context, start, count uint64) error
start() error
stop()
}
// blocksQueueConfig is a config to set up the block queue service.
type blocksQueueConfig struct {
blocksFetcher blocksProvider
headFetcher blockchain.HeadFetcher
startSlot uint64
highestExpectedSlot uint64
p2p p2p.P2P
}
// blocksQueueState holds internal queue state (for easier management of state transitions).
type blocksQueueState struct {
scheduler *schedulerState
sender *senderState
cachedBlocks map[uint64]*cachedBlock
}
// blockState enumerates the possible queue block states.
type blockState uint8
const (
// pendingBlock is the default block status when a block is just added to the queue.
pendingBlock = iota
// validBlock represents a block that can be processed.
validBlock
// skippedBlock is a block for a slot that is not found on any available peers.
skippedBlock
// failedBlock represents a block that cannot be processed at the moment.
failedBlock
// blockStateLen is a sentinel holding the number of possible block states.
blockStateLen
)
// schedulerState is the state of the scheduling process.
type schedulerState struct {
sync.Mutex
currentSlot uint64
blockBatchSize uint64
requestedBlocks map[blockState]uint64
}
// senderState is the state of the block sending process.
type senderState struct {
sync.Mutex
}
// cachedBlock is a container for a signed beacon block.
type cachedBlock struct {
*eth.SignedBeaconBlock
}
// blocksQueue is a priority queue that serves as an intermediary between block fetchers (producers)
// and the block processing goroutine (consumer). The consumer can rely on the order of incoming blocks.
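//
// A minimal consumer sketch (illustrative only: headFetcher, p2pService, highestExpectedSlot and
// ctx are assumed to exist; the real consumer in this package may wire things differently):
//
//	queue := newBlocksQueue(ctx, &blocksQueueConfig{
//		headFetcher:         headFetcher,
//		p2p:                 p2pService,
//		startSlot:           headFetcher.HeadSlot() + 1,
//		highestExpectedSlot: highestExpectedSlot,
//	})
//	if err := queue.start(); err != nil {
//		return err
//	}
//	for blk := range queue.fetchedBlocks {
//		// Process blk here; the queue guarantees slot order.
//	}
//	_ = queue.stop()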
type blocksQueue struct {
ctx context.Context
cancel context.CancelFunc
highestExpectedSlot uint64
state *blocksQueueState
blocksFetcher blocksProvider
headFetcher blockchain.HeadFetcher
fetchedBlocks chan *eth.SignedBeaconBlock // output channel for ready blocks
pendingFetchRequests chan struct{} // pending requests semaphore
pendingFetchedBlocks chan struct{} // notifier, pings block sending handler
quit chan struct{} // termination notifier
}
// newBlocksQueue creates an initialized priority queue.
func newBlocksQueue(ctx context.Context, cfg *blocksQueueConfig) *blocksQueue {
ctx, cancel := context.WithCancel(ctx)
blocksFetcher := cfg.blocksFetcher
if blocksFetcher == nil {
blocksFetcher = newBlocksFetcher(ctx, &blocksFetcherConfig{
headFetcher: cfg.headFetcher,
p2p: cfg.p2p,
})
}
return &blocksQueue{
ctx: ctx,
cancel: cancel,
highestExpectedSlot: cfg.highestExpectedSlot,
state: &blocksQueueState{
scheduler: &schedulerState{
currentSlot: cfg.startSlot,
blockBatchSize: blockBatchSize,
requestedBlocks: make(map[blockState]uint64, blockStateLen),
},
sender: &senderState{},
cachedBlocks: make(map[uint64]*cachedBlock, queueMaxCachedBlocks),
},
blocksFetcher: blocksFetcher,
headFetcher: cfg.headFetcher,
fetchedBlocks: make(chan *eth.SignedBeaconBlock, blockBatchSize),
pendingFetchRequests: make(chan struct{}, queueMaxPendingRequests),
pendingFetchedBlocks: make(chan struct{}, queueMaxPendingRequests),
quit: make(chan struct{}),
}
}
// start boots up the queue processing.
func (q *blocksQueue) start() error {
select {
case <-q.ctx.Done():
return errQueueCtxIsDone
default:
go q.loop()
return nil
}
}
// stop terminates all queue operations.
func (q *blocksQueue) stop() error {
q.cancel()
select {
case <-q.quit:
return nil
case <-time.After(queueStopCallTimeout):
return errQueueTakesTooLongToStop
}
}
// loop is the main queue loop.
func (q *blocksQueue) loop() {
defer close(q.quit)
// Wait for all goroutines to wrap up (forced by cancelled context), and do a cleanup.
wg := &sync.WaitGroup{}
defer func() {
wg.Wait()
q.blocksFetcher.stop()
close(q.fetchedBlocks)
}()
if err := q.blocksFetcher.start(); err != nil {
log.WithError(err).Debug("Can not start blocks provider")
}
// Reads from the semaphore channel, allowing the next goroutine to grab a ticket and schedule the next request.
releaseTicket := func() {
select {
case <-q.ctx.Done():
case <-q.pendingFetchRequests:
}
}
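// Main event loop: acquire a ticket to schedule the next fetch request, turn fetcher responses
// into cached blocks, and push ready blocks downstream when notified.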
for {
if q.headFetcher.HeadSlot() >= q.highestExpectedSlot {
log.Debug("Highest expected slot reached")
q.cancel()
}
select {
case <-q.ctx.Done():
log.Debug("Context closed, exiting goroutine (blocks queue)")
return
case q.pendingFetchRequests <- struct{}{}:
wg.Add(1)
go func() {
defer wg.Done()
// Schedule request.
if err := q.scheduleFetchRequests(q.ctx); err != nil {
q.state.scheduler.incrementCounter(failedBlock, blockBatchSize)
releaseTicket()
}
}()
case response, ok := <-q.blocksFetcher.requestResponses():
if !ok {
log.Debug("Fetcher closed output channel")
q.cancel()
return
}
// Release semaphore ticket.
go releaseTicket()
// Process incoming response into blocks.
wg.Add(1)
go func() {
defer func() {
select {
case <-q.ctx.Done():
case q.pendingFetchedBlocks <- struct{}{}: // notify sender of data availability
}
wg.Done()
}()
skippedBlocks, err := q.parseFetchResponse(q.ctx, response)
if err != nil {
q.state.scheduler.incrementCounter(failedBlock, response.count)
return
}
q.state.scheduler.incrementCounter(skippedBlock, skippedBlocks)
}()
case <-q.pendingFetchedBlocks:
wg.Add(1)
go func() {
defer wg.Done()
if err := q.sendFetchedBlocks(q.ctx); err != nil {
log.WithError(err).Debug("Error sending received blocks")
}
}()
}
}
}
// scheduleFetchRequests enqueues block fetch requests to block fetcher.
func (q *blocksQueue) scheduleFetchRequests(ctx context.Context) error {
q.state.scheduler.Lock()
defer q.state.scheduler.Unlock()
if ctx.Err() != nil {
return ctx.Err()
}
s := q.state.scheduler
blocks := q.state.scheduler.requestedBlocks
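// Adjust the scheduler state (current slot pointer, block batch size and per-state counters)
// before computing the next fetch window.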
func() {
resetStateCounters := func() {
for i := 0; i < blockStateLen; i++ {
blocks[blockState(i)] = 0
}
s.currentSlot = q.headFetcher.HeadSlot()
}
// Update state's current slot pointer.
count := blocks[pendingBlock] + blocks[skippedBlock] + blocks[failedBlock] + blocks[validBlock]
if count == 0 {
s.currentSlot = q.headFetcher.HeadSlot()
return
}
// Too many failures (blocks that can't be processed at this time).
if blocks[failedBlock] >= s.blockBatchSize/2 {
s.blockBatchSize *= 2
resetStateCounters()
return
}
// Given enough valid blocks, we can reset the block batch size to its default.
if blocks[validBlock] >= blockBatchSize && s.blockBatchSize != blockBatchSize {
blocks[skippedBlock], blocks[validBlock] = blocks[skippedBlock]+blocks[validBlock], 0
s.blockBatchSize = blockBatchSize
}
// Too many items in scheduler, time to update current slot to point to current head's slot.
count = blocks[pendingBlock] + blocks[skippedBlock] + blocks[failedBlock] + blocks[validBlock]
if count >= queueMaxCachedBlocks {
s.blockBatchSize = blockBatchSize
resetStateCounters()
return
}
// All blocks processed, no pending blocks.
count = blocks[skippedBlock] + blocks[failedBlock] + blocks[validBlock]
if count > 0 && blocks[pendingBlock] == 0 {
s.blockBatchSize = blockBatchSize
resetStateCounters()
return
}
}()
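// The next window starts right after every slot already tracked by the scheduler
// (pending, skipped, failed or valid), and is capped at the highest expected slot.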
offset := blocks[pendingBlock] + blocks[skippedBlock] + blocks[failedBlock] + blocks[validBlock]
start := q.state.scheduler.currentSlot + offset + 1
count := mathutil.Min(q.state.scheduler.blockBatchSize, q.highestExpectedSlot-start+1)
if count <= 0 {
return errStartSlotIsTooHigh
}
ctx, _ = context.WithTimeout(ctx, queueFetchRequestTimeout)
if err := q.blocksFetcher.scheduleRequest(ctx, start, count); err != nil {
return err
}
q.state.scheduler.requestedBlocks[pendingBlock] += count
return nil
}
// parseFetchResponse processes incoming responses.
func (q *blocksQueue) parseFetchResponse(ctx context.Context, response *fetchRequestResponse) (uint64, error) {
q.state.sender.Lock()
defer q.state.sender.Unlock()
if ctx.Err() != nil {
return 0, ctx.Err()
}
if response.err != nil {
return 0, response.err
}
// Extract beacon blocks.
responseBlocks := make(map[uint64]*eth.SignedBeaconBlock, len(response.blocks))
for _, blk := range response.blocks {
responseBlocks[blk.Block.Slot] = blk
}
// Cache blocks in the [start, start + count) range, including placeholder entries for skipped slots.
var skippedBlocks uint64
end := response.start + mathutil.Max(response.count, uint64(len(response.blocks)))
for slot := response.start; slot < end; slot++ {
if block, ok := responseBlocks[slot]; ok {
q.state.cachedBlocks[slot] = &cachedBlock{
SignedBeaconBlock: block,
}
delete(responseBlocks, slot)
continue
}
q.state.cachedBlocks[slot] = &cachedBlock{}
skippedBlocks++
}
// If there are any items left in incoming response, cache them too.
for slot, block := range responseBlocks {
q.state.cachedBlocks[slot] = &cachedBlock{
SignedBeaconBlock: block,
}
}
return skippedBlocks, nil
}
// sendFetchedBlocks analyses available blocks and sends them downstream in correct slot order.
// Blocks are checked starting from the current head slot and up until the first missing slot.
func (q *blocksQueue) sendFetchedBlocks(ctx context.Context) error {
q.state.sender.Lock()
defer q.state.sender.Unlock()
ctx, span := trace.StartSpan(ctx, "initialsync.sendFetchedBlocks")
defer span.End()
startSlot := q.headFetcher.HeadSlot() + 1
nonSkippedSlot := uint64(0)
for slot := startSlot; slot <= q.highestExpectedSlot; slot++ {
if ctx.Err() != nil {
return ctx.Err()
}
blockData, ok := q.state.cachedBlocks[slot]
if !ok {
break
}
if blockData.SignedBeaconBlock != nil && blockData.Block != nil {
select {
case <-ctx.Done():
return ctx.Err()
case q.fetchedBlocks <- blockData.SignedBeaconBlock:
}
nonSkippedSlot = slot
}
}
// Remove processed blocks.
if nonSkippedSlot > 0 {
for slot := range q.state.cachedBlocks {
if slot <= nonSkippedSlot {
delete(q.state.cachedBlocks, slot)
}
}
}
return nil
}
// incrementCounter moves up to n blocks from the pending counter into the given counter.
func (s *schedulerState) incrementCounter(counter blockState, n uint64) {
s.Lock()
defer s.Unlock()
// Assert that counter is within acceptable boundaries.
if counter < 1 || counter >= blockStateLen {
return
}
n = mathutil.Min(s.requestedBlocks[pendingBlock], n)
s.requestedBlocks[counter] += n
s.requestedBlocks[pendingBlock] -= n
}


@@ -0,0 +1,980 @@
package initialsync
import (
"context"
"fmt"
"testing"
eth "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-ssz"
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
blockfeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/block"
dbtest "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/sliceutil"
)
type blocksProviderMock struct {
}
func (f *blocksProviderMock) start() error {
return nil
}
func (f *blocksProviderMock) stop() {
}
func (f *blocksProviderMock) scheduleRequest(ctx context.Context, start, count uint64) error {
return nil
}
func (f *blocksProviderMock) requestResponses() <-chan *fetchRequestResponse {
return nil
}
func TestBlocksQueueInitStartStop(t *testing.T) {
mc, p2p, beaconDB := initializeTestServices(t, []uint64{}, []*peerData{})
defer dbtest.TeardownDB(t, beaconDB)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{
headFetcher: mc,
p2p: p2p,
})
t.Run("stop without start", func(t *testing.T) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
queue := newBlocksQueue(ctx, &blocksQueueConfig{
headFetcher: mc,
highestExpectedSlot: blockBatchSize,
})
if err := queue.stop(); err == nil {
t.Errorf("expected error: %v", errQueueTakesTooLongToStop)
}
})
t.Run("use default fetcher", func(t *testing.T) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
queue := newBlocksQueue(ctx, &blocksQueueConfig{
headFetcher: mc,
highestExpectedSlot: blockBatchSize,
})
if err := queue.start(); err != nil {
t.Error(err)
}
})
t.Run("stop timeout", func(t *testing.T) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
queue := newBlocksQueue(ctx, &blocksQueueConfig{
headFetcher: mc,
highestExpectedSlot: blockBatchSize,
})
if err := queue.start(); err != nil {
t.Error(err)
}
if err := queue.stop(); err == nil {
t.Errorf("expected error: %v", errQueueTakesTooLongToStop)
}
})
t.Run("check for leaked goroutines", func(t *testing.T) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
queue := newBlocksQueue(ctx, &blocksQueueConfig{
blocksFetcher: fetcher,
headFetcher: mc,
highestExpectedSlot: blockBatchSize,
})
if err := queue.start(); err != nil {
t.Error(err)
}
// Blocks until all resources are reclaimed (or the timeout is reached).
if err := queue.stop(); err != nil {
t.Error(err)
}
select {
case <-queue.fetchedBlocks:
default:
t.Error("queue.fetchedBlocks channel is leaked")
}
select {
case <-fetcher.fetchResponses:
default:
t.Error("fetcher.fetchResponses channel is leaked")
}
})
t.Run("re-starting of stopped queue", func(t *testing.T) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
queue := newBlocksQueue(ctx, &blocksQueueConfig{
blocksFetcher: fetcher,
headFetcher: mc,
highestExpectedSlot: blockBatchSize,
})
if err := queue.start(); err != nil {
t.Error(err)
}
if err := queue.stop(); err != nil {
t.Error(err)
}
if err := queue.start(); err == nil {
t.Errorf("expected error not returned: %v", errQueueCtxIsDone)
}
})
t.Run("multiple stopping attempts", func(t *testing.T) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
queue := newBlocksQueue(ctx, &blocksQueueConfig{
blocksFetcher: fetcher,
headFetcher: mc,
highestExpectedSlot: blockBatchSize,
})
if err := queue.start(); err != nil {
t.Error(err)
}
if err := queue.stop(); err != nil {
t.Error(err)
}
if err := queue.stop(); err != nil {
t.Error(err)
}
})
t.Run("cancellation", func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
queue := newBlocksQueue(ctx, &blocksQueueConfig{
blocksFetcher: fetcher,
headFetcher: mc,
highestExpectedSlot: blockBatchSize,
})
if err := queue.start(); err != nil {
t.Error(err)
}
cancel()
if err := queue.stop(); err != nil {
t.Error(err)
}
})
}
func TestBlocksQueueUpdateSchedulerState(t *testing.T) {
chainConfig := struct {
expectedBlockSlots []uint64
peers []*peerData
}{
expectedBlockSlots: makeSequence(1, 241),
peers: []*peerData{},
}
mc, _, beaconDB := initializeTestServices(t, chainConfig.expectedBlockSlots, chainConfig.peers)
defer dbtest.TeardownDB(t, beaconDB)
setupQueue := func(ctx context.Context) *blocksQueue {
queue := newBlocksQueue(ctx, &blocksQueueConfig{
blocksFetcher: &blocksProviderMock{},
headFetcher: mc,
highestExpectedSlot: uint64(len(chainConfig.expectedBlockSlots)),
})
return queue
}
t.Run("cancelled context", func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
queue := setupQueue(ctx)
cancel()
if err := assertState(queue.state.scheduler, 0, 0, 0, 0); err != nil {
t.Error(err)
}
if err := queue.scheduleFetchRequests(ctx); err != ctx.Err() {
t.Errorf("expected error: %v", ctx.Err())
}
})
t.Run("empty state on pristine node", func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
queue := setupQueue(ctx)
state := queue.state.scheduler
if err := assertState(queue.state.scheduler, 0, 0, 0, 0); err != nil {
t.Error(err)
}
if err := queue.scheduleFetchRequests(ctx); err != nil {
t.Error(err)
}
if state.currentSlot != 0 {
t.Errorf("invalid current slot, want: %v, got: %v", 0, state.currentSlot)
}
})
t.Run("empty state on pre-synced node", func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
queue := setupQueue(ctx)
state := queue.state.scheduler
syncToSlot := uint64(7)
setBlocksFromCache(ctx, t, mc, syncToSlot)
if err := assertState(queue.state.scheduler, 0, 0, 0, 0); err != nil {
t.Error(err)
}
if err := queue.scheduleFetchRequests(ctx); err != nil {
t.Error(err)
}
if state.currentSlot != syncToSlot {
t.Errorf("invalid current slot, want: %v, got: %v", syncToSlot, state.currentSlot)
}
})
t.Run("reset block batch size to default", func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
queue := setupQueue(ctx)
state := queue.state.scheduler
if err := assertState(queue.state.scheduler, 0, 0, 0, 0); err != nil {
t.Error(err)
}
// On enough valid blocks, batch size should get back to default value.
state.blockBatchSize *= 2
state.requestedBlocks[validBlock] = blockBatchSize
state.requestedBlocks[pendingBlock] = 13
state.requestedBlocks[skippedBlock] = 17
state.requestedBlocks[failedBlock] = 19
if err := assertState(queue.state.scheduler, 13, blockBatchSize, 17, 19); err != nil {
t.Error(err)
}
if state.blockBatchSize != 2*blockBatchSize {
t.Errorf("unexpected batch size, want: %v, got: %v", 2*blockBatchSize, state.blockBatchSize)
}
if err := queue.scheduleFetchRequests(ctx); err != nil {
t.Error(err)
}
if err := assertState(queue.state.scheduler, 13+state.blockBatchSize, 0, 17+blockBatchSize, 19); err != nil {
t.Error(err)
}
if state.blockBatchSize != blockBatchSize {
t.Errorf("unexpected batch size, want: %v, got: %v", blockBatchSize, state.blockBatchSize)
}
})
t.Run("increase block batch size on too many failures", func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
queue := setupQueue(ctx)
state := queue.state.scheduler
if err := assertState(queue.state.scheduler, 0, 0, 0, 0); err != nil {
t.Error(err)
}
// On too many failures, batch size should get doubled and counters reset.
state.requestedBlocks[validBlock] = 19
state.requestedBlocks[pendingBlock] = 13
state.requestedBlocks[skippedBlock] = 17
state.requestedBlocks[failedBlock] = blockBatchSize
if err := assertState(queue.state.scheduler, 13, 19, 17, blockBatchSize); err != nil {
t.Error(err)
}
if state.blockBatchSize != blockBatchSize {
t.Errorf("unexpected batch size, want: %v, got: %v", blockBatchSize, state.blockBatchSize)
}
if err := queue.scheduleFetchRequests(ctx); err != nil {
t.Error(err)
}
if state.blockBatchSize != 2*blockBatchSize {
t.Errorf("unexpected batch size, want: %v, got: %v", 2*blockBatchSize, state.blockBatchSize)
}
if err := assertState(queue.state.scheduler, state.blockBatchSize, 0, 0, 0); err != nil {
t.Error(err)
}
})
t.Run("reset counters and block batch size on too many cached items", func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
queue := setupQueue(ctx)
state := queue.state.scheduler
if err := assertState(queue.state.scheduler, 0, 0, 0, 0); err != nil {
t.Error(err)
}
// On too many cached items, batch size and counters should reset.
state.requestedBlocks[validBlock] = queueMaxCachedBlocks
state.requestedBlocks[pendingBlock] = 13
state.requestedBlocks[skippedBlock] = 17
state.requestedBlocks[failedBlock] = 19
if err := assertState(queue.state.scheduler, 13, queueMaxCachedBlocks, 17, 19); err != nil {
t.Error(err)
}
if state.blockBatchSize != blockBatchSize {
t.Errorf("unexpected batch size, want: %v, got: %v", blockBatchSize, state.blockBatchSize)
}
// This call should trigger resetting.
if err := queue.scheduleFetchRequests(ctx); err != nil {
t.Error(err)
}
if state.blockBatchSize != blockBatchSize {
t.Errorf("unexpected batch size, want: %v, got: %v", blockBatchSize, state.blockBatchSize)
}
if err := assertState(queue.state.scheduler, state.blockBatchSize, 0, 0, 0); err != nil {
t.Error(err)
}
})
t.Run("no pending blocks left", func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
queue := setupQueue(ctx)
state := queue.state.scheduler
if err := assertState(queue.state.scheduler, 0, 0, 0, 0); err != nil {
t.Error(err)
}
// On too many cached items, batch size and counters should reset.
state.blockBatchSize = 2 * blockBatchSize
state.requestedBlocks[pendingBlock] = 0
state.requestedBlocks[validBlock] = 1
state.requestedBlocks[skippedBlock] = 1
state.requestedBlocks[failedBlock] = 1
if err := assertState(queue.state.scheduler, 0, 1, 1, 1); err != nil {
t.Error(err)
}
if state.blockBatchSize != 2*blockBatchSize {
t.Errorf("unexpected batch size, want: %v, got: %v", 2*blockBatchSize, state.blockBatchSize)
}
// This call should trigger resetting.
if err := queue.scheduleFetchRequests(ctx); err != nil {
t.Error(err)
}
if state.blockBatchSize != blockBatchSize {
t.Errorf("unexpected batch size, want: %v, got: %v", blockBatchSize, state.blockBatchSize)
}
if err := assertState(queue.state.scheduler, state.blockBatchSize, 0, 0, 0); err != nil {
t.Error(err)
}
})
}
func TestBlocksQueueScheduleFetchRequests(t *testing.T) {
chainConfig := struct {
expectedBlockSlots []uint64
peers []*peerData
}{
expectedBlockSlots: makeSequence(1, 241),
peers: []*peerData{
{
blocks: makeSequence(1, 320),
finalizedEpoch: 8,
headSlot: 320,
},
{
blocks: makeSequence(1, 320),
finalizedEpoch: 8,
headSlot: 320,
},
},
}
mc, _, beaconDB := initializeTestServices(t, chainConfig.expectedBlockSlots, chainConfig.peers)
defer dbtest.TeardownDB(t, beaconDB)
setupQueue := func(ctx context.Context) *blocksQueue {
queue := newBlocksQueue(ctx, &blocksQueueConfig{
blocksFetcher: &blocksProviderMock{},
headFetcher: mc,
highestExpectedSlot: uint64(len(chainConfig.expectedBlockSlots)),
})
return queue
}
t.Run("check start/count boundaries", func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
queue := setupQueue(ctx)
state := queue.state.scheduler
// Move sliding window normally.
if err := assertState(state, 0, 0, 0, 0); err != nil {
t.Error(err)
}
end := queue.highestExpectedSlot / state.blockBatchSize
for i := uint64(0); i < end; i++ {
if err := queue.scheduleFetchRequests(ctx); err != nil {
t.Error(err)
}
if err := assertState(state, (i+1)*blockBatchSize, 0, 0, 0); err != nil {
t.Error(err)
}
}
// Make sure that the last request is up to highest expected slot.
if err := queue.scheduleFetchRequests(ctx); err != nil {
t.Error(err)
}
if err := assertState(state, queue.highestExpectedSlot, 0, 0, 0); err != nil {
t.Error(err)
}
// Try schedule beyond the highest slot.
if err := queue.scheduleFetchRequests(ctx); err == nil {
t.Errorf("expected error: %v", errStartSlotIsTooHigh)
}
if err := assertState(state, queue.highestExpectedSlot, 0, 0, 0); err != nil {
t.Error(err)
}
})
t.Run("too many failures", func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
queue := setupQueue(ctx)
state := queue.state.scheduler
// Schedule enough items.
if err := assertState(state, 0, 0, 0, 0); err != nil {
t.Error(err)
}
end := queue.highestExpectedSlot / state.blockBatchSize
for i := uint64(0); i < end; i++ {
if err := queue.scheduleFetchRequests(ctx); err != nil {
t.Error(err)
}
if err := assertState(state, (i+1)*blockBatchSize, 0, 0, 0); err != nil {
t.Error(err)
}
}
// "Process" some items and reschedule.
if err := assertState(state, end*blockBatchSize, 0, 0, 0); err != nil {
t.Error(err)
}
state.incrementCounter(failedBlock, 25)
if err := assertState(state, end*blockBatchSize-25, 0, 0, 25); err != nil {
t.Error(err)
}
state.incrementCounter(failedBlock, 500) // a value that is too high shouldn't cause issues
if err := assertState(state, 0, 0, 0, end*blockBatchSize); err != nil {
t.Error(err)
}
// Due to failures, resetting is expected.
if err := queue.scheduleFetchRequests(ctx); err != nil {
t.Error(err)
}
if err := assertState(state, 2*blockBatchSize, 0, 0, 0); err != nil {
t.Error(err)
}
if state.blockBatchSize != 2*blockBatchSize {
t.Errorf("unexpeced block batch size, want: %v, got: %v", 2*blockBatchSize, state.blockBatchSize)
}
})
t.Run("too many skipped", func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
queue := setupQueue(ctx)
state := queue.state.scheduler
// Schedule enough items.
if err := assertState(state, 0, 0, 0, 0); err != nil {
t.Error(err)
}
end := queue.highestExpectedSlot / state.blockBatchSize
for i := uint64(0); i < end; i++ {
if err := queue.scheduleFetchRequests(ctx); err != nil {
t.Error(err)
}
if err := assertState(state, (i+1)*blockBatchSize, 0, 0, 0); err != nil {
t.Error(err)
}
}
// "Process" some items and reschedule.
if err := assertState(state, end*blockBatchSize, 0, 0, 0); err != nil {
t.Error(err)
}
state.incrementCounter(skippedBlock, 25)
if err := assertState(state, end*blockBatchSize-25, 0, 25, 0); err != nil {
t.Error(err)
}
state.incrementCounter(skippedBlock, 500) // a value that is too high shouldn't cause issues
if err := assertState(state, 0, 0, end*blockBatchSize, 0); err != nil {
t.Error(err)
}
// No pending items, resetting is expected (both counters and block batch size).
state.blockBatchSize = 2 * blockBatchSize
if err := queue.scheduleFetchRequests(ctx); err != nil {
t.Error(err)
}
if err := assertState(state, blockBatchSize, 0, 0, 0); err != nil {
t.Error(err)
}
if state.blockBatchSize != blockBatchSize {
t.Errorf("unexpeced block batch size, want: %v, got: %v", blockBatchSize, state.blockBatchSize)
}
})
t.Run("reset block batch size", func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
queue := setupQueue(ctx)
state := queue.state.scheduler
state.requestedBlocks[failedBlock] = blockBatchSize
// Increase block batch size.
if err := queue.scheduleFetchRequests(ctx); err != nil {
t.Error(err)
}
if err := assertState(state, 2*blockBatchSize, 0, 0, 0); err != nil {
t.Error(err)
}
if state.blockBatchSize != 2*blockBatchSize {
t.Errorf("unexpeced block batch size, want: %v, got: %v", 2*blockBatchSize, state.blockBatchSize)
}
// Reset block batch size.
state.requestedBlocks[validBlock] = blockBatchSize
state.requestedBlocks[pendingBlock] = 1
state.requestedBlocks[failedBlock] = 1
state.requestedBlocks[skippedBlock] = 1
if err := assertState(state, 1, blockBatchSize, 1, 1); err != nil {
t.Error(err)
}
if err := queue.scheduleFetchRequests(ctx); err != nil {
t.Error(err)
}
if err := assertState(state, blockBatchSize+1, 0, blockBatchSize+1, 1); err != nil {
t.Error(err)
}
if state.blockBatchSize != blockBatchSize {
t.Errorf("unexpeced block batch size, want: %v, got: %v", blockBatchSize, state.blockBatchSize)
}
})
t.Run("overcrowded scheduler", func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
queue := setupQueue(ctx)
state := queue.state.scheduler
state.requestedBlocks[pendingBlock] = queueMaxCachedBlocks
if err := queue.scheduleFetchRequests(ctx); err != nil {
t.Error(err)
}
if err := assertState(state, blockBatchSize, 0, 0, 0); err != nil {
t.Error(err)
}
if state.blockBatchSize != blockBatchSize {
t.Errorf("unexpeced block batch size, want: %v, got: %v", blockBatchSize, state.blockBatchSize)
}
})
}
func TestBlocksQueueParseFetchResponse(t *testing.T) {
chainConfig := struct {
expectedBlockSlots []uint64
peers []*peerData
}{
expectedBlockSlots: makeSequence(1, 2*blockBatchSize*queueMaxPendingRequests+31),
peers: []*peerData{
{
blocks: makeSequence(1, 320),
finalizedEpoch: 8,
headSlot: 320,
},
{
blocks: makeSequence(1, 320),
finalizedEpoch: 8,
headSlot: 320,
},
},
}
mc, _, beaconDB := initializeTestServices(t, chainConfig.expectedBlockSlots, chainConfig.peers)
defer dbtest.TeardownDB(t, beaconDB)
setupQueue := func(ctx context.Context) *blocksQueue {
queue := newBlocksQueue(ctx, &blocksQueueConfig{
blocksFetcher: &blocksProviderMock{},
headFetcher: mc,
highestExpectedSlot: uint64(len(chainConfig.expectedBlockSlots)),
})
return queue
}
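// A contiguous batch of blocks covering slots 1..blockBatchSize, shared by the subtests below.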
var blocks []*eth.SignedBeaconBlock
for i := 1; i <= blockBatchSize; i++ {
blocks = append(blocks, &eth.SignedBeaconBlock{
Block: &eth.BeaconBlock{
Slot: uint64(i),
},
})
}
t.Run("response error", func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
queue := setupQueue(ctx)
response := &fetchRequestResponse{
start: 1,
count: blockBatchSize,
blocks: blocks,
err: errStartSlotIsTooHigh,
}
if _, err := queue.parseFetchResponse(ctx, response); err != errStartSlotIsTooHigh {
t.Errorf("expected error not thrown, want: %v, got: %v", errStartSlotIsTooHigh, err)
}
})
t.Run("context error", func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
queue := setupQueue(ctx)
cancel()
response := &fetchRequestResponse{
start: 1,
count: blockBatchSize,
blocks: blocks,
err: ctx.Err(),
}
if _, err := queue.parseFetchResponse(ctx, response); err != ctx.Err() {
t.Errorf("expected error not thrown, want: %v, got: %v", ctx.Err(), err)
}
})
t.Run("no skipped blocks", func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
queue := setupQueue(ctx)
for i := uint64(1); i <= blockBatchSize; i++ {
if _, ok := queue.state.cachedBlocks[i]; ok {
t.Errorf("unexpeced block found: %v", i)
}
}
response := &fetchRequestResponse{
start: 1,
count: blockBatchSize,
blocks: blocks,
}
if _, err := queue.parseFetchResponse(ctx, response); err != nil {
t.Error(err)
}
// All blocks should be saved at this point.
for i := uint64(1); i <= blockBatchSize; i++ {
block, ok := queue.state.cachedBlocks[i]
if !ok {
t.Errorf("expeced block not found: %v", i)
}
if block.SignedBeaconBlock == nil {
t.Errorf("unexpectedly marked as skipped: %v", i)
}
}
})
t.Run("with skipped blocks", func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
queue := setupQueue(ctx)
for i := uint64(1); i <= blockBatchSize; i++ {
if _, ok := queue.state.cachedBlocks[i]; ok {
t.Errorf("unexpeced block found: %v", i)
}
}
response := &fetchRequestResponse{
start: 1,
count: blockBatchSize,
blocks: blocks,
}
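// Remove blocks for slots (skipStart, skipEnd] from the response to simulate skipped slots.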
skipStart, skipEnd := uint64(5), uint64(15)
response.blocks = append(response.blocks[:skipStart], response.blocks[skipEnd:]...)
if _, err := queue.parseFetchResponse(ctx, response); err != nil {
t.Error(err)
}
for i := skipStart + 1; i <= skipEnd; i++ {
block, ok := queue.state.cachedBlocks[i]
if !ok {
t.Errorf("expeced block not found: %v", i)
}
if block.SignedBeaconBlock != nil {
t.Errorf("unexpectedly marked as not skipped: %v", i)
}
}
for i := uint64(1); i <= skipStart; i++ {
block, ok := queue.state.cachedBlocks[i]
if !ok {
t.Errorf("expeced block not found: %v", i)
}
if block.SignedBeaconBlock == nil {
t.Errorf("unexpectedly marked as skipped: %v", i)
}
}
for i := skipEnd + 1; i <= blockBatchSize; i++ {
block, ok := queue.state.cachedBlocks[i]
if !ok {
t.Errorf("expeced block not found: %v", i)
}
if block.SignedBeaconBlock == nil {
t.Errorf("unexpectedly marked as skipped: %v", i)
}
}
})
}
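// TestBlocksQueueLoop runs the queue end-to-end against mocked peers: blocks read from
// queue.fetchedBlocks are processed into the beacon chain service, and the test verifies
// that all expected slots were synced, including scenarios with skipped slots and failing peers.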
func TestBlocksQueueLoop(t *testing.T) {
tests := []struct {
name string
highestExpectedSlot uint64
expectedBlockSlots []uint64
peers []*peerData
}{
{
name: "Single peer with all blocks",
highestExpectedSlot: 251,
expectedBlockSlots: makeSequence(1, 251),
peers: []*peerData{
{
blocks: makeSequence(1, 320),
finalizedEpoch: 8,
headSlot: 320,
},
},
},
{
name: "Multiple peers with all blocks",
highestExpectedSlot: 251,
expectedBlockSlots: makeSequence(1, 251),
peers: []*peerData{
{
blocks: makeSequence(1, 320),
finalizedEpoch: 8,
headSlot: 320,
},
{
blocks: makeSequence(1, 320),
finalizedEpoch: 8,
headSlot: 320,
},
{
blocks: makeSequence(1, 320),
finalizedEpoch: 8,
headSlot: 320,
},
{
blocks: makeSequence(1, 320),
finalizedEpoch: 8,
headSlot: 320,
},
{
blocks: makeSequence(1, 320),
finalizedEpoch: 8,
headSlot: 320,
},
},
},
{
name: "Multiple peers with skipped slots",
highestExpectedSlot: 576,
expectedBlockSlots: append(makeSequence(1, 64), makeSequence(500, 576)...), // up to 18th epoch
peers: []*peerData{
{
blocks: append(makeSequence(1, 64), makeSequence(500, 640)...),
finalizedEpoch: 18,
headSlot: 640,
},
{
blocks: append(makeSequence(1, 64), makeSequence(500, 640)...),
finalizedEpoch: 18,
headSlot: 640,
},
{
blocks: append(makeSequence(1, 64), makeSequence(500, 640)...),
finalizedEpoch: 18,
headSlot: 640,
},
},
},
{
name: "Multiple peers with failures",
highestExpectedSlot: 128,
expectedBlockSlots: makeSequence(1, 128),
peers: []*peerData{
{
blocks: makeSequence(1, 320),
finalizedEpoch: 8,
headSlot: 320,
failureSlots: makeSequence(32*3+1, 32*3+32),
},
{
blocks: makeSequence(1, 320),
finalizedEpoch: 8,
headSlot: 320,
failureSlots: makeSequence(1, 32*3),
},
{
blocks: makeSequence(1, 320),
finalizedEpoch: 8,
headSlot: 320,
},
{
blocks: makeSequence(1, 320),
finalizedEpoch: 8,
headSlot: 320,
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
mc, p2p, beaconDB := initializeTestServices(t, tt.expectedBlockSlots, tt.peers)
defer dbtest.TeardownDB(t, beaconDB)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{
headFetcher: mc,
p2p: p2p,
})
queue := newBlocksQueue(ctx, &blocksQueueConfig{
blocksFetcher: fetcher,
headFetcher: mc,
highestExpectedSlot: tt.highestExpectedSlot,
})
if err := queue.start(); err != nil {
t.Error(err)
}
processBlock := func(block *eth.SignedBeaconBlock) error {
if !beaconDB.HasBlock(ctx, bytesutil.ToBytes32(block.Block.ParentRoot)) {
return fmt.Errorf("beacon node doesn't have a block in db with root %#x", block.Block.ParentRoot)
}
if featureconfig.Get().InitSyncNoVerify {
if err := mc.ReceiveBlockNoVerify(ctx, block); err != nil {
return err
}
} else {
if err := mc.ReceiveBlockNoPubsubForkchoice(ctx, block); err != nil {
return err
}
}
return nil
}
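// Consume fetched blocks until the queue closes its output channel, updating
// scheduler counters to reflect the processing outcome of each block.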
var blocks []*eth.SignedBeaconBlock
for block := range queue.fetchedBlocks {
if err := processBlock(block); err != nil {
queue.state.scheduler.incrementCounter(failedBlock, 1)
continue
}
blocks = append(blocks, block)
queue.state.scheduler.incrementCounter(validBlock, 1)
}
if err := queue.stop(); err != nil {
t.Error(err)
}
if queue.headFetcher.HeadSlot() < uint64(len(tt.expectedBlockSlots)) {
t.Errorf("Not enough slots synced, want: %v, got: %v",
len(tt.expectedBlockSlots), queue.headFetcher.HeadSlot())
}
if len(blocks) != len(tt.expectedBlockSlots) {
t.Errorf("Processes wrong number of blocks. Wanted %d got %d", len(tt.expectedBlockSlots), len(blocks))
}
var receivedBlockSlots []uint64
for _, blk := range blocks {
receivedBlockSlots = append(receivedBlockSlots, blk.Block.Slot)
}
if missing := sliceutil.NotUint64(sliceutil.IntersectionUint64(tt.expectedBlockSlots, receivedBlockSlots), tt.expectedBlockSlots); len(missing) > 0 {
t.Errorf("Missing blocks at slots %v", missing)
}
})
}
}
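// setBlocksFromCache feeds the mock chain service with a contiguous chain of blocks up to
// highestSlot, starting from the cached root at index 0 and publishing each block on the
// block feed before receiving it without pubsub/forkchoice.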
func setBlocksFromCache(ctx context.Context, t *testing.T, mc *mock.ChainService, highestSlot uint64) {
cache.RLock()
parentRoot := cache.rootCache[0]
cache.RUnlock()
for slot := uint64(0); slot <= highestSlot; slot++ {
blk := &eth.SignedBeaconBlock{
Block: &eth.BeaconBlock{
Slot: slot,
ParentRoot: parentRoot[:],
},
}
mc.BlockNotifier().BlockFeed().Send(&feed.Event{
Type: blockfeed.ReceivedBlock,
Data: blockfeed.ReceivedBlockData{
SignedBlock: blk,
},
})
if err := mc.ReceiveBlockNoPubsubForkchoice(ctx, blk); err != nil {
t.Error(err)
}
currRoot, _ := ssz.HashTreeRoot(blk.Block)
parentRoot = currRoot
}
}
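// assertState compares the scheduler's per-status block counters against the expected
// pending/valid/skipped/failed values and returns a descriptive error on mismatch.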
func assertState(state *schedulerState, pending, valid, skipped, failed uint64) error {
s := state.requestedBlocks
res := s[pendingBlock] != pending || s[validBlock] != valid ||
s[skippedBlock] != skipped || s[failedBlock] != failed
if res {
b := struct{ pending, valid, skipped, failed uint64 }{pending, valid, skipped, failed}
return fmt.Errorf("invalid state, want: %+v, got: %+v", b, state.requestedBlocks)
}
return nil
}
